problem_id
stringlengths 18
22
| source
stringclasses 1
value | task_type
stringclasses 1
value | in_source_id
stringlengths 13
58
| prompt
stringlengths 1.1k
25.4k
| golden_diff
stringlengths 145
5.13k
| verification_info
stringlengths 582
39.1k
| num_tokens
int64 271
4.1k
| num_tokens_diff
int64 47
1.02k
|
---|---|---|---|---|---|---|---|---|
gh_patches_debug_5489
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-2005
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature request: RVA-based breakpoints
**Is your feature request related to a problem? Please describe.**
I am missing a command that would allow creating a breakpoint relative to some image - either main program or a library - for example at offset 0x1337 from libc base.
**Describe the solution you'd like**
A new command that would enable this, for example
````
bp libc.so+0x1337
````
**Describe alternatives you've considered**
The alternative is to manually find the image bases which is tedious.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/gdblib/functions.py`
Content:
```
1 """
2 Put all functions defined for gdb in here.
3
4 This file might be changed into a module in the future.
5 """
6
7 from __future__ import annotations
8
9 import functools
10 from typing import Any
11 from typing import Callable
12 from typing import List
13
14 import gdb
15
16 import pwndbg.gdblib.elf
17 import pwndbg.gdblib.proc
18
19 functions: List[_GdbFunction] = []
20
21
22 def GdbFunction(only_when_running: bool = False) -> Callable[..., Any]:
23 return functools.partial(_GdbFunction, only_when_running=only_when_running)
24
25
26 class _GdbFunction(gdb.Function):
27 def __init__(self, func: Callable[..., Any], only_when_running: bool) -> None:
28 self.name = func.__name__
29 self.func = func
30 self.only_when_running = only_when_running
31
32 functions.append(self)
33
34 super().__init__(self.name)
35
36 functools.update_wrapper(self, func)
37 self.__doc__ = func.__doc__
38
39 def invoke(self, *args: gdb.Value) -> Any:
40 if self.only_when_running and not pwndbg.gdblib.proc.alive:
41 # Returning empty string is a workaround that we can't stop e.g. `break *$rebase(offset)`
42 # Thx to that, gdb will print out 'evaluation of this expression requires the target program to be active'
43 return ""
44
45 return self.func(*args)
46
47 def __call__(self, *args: gdb.Value) -> Any:
48 return self.invoke(*args)
49
50
51 @GdbFunction(only_when_running=True)
52 def rebase(addr: gdb.Value | int) -> int:
53 """Return rebased address."""
54 base = pwndbg.gdblib.elf.exe().address
55 return base + int(addr)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/gdblib/functions.py b/pwndbg/gdblib/functions.py
--- a/pwndbg/gdblib/functions.py
+++ b/pwndbg/gdblib/functions.py
@@ -53,3 +53,16 @@
"""Return rebased address."""
base = pwndbg.gdblib.elf.exe().address
return base + int(addr)
+
+
+@GdbFunction(only_when_running=True)
+def base(name_pattern: gdb.Value | str) -> int:
+ if isinstance(name_pattern, gdb.Value):
+ name = name_pattern.string()
+ else:
+ name = name_pattern
+
+ for p in pwndbg.gdblib.vmmap.get():
+ if name in p.objfile:
+ return p.vaddr
+ raise ValueError(f"No mapping named {name}")
|
{"golden_diff": "diff --git a/pwndbg/gdblib/functions.py b/pwndbg/gdblib/functions.py\n--- a/pwndbg/gdblib/functions.py\n+++ b/pwndbg/gdblib/functions.py\n@@ -53,3 +53,16 @@\n \"\"\"Return rebased address.\"\"\"\n base = pwndbg.gdblib.elf.exe().address\n return base + int(addr)\n+\n+\n+@GdbFunction(only_when_running=True)\n+def base(name_pattern: gdb.Value | str) -> int:\n+ if isinstance(name_pattern, gdb.Value):\n+ name = name_pattern.string()\n+ else:\n+ name = name_pattern\n+\n+ for p in pwndbg.gdblib.vmmap.get():\n+ if name in p.objfile:\n+ return p.vaddr\n+ raise ValueError(f\"No mapping named {name}\")\n", "issue": "Feature request: RVA-based breakpoints\n**Is your feature request related to a problem? Please describe.**\r\nI am missing a command that would allow creating a breakpoint relative to some image - either main program or a library - for example at offset 0x1337 from libc base.\r\n\r\n**Describe the solution you'd like**\r\nA new command that would enable this, for example\r\n\r\n````\r\nbp libc.so+0x1337\r\n````\r\n\r\n\r\n**Describe alternatives you've considered**\r\nThe alternative is to manually find the image bases which is tedious.\r\n\n", "before_files": [{"content": "\"\"\"\nPut all functions defined for gdb in here.\n\nThis file might be changed into a module in the future.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport functools\nfrom typing import Any\nfrom typing import Callable\nfrom typing import List\n\nimport gdb\n\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.proc\n\nfunctions: List[_GdbFunction] = []\n\n\ndef GdbFunction(only_when_running: bool = False) -> Callable[..., Any]:\n return functools.partial(_GdbFunction, only_when_running=only_when_running)\n\n\nclass _GdbFunction(gdb.Function):\n def __init__(self, func: Callable[..., Any], only_when_running: bool) -> None:\n self.name = func.__name__\n self.func = func\n self.only_when_running = only_when_running\n\n functions.append(self)\n\n super().__init__(self.name)\n\n functools.update_wrapper(self, func)\n self.__doc__ = func.__doc__\n\n def invoke(self, *args: gdb.Value) -> Any:\n if self.only_when_running and not pwndbg.gdblib.proc.alive:\n # Returning empty string is a workaround that we can't stop e.g. `break *$rebase(offset)`\n # Thx to that, gdb will print out 'evaluation of this expression requires the target program to be active'\n return \"\"\n\n return self.func(*args)\n\n def __call__(self, *args: gdb.Value) -> Any:\n return self.invoke(*args)\n\n\n@GdbFunction(only_when_running=True)\ndef rebase(addr: gdb.Value | int) -> int:\n \"\"\"Return rebased address.\"\"\"\n base = pwndbg.gdblib.elf.exe().address\n return base + int(addr)\n", "path": "pwndbg/gdblib/functions.py"}], "after_files": [{"content": "\"\"\"\nPut all functions defined for gdb in here.\n\nThis file might be changed into a module in the future.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport functools\nfrom typing import Any\nfrom typing import Callable\nfrom typing import List\n\nimport gdb\n\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.proc\n\nfunctions: List[_GdbFunction] = []\n\n\ndef GdbFunction(only_when_running: bool = False) -> Callable[..., Any]:\n return functools.partial(_GdbFunction, only_when_running=only_when_running)\n\n\nclass _GdbFunction(gdb.Function):\n def __init__(self, func: Callable[..., Any], only_when_running: bool) -> None:\n self.name = func.__name__\n self.func = func\n self.only_when_running = only_when_running\n\n functions.append(self)\n\n super().__init__(self.name)\n\n functools.update_wrapper(self, func)\n self.__doc__ = func.__doc__\n\n def invoke(self, *args: gdb.Value) -> Any:\n if self.only_when_running and not pwndbg.gdblib.proc.alive:\n # Returning empty string is a workaround that we can't stop e.g. `break *$rebase(offset)`\n # Thx to that, gdb will print out 'evaluation of this expression requires the target program to be active'\n return \"\"\n\n return self.func(*args)\n\n def __call__(self, *args: gdb.Value) -> Any:\n return self.invoke(*args)\n\n\n@GdbFunction(only_when_running=True)\ndef rebase(addr: gdb.Value | int) -> int:\n \"\"\"Return rebased address.\"\"\"\n base = pwndbg.gdblib.elf.exe().address\n return base + int(addr)\n\n\n@GdbFunction(only_when_running=True)\ndef base(name_pattern: gdb.Value | str) -> int:\n if isinstance(name_pattern, gdb.Value):\n name = name_pattern.string()\n else:\n name = name_pattern\n\n for p in pwndbg.gdblib.vmmap.get():\n if name in p.objfile:\n return p.vaddr\n raise ValueError(f\"No mapping named {name}\")\n", "path": "pwndbg/gdblib/functions.py"}]}
| 866 | 185 |
gh_patches_debug_20737
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-10723
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Say all on Desktop raises an error
### Steps to reproduce:
1. Focus the desktop.
2. Invoke caret say all
### Actual behavior:
The following error is raised:
```
ERROR - scriptHandler.executeScript (15:54:57.769):
error executing script: <bound method GlobalCommands.script_sayAll of <globalCommands.GlobalCommands object at 0x05875770>> with gesture 'NVDA+a'
Traceback (most recent call last):
File "scriptHandler.pyc", line 190, in executeScript
File "globalCommands.pyc", line 1334, in script_sayAll
File "sayAllHandler.pyc", line 79, in readText
File "sayAllHandler.pyc", line 119, in nextLine
AttributeError: '_TextReader' object has no attribute 'reader'
ERROR - stderr (15:54:57.779):
Exception ignored in:
ERROR - stderr (15:54:57.790):
<function _TextReader.__del__ at 0x0462F390>
ERROR - stderr (15:54:57.803):
Traceback (most recent call last):
ERROR - stderr (15:54:57.815):
File "sayAllHandler.pyc", line 213, in __del__
ERROR - stderr (15:54:57.827):
File "sayAllHandler.pyc", line 206, in stop
ERROR - stderr (15:54:57.839):
AttributeError
ERROR - stderr (15:54:57.851):
:
ERROR - stderr (15:54:57.863):
'_TextReader' object has no attribute 'reader'
```
### Expected behavior:
NO error
### System configuration
#### NVDA installed/portable/running from source:
Installed
#### NVDA version:
threshold-18069
#### Windows version:
Windows 10 1903 build 18362.239
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/sayAllHandler.py`
Content:
```
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2006-2017 NV Access Limited
3 # This file may be used under the terms of the GNU General Public License, version 2 or later.
4 # For more details see: https://www.gnu.org/licenses/gpl-2.0.html
5
6 import weakref
7 import speech
8 import synthDriverHandler
9 from logHandler import log
10 import config
11 import controlTypes
12 import api
13 import textInfos
14 import queueHandler
15 import winKernel
16
17 CURSOR_CARET = 0
18 CURSOR_REVIEW = 1
19
20 lastSayAllMode = None
21 #: The active say all manager.
22 #: This is a weakref because the manager should be allowed to die once say all is complete.
23 _activeSayAll = lambda: None # Return None when called like a dead weakref.
24
25 def stop():
26 active = _activeSayAll()
27 if active:
28 active.stop()
29
30 def isRunning():
31 """Determine whether say all is currently running.
32 @return: C{True} if say all is currently running, C{False} if not.
33 @rtype: bool
34 """
35 return bool(_activeSayAll())
36
37 def readObjects(obj):
38 global _activeSayAll
39 reader = _ObjectsReader(obj)
40 _activeSayAll = weakref.ref(reader)
41 reader.next()
42
43 class _ObjectsReader(object):
44
45 def __init__(self, root):
46 self.walker = self.walk(root)
47 self.prevObj = None
48
49 def walk(self, obj):
50 yield obj
51 child=obj.simpleFirstChild
52 while child:
53 for descendant in self.walk(child):
54 yield descendant
55 child=child.simpleNext
56
57 def next(self):
58 if not self.walker:
59 # We were stopped.
60 return
61 if self.prevObj:
62 # We just started speaking this object, so move the navigator to it.
63 api.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)
64 winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
65 # Move onto the next object.
66 self.prevObj = obj = next(self.walker, None)
67 if not obj:
68 return
69 # Call this method again when we start speaking this object.
70 callbackCommand = speech.CallbackCommand(self.next, name="say-all:next")
71 speech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)
72
73 def stop(self):
74 self.walker = None
75
76 def readText(cursor):
77 global lastSayAllMode, _activeSayAll
78 lastSayAllMode=cursor
79 reader = _TextReader(cursor)
80 _activeSayAll = weakref.ref(reader)
81 reader.nextLine()
82
83 class _TextReader(object):
84 """Manages continuous reading of text.
85 This is intended for internal use only.
86
87 The high level flow of control is as follows:
88 1. The constructor sets things up.
89 2. L{nextLine} is called to read the first line.
90 3. When it speaks a line, L{nextLine} request that L{lineReached} be called
91 when we start speaking this line, providing the position and state at this point.
92 4. When we start speaking a line, L{lineReached} is called
93 and moves the cursor to that line.
94 5. L{lineReached} calls L{nextLine}.
95 6. If there are more lines, L{nextLine} works as per steps 3 and 4.
96 7. Otherwise, if the object doesn't support page turns, we're finished.
97 8. If the object does support page turns,
98 we request that L{turnPage} be called when speech is finished.
99 9. L{turnPage} tries to turn the page.
100 10. If there are no more pages, we're finished.
101 11. If there is another page, L{turnPage} calls L{nextLine}.
102 """
103 MAX_BUFFERED_LINES = 10
104
105 def __init__(self, cursor):
106 self.cursor = cursor
107 self.trigger = SayAllProfileTrigger()
108 self.trigger.enter()
109 # Start at the cursor.
110 if cursor == CURSOR_CARET:
111 try:
112 self.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
113 except (NotImplementedError, RuntimeError):
114 return
115 else:
116 self.reader = api.getReviewPosition()
117 self.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)
118 self.numBufferedLines = 0
119
120 def nextLine(self):
121 if not self.reader:
122 log.debug("no self.reader")
123 # We were stopped.
124 return
125 if not self.reader.obj:
126 log.debug("no self.reader.obj")
127 # The object died, so we should too.
128 self.finish()
129 return
130 bookmark = self.reader.bookmark
131 # Expand to the current line.
132 # We use move end rather than expand
133 # because the user might start in the middle of a line
134 # and we don't want to read from the start of the line in that case.
135 # For lines after the first, it's also more efficient because
136 # we're already at the start of the line, so there's no need to search backwards.
137 delta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint="end")
138 if delta <= 0:
139 # No more text.
140 if isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):
141 # Once the last line finishes reading, try turning the page.
142 cb = speech.CallbackCommand(self.turnPage, name="say-all:turnPage")
143 speech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])
144 else:
145 self.finish()
146 return
147
148 # Copy the speakTextInfoState so that speak callbackCommand
149 # and its associated callback are using a copy isolated to this specific line.
150 state = self.speakTextInfoState.copy()
151 # Call lineReached when we start speaking this line.
152 # lineReached will move the cursor and trigger reading of the next line.
153
154 def _onLineReached(obj=self.reader.obj, state=state):
155 self.lineReached(obj, bookmark, state)
156
157 cb = speech.CallbackCommand(
158 _onLineReached,
159 name="say-all:lineReached"
160 )
161
162 # Generate the speech sequence for the reader textInfo
163 # and insert the lineReached callback at the very beginning of the sequence.
164 # _linePrefix on speakTextInfo cannot be used here
165 # As it would be inserted in the sequence after all initial control starts which is too late.
166 speechGen = speech.getTextInfoSpeech(
167 self.reader,
168 unit=textInfos.UNIT_READINGCHUNK,
169 reason=controlTypes.REASON_SAYALL,
170 useCache=state
171 )
172 seq = list(speech._flattenNestedSequences(speechGen))
173 seq.insert(0, cb)
174 # Speak the speech sequence.
175 spoke = speech.speakWithoutPauses(seq)
176 # Update the textInfo state ready for when speaking the next line.
177 self.speakTextInfoState = state.copy()
178
179 # Collapse to the end of this line, ready to read the next.
180 try:
181 self.reader.collapse(end=True)
182 except RuntimeError:
183 # This occurs in Microsoft Word when the range covers the end of the document.
184 # without this exception to indicate that further collapsing is not possible, say all could enter an infinite loop.
185 self.finish()
186 return
187 if not spoke:
188 # This line didn't include a natural pause, so nothing was spoken.
189 self.numBufferedLines += 1
190 if self.numBufferedLines < self.MAX_BUFFERED_LINES:
191 # Move on to the next line.
192 # We queue this to allow the user a chance to stop say all.
193 queueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)
194 else:
195 # We don't want to buffer too much.
196 # Force speech. lineReached will resume things when speech catches up.
197 speech.speakWithoutPauses(None)
198 # The first buffered line has now started speaking.
199 self.numBufferedLines -= 1
200
201 def lineReached(self, obj, bookmark, state):
202 # We've just started speaking this line, so move the cursor there.
203 state.updateObj()
204 updater = obj.makeTextInfo(bookmark)
205 if self.cursor == CURSOR_CARET:
206 updater.updateCaret()
207 if self.cursor != CURSOR_CARET or config.conf["reviewCursor"]["followCaret"]:
208 api.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)
209 winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
210 if self.numBufferedLines == 0:
211 # This was the last line spoken, so move on.
212 self.nextLine()
213 else:
214 self.numBufferedLines -= 1
215
216 def turnPage(self):
217 try:
218 self.reader.obj.turnPage()
219 except RuntimeError:
220 log.debug("No more pages")
221 # No more pages.
222 self.stop()
223 return
224 self.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)
225 self.nextLine()
226
227 def finish(self):
228 # There is no more text.
229 # Call stop to clean up, but only after speech completely finishes.
230 # Otherwise, if a different synth is being used for say all,
231 # we might switch synths too early and truncate the final speech.
232 # We do this by putting a CallbackCommand at the start of a new utterance.
233 cb = speech.CallbackCommand(self.stop, name="say-all:stop")
234 speech.speakWithoutPauses([
235 speech.EndUtteranceCommand(),
236 cb,
237 speech.EndUtteranceCommand()
238 ])
239
240 def stop(self):
241 if not self.reader:
242 return
243 self.reader = None
244 self.trigger.exit()
245 self.trigger = None
246
247 def __del__(self):
248 self.stop()
249
250 class SayAllProfileTrigger(config.ProfileTrigger):
251 """A configuration profile trigger for when say all is in progress.
252 """
253 spec = "sayAll"
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/sayAllHandler.py b/source/sayAllHandler.py
--- a/source/sayAllHandler.py
+++ b/source/sayAllHandler.py
@@ -76,7 +76,11 @@
def readText(cursor):
global lastSayAllMode, _activeSayAll
lastSayAllMode=cursor
- reader = _TextReader(cursor)
+ try:
+ reader = _TextReader(cursor)
+ except NotImplementedError:
+ log.debugWarning("Unable to make reader", exc_info=True)
+ return
_activeSayAll = weakref.ref(reader)
reader.nextLine()
@@ -110,8 +114,8 @@
if cursor == CURSOR_CARET:
try:
self.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
- except (NotImplementedError, RuntimeError):
- return
+ except (NotImplementedError, RuntimeError) as e:
+ raise NotImplementedError("Unable to make TextInfo: " + str(e))
else:
self.reader = api.getReviewPosition()
self.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)
|
{"golden_diff": "diff --git a/source/sayAllHandler.py b/source/sayAllHandler.py\n--- a/source/sayAllHandler.py\n+++ b/source/sayAllHandler.py\n@@ -76,7 +76,11 @@\n def readText(cursor):\r\n \tglobal lastSayAllMode, _activeSayAll\r\n \tlastSayAllMode=cursor\r\n-\treader = _TextReader(cursor)\r\n+\ttry:\r\n+\t\treader = _TextReader(cursor)\r\n+\texcept NotImplementedError:\r\n+\t\tlog.debugWarning(\"Unable to make reader\", exc_info=True)\r\n+\t\treturn\r\n \t_activeSayAll = weakref.ref(reader)\r\n \treader.nextLine()\r\n \r\n@@ -110,8 +114,8 @@\n \t\tif cursor == CURSOR_CARET:\r\n \t\t\ttry:\r\n \t\t\t\tself.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)\r\n-\t\t\texcept (NotImplementedError, RuntimeError):\r\n-\t\t\t\treturn\r\n+\t\t\texcept (NotImplementedError, RuntimeError) as e:\r\n+\t\t\t\traise NotImplementedError(\"Unable to make TextInfo: \" + str(e))\r\n \t\telse:\r\n \t\t\tself.reader = api.getReviewPosition()\r\n \t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\n", "issue": "Say all on Desktop raises an error\n### Steps to reproduce:\r\n1. Focus the desktop.\r\n2. Invoke caret say all\r\n\r\n### Actual behavior:\r\nThe following error is raised:\r\n\r\n```\r\nERROR - scriptHandler.executeScript (15:54:57.769):\r\nerror executing script: <bound method GlobalCommands.script_sayAll of <globalCommands.GlobalCommands object at 0x05875770>> with gesture 'NVDA+a'\r\nTraceback (most recent call last):\r\n File \"scriptHandler.pyc\", line 190, in executeScript\r\n File \"globalCommands.pyc\", line 1334, in script_sayAll\r\n File \"sayAllHandler.pyc\", line 79, in readText\r\n File \"sayAllHandler.pyc\", line 119, in nextLine\r\nAttributeError: '_TextReader' object has no attribute 'reader'\r\nERROR - stderr (15:54:57.779):\r\nException ignored in:\r\nERROR - stderr (15:54:57.790):\r\n<function _TextReader.__del__ at 0x0462F390>\r\nERROR - stderr (15:54:57.803):\r\nTraceback (most recent call last):\r\nERROR - stderr (15:54:57.815):\r\n File \"sayAllHandler.pyc\", line 213, in __del__\r\nERROR - stderr (15:54:57.827):\r\n File \"sayAllHandler.pyc\", line 206, in stop\r\nERROR - stderr (15:54:57.839):\r\nAttributeError\r\nERROR - stderr (15:54:57.851):\r\n:\r\nERROR - stderr (15:54:57.863):\r\n'_TextReader' object has no attribute 'reader'\r\n```\r\n\r\n### Expected behavior:\r\nNO error\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled\r\n\r\n#### NVDA version:\r\nthreshold-18069\r\n\r\n#### Windows version:\r\nWindows 10 1903 build 18362.239\r\n\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2017 NV Access Limited\r\n# This file may be used under the terms of the GNU General Public License, version 2 or later.\r\n# For more details see: https://www.gnu.org/licenses/gpl-2.0.html\r\n\r\nimport weakref\r\nimport speech\r\nimport synthDriverHandler\r\nfrom logHandler import log\r\nimport config\r\nimport controlTypes\r\nimport api\r\nimport textInfos\r\nimport queueHandler\r\nimport winKernel\r\n\r\nCURSOR_CARET = 0\r\nCURSOR_REVIEW = 1\r\n\r\nlastSayAllMode = None\r\n#: The active say all manager.\r\n#: This is a weakref because the manager should be allowed to die once say all is complete.\r\n_activeSayAll = lambda: None # Return None when called like a dead weakref.\r\n\r\ndef stop():\r\n\tactive = _activeSayAll()\r\n\tif active:\r\n\t\tactive.stop()\r\n\r\ndef isRunning():\r\n\t\"\"\"Determine whether say all is currently running.\r\n\t@return: C{True} if say all is currently running, C{False} if not.\r\n\t@rtype: bool\r\n\t\"\"\"\r\n\treturn bool(_activeSayAll())\r\n\r\ndef readObjects(obj):\r\n\tglobal _activeSayAll\r\n\treader = _ObjectsReader(obj)\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.next()\r\n\r\nclass _ObjectsReader(object):\r\n\r\n\tdef __init__(self, root):\r\n\t\tself.walker = self.walk(root)\r\n\t\tself.prevObj = None\r\n\r\n\tdef walk(self, obj):\r\n\t\tyield obj\r\n\t\tchild=obj.simpleFirstChild\r\n\t\twhile child:\r\n\t\t\tfor descendant in self.walk(child):\r\n\t\t\t\tyield descendant\r\n\t\t\tchild=child.simpleNext\r\n\r\n\tdef next(self):\r\n\t\tif not self.walker:\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif self.prevObj:\r\n\t\t\t# We just started speaking this object, so move the navigator to it.\r\n\t\t\tapi.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)\r\n\t\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\t# Move onto the next object.\r\n\t\tself.prevObj = obj = next(self.walker, None)\r\n\t\tif not obj:\r\n\t\t\treturn\r\n\t\t# Call this method again when we start speaking this object.\r\n\t\tcallbackCommand = speech.CallbackCommand(self.next, name=\"say-all:next\")\r\n\t\tspeech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)\r\n\r\n\tdef stop(self):\r\n\t\tself.walker = None\r\n\r\ndef readText(cursor):\r\n\tglobal lastSayAllMode, _activeSayAll\r\n\tlastSayAllMode=cursor\r\n\treader = _TextReader(cursor)\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.nextLine()\r\n\r\nclass _TextReader(object):\r\n\t\"\"\"Manages continuous reading of text.\r\n\tThis is intended for internal use only.\r\n\r\n\tThe high level flow of control is as follows:\r\n\t1. The constructor sets things up.\r\n\t2. L{nextLine} is called to read the first line.\r\n\t3. When it speaks a line, L{nextLine} request that L{lineReached} be called\r\n\t\twhen we start speaking this line, providing the position and state at this point.\r\n\t4. When we start speaking a line, L{lineReached} is called\r\n\t\tand moves the cursor to that line.\r\n\t5. L{lineReached} calls L{nextLine}.\r\n\t6. If there are more lines, L{nextLine} works as per steps 3 and 4.\r\n\t7. Otherwise, if the object doesn't support page turns, we're finished.\r\n\t8. If the object does support page turns,\r\n\t\twe request that L{turnPage} be called when speech is finished.\r\n\t9. L{turnPage} tries to turn the page.\r\n\t10. If there are no more pages, we're finished.\r\n\t11. If there is another page, L{turnPage} calls L{nextLine}.\r\n\t\"\"\"\r\n\tMAX_BUFFERED_LINES = 10\r\n\r\n\tdef __init__(self, cursor):\r\n\t\tself.cursor = cursor\r\n\t\tself.trigger = SayAllProfileTrigger()\r\n\t\tself.trigger.enter()\r\n\t\t# Start at the cursor.\r\n\t\tif cursor == CURSOR_CARET:\r\n\t\t\ttry:\r\n\t\t\t\tself.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)\r\n\t\t\texcept (NotImplementedError, RuntimeError):\r\n\t\t\t\treturn\r\n\t\telse:\r\n\t\t\tself.reader = api.getReviewPosition()\r\n\t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\r\n\t\tself.numBufferedLines = 0\r\n\r\n\tdef nextLine(self):\r\n\t\tif not self.reader:\r\n\t\t\tlog.debug(\"no self.reader\")\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif not self.reader.obj:\r\n\t\t\tlog.debug(\"no self.reader.obj\")\r\n\t\t\t# The object died, so we should too.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tbookmark = self.reader.bookmark\r\n\t\t# Expand to the current line.\r\n\t\t# We use move end rather than expand\r\n\t\t# because the user might start in the middle of a line\r\n\t\t# and we don't want to read from the start of the line in that case.\r\n\t\t# For lines after the first, it's also more efficient because\r\n\t\t# we're already at the start of the line, so there's no need to search backwards.\r\n\t\tdelta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint=\"end\")\r\n\t\tif delta <= 0:\r\n\t\t\t# No more text.\r\n\t\t\tif isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):\r\n\t\t\t\t# Once the last line finishes reading, try turning the page.\r\n\t\t\t\tcb = speech.CallbackCommand(self.turnPage, name=\"say-all:turnPage\")\r\n\t\t\t\tspeech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])\r\n\t\t\telse:\r\n\t\t\t\tself.finish()\r\n\t\t\treturn\r\n\r\n\t\t# Copy the speakTextInfoState so that speak callbackCommand\r\n\t\t# and its associated callback are using a copy isolated to this specific line.\r\n\t\tstate = self.speakTextInfoState.copy()\r\n\t\t# Call lineReached when we start speaking this line.\r\n\t\t# lineReached will move the cursor and trigger reading of the next line.\r\n\r\n\t\tdef _onLineReached(obj=self.reader.obj, state=state):\r\n\t\t\tself.lineReached(obj, bookmark, state)\r\n\r\n\t\tcb = speech.CallbackCommand(\r\n\t\t\t_onLineReached,\r\n\t\t\tname=\"say-all:lineReached\"\r\n\t\t)\r\n\r\n\t\t# Generate the speech sequence for the reader textInfo\r\n\t\t# and insert the lineReached callback at the very beginning of the sequence.\r\n\t\t# _linePrefix on speakTextInfo cannot be used here\r\n\t\t# As it would be inserted in the sequence after all initial control starts which is too late.\r\n\t\tspeechGen = speech.getTextInfoSpeech(\r\n\t\t\tself.reader,\r\n\t\t\tunit=textInfos.UNIT_READINGCHUNK,\r\n\t\t\treason=controlTypes.REASON_SAYALL,\r\n\t\t\tuseCache=state\r\n\t\t)\r\n\t\tseq = list(speech._flattenNestedSequences(speechGen))\r\n\t\tseq.insert(0, cb)\r\n\t\t# Speak the speech sequence.\r\n\t\tspoke = speech.speakWithoutPauses(seq)\r\n\t\t# Update the textInfo state ready for when speaking the next line.\r\n\t\tself.speakTextInfoState = state.copy()\r\n\r\n\t\t# Collapse to the end of this line, ready to read the next.\r\n\t\ttry:\r\n\t\t\tself.reader.collapse(end=True)\r\n\t\texcept RuntimeError:\r\n\t\t\t# This occurs in Microsoft Word when the range covers the end of the document.\r\n\t\t\t# without this exception to indicate that further collapsing is not possible, say all could enter an infinite loop.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tif not spoke:\r\n\t\t\t# This line didn't include a natural pause, so nothing was spoken.\r\n\t\t\tself.numBufferedLines += 1\r\n\t\t\tif self.numBufferedLines < self.MAX_BUFFERED_LINES:\r\n\t\t\t\t# Move on to the next line.\r\n\t\t\t\t# We queue this to allow the user a chance to stop say all.\r\n\t\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)\r\n\t\t\telse:\r\n\t\t\t\t# We don't want to buffer too much.\r\n\t\t\t\t# Force speech. lineReached will resume things when speech catches up.\r\n\t\t\t\tspeech.speakWithoutPauses(None)\r\n\t\t\t\t# The first buffered line has now started speaking.\r\n\t\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef lineReached(self, obj, bookmark, state):\r\n\t\t# We've just started speaking this line, so move the cursor there.\r\n\t\tstate.updateObj()\r\n\t\tupdater = obj.makeTextInfo(bookmark)\r\n\t\tif self.cursor == CURSOR_CARET:\r\n\t\t\tupdater.updateCaret()\r\n\t\tif self.cursor != CURSOR_CARET or config.conf[\"reviewCursor\"][\"followCaret\"]:\r\n\t\t\tapi.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)\r\n\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\tif self.numBufferedLines == 0:\r\n\t\t\t# This was the last line spoken, so move on.\r\n\t\t\tself.nextLine()\r\n\t\telse:\r\n\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef turnPage(self):\r\n\t\ttry:\r\n\t\t\tself.reader.obj.turnPage()\r\n\t\texcept RuntimeError:\r\n\t\t\tlog.debug(\"No more pages\")\r\n\t\t\t# No more pages.\r\n\t\t\tself.stop()\r\n\t\t\treturn\r\n\t\tself.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)\r\n\t\tself.nextLine()\r\n\r\n\tdef finish(self):\r\n\t\t# There is no more text.\r\n\t\t# Call stop to clean up, but only after speech completely finishes.\r\n\t\t# Otherwise, if a different synth is being used for say all,\r\n\t\t# we might switch synths too early and truncate the final speech.\r\n\t\t# We do this by putting a CallbackCommand at the start of a new utterance.\r\n\t\tcb = speech.CallbackCommand(self.stop, name=\"say-all:stop\")\r\n\t\tspeech.speakWithoutPauses([\r\n\t\t\tspeech.EndUtteranceCommand(),\r\n\t\t\tcb,\r\n\t\t\tspeech.EndUtteranceCommand()\r\n\t\t])\r\n\r\n\tdef stop(self):\r\n\t\tif not self.reader:\r\n\t\t\treturn\r\n\t\tself.reader = None\r\n\t\tself.trigger.exit()\r\n\t\tself.trigger = None\r\n\r\n\tdef __del__(self):\r\n\t\tself.stop()\r\n\r\nclass SayAllProfileTrigger(config.ProfileTrigger):\r\n\t\"\"\"A configuration profile trigger for when say all is in progress.\r\n\t\"\"\"\r\n\tspec = \"sayAll\"\r\n", "path": "source/sayAllHandler.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2017 NV Access Limited\r\n# This file may be used under the terms of the GNU General Public License, version 2 or later.\r\n# For more details see: https://www.gnu.org/licenses/gpl-2.0.html\r\n\r\nimport weakref\r\nimport speech\r\nimport synthDriverHandler\r\nfrom logHandler import log\r\nimport config\r\nimport controlTypes\r\nimport api\r\nimport textInfos\r\nimport queueHandler\r\nimport winKernel\r\n\r\nCURSOR_CARET = 0\r\nCURSOR_REVIEW = 1\r\n\r\nlastSayAllMode = None\r\n#: The active say all manager.\r\n#: This is a weakref because the manager should be allowed to die once say all is complete.\r\n_activeSayAll = lambda: None # Return None when called like a dead weakref.\r\n\r\ndef stop():\r\n\tactive = _activeSayAll()\r\n\tif active:\r\n\t\tactive.stop()\r\n\r\ndef isRunning():\r\n\t\"\"\"Determine whether say all is currently running.\r\n\t@return: C{True} if say all is currently running, C{False} if not.\r\n\t@rtype: bool\r\n\t\"\"\"\r\n\treturn bool(_activeSayAll())\r\n\r\ndef readObjects(obj):\r\n\tglobal _activeSayAll\r\n\treader = _ObjectsReader(obj)\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.next()\r\n\r\nclass _ObjectsReader(object):\r\n\r\n\tdef __init__(self, root):\r\n\t\tself.walker = self.walk(root)\r\n\t\tself.prevObj = None\r\n\r\n\tdef walk(self, obj):\r\n\t\tyield obj\r\n\t\tchild=obj.simpleFirstChild\r\n\t\twhile child:\r\n\t\t\tfor descendant in self.walk(child):\r\n\t\t\t\tyield descendant\r\n\t\t\tchild=child.simpleNext\r\n\r\n\tdef next(self):\r\n\t\tif not self.walker:\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif self.prevObj:\r\n\t\t\t# We just started speaking this object, so move the navigator to it.\r\n\t\t\tapi.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)\r\n\t\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\t# Move onto the next object.\r\n\t\tself.prevObj = obj = next(self.walker, None)\r\n\t\tif not obj:\r\n\t\t\treturn\r\n\t\t# Call this method again when we start speaking this object.\r\n\t\tcallbackCommand = speech.CallbackCommand(self.next, name=\"say-all:next\")\r\n\t\tspeech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)\r\n\r\n\tdef stop(self):\r\n\t\tself.walker = None\r\n\r\ndef readText(cursor):\r\n\tglobal lastSayAllMode, _activeSayAll\r\n\tlastSayAllMode=cursor\r\n\ttry:\r\n\t\treader = _TextReader(cursor)\r\n\texcept NotImplementedError:\r\n\t\tlog.debugWarning(\"Unable to make reader\", exc_info=True)\r\n\t\treturn\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.nextLine()\r\n\r\nclass _TextReader(object):\r\n\t\"\"\"Manages continuous reading of text.\r\n\tThis is intended for internal use only.\r\n\r\n\tThe high level flow of control is as follows:\r\n\t1. The constructor sets things up.\r\n\t2. L{nextLine} is called to read the first line.\r\n\t3. When it speaks a line, L{nextLine} request that L{lineReached} be called\r\n\t\twhen we start speaking this line, providing the position and state at this point.\r\n\t4. When we start speaking a line, L{lineReached} is called\r\n\t\tand moves the cursor to that line.\r\n\t5. L{lineReached} calls L{nextLine}.\r\n\t6. If there are more lines, L{nextLine} works as per steps 3 and 4.\r\n\t7. Otherwise, if the object doesn't support page turns, we're finished.\r\n\t8. If the object does support page turns,\r\n\t\twe request that L{turnPage} be called when speech is finished.\r\n\t9. L{turnPage} tries to turn the page.\r\n\t10. If there are no more pages, we're finished.\r\n\t11. If there is another page, L{turnPage} calls L{nextLine}.\r\n\t\"\"\"\r\n\tMAX_BUFFERED_LINES = 10\r\n\r\n\tdef __init__(self, cursor):\r\n\t\tself.cursor = cursor\r\n\t\tself.trigger = SayAllProfileTrigger()\r\n\t\tself.trigger.enter()\r\n\t\t# Start at the cursor.\r\n\t\tif cursor == CURSOR_CARET:\r\n\t\t\ttry:\r\n\t\t\t\tself.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)\r\n\t\t\texcept (NotImplementedError, RuntimeError) as e:\r\n\t\t\t\traise NotImplementedError(\"Unable to make TextInfo: \" + str(e))\r\n\t\telse:\r\n\t\t\tself.reader = api.getReviewPosition()\r\n\t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\r\n\t\tself.numBufferedLines = 0\r\n\r\n\tdef nextLine(self):\r\n\t\tif not self.reader:\r\n\t\t\tlog.debug(\"no self.reader\")\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif not self.reader.obj:\r\n\t\t\tlog.debug(\"no self.reader.obj\")\r\n\t\t\t# The object died, so we should too.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tbookmark = self.reader.bookmark\r\n\t\t# Expand to the current line.\r\n\t\t# We use move end rather than expand\r\n\t\t# because the user might start in the middle of a line\r\n\t\t# and we don't want to read from the start of the line in that case.\r\n\t\t# For lines after the first, it's also more efficient because\r\n\t\t# we're already at the start of the line, so there's no need to search backwards.\r\n\t\tdelta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint=\"end\")\r\n\t\tif delta <= 0:\r\n\t\t\t# No more text.\r\n\t\t\tif isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):\r\n\t\t\t\t# Once the last line finishes reading, try turning the page.\r\n\t\t\t\tcb = speech.CallbackCommand(self.turnPage, name=\"say-all:turnPage\")\r\n\t\t\t\tspeech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])\r\n\t\t\telse:\r\n\t\t\t\tself.finish()\r\n\t\t\treturn\r\n\r\n\t\t# Copy the speakTextInfoState so that speak callbackCommand\r\n\t\t# and its associated callback are using a copy isolated to this specific line.\r\n\t\tstate = self.speakTextInfoState.copy()\r\n\t\t# Call lineReached when we start speaking this line.\r\n\t\t# lineReached will move the cursor and trigger reading of the next line.\r\n\r\n\t\tdef _onLineReached(obj=self.reader.obj, state=state):\r\n\t\t\tself.lineReached(obj, bookmark, state)\r\n\r\n\t\tcb = speech.CallbackCommand(\r\n\t\t\t_onLineReached,\r\n\t\t\tname=\"say-all:lineReached\"\r\n\t\t)\r\n\r\n\t\t# Generate the speech sequence for the reader textInfo\r\n\t\t# and insert the lineReached callback at the very beginning of the sequence.\r\n\t\t# _linePrefix on speakTextInfo cannot be used here\r\n\t\t# As it would be inserted in the sequence after all initial control starts which is too late.\r\n\t\tspeechGen = speech.getTextInfoSpeech(\r\n\t\t\tself.reader,\r\n\t\t\tunit=textInfos.UNIT_READINGCHUNK,\r\n\t\t\treason=controlTypes.REASON_SAYALL,\r\n\t\t\tuseCache=state\r\n\t\t)\r\n\t\tseq = list(speech._flattenNestedSequences(speechGen))\r\n\t\tseq.insert(0, cb)\r\n\t\t# Speak the speech sequence.\r\n\t\tspoke = speech.speakWithoutPauses(seq)\r\n\t\t# Update the textInfo state ready for when speaking the next line.\r\n\t\tself.speakTextInfoState = state.copy()\r\n\r\n\t\t# Collapse to the end of this line, ready to read the next.\r\n\t\ttry:\r\n\t\t\tself.reader.collapse(end=True)\r\n\t\texcept RuntimeError:\r\n\t\t\t# This occurs in Microsoft Word when the range covers the end of the document.\r\n\t\t\t# without this exception to indicate that further collapsing is not possible, say all could enter an infinite loop.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tif not spoke:\r\n\t\t\t# This line didn't include a natural pause, so nothing was spoken.\r\n\t\t\tself.numBufferedLines += 1\r\n\t\t\tif self.numBufferedLines < self.MAX_BUFFERED_LINES:\r\n\t\t\t\t# Move on to the next line.\r\n\t\t\t\t# We queue this to allow the user a chance to stop say all.\r\n\t\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)\r\n\t\t\telse:\r\n\t\t\t\t# We don't want to buffer too much.\r\n\t\t\t\t# Force speech. lineReached will resume things when speech catches up.\r\n\t\t\t\tspeech.speakWithoutPauses(None)\r\n\t\t\t\t# The first buffered line has now started speaking.\r\n\t\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef lineReached(self, obj, bookmark, state):\r\n\t\t# We've just started speaking this line, so move the cursor there.\r\n\t\tstate.updateObj()\r\n\t\tupdater = obj.makeTextInfo(bookmark)\r\n\t\tif self.cursor == CURSOR_CARET:\r\n\t\t\tupdater.updateCaret()\r\n\t\tif self.cursor != CURSOR_CARET or config.conf[\"reviewCursor\"][\"followCaret\"]:\r\n\t\t\tapi.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)\r\n\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\tif self.numBufferedLines == 0:\r\n\t\t\t# This was the last line spoken, so move on.\r\n\t\t\tself.nextLine()\r\n\t\telse:\r\n\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef turnPage(self):\r\n\t\ttry:\r\n\t\t\tself.reader.obj.turnPage()\r\n\t\texcept RuntimeError:\r\n\t\t\tlog.debug(\"No more pages\")\r\n\t\t\t# No more pages.\r\n\t\t\tself.stop()\r\n\t\t\treturn\r\n\t\tself.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)\r\n\t\tself.nextLine()\r\n\r\n\tdef finish(self):\r\n\t\t# There is no more text.\r\n\t\t# Call stop to clean up, but only after speech completely finishes.\r\n\t\t# Otherwise, if a different synth is being used for say all,\r\n\t\t# we might switch synths too early and truncate the final speech.\r\n\t\t# We do this by putting a CallbackCommand at the start of a new utterance.\r\n\t\tcb = speech.CallbackCommand(self.stop, name=\"say-all:stop\")\r\n\t\tspeech.speakWithoutPauses([\r\n\t\t\tspeech.EndUtteranceCommand(),\r\n\t\t\tcb,\r\n\t\t\tspeech.EndUtteranceCommand()\r\n\t\t])\r\n\r\n\tdef stop(self):\r\n\t\tif not self.reader:\r\n\t\t\treturn\r\n\t\tself.reader = None\r\n\t\tself.trigger.exit()\r\n\t\tself.trigger = None\r\n\r\n\tdef __del__(self):\r\n\t\tself.stop()\r\n\r\nclass SayAllProfileTrigger(config.ProfileTrigger):\r\n\t\"\"\"A configuration profile trigger for when say all is in progress.\r\n\t\"\"\"\r\n\tspec = \"sayAll\"\r\n", "path": "source/sayAllHandler.py"}]}
| 3,670 | 253 |
gh_patches_debug_34969
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-2525
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature Request: !Sub equivalent of W1001
*cfn-lint version: 0.58.2*
I believe that `!Sub` parameters should be checked to see if they depend on conditional resources the same way W1001 checks this for `!Ref` (e.g. `SubCondParam.Value` should produce a warning). I suspect this is possible since E1019 checks for non-existent parameters within a `!Sub`.
```
---
AWSTemplateFormatVersion: 2010-09-09
Conditions:
CreateContainerRepository: !Equals ["yes", "no"]
Resources:
Repository:
Type: AWS::ECR::Repository
Condition: CreateContainerRepository
SubCondParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Sub ${Repository}
RefCondParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Ref Repository
SubFakeParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Sub ${Fake}
RefFakeParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Ref Fake
```
* SubCondParam.Value shows no error or warning, I believe it should show a warning
* RefCondParam.Value shows W1001
* SubFakeParam.Value shows E1019
* RefFakeParam.Value shows E1012
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/RelationshipConditions.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.helpers import PSEUDOPARAMS
6 from cfnlint.rules import CloudFormationLintRule, RuleMatch
7
8
9 class RelationshipConditions(CloudFormationLintRule):
10 """Check if Ref/GetAtt values are available via conditions"""
11
12 id = "W1001"
13 shortdesc = "Ref/GetAtt to resource that is available when conditions are applied"
14 description = (
15 "Check the Conditions that affect a Ref/GetAtt to make sure "
16 "the resource being related to is available when there is a resource "
17 "condition."
18 )
19 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html"
20 tags = ["conditions", "resources", "relationships", "ref", "getatt"]
21
22 def match(self, cfn):
23 """Check CloudFormation Ref/GetAtt for Conditions"""
24
25 matches = []
26
27 # Start with Ref checks
28 ref_objs = cfn.search_deep_keys(searchText="Ref", includeGlobals=False)
29 for ref_obj in ref_objs:
30 value = ref_obj[-1]
31 if value not in PSEUDOPARAMS:
32 scenarios = cfn.is_resource_available(ref_obj, value)
33 for scenario in scenarios:
34 # pylint: disable=consider-using-f-string
35 scenario_text = " and ".join(
36 [
37 'when condition "%s" is %s' % (k, v)
38 for (k, v) in scenario.items()
39 ]
40 )
41 message = (
42 'Ref to resource "{0}" that may not be available {1} at {2}'
43 )
44 matches.append(
45 RuleMatch(
46 ref_obj[:-1],
47 message.format(
48 value, scenario_text, "/".join(map(str, ref_obj[:-1]))
49 ),
50 )
51 )
52
53 # The do GetAtt
54 getatt_objs = cfn.search_deep_keys(
55 searchText="Fn::GetAtt", includeGlobals=False
56 )
57 for getatt_obj in getatt_objs:
58 value_obj = getatt_obj[-1]
59 value = None
60 if isinstance(value_obj, list):
61 value = value_obj[0]
62 elif isinstance(value_obj, str):
63 value = value_obj.split(".")[0]
64 if value:
65 if value not in PSEUDOPARAMS:
66 scenarios = cfn.is_resource_available(getatt_obj, value)
67 for scenario in scenarios:
68 scenario_text = " and ".join(
69 [
70 f'when condition "{k}" is {v}'
71 for (k, v) in scenario.items()
72 ]
73 )
74 message = 'GetAtt to resource "{0}" that may not be available {1} at {2}'
75 matches.append(
76 RuleMatch(
77 getatt_obj[:-1],
78 message.format(
79 value,
80 scenario_text,
81 "/".join(map(str, getatt_obj[:-1])),
82 ),
83 )
84 )
85
86 return matches
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/functions/RelationshipConditions.py b/src/cfnlint/rules/functions/RelationshipConditions.py
--- a/src/cfnlint/rules/functions/RelationshipConditions.py
+++ b/src/cfnlint/rules/functions/RelationshipConditions.py
@@ -17,7 +17,7 @@
"condition."
)
source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html"
- tags = ["conditions", "resources", "relationships", "ref", "getatt"]
+ tags = ["conditions", "resources", "relationships", "ref", "getatt", "sub"]
def match(self, cfn):
"""Check CloudFormation Ref/GetAtt for Conditions"""
@@ -83,4 +83,46 @@
)
)
+ # The do Sub
+ sub_objs = cfn.search_deep_keys(searchText="Fn::Sub", includeGlobals=False)
+ for sub_obj in sub_objs:
+ sub_string = sub_obj[-1]
+ # Filter out bad types of sub_strings.
+ # Lists have two be two items and it can be just a string
+ if not isinstance(sub_string, (list, str)):
+ continue
+ if isinstance(sub_string, str):
+ sub_string = [sub_string, {}]
+ if len(sub_string) != 2:
+ continue
+ sub_params = sub_string[1]
+ string_params = cfn.get_sub_parameters(sub_string[0])
+
+ for string_param in string_params:
+ if string_param not in sub_params:
+ # deal with GetAtts by dropping everything after the .
+ string_param = string_param.split(".")[0]
+ if string_param in cfn.template.get("Resources", {}):
+ scenarios = cfn.is_resource_available(
+ sub_obj[:-1], string_param
+ )
+ for scenario in scenarios:
+ scenario_text = " and ".join(
+ [
+ f'when condition "{k}" is {v}'
+ for (k, v) in scenario.items()
+ ]
+ )
+ message = 'Fn::Sub to resource "{0}" that may not be available {1} at {2}'
+ matches.append(
+ RuleMatch(
+ sub_obj[:-1],
+ message.format(
+ string_param,
+ scenario_text,
+ "/".join(map(str, sub_obj[:-1])),
+ ),
+ )
+ )
+
return matches
|
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/RelationshipConditions.py b/src/cfnlint/rules/functions/RelationshipConditions.py\n--- a/src/cfnlint/rules/functions/RelationshipConditions.py\n+++ b/src/cfnlint/rules/functions/RelationshipConditions.py\n@@ -17,7 +17,7 @@\n \"condition.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html\"\n- tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\"]\n+ tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\", \"sub\"]\n \n def match(self, cfn):\n \"\"\"Check CloudFormation Ref/GetAtt for Conditions\"\"\"\n@@ -83,4 +83,46 @@\n )\n )\n \n+ # The do Sub\n+ sub_objs = cfn.search_deep_keys(searchText=\"Fn::Sub\", includeGlobals=False)\n+ for sub_obj in sub_objs:\n+ sub_string = sub_obj[-1]\n+ # Filter out bad types of sub_strings.\n+ # Lists have two be two items and it can be just a string\n+ if not isinstance(sub_string, (list, str)):\n+ continue\n+ if isinstance(sub_string, str):\n+ sub_string = [sub_string, {}]\n+ if len(sub_string) != 2:\n+ continue\n+ sub_params = sub_string[1]\n+ string_params = cfn.get_sub_parameters(sub_string[0])\n+\n+ for string_param in string_params:\n+ if string_param not in sub_params:\n+ # deal with GetAtts by dropping everything after the .\n+ string_param = string_param.split(\".\")[0]\n+ if string_param in cfn.template.get(\"Resources\", {}):\n+ scenarios = cfn.is_resource_available(\n+ sub_obj[:-1], string_param\n+ )\n+ for scenario in scenarios:\n+ scenario_text = \" and \".join(\n+ [\n+ f'when condition \"{k}\" is {v}'\n+ for (k, v) in scenario.items()\n+ ]\n+ )\n+ message = 'Fn::Sub to resource \"{0}\" that may not be available {1} at {2}'\n+ matches.append(\n+ RuleMatch(\n+ sub_obj[:-1],\n+ message.format(\n+ string_param,\n+ scenario_text,\n+ \"/\".join(map(str, sub_obj[:-1])),\n+ ),\n+ )\n+ )\n+\n return matches\n", "issue": "Feature Request: !Sub equivalent of W1001\n*cfn-lint version: 0.58.2*\r\n\r\nI believe that `!Sub` parameters should be checked to see if they depend on conditional resources the same way W1001 checks this for `!Ref` (e.g. `SubCondParam.Value` should produce a warning). I suspect this is possible since E1019 checks for non-existent parameters within a `!Sub`.\r\n\r\n```\r\n---\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nConditions:\r\n CreateContainerRepository: !Equals [\"yes\", \"no\"]\r\n\r\nResources:\r\n Repository:\r\n Type: AWS::ECR::Repository\r\n Condition: CreateContainerRepository\r\n\r\n SubCondParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Sub ${Repository}\r\n\r\n RefCondParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Ref Repository\r\n\r\n SubFakeParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Sub ${Fake}\r\n\r\n RefFakeParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Ref Fake\r\n```\r\n\r\n* SubCondParam.Value shows no error or warning, I believe it should show a warning\r\n* RefCondParam.Value shows W1001\r\n* SubFakeParam.Value shows E1019\r\n* RefFakeParam.Value shows E1012\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.helpers import PSEUDOPARAMS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RelationshipConditions(CloudFormationLintRule):\n \"\"\"Check if Ref/GetAtt values are available via conditions\"\"\"\n\n id = \"W1001\"\n shortdesc = \"Ref/GetAtt to resource that is available when conditions are applied\"\n description = (\n \"Check the Conditions that affect a Ref/GetAtt to make sure \"\n \"the resource being related to is available when there is a resource \"\n \"condition.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html\"\n tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\"]\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Ref/GetAtt for Conditions\"\"\"\n\n matches = []\n\n # Start with Ref checks\n ref_objs = cfn.search_deep_keys(searchText=\"Ref\", includeGlobals=False)\n for ref_obj in ref_objs:\n value = ref_obj[-1]\n if value not in PSEUDOPARAMS:\n scenarios = cfn.is_resource_available(ref_obj, value)\n for scenario in scenarios:\n # pylint: disable=consider-using-f-string\n scenario_text = \" and \".join(\n [\n 'when condition \"%s\" is %s' % (k, v)\n for (k, v) in scenario.items()\n ]\n )\n message = (\n 'Ref to resource \"{0}\" that may not be available {1} at {2}'\n )\n matches.append(\n RuleMatch(\n ref_obj[:-1],\n message.format(\n value, scenario_text, \"/\".join(map(str, ref_obj[:-1]))\n ),\n )\n )\n\n # The do GetAtt\n getatt_objs = cfn.search_deep_keys(\n searchText=\"Fn::GetAtt\", includeGlobals=False\n )\n for getatt_obj in getatt_objs:\n value_obj = getatt_obj[-1]\n value = None\n if isinstance(value_obj, list):\n value = value_obj[0]\n elif isinstance(value_obj, str):\n value = value_obj.split(\".\")[0]\n if value:\n if value not in PSEUDOPARAMS:\n scenarios = cfn.is_resource_available(getatt_obj, value)\n for scenario in scenarios:\n scenario_text = \" and \".join(\n [\n f'when condition \"{k}\" is {v}'\n for (k, v) in scenario.items()\n ]\n )\n message = 'GetAtt to resource \"{0}\" that may not be available {1} at {2}'\n matches.append(\n RuleMatch(\n getatt_obj[:-1],\n message.format(\n value,\n scenario_text,\n \"/\".join(map(str, getatt_obj[:-1])),\n ),\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/functions/RelationshipConditions.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.helpers import PSEUDOPARAMS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RelationshipConditions(CloudFormationLintRule):\n \"\"\"Check if Ref/GetAtt values are available via conditions\"\"\"\n\n id = \"W1001\"\n shortdesc = \"Ref/GetAtt to resource that is available when conditions are applied\"\n description = (\n \"Check the Conditions that affect a Ref/GetAtt to make sure \"\n \"the resource being related to is available when there is a resource \"\n \"condition.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html\"\n tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\", \"sub\"]\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Ref/GetAtt for Conditions\"\"\"\n\n matches = []\n\n # Start with Ref checks\n ref_objs = cfn.search_deep_keys(searchText=\"Ref\", includeGlobals=False)\n for ref_obj in ref_objs:\n value = ref_obj[-1]\n if value not in PSEUDOPARAMS:\n scenarios = cfn.is_resource_available(ref_obj, value)\n for scenario in scenarios:\n # pylint: disable=consider-using-f-string\n scenario_text = \" and \".join(\n [\n 'when condition \"%s\" is %s' % (k, v)\n for (k, v) in scenario.items()\n ]\n )\n message = (\n 'Ref to resource \"{0}\" that may not be available {1} at {2}'\n )\n matches.append(\n RuleMatch(\n ref_obj[:-1],\n message.format(\n value, scenario_text, \"/\".join(map(str, ref_obj[:-1]))\n ),\n )\n )\n\n # The do GetAtt\n getatt_objs = cfn.search_deep_keys(\n searchText=\"Fn::GetAtt\", includeGlobals=False\n )\n for getatt_obj in getatt_objs:\n value_obj = getatt_obj[-1]\n value = None\n if isinstance(value_obj, list):\n value = value_obj[0]\n elif isinstance(value_obj, str):\n value = value_obj.split(\".\")[0]\n if value:\n if value not in PSEUDOPARAMS:\n scenarios = cfn.is_resource_available(getatt_obj, value)\n for scenario in scenarios:\n scenario_text = \" and \".join(\n [\n f'when condition \"{k}\" is {v}'\n for (k, v) in scenario.items()\n ]\n )\n message = 'GetAtt to resource \"{0}\" that may not be available {1} at {2}'\n matches.append(\n RuleMatch(\n getatt_obj[:-1],\n message.format(\n value,\n scenario_text,\n \"/\".join(map(str, getatt_obj[:-1])),\n ),\n )\n )\n\n # The do Sub\n sub_objs = cfn.search_deep_keys(searchText=\"Fn::Sub\", includeGlobals=False)\n for sub_obj in sub_objs:\n sub_string = sub_obj[-1]\n # Filter out bad types of sub_strings.\n # Lists have two be two items and it can be just a string\n if not isinstance(sub_string, (list, str)):\n continue\n if isinstance(sub_string, str):\n sub_string = [sub_string, {}]\n if len(sub_string) != 2:\n continue\n sub_params = sub_string[1]\n string_params = cfn.get_sub_parameters(sub_string[0])\n\n for string_param in string_params:\n if string_param not in sub_params:\n # deal with GetAtts by dropping everything after the .\n string_param = string_param.split(\".\")[0]\n if string_param in cfn.template.get(\"Resources\", {}):\n scenarios = cfn.is_resource_available(\n sub_obj[:-1], string_param\n )\n for scenario in scenarios:\n scenario_text = \" and \".join(\n [\n f'when condition \"{k}\" is {v}'\n for (k, v) in scenario.items()\n ]\n )\n message = 'Fn::Sub to resource \"{0}\" that may not be available {1} at {2}'\n matches.append(\n RuleMatch(\n sub_obj[:-1],\n message.format(\n string_param,\n scenario_text,\n \"/\".join(map(str, sub_obj[:-1])),\n ),\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/functions/RelationshipConditions.py"}]}
| 1,417 | 548 |
gh_patches_debug_16419
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-16001
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`./pants run` crashes when `remote_cache_eager_fetch = false`
**Describe the bug**
After setting `remote_cache_eager_fetch = false`, a CI step that `./pants run`s a `pex_binary` has started (intermittently) failing with errors like:
```
Exception: Was not present in either the local or remote store: Digest { hash: Fingerprint<46683dec8706b7ac7c4f6011f68b4b8c10ad423ae8ba57745a6f5e01ba5b2f7b>, size_bytes: 11827 }
```
**Pants version**
`PANTS_SHA=5d8a328d72209863986c8959b20305505bc068ba`
**OS**
Linux
**Additional info**
Some BuildSense links where we've seen the failure:
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_31_09_277_3793f53b54914135885f2ac951faf210/
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/base/exceptions.py`
Content:
```
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6
7 class TargetDefinitionException(Exception):
8 """Indicates an invalid target definition.
9
10 :API: public
11 """
12
13 def __init__(self, target, msg):
14 """
15 :param target: the target in question
16 :param string msg: a description of the target misconfiguration
17 """
18 super().__init__(f"Invalid target {target}: {msg}")
19
20
21 class BuildConfigurationError(Exception):
22 """Indicates an error in a pants installation's configuration."""
23
24
25 class BackendConfigurationError(BuildConfigurationError):
26 """Indicates a plugin backend with a missing or malformed register module."""
27
28
29 class MappingError(Exception):
30 """Indicates an error mapping addressable objects."""
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/pants/base/exceptions.py b/src/python/pants/base/exceptions.py
--- a/src/python/pants/base/exceptions.py
+++ b/src/python/pants/base/exceptions.py
@@ -3,6 +3,11 @@
from __future__ import annotations
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from pants.engine.internals.native_engine import PyFailure
+
class TargetDefinitionException(Exception):
"""Indicates an invalid target definition.
@@ -28,3 +33,15 @@
class MappingError(Exception):
"""Indicates an error mapping addressable objects."""
+
+
+class NativeEngineFailure(Exception):
+ """A wrapper around a `Failure` instance.
+
+ TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with
+ additional fields. See https://github.com/PyO3/pyo3/issues/295
+ """
+
+ def __init__(self, msg: str, failure: PyFailure) -> None:
+ super().__init__(msg)
+ self.failure = failure
|
{"golden_diff": "diff --git a/src/python/pants/base/exceptions.py b/src/python/pants/base/exceptions.py\n--- a/src/python/pants/base/exceptions.py\n+++ b/src/python/pants/base/exceptions.py\n@@ -3,6 +3,11 @@\n \n from __future__ import annotations\n \n+from typing import TYPE_CHECKING\n+\n+if TYPE_CHECKING:\n+ from pants.engine.internals.native_engine import PyFailure\n+\n \n class TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n@@ -28,3 +33,15 @@\n \n class MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n+\n+\n+class NativeEngineFailure(Exception):\n+ \"\"\"A wrapper around a `Failure` instance.\n+\n+ TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with\n+ additional fields. See https://github.com/PyO3/pyo3/issues/295\n+ \"\"\"\n+\n+ def __init__(self, msg: str, failure: PyFailure) -> None:\n+ super().__init__(msg)\n+ self.failure = failure\n", "issue": "`./pants run` crashes when `remote_cache_eager_fetch = false`\n**Describe the bug**\r\n\r\nAfter setting `remote_cache_eager_fetch = false`, a CI step that `./pants run`s a `pex_binary` has started (intermittently) failing with errors like:\r\n```\r\n Exception: Was not present in either the local or remote store: Digest { hash: Fingerprint<46683dec8706b7ac7c4f6011f68b4b8c10ad423ae8ba57745a6f5e01ba5b2f7b>, size_bytes: 11827 }\r\n```\r\n\r\n**Pants version**\r\n\r\n`PANTS_SHA=5d8a328d72209863986c8959b20305505bc068ba`\r\n\r\n**OS**\r\n\r\nLinux\r\n\r\n**Additional info**\r\n\r\nSome BuildSense links where we've seen the failure:\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_31_09_277_3793f53b54914135885f2ac951faf210/\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/\r\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\n\nclass TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n\n :API: public\n \"\"\"\n\n def __init__(self, target, msg):\n \"\"\"\n :param target: the target in question\n :param string msg: a description of the target misconfiguration\n \"\"\"\n super().__init__(f\"Invalid target {target}: {msg}\")\n\n\nclass BuildConfigurationError(Exception):\n \"\"\"Indicates an error in a pants installation's configuration.\"\"\"\n\n\nclass BackendConfigurationError(BuildConfigurationError):\n \"\"\"Indicates a plugin backend with a missing or malformed register module.\"\"\"\n\n\nclass MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n", "path": "src/python/pants/base/exceptions.py"}], "after_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from pants.engine.internals.native_engine import PyFailure\n\n\nclass TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n\n :API: public\n \"\"\"\n\n def __init__(self, target, msg):\n \"\"\"\n :param target: the target in question\n :param string msg: a description of the target misconfiguration\n \"\"\"\n super().__init__(f\"Invalid target {target}: {msg}\")\n\n\nclass BuildConfigurationError(Exception):\n \"\"\"Indicates an error in a pants installation's configuration.\"\"\"\n\n\nclass BackendConfigurationError(BuildConfigurationError):\n \"\"\"Indicates a plugin backend with a missing or malformed register module.\"\"\"\n\n\nclass MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n\n\nclass NativeEngineFailure(Exception):\n \"\"\"A wrapper around a `Failure` instance.\n\n TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with\n additional fields. See https://github.com/PyO3/pyo3/issues/295\n \"\"\"\n\n def __init__(self, msg: str, failure: PyFailure) -> None:\n super().__init__(msg)\n self.failure = failure\n", "path": "src/python/pants/base/exceptions.py"}]}
| 942 | 243 |
gh_patches_debug_40698
|
rasdani/github-patches
|
git_diff
|
pyjanitor-devs__pyjanitor-761
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Latest requirements.txt seems to be over-specifying dependencies
This is the latest version requirements.txt file:
```
black>=19.3b0
darglint
hypothesis>=4.4.0
interrogate
ipykernel
isort>=4.3.18
jupyter_client
lxml
natsort
nbsphinx>=0.4.2
pandas-flavor
pandas-vet
pre-commit
pyspark
pytest-azurepipelines
pytest-cov
pytest>=3.4.2
scikit-learn
seaborn
setuptools>=38.5.2
sphinxcontrib-fulltoc==1.2.0
unyt
xarray
```
Those are inject in the setup.py as mandatory dependencies but a lot in there looks like docs, dev, or optional. Is pyjanitor really require all that at run time?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Setup script."""
2
3 import re
4 from pathlib import Path
5
6 from setuptools import find_packages, setup
7
8
9 def requirements():
10 """Reader for requirements.txt"""
11 with open("requirements.txt", "r+") as f:
12 return f.read()
13
14
15 def generate_long_description() -> str:
16 """
17 Extra chunks from README for PyPI description.
18
19 Target chunks must be contained within `.. pypi-doc` pair comments,
20 so there must be an even number of comments in README.
21
22 :returns: Extracted description from README.
23 :raises Exception: if odd number of ``.. pypi-doc`` comments
24 in README.
25 """
26 # Read the contents of README file
27 this_directory = Path(__file__).parent
28 with open(this_directory / "README.rst", encoding="utf-8") as f:
29 readme = f.read()
30
31 # Find pypi-doc comments in README
32 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
33 if len(indices) % 2 != 0:
34 raise Exception("Odd number of `.. pypi-doc` comments in README")
35
36 # Loop through pairs of comments and save text between pairs
37 long_description = ""
38 for i in range(0, len(indices), 2):
39 start_index = indices[i] + 11
40 end_index = indices[i + 1]
41 long_description += readme[start_index:end_index]
42 return long_description
43
44
45 extra_spark = ["pyspark"]
46 extra_biology = ["biopython"]
47 extra_chemistry = ["rdkit"]
48 extra_engineering = ["unyt"]
49 extra_all = extra_biology + extra_engineering + extra_spark
50
51 setup(
52 name="pyjanitor",
53 version="0.20.10",
54 description="Tools for cleaning pandas DataFrames",
55 author="Eric J. Ma",
56 author_email="ericmajinglong@gmail.com",
57 url="https://github.com/ericmjl/pyjanitor",
58 license="MIT",
59 # packages=["janitor", "janitor.xarray", "janitor.spark"],
60 packages=find_packages(),
61 install_requires=requirements(),
62 extras_require={
63 "all": extra_all,
64 "biology": extra_biology,
65 # "chemistry": extra_chemistry, should be inserted once rdkit
66 # fixes https://github.com/rdkit/rdkit/issues/1812
67 "engineering": extra_engineering,
68 "spark": extra_spark,
69 },
70 python_requires=">=3.6",
71 long_description=generate_long_description(),
72 long_description_content_type="text/x-rst",
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,15 +1,82 @@
"""Setup script."""
-
+import codecs
+import os
import re
from pathlib import Path
+from pprint import pprint
from setuptools import find_packages, setup
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+def read(*parts):
+ # intentionally *not* adding an encoding option to open
+ return codecs.open(os.path.join(HERE, *parts), "r").read()
+
-def requirements():
- """Reader for requirements.txt"""
- with open("requirements.txt", "r+") as f:
- return f.read()
+def read_requirements(*parts):
+ """
+ Return requirements from parts.
+
+ Given a requirements.txt (or similar style file),
+ returns a list of requirements.
+ Assumes anything after a single '#' on a line is a comment, and ignores
+ empty lines.
+
+ :param parts: list of filenames which contain the installation "parts",
+ i.e. submodule-specific installation requirements
+ :returns: A compiled list of requirements.
+ """
+ requirements = []
+ for line in read(*parts).splitlines():
+ new_line = re.sub( # noqa: PD005
+ r"(\s*)?#.*$", # the space immediately before the
+ # hash mark, the hash mark, and
+ # anything that follows it
+ "", # replace with a blank string
+ line,
+ )
+ new_line = re.sub( # noqa: PD005
+ r"-r.*$", # link to another requirement file
+ "", # replace with a blank string
+ new_line,
+ )
+ new_line = re.sub( # noqa: PD005
+ r"-e \..*$", # link to editable install
+ "", # replace with a blank string
+ new_line,
+ )
+ # print(line, "-->", new_line)
+ if new_line: # i.e. we have a non-zero-length string
+ requirements.append(new_line)
+ return requirements
+
+
+# pull from requirements.IN, requirements.TXT is generated from this
+INSTALL_REQUIRES = read_requirements(".requirements/base.in")
+
+EXTRA_REQUIRES = {
+ "dev": read_requirements(".requirements/dev.in"),
+ "docs": read_requirements(".requirements/docs.in"),
+ "test": read_requirements(".requirements/testing.in"),
+ "biology": read_requirements(".requirements/biology.in"),
+ "chemistry": read_requirements(".requirements/chemistry.in"),
+ "engineering": read_requirements(".requirements/engineering.in"),
+ "spark": read_requirements(".requirements/spark.in"),
+}
+
+# add 'all' key to EXTRA_REQUIRES
+all_requires = []
+for k, v in EXTRA_REQUIRES.items():
+ all_requires.extend(v)
+EXTRA_REQUIRES["all"] = set(all_requires)
+
+for k1 in ["biology", "chemistry", "engineering", "spark"]:
+ for v2 in EXTRA_REQUIRES[k1]:
+ EXTRA_REQUIRES["docs"].append(v2)
+
+pprint(EXTRA_REQUIRES)
def generate_long_description() -> str:
@@ -42,12 +109,6 @@
return long_description
-extra_spark = ["pyspark"]
-extra_biology = ["biopython"]
-extra_chemistry = ["rdkit"]
-extra_engineering = ["unyt"]
-extra_all = extra_biology + extra_engineering + extra_spark
-
setup(
name="pyjanitor",
version="0.20.10",
@@ -58,15 +119,8 @@
license="MIT",
# packages=["janitor", "janitor.xarray", "janitor.spark"],
packages=find_packages(),
- install_requires=requirements(),
- extras_require={
- "all": extra_all,
- "biology": extra_biology,
- # "chemistry": extra_chemistry, should be inserted once rdkit
- # fixes https://github.com/rdkit/rdkit/issues/1812
- "engineering": extra_engineering,
- "spark": extra_spark,
- },
+ install_requires=INSTALL_REQUIRES,
+ extras_require=EXTRA_REQUIRES,
python_requires=">=3.6",
long_description=generate_long_description(),
long_description_content_type="text/x-rst",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,15 +1,82 @@\n \"\"\"Setup script.\"\"\"\n-\n+import codecs\n+import os\n import re\n from pathlib import Path\n+from pprint import pprint\n \n from setuptools import find_packages, setup\n \n+HERE = os.path.abspath(os.path.dirname(__file__))\n+\n+\n+def read(*parts):\n+ # intentionally *not* adding an encoding option to open\n+ return codecs.open(os.path.join(HERE, *parts), \"r\").read()\n+\n \n-def requirements():\n- \"\"\"Reader for requirements.txt\"\"\"\n- with open(\"requirements.txt\", \"r+\") as f:\n- return f.read()\n+def read_requirements(*parts):\n+ \"\"\"\n+ Return requirements from parts.\n+\n+ Given a requirements.txt (or similar style file),\n+ returns a list of requirements.\n+ Assumes anything after a single '#' on a line is a comment, and ignores\n+ empty lines.\n+\n+ :param parts: list of filenames which contain the installation \"parts\",\n+ i.e. submodule-specific installation requirements\n+ :returns: A compiled list of requirements.\n+ \"\"\"\n+ requirements = []\n+ for line in read(*parts).splitlines():\n+ new_line = re.sub( # noqa: PD005\n+ r\"(\\s*)?#.*$\", # the space immediately before the\n+ # hash mark, the hash mark, and\n+ # anything that follows it\n+ \"\", # replace with a blank string\n+ line,\n+ )\n+ new_line = re.sub( # noqa: PD005\n+ r\"-r.*$\", # link to another requirement file\n+ \"\", # replace with a blank string\n+ new_line,\n+ )\n+ new_line = re.sub( # noqa: PD005\n+ r\"-e \\..*$\", # link to editable install\n+ \"\", # replace with a blank string\n+ new_line,\n+ )\n+ # print(line, \"-->\", new_line)\n+ if new_line: # i.e. we have a non-zero-length string\n+ requirements.append(new_line)\n+ return requirements\n+\n+\n+# pull from requirements.IN, requirements.TXT is generated from this\n+INSTALL_REQUIRES = read_requirements(\".requirements/base.in\")\n+\n+EXTRA_REQUIRES = {\n+ \"dev\": read_requirements(\".requirements/dev.in\"),\n+ \"docs\": read_requirements(\".requirements/docs.in\"),\n+ \"test\": read_requirements(\".requirements/testing.in\"),\n+ \"biology\": read_requirements(\".requirements/biology.in\"),\n+ \"chemistry\": read_requirements(\".requirements/chemistry.in\"),\n+ \"engineering\": read_requirements(\".requirements/engineering.in\"),\n+ \"spark\": read_requirements(\".requirements/spark.in\"),\n+}\n+\n+# add 'all' key to EXTRA_REQUIRES\n+all_requires = []\n+for k, v in EXTRA_REQUIRES.items():\n+ all_requires.extend(v)\n+EXTRA_REQUIRES[\"all\"] = set(all_requires)\n+\n+for k1 in [\"biology\", \"chemistry\", \"engineering\", \"spark\"]:\n+ for v2 in EXTRA_REQUIRES[k1]:\n+ EXTRA_REQUIRES[\"docs\"].append(v2)\n+\n+pprint(EXTRA_REQUIRES)\n \n \n def generate_long_description() -> str:\n@@ -42,12 +109,6 @@\n return long_description\n \n \n-extra_spark = [\"pyspark\"]\n-extra_biology = [\"biopython\"]\n-extra_chemistry = [\"rdkit\"]\n-extra_engineering = [\"unyt\"]\n-extra_all = extra_biology + extra_engineering + extra_spark\n-\n setup(\n name=\"pyjanitor\",\n version=\"0.20.10\",\n@@ -58,15 +119,8 @@\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n- install_requires=requirements(),\n- extras_require={\n- \"all\": extra_all,\n- \"biology\": extra_biology,\n- # \"chemistry\": extra_chemistry, should be inserted once rdkit\n- # fixes https://github.com/rdkit/rdkit/issues/1812\n- \"engineering\": extra_engineering,\n- \"spark\": extra_spark,\n- },\n+ install_requires=INSTALL_REQUIRES,\n+ extras_require=EXTRA_REQUIRES,\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n", "issue": "Latest requirements.txt seems to be over-specifying dependencies\nThis is the latest version requirements.txt file:\r\n\r\n```\r\nblack>=19.3b0\r\ndarglint\r\nhypothesis>=4.4.0\r\ninterrogate\r\nipykernel\r\nisort>=4.3.18\r\njupyter_client\r\nlxml\r\nnatsort\r\nnbsphinx>=0.4.2\r\npandas-flavor\r\npandas-vet\r\npre-commit\r\npyspark\r\npytest-azurepipelines\r\npytest-cov\r\npytest>=3.4.2\r\nscikit-learn\r\nseaborn\r\nsetuptools>=38.5.2\r\nsphinxcontrib-fulltoc==1.2.0\r\nunyt\r\nxarray\r\n```\r\n\r\nThose are inject in the setup.py as mandatory dependencies but a lot in there looks like docs, dev, or optional. Is pyjanitor really require all that at run time?\n", "before_files": [{"content": "\"\"\"Setup script.\"\"\"\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\n\ndef requirements():\n \"\"\"Reader for requirements.txt\"\"\"\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README.\n :raises Exception: if odd number of ``.. pypi-doc`` comments\n in README.\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.20.10\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"ericmajinglong@gmail.com\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Setup script.\"\"\"\nimport codecs\nimport os\nimport re\nfrom pathlib import Path\nfrom pprint import pprint\n\nfrom setuptools import find_packages, setup\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n # intentionally *not* adding an encoding option to open\n return codecs.open(os.path.join(HERE, *parts), \"r\").read()\n\n\ndef read_requirements(*parts):\n \"\"\"\n Return requirements from parts.\n\n Given a requirements.txt (or similar style file),\n returns a list of requirements.\n Assumes anything after a single '#' on a line is a comment, and ignores\n empty lines.\n\n :param parts: list of filenames which contain the installation \"parts\",\n i.e. submodule-specific installation requirements\n :returns: A compiled list of requirements.\n \"\"\"\n requirements = []\n for line in read(*parts).splitlines():\n new_line = re.sub( # noqa: PD005\n r\"(\\s*)?#.*$\", # the space immediately before the\n # hash mark, the hash mark, and\n # anything that follows it\n \"\", # replace with a blank string\n line,\n )\n new_line = re.sub( # noqa: PD005\n r\"-r.*$\", # link to another requirement file\n \"\", # replace with a blank string\n new_line,\n )\n new_line = re.sub( # noqa: PD005\n r\"-e \\..*$\", # link to editable install\n \"\", # replace with a blank string\n new_line,\n )\n # print(line, \"-->\", new_line)\n if new_line: # i.e. we have a non-zero-length string\n requirements.append(new_line)\n return requirements\n\n\n# pull from requirements.IN, requirements.TXT is generated from this\nINSTALL_REQUIRES = read_requirements(\".requirements/base.in\")\n\nEXTRA_REQUIRES = {\n \"dev\": read_requirements(\".requirements/dev.in\"),\n \"docs\": read_requirements(\".requirements/docs.in\"),\n \"test\": read_requirements(\".requirements/testing.in\"),\n \"biology\": read_requirements(\".requirements/biology.in\"),\n \"chemistry\": read_requirements(\".requirements/chemistry.in\"),\n \"engineering\": read_requirements(\".requirements/engineering.in\"),\n \"spark\": read_requirements(\".requirements/spark.in\"),\n}\n\n# add 'all' key to EXTRA_REQUIRES\nall_requires = []\nfor k, v in EXTRA_REQUIRES.items():\n all_requires.extend(v)\nEXTRA_REQUIRES[\"all\"] = set(all_requires)\n\nfor k1 in [\"biology\", \"chemistry\", \"engineering\", \"spark\"]:\n for v2 in EXTRA_REQUIRES[k1]:\n EXTRA_REQUIRES[\"docs\"].append(v2)\n\npprint(EXTRA_REQUIRES)\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README.\n :raises Exception: if odd number of ``.. pypi-doc`` comments\n in README.\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.20.10\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"ericmajinglong@gmail.com\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRA_REQUIRES,\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}]}
| 1,153 | 992 |
gh_patches_debug_3040
|
rasdani/github-patches
|
git_diff
|
mosaicml__composer-496
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move `ComposerTrainer` to top-level imports
Our most heavily used objects should be easily importable from `composer` via:
```
from composer import Trainer, ComposerModel
```
rather than remember their submodule:
```
from composer.models import ComposerModel
```
Especially the last one, its tricky to remember whether its `models` or `model`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `composer/__init__.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from composer import algorithms as algorithms
4 from composer import callbacks as callbacks
5 from composer import datasets as datasets
6 from composer import loggers as loggers
7 from composer import models as models
8 from composer import optim as optim
9 from composer import profiler as profiler
10 from composer import trainer as trainer
11 from composer import utils as utils
12 from composer.core import Algorithm as Algorithm
13 from composer.core import Callback as Callback
14 from composer.core import DataSpec as DataSpec
15 from composer.core import Engine as Engine
16 from composer.core import Event as Event
17 from composer.core import Logger as Logger
18 from composer.core import State as State
19 from composer.core import Time as Time
20 from composer.core import Timer as Timer
21 from composer.core import TimeUnit as TimeUnit
22 from composer.core import types as types
23 from composer.trainer import Trainer as Trainer
24
25 __version__ = "0.3.1"
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/composer/__init__.py b/composer/__init__.py
--- a/composer/__init__.py
+++ b/composer/__init__.py
@@ -20,6 +20,7 @@
from composer.core import Timer as Timer
from composer.core import TimeUnit as TimeUnit
from composer.core import types as types
+from composer.models import ComposerModel as ComposerModel
from composer.trainer import Trainer as Trainer
__version__ = "0.3.1"
|
{"golden_diff": "diff --git a/composer/__init__.py b/composer/__init__.py\n--- a/composer/__init__.py\n+++ b/composer/__init__.py\n@@ -20,6 +20,7 @@\n from composer.core import Timer as Timer\n from composer.core import TimeUnit as TimeUnit\n from composer.core import types as types\n+from composer.models import ComposerModel as ComposerModel\n from composer.trainer import Trainer as Trainer\n \n __version__ = \"0.3.1\"\n", "issue": "Move `ComposerTrainer` to top-level imports\nOur most heavily used objects should be easily importable from `composer` via:\r\n```\r\nfrom composer import Trainer, ComposerModel\r\n```\r\nrather than remember their submodule:\r\n\r\n```\r\nfrom composer.models import ComposerModel\r\n```\r\n\r\nEspecially the last one, its tricky to remember whether its `models` or `model`\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom composer import algorithms as algorithms\nfrom composer import callbacks as callbacks\nfrom composer import datasets as datasets\nfrom composer import loggers as loggers\nfrom composer import models as models\nfrom composer import optim as optim\nfrom composer import profiler as profiler\nfrom composer import trainer as trainer\nfrom composer import utils as utils\nfrom composer.core import Algorithm as Algorithm\nfrom composer.core import Callback as Callback\nfrom composer.core import DataSpec as DataSpec\nfrom composer.core import Engine as Engine\nfrom composer.core import Event as Event\nfrom composer.core import Logger as Logger\nfrom composer.core import State as State\nfrom composer.core import Time as Time\nfrom composer.core import Timer as Timer\nfrom composer.core import TimeUnit as TimeUnit\nfrom composer.core import types as types\nfrom composer.trainer import Trainer as Trainer\n\n__version__ = \"0.3.1\"\n", "path": "composer/__init__.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom composer import algorithms as algorithms\nfrom composer import callbacks as callbacks\nfrom composer import datasets as datasets\nfrom composer import loggers as loggers\nfrom composer import models as models\nfrom composer import optim as optim\nfrom composer import profiler as profiler\nfrom composer import trainer as trainer\nfrom composer import utils as utils\nfrom composer.core import Algorithm as Algorithm\nfrom composer.core import Callback as Callback\nfrom composer.core import DataSpec as DataSpec\nfrom composer.core import Engine as Engine\nfrom composer.core import Event as Event\nfrom composer.core import Logger as Logger\nfrom composer.core import State as State\nfrom composer.core import Time as Time\nfrom composer.core import Timer as Timer\nfrom composer.core import TimeUnit as TimeUnit\nfrom composer.core import types as types\nfrom composer.models import ComposerModel as ComposerModel\nfrom composer.trainer import Trainer as Trainer\n\n__version__ = \"0.3.1\"\n", "path": "composer/__init__.py"}]}
| 569 | 103 |
gh_patches_debug_7982
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-5253
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Special characters in st2admin account causing st2 key failure
Just upgraded to version 3.4 and my keyvault is having problems. I believe it's due to my st2admin password containing special characters.
```
[root@stackstorm workflows]# st2 key list --scope=all
Traceback (most recent call last):
File "/bin/st2", line 10, in <module>
sys.exit(main())
File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/shell.py", line 470, in main
return Shell().run(argv)
File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/shell.py", line 385, in run
config = self._parse_config_file(args=args, validate_config_permissions=False)
File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/base.py", line 183, in _parse_config_file
result = parser.parse()
File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/config_parser.py", line 197, in parse
value = get_func(section, key)
File "/usr/lib64/python3.6/configparser.py", line 800, in get
d)
File "/usr/lib64/python3.6/configparser.py", line 394, in before_get
self._interpolate_some(parser, option, L, value, section, defaults, 1)
File "/usr/lib64/python3.6/configparser.py", line 444, in _interpolate_some
"found: %r" % (rest,))
configparser.InterpolationSyntaxError: '%' must be followed by '%' or '(', found: '%3C#V~Bvg%3E3t+'
```
This password above is what I used to install stackstorm. Or at least part of it. I've since changed the password via the documented htpasswd method, but the issue persists. Any tips? Left the password in for research purposes.
curl -sSL https://stackstorm.com/packages/install.sh | bash -s -- --user=st2admin --password='q7j/t%3C#V~Bvg%3E3t+'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2client/st2client/config_parser.py`
Content:
```
1 # Copyright 2020 The StackStorm Authors.
2 # Copyright 2019 Extreme Networks, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """
17 Module for parsing CLI config file.
18 """
19
20 from __future__ import absolute_import
21
22 import logging
23 import os
24
25 from collections import defaultdict
26
27 import io
28
29 import six
30 from six.moves.configparser import ConfigParser
31
32
33 __all__ = [
34 "CLIConfigParser",
35 "ST2_CONFIG_DIRECTORY",
36 "ST2_CONFIG_PATH",
37 "CONFIG_DEFAULT_VALUES",
38 ]
39
40 ST2_CONFIG_DIRECTORY = "~/.st2"
41 ST2_CONFIG_DIRECTORY = os.path.abspath(os.path.expanduser(ST2_CONFIG_DIRECTORY))
42
43 ST2_CONFIG_PATH = os.path.abspath(os.path.join(ST2_CONFIG_DIRECTORY, "config"))
44
45 CONFIG_FILE_OPTIONS = {
46 "general": {
47 "base_url": {"type": "string", "default": None},
48 "api_version": {"type": "string", "default": None},
49 "cacert": {"type": "string", "default": None},
50 "silence_ssl_warnings": {"type": "bool", "default": False},
51 "silence_schema_output": {"type": "bool", "default": True},
52 },
53 "cli": {
54 "debug": {"type": "bool", "default": False},
55 "cache_token": {"type": "boolean", "default": True},
56 "timezone": {"type": "string", "default": "UTC"},
57 },
58 "credentials": {
59 "username": {"type": "string", "default": None},
60 "password": {"type": "string", "default": None},
61 "api_key": {"type": "string", "default": None},
62 },
63 "api": {"url": {"type": "string", "default": None}},
64 "auth": {"url": {"type": "string", "default": None}},
65 "stream": {"url": {"type": "string", "default": None}},
66 }
67
68 CONFIG_DEFAULT_VALUES = {}
69
70 for section, keys in six.iteritems(CONFIG_FILE_OPTIONS):
71 CONFIG_DEFAULT_VALUES[section] = {}
72
73 for key, options in six.iteritems(keys):
74 default_value = options["default"]
75 CONFIG_DEFAULT_VALUES[section][key] = default_value
76
77
78 class CLIConfigParser(object):
79 def __init__(
80 self,
81 config_file_path,
82 validate_config_exists=True,
83 validate_config_permissions=True,
84 log=None,
85 ):
86 if validate_config_exists and not os.path.isfile(config_file_path):
87 raise ValueError('Config file "%s" doesn\'t exist')
88
89 if log is None:
90 log = logging.getLogger(__name__)
91 logging.basicConfig()
92
93 self.config_file_path = config_file_path
94 self.validate_config_permissions = validate_config_permissions
95 self.LOG = log
96
97 def parse(self):
98 """
99 Parse the config and return a dict with the parsed values.
100
101 :rtype: ``dict``
102 """
103 result = defaultdict(dict)
104
105 if not os.path.isfile(self.config_file_path):
106 # Config doesn't exist, return the default values
107 return CONFIG_DEFAULT_VALUES
108
109 config_dir_path = os.path.dirname(self.config_file_path)
110
111 if self.validate_config_permissions:
112 # Make sure the directory permissions == 0o770
113 if bool(os.stat(config_dir_path).st_mode & 0o7):
114 self.LOG.warn(
115 "The StackStorm configuration directory permissions are "
116 "insecure (too permissive): others have access."
117 )
118
119 # Make sure the setgid bit is set on the directory
120 if not bool(os.stat(config_dir_path).st_mode & 0o2000):
121 self.LOG.info(
122 "The SGID bit is not set on the StackStorm configuration "
123 "directory."
124 )
125
126 # Make sure the file permissions == 0o660
127 if bool(os.stat(self.config_file_path).st_mode & 0o7):
128 self.LOG.warn(
129 "The StackStorm configuration file permissions are "
130 "insecure: others have access."
131 )
132
133 config = ConfigParser()
134 with io.open(self.config_file_path, "r", encoding="utf8") as fp:
135 config.readfp(fp)
136
137 for section, keys in six.iteritems(CONFIG_FILE_OPTIONS):
138 for key, options in six.iteritems(keys):
139 key_type = options["type"]
140 key_default_value = options["default"]
141
142 if config.has_option(section, key):
143 if key_type in ["str", "string"]:
144 get_func = config.get
145 elif key_type in ["int", "integer"]:
146 get_func = config.getint
147 elif key_type in ["float"]:
148 get_func = config.getfloat
149 elif key_type in ["bool", "boolean"]:
150 get_func = config.getboolean
151 else:
152 msg = 'Invalid type "%s" for option "%s"' % (key_type, key)
153 raise ValueError(msg)
154
155 value = get_func(section, key)
156 result[section][key] = value
157 else:
158 result[section][key] = key_default_value
159
160 return dict(result)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/st2client/st2client/config_parser.py b/st2client/st2client/config_parser.py
--- a/st2client/st2client/config_parser.py
+++ b/st2client/st2client/config_parser.py
@@ -152,7 +152,7 @@
msg = 'Invalid type "%s" for option "%s"' % (key_type, key)
raise ValueError(msg)
- value = get_func(section, key)
+ value = get_func(section, key, raw=True)
result[section][key] = value
else:
result[section][key] = key_default_value
|
{"golden_diff": "diff --git a/st2client/st2client/config_parser.py b/st2client/st2client/config_parser.py\n--- a/st2client/st2client/config_parser.py\n+++ b/st2client/st2client/config_parser.py\n@@ -152,7 +152,7 @@\n msg = 'Invalid type \"%s\" for option \"%s\"' % (key_type, key)\n raise ValueError(msg)\n \n- value = get_func(section, key)\n+ value = get_func(section, key, raw=True)\n result[section][key] = value\n else:\n result[section][key] = key_default_value\n", "issue": "Special characters in st2admin account causing st2 key failure\nJust upgraded to version 3.4 and my keyvault is having problems. I believe it's due to my st2admin password containing special characters.\r\n\r\n```\r\n[root@stackstorm workflows]# st2 key list --scope=all\r\nTraceback (most recent call last):\r\n File \"/bin/st2\", line 10, in <module>\r\n sys.exit(main())\r\n File \"/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/shell.py\", line 470, in main\r\n return Shell().run(argv)\r\n File \"/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/shell.py\", line 385, in run\r\n config = self._parse_config_file(args=args, validate_config_permissions=False)\r\n File \"/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/base.py\", line 183, in _parse_config_file\r\n result = parser.parse()\r\n File \"/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/config_parser.py\", line 197, in parse\r\n value = get_func(section, key)\r\n File \"/usr/lib64/python3.6/configparser.py\", line 800, in get\r\n d)\r\n File \"/usr/lib64/python3.6/configparser.py\", line 394, in before_get\r\n self._interpolate_some(parser, option, L, value, section, defaults, 1)\r\n File \"/usr/lib64/python3.6/configparser.py\", line 444, in _interpolate_some\r\n \"found: %r\" % (rest,))\r\nconfigparser.InterpolationSyntaxError: '%' must be followed by '%' or '(', found: '%3C#V~Bvg%3E3t+'\r\n```\r\n\r\nThis password above is what I used to install stackstorm. Or at least part of it. I've since changed the password via the documented htpasswd method, but the issue persists. Any tips? Left the password in for research purposes.\r\n\r\ncurl -sSL https://stackstorm.com/packages/install.sh | bash -s -- --user=st2admin --password='q7j/t%3C#V~Bvg%3E3t+'\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule for parsing CLI config file.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport os\n\nfrom collections import defaultdict\n\nimport io\n\nimport six\nfrom six.moves.configparser import ConfigParser\n\n\n__all__ = [\n \"CLIConfigParser\",\n \"ST2_CONFIG_DIRECTORY\",\n \"ST2_CONFIG_PATH\",\n \"CONFIG_DEFAULT_VALUES\",\n]\n\nST2_CONFIG_DIRECTORY = \"~/.st2\"\nST2_CONFIG_DIRECTORY = os.path.abspath(os.path.expanduser(ST2_CONFIG_DIRECTORY))\n\nST2_CONFIG_PATH = os.path.abspath(os.path.join(ST2_CONFIG_DIRECTORY, \"config\"))\n\nCONFIG_FILE_OPTIONS = {\n \"general\": {\n \"base_url\": {\"type\": \"string\", \"default\": None},\n \"api_version\": {\"type\": \"string\", \"default\": None},\n \"cacert\": {\"type\": \"string\", \"default\": None},\n \"silence_ssl_warnings\": {\"type\": \"bool\", \"default\": False},\n \"silence_schema_output\": {\"type\": \"bool\", \"default\": True},\n },\n \"cli\": {\n \"debug\": {\"type\": \"bool\", \"default\": False},\n \"cache_token\": {\"type\": \"boolean\", \"default\": True},\n \"timezone\": {\"type\": \"string\", \"default\": \"UTC\"},\n },\n \"credentials\": {\n \"username\": {\"type\": \"string\", \"default\": None},\n \"password\": {\"type\": \"string\", \"default\": None},\n \"api_key\": {\"type\": \"string\", \"default\": None},\n },\n \"api\": {\"url\": {\"type\": \"string\", \"default\": None}},\n \"auth\": {\"url\": {\"type\": \"string\", \"default\": None}},\n \"stream\": {\"url\": {\"type\": \"string\", \"default\": None}},\n}\n\nCONFIG_DEFAULT_VALUES = {}\n\nfor section, keys in six.iteritems(CONFIG_FILE_OPTIONS):\n CONFIG_DEFAULT_VALUES[section] = {}\n\n for key, options in six.iteritems(keys):\n default_value = options[\"default\"]\n CONFIG_DEFAULT_VALUES[section][key] = default_value\n\n\nclass CLIConfigParser(object):\n def __init__(\n self,\n config_file_path,\n validate_config_exists=True,\n validate_config_permissions=True,\n log=None,\n ):\n if validate_config_exists and not os.path.isfile(config_file_path):\n raise ValueError('Config file \"%s\" doesn\\'t exist')\n\n if log is None:\n log = logging.getLogger(__name__)\n logging.basicConfig()\n\n self.config_file_path = config_file_path\n self.validate_config_permissions = validate_config_permissions\n self.LOG = log\n\n def parse(self):\n \"\"\"\n Parse the config and return a dict with the parsed values.\n\n :rtype: ``dict``\n \"\"\"\n result = defaultdict(dict)\n\n if not os.path.isfile(self.config_file_path):\n # Config doesn't exist, return the default values\n return CONFIG_DEFAULT_VALUES\n\n config_dir_path = os.path.dirname(self.config_file_path)\n\n if self.validate_config_permissions:\n # Make sure the directory permissions == 0o770\n if bool(os.stat(config_dir_path).st_mode & 0o7):\n self.LOG.warn(\n \"The StackStorm configuration directory permissions are \"\n \"insecure (too permissive): others have access.\"\n )\n\n # Make sure the setgid bit is set on the directory\n if not bool(os.stat(config_dir_path).st_mode & 0o2000):\n self.LOG.info(\n \"The SGID bit is not set on the StackStorm configuration \"\n \"directory.\"\n )\n\n # Make sure the file permissions == 0o660\n if bool(os.stat(self.config_file_path).st_mode & 0o7):\n self.LOG.warn(\n \"The StackStorm configuration file permissions are \"\n \"insecure: others have access.\"\n )\n\n config = ConfigParser()\n with io.open(self.config_file_path, \"r\", encoding=\"utf8\") as fp:\n config.readfp(fp)\n\n for section, keys in six.iteritems(CONFIG_FILE_OPTIONS):\n for key, options in six.iteritems(keys):\n key_type = options[\"type\"]\n key_default_value = options[\"default\"]\n\n if config.has_option(section, key):\n if key_type in [\"str\", \"string\"]:\n get_func = config.get\n elif key_type in [\"int\", \"integer\"]:\n get_func = config.getint\n elif key_type in [\"float\"]:\n get_func = config.getfloat\n elif key_type in [\"bool\", \"boolean\"]:\n get_func = config.getboolean\n else:\n msg = 'Invalid type \"%s\" for option \"%s\"' % (key_type, key)\n raise ValueError(msg)\n\n value = get_func(section, key)\n result[section][key] = value\n else:\n result[section][key] = key_default_value\n\n return dict(result)\n", "path": "st2client/st2client/config_parser.py"}], "after_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule for parsing CLI config file.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport os\n\nfrom collections import defaultdict\n\nimport io\n\nimport six\nfrom six.moves.configparser import ConfigParser\n\n\n__all__ = [\n \"CLIConfigParser\",\n \"ST2_CONFIG_DIRECTORY\",\n \"ST2_CONFIG_PATH\",\n \"CONFIG_DEFAULT_VALUES\",\n]\n\nST2_CONFIG_DIRECTORY = \"~/.st2\"\nST2_CONFIG_DIRECTORY = os.path.abspath(os.path.expanduser(ST2_CONFIG_DIRECTORY))\n\nST2_CONFIG_PATH = os.path.abspath(os.path.join(ST2_CONFIG_DIRECTORY, \"config\"))\n\nCONFIG_FILE_OPTIONS = {\n \"general\": {\n \"base_url\": {\"type\": \"string\", \"default\": None},\n \"api_version\": {\"type\": \"string\", \"default\": None},\n \"cacert\": {\"type\": \"string\", \"default\": None},\n \"silence_ssl_warnings\": {\"type\": \"bool\", \"default\": False},\n \"silence_schema_output\": {\"type\": \"bool\", \"default\": True},\n },\n \"cli\": {\n \"debug\": {\"type\": \"bool\", \"default\": False},\n \"cache_token\": {\"type\": \"boolean\", \"default\": True},\n \"timezone\": {\"type\": \"string\", \"default\": \"UTC\"},\n },\n \"credentials\": {\n \"username\": {\"type\": \"string\", \"default\": None},\n \"password\": {\"type\": \"string\", \"default\": None},\n \"api_key\": {\"type\": \"string\", \"default\": None},\n },\n \"api\": {\"url\": {\"type\": \"string\", \"default\": None}},\n \"auth\": {\"url\": {\"type\": \"string\", \"default\": None}},\n \"stream\": {\"url\": {\"type\": \"string\", \"default\": None}},\n}\n\nCONFIG_DEFAULT_VALUES = {}\n\nfor section, keys in six.iteritems(CONFIG_FILE_OPTIONS):\n CONFIG_DEFAULT_VALUES[section] = {}\n\n for key, options in six.iteritems(keys):\n default_value = options[\"default\"]\n CONFIG_DEFAULT_VALUES[section][key] = default_value\n\n\nclass CLIConfigParser(object):\n def __init__(\n self,\n config_file_path,\n validate_config_exists=True,\n validate_config_permissions=True,\n log=None,\n ):\n if validate_config_exists and not os.path.isfile(config_file_path):\n raise ValueError('Config file \"%s\" doesn\\'t exist')\n\n if log is None:\n log = logging.getLogger(__name__)\n logging.basicConfig()\n\n self.config_file_path = config_file_path\n self.validate_config_permissions = validate_config_permissions\n self.LOG = log\n\n def parse(self):\n \"\"\"\n Parse the config and return a dict with the parsed values.\n\n :rtype: ``dict``\n \"\"\"\n result = defaultdict(dict)\n\n if not os.path.isfile(self.config_file_path):\n # Config doesn't exist, return the default values\n return CONFIG_DEFAULT_VALUES\n\n config_dir_path = os.path.dirname(self.config_file_path)\n\n if self.validate_config_permissions:\n # Make sure the directory permissions == 0o770\n if bool(os.stat(config_dir_path).st_mode & 0o7):\n self.LOG.warn(\n \"The StackStorm configuration directory permissions are \"\n \"insecure (too permissive): others have access.\"\n )\n\n # Make sure the setgid bit is set on the directory\n if not bool(os.stat(config_dir_path).st_mode & 0o2000):\n self.LOG.info(\n \"The SGID bit is not set on the StackStorm configuration \"\n \"directory.\"\n )\n\n # Make sure the file permissions == 0o660\n if bool(os.stat(self.config_file_path).st_mode & 0o7):\n self.LOG.warn(\n \"The StackStorm configuration file permissions are \"\n \"insecure: others have access.\"\n )\n\n config = ConfigParser()\n with io.open(self.config_file_path, \"r\", encoding=\"utf8\") as fp:\n config.readfp(fp)\n\n for section, keys in six.iteritems(CONFIG_FILE_OPTIONS):\n for key, options in six.iteritems(keys):\n key_type = options[\"type\"]\n key_default_value = options[\"default\"]\n\n if config.has_option(section, key):\n if key_type in [\"str\", \"string\"]:\n get_func = config.get\n elif key_type in [\"int\", \"integer\"]:\n get_func = config.getint\n elif key_type in [\"float\"]:\n get_func = config.getfloat\n elif key_type in [\"bool\", \"boolean\"]:\n get_func = config.getboolean\n else:\n msg = 'Invalid type \"%s\" for option \"%s\"' % (key_type, key)\n raise ValueError(msg)\n\n value = get_func(section, key, raw=True)\n result[section][key] = value\n else:\n result[section][key] = key_default_value\n\n return dict(result)\n", "path": "st2client/st2client/config_parser.py"}]}
| 2,338 | 135 |
gh_patches_debug_19660
|
rasdani/github-patches
|
git_diff
|
pypa__pip-1725
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Traceback if any uppercase command entered.
c:\Python34\Scripts>pip3.exe -V
pip 1.5.2 from C:\Python34\lib\site-packages (python 3.4)
Windows 7.1
c:\Python34\Scripts>pip3.exe LIST
Traceback (most recent call last):
File "C:\Python34\lib\runpy.py", line 171, in _run_module_as_main
"__main__", mod_spec)
File "C:\Python34\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "c:\Python34\Scripts\pip3.exe__main__.py", line 9, in <module>
File "C:\Python34\lib\site-packages\pip__init__.py", line 177, in main
cmd_name, cmd_args = parseopts(initial_args)
File "C:\Python34\lib\site-packages\pip__init__.py", line 156, in parseopts
cmd_args.remove(args_else[0].lower())
ValueError: list.remove(x): x not in list
This was confirmed in response to my query on the main python mailing list.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/commands/__init__.py`
Content:
```
1 """
2 Package containing all pip commands
3 """
4
5
6 from pip.commands.bundle import BundleCommand
7 from pip.commands.completion import CompletionCommand
8 from pip.commands.freeze import FreezeCommand
9 from pip.commands.help import HelpCommand
10 from pip.commands.list import ListCommand
11 from pip.commands.search import SearchCommand
12 from pip.commands.show import ShowCommand
13 from pip.commands.install import InstallCommand
14 from pip.commands.uninstall import UninstallCommand
15 from pip.commands.unzip import UnzipCommand
16 from pip.commands.zip import ZipCommand
17 from pip.commands.wheel import WheelCommand
18
19
20 commands = {
21 BundleCommand.name: BundleCommand,
22 CompletionCommand.name: CompletionCommand,
23 FreezeCommand.name: FreezeCommand,
24 HelpCommand.name: HelpCommand,
25 SearchCommand.name: SearchCommand,
26 ShowCommand.name: ShowCommand,
27 InstallCommand.name: InstallCommand,
28 UninstallCommand.name: UninstallCommand,
29 UnzipCommand.name: UnzipCommand,
30 ZipCommand.name: ZipCommand,
31 ListCommand.name: ListCommand,
32 WheelCommand.name: WheelCommand,
33 }
34
35
36 commands_order = [
37 InstallCommand,
38 UninstallCommand,
39 FreezeCommand,
40 ListCommand,
41 ShowCommand,
42 SearchCommand,
43 WheelCommand,
44 ZipCommand,
45 UnzipCommand,
46 BundleCommand,
47 HelpCommand,
48 ]
49
50
51 def get_summaries(ignore_hidden=True, ordered=True):
52 """Yields sorted (command name, command summary) tuples."""
53
54 if ordered:
55 cmditems = _sort_commands(commands, commands_order)
56 else:
57 cmditems = commands.items()
58
59 for name, command_class in cmditems:
60 if ignore_hidden and command_class.hidden:
61 continue
62
63 yield (name, command_class.summary)
64
65
66 def get_similar_commands(name):
67 """Command name auto-correct."""
68 from difflib import get_close_matches
69
70 close_commands = get_close_matches(name, commands.keys())
71
72 if close_commands:
73 guess = close_commands[0]
74 else:
75 guess = False
76
77 return guess
78
79
80 def _sort_commands(cmddict, order):
81 def keyfn(key):
82 try:
83 return order.index(key[1])
84 except ValueError:
85 # unordered items should come last
86 return 0xff
87
88 return sorted(cmddict.items(), key=keyfn)
89
```
Path: `pip/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import optparse
4
5 import sys
6 import re
7
8 from pip.exceptions import InstallationError, CommandError, PipError
9 from pip.log import logger
10 from pip.util import get_installed_distributions, get_prog
11 from pip.vcs import git, mercurial, subversion, bazaar # noqa
12 from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
13 from pip.commands import commands, get_summaries, get_similar_commands
14
15 # This fixes a peculiarity when importing via __import__ - as we are
16 # initialising the pip module, "from pip import cmdoptions" is recursive
17 # and appears not to work properly in that situation.
18 import pip.cmdoptions
19 cmdoptions = pip.cmdoptions
20
21 # The version as used in the setup.py and the docs conf.py
22 __version__ = "1.6.dev1"
23
24
25 def autocomplete():
26 """Command and option completion for the main option parser (and options)
27 and its subcommands (and options).
28
29 Enable by sourcing one of the completion shell scripts (bash or zsh).
30 """
31 # Don't complete if user hasn't sourced bash_completion file.
32 if 'PIP_AUTO_COMPLETE' not in os.environ:
33 return
34 cwords = os.environ['COMP_WORDS'].split()[1:]
35 cword = int(os.environ['COMP_CWORD'])
36 try:
37 current = cwords[cword - 1]
38 except IndexError:
39 current = ''
40
41 subcommands = [cmd for cmd, summary in get_summaries()]
42 options = []
43 # subcommand
44 try:
45 subcommand_name = [w for w in cwords if w in subcommands][0]
46 except IndexError:
47 subcommand_name = None
48
49 parser = create_main_parser()
50 # subcommand options
51 if subcommand_name:
52 # special case: 'help' subcommand has no options
53 if subcommand_name == 'help':
54 sys.exit(1)
55 # special case: list locally installed dists for uninstall command
56 if subcommand_name == 'uninstall' and not current.startswith('-'):
57 installed = []
58 lc = current.lower()
59 for dist in get_installed_distributions(local_only=True):
60 if dist.key.startswith(lc) and dist.key not in cwords[1:]:
61 installed.append(dist.key)
62 # if there are no dists installed, fall back to option completion
63 if installed:
64 for dist in installed:
65 print(dist)
66 sys.exit(1)
67
68 subcommand = commands[subcommand_name]()
69 options += [(opt.get_opt_string(), opt.nargs)
70 for opt in subcommand.parser.option_list_all
71 if opt.help != optparse.SUPPRESS_HELP]
72
73 # filter out previously specified options from available options
74 prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
75 options = [(x, v) for (x, v) in options if x not in prev_opts]
76 # filter options by current input
77 options = [(k, v) for k, v in options if k.startswith(current)]
78 for option in options:
79 opt_label = option[0]
80 # append '=' to options which require args
81 if option[1]:
82 opt_label += '='
83 print(opt_label)
84 else:
85 # show main parser options only when necessary
86 if current.startswith('-') or current.startswith('--'):
87 opts = [i.option_list for i in parser.option_groups]
88 opts.append(parser.option_list)
89 opts = (o for it in opts for o in it)
90
91 subcommands += [i.get_opt_string() for i in opts
92 if i.help != optparse.SUPPRESS_HELP]
93
94 print(' '.join([x for x in subcommands if x.startswith(current)]))
95 sys.exit(1)
96
97
98 def create_main_parser():
99 parser_kw = {
100 'usage': '\n%prog <command> [options]',
101 'add_help_option': False,
102 'formatter': UpdatingDefaultsHelpFormatter(),
103 'name': 'global',
104 'prog': get_prog(),
105 }
106
107 parser = ConfigOptionParser(**parser_kw)
108 parser.disable_interspersed_args()
109
110 pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
111 parser.version = 'pip %s from %s (python %s)' % (
112 __version__, pip_pkg_dir, sys.version[:3])
113
114 # add the general options
115 gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
116 parser.add_option_group(gen_opts)
117
118 parser.main = True # so the help formatter knows
119
120 # create command listing for description
121 command_summaries = get_summaries()
122 description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
123 parser.description = '\n'.join(description)
124
125 return parser
126
127
128 def parseopts(args):
129 parser = create_main_parser()
130
131 # Note: parser calls disable_interspersed_args(), so the result of this
132 # call is to split the initial args into the general options before the
133 # subcommand and everything else.
134 # For example:
135 # args: ['--timeout=5', 'install', '--user', 'INITools']
136 # general_options: ['--timeout==5']
137 # args_else: ['install', '--user', 'INITools']
138 general_options, args_else = parser.parse_args(args)
139
140 # --version
141 if general_options.version:
142 sys.stdout.write(parser.version)
143 sys.stdout.write(os.linesep)
144 sys.exit()
145
146 # pip || pip help -> print_help()
147 if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
148 parser.print_help()
149 sys.exit()
150
151 # the subcommand name
152 cmd_name = args_else[0].lower()
153
154 # all the args without the subcommand
155 cmd_args = args[:]
156 cmd_args.remove(args_else[0].lower())
157
158 if cmd_name not in commands:
159 guess = get_similar_commands(cmd_name)
160
161 msg = ['unknown command "%s"' % cmd_name]
162 if guess:
163 msg.append('maybe you meant "%s"' % guess)
164
165 raise CommandError(' - '.join(msg))
166
167 return cmd_name, cmd_args
168
169
170 def main(args=None):
171 if args is None:
172 args = sys.argv[1:]
173
174 autocomplete()
175
176 try:
177 cmd_name, cmd_args = parseopts(args)
178 except PipError as exc:
179 sys.stderr.write("ERROR: %s" % exc)
180 sys.stderr.write(os.linesep)
181 sys.exit(1)
182
183 command = commands[cmd_name]()
184 return command.main(cmd_args)
185
186
187 # ###########################################################
188 # # Writing freeze files
189
190 class FrozenRequirement(object):
191
192 def __init__(self, name, req, editable, comments=()):
193 self.name = name
194 self.req = req
195 self.editable = editable
196 self.comments = comments
197
198 _rev_re = re.compile(r'-r(\d+)$')
199 _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
200
201 @classmethod
202 def from_dist(cls, dist, find_tags=False):
203 location = os.path.normcase(os.path.abspath(dist.location))
204 comments = []
205 from pip.vcs import vcs, get_src_requirement
206 if vcs.get_backend_name(location):
207 editable = True
208 try:
209 req = get_src_requirement(dist, location, find_tags)
210 except InstallationError as exc:
211 logger.warn(
212 "Error when trying to get requirement for VCS system %s, "
213 "falling back to uneditable format" % exc
214 )
215 req = None
216 if req is None:
217 logger.warn(
218 'Could not determine repository location of %s' % location
219 )
220 comments.append(
221 '## !! Could not determine repository location'
222 )
223 req = dist.as_requirement()
224 editable = False
225 else:
226 editable = False
227 req = dist.as_requirement()
228 specs = req.specs
229 assert len(specs) == 1 and specs[0][0] == '=='
230
231 return cls(dist.project_name, req, editable, comments)
232
233 @staticmethod
234 def egg_name(dist):
235 name = dist.egg_name()
236 match = re.search(r'-py\d\.\d$', name)
237 if match:
238 name = name[:match.start()]
239 return name
240
241 def __str__(self):
242 req = self.req
243 if self.editable:
244 req = '-e %s' % req
245 return '\n'.join(list(self.comments) + [str(req)]) + '\n'
246
247
248 if __name__ == '__main__':
249 sys.exit(main())
250
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pip/__init__.py b/pip/__init__.py
--- a/pip/__init__.py
+++ b/pip/__init__.py
@@ -149,11 +149,11 @@
sys.exit()
# the subcommand name
- cmd_name = args_else[0].lower()
+ cmd_name = args_else[0]
# all the args without the subcommand
cmd_args = args[:]
- cmd_args.remove(args_else[0].lower())
+ cmd_args.remove(args_else[0])
if cmd_name not in commands:
guess = get_similar_commands(cmd_name)
diff --git a/pip/commands/__init__.py b/pip/commands/__init__.py
--- a/pip/commands/__init__.py
+++ b/pip/commands/__init__.py
@@ -67,14 +67,14 @@
"""Command name auto-correct."""
from difflib import get_close_matches
+ name = name.lower()
+
close_commands = get_close_matches(name, commands.keys())
if close_commands:
- guess = close_commands[0]
+ return close_commands[0]
else:
- guess = False
-
- return guess
+ return False
def _sort_commands(cmddict, order):
|
{"golden_diff": "diff --git a/pip/__init__.py b/pip/__init__.py\n--- a/pip/__init__.py\n+++ b/pip/__init__.py\n@@ -149,11 +149,11 @@\n sys.exit()\n \n # the subcommand name\n- cmd_name = args_else[0].lower()\n+ cmd_name = args_else[0]\n \n # all the args without the subcommand\n cmd_args = args[:]\n- cmd_args.remove(args_else[0].lower())\n+ cmd_args.remove(args_else[0])\n \n if cmd_name not in commands:\n guess = get_similar_commands(cmd_name)\ndiff --git a/pip/commands/__init__.py b/pip/commands/__init__.py\n--- a/pip/commands/__init__.py\n+++ b/pip/commands/__init__.py\n@@ -67,14 +67,14 @@\n \"\"\"Command name auto-correct.\"\"\"\n from difflib import get_close_matches\n \n+ name = name.lower()\n+\n close_commands = get_close_matches(name, commands.keys())\n \n if close_commands:\n- guess = close_commands[0]\n+ return close_commands[0]\n else:\n- guess = False\n-\n- return guess\n+ return False\n \n \n def _sort_commands(cmddict, order):\n", "issue": "Traceback if any uppercase command entered.\nc:\\Python34\\Scripts>pip3.exe -V\npip 1.5.2 from C:\\Python34\\lib\\site-packages (python 3.4)\nWindows 7.1\n\nc:\\Python34\\Scripts>pip3.exe LIST\nTraceback (most recent call last):\n File \"C:\\Python34\\lib\\runpy.py\", line 171, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"C:\\Python34\\lib\\runpy.py\", line 86, in _run_code\n exec(code, run_globals)\n File \"c:\\Python34\\Scripts\\pip3.exe__main__.py\", line 9, in <module>\n File \"C:\\Python34\\lib\\site-packages\\pip__init__.py\", line 177, in main\n cmd_name, cmd_args = parseopts(initial_args)\n File \"C:\\Python34\\lib\\site-packages\\pip__init__.py\", line 156, in parseopts\n cmd_args.remove(args_else[0].lower())\nValueError: list.remove(x): x not in list\n\nThis was confirmed in response to my query on the main python mailing list.\n\n", "before_files": [{"content": "\"\"\"\nPackage containing all pip commands\n\"\"\"\n\n\nfrom pip.commands.bundle import BundleCommand\nfrom pip.commands.completion import CompletionCommand\nfrom pip.commands.freeze import FreezeCommand\nfrom pip.commands.help import HelpCommand\nfrom pip.commands.list import ListCommand\nfrom pip.commands.search import SearchCommand\nfrom pip.commands.show import ShowCommand\nfrom pip.commands.install import InstallCommand\nfrom pip.commands.uninstall import UninstallCommand\nfrom pip.commands.unzip import UnzipCommand\nfrom pip.commands.zip import ZipCommand\nfrom pip.commands.wheel import WheelCommand\n\n\ncommands = {\n BundleCommand.name: BundleCommand,\n CompletionCommand.name: CompletionCommand,\n FreezeCommand.name: FreezeCommand,\n HelpCommand.name: HelpCommand,\n SearchCommand.name: SearchCommand,\n ShowCommand.name: ShowCommand,\n InstallCommand.name: InstallCommand,\n UninstallCommand.name: UninstallCommand,\n UnzipCommand.name: UnzipCommand,\n ZipCommand.name: ZipCommand,\n ListCommand.name: ListCommand,\n WheelCommand.name: WheelCommand,\n}\n\n\ncommands_order = [\n InstallCommand,\n UninstallCommand,\n FreezeCommand,\n ListCommand,\n ShowCommand,\n SearchCommand,\n WheelCommand,\n ZipCommand,\n UnzipCommand,\n BundleCommand,\n HelpCommand,\n]\n\n\ndef get_summaries(ignore_hidden=True, ordered=True):\n \"\"\"Yields sorted (command name, command summary) tuples.\"\"\"\n\n if ordered:\n cmditems = _sort_commands(commands, commands_order)\n else:\n cmditems = commands.items()\n\n for name, command_class in cmditems:\n if ignore_hidden and command_class.hidden:\n continue\n\n yield (name, command_class.summary)\n\n\ndef get_similar_commands(name):\n \"\"\"Command name auto-correct.\"\"\"\n from difflib import get_close_matches\n\n close_commands = get_close_matches(name, commands.keys())\n\n if close_commands:\n guess = close_commands[0]\n else:\n guess = False\n\n return guess\n\n\ndef _sort_commands(cmddict, order):\n def keyfn(key):\n try:\n return order.index(key[1])\n except ValueError:\n # unordered items should come last\n return 0xff\n\n return sorted(cmddict.items(), key=keyfn)\n", "path": "pip/commands/__init__.py"}, {"content": "#!/usr/bin/env python\nimport os\nimport optparse\n\nimport sys\nimport re\n\nfrom pip.exceptions import InstallationError, CommandError, PipError\nfrom pip.log import logger\nfrom pip.util import get_installed_distributions, get_prog\nfrom pip.vcs import git, mercurial, subversion, bazaar # noqa\nfrom pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter\nfrom pip.commands import commands, get_summaries, get_similar_commands\n\n# This fixes a peculiarity when importing via __import__ - as we are\n# initialising the pip module, \"from pip import cmdoptions\" is recursive\n# and appears not to work properly in that situation.\nimport pip.cmdoptions\ncmdoptions = pip.cmdoptions\n\n# The version as used in the setup.py and the docs conf.py\n__version__ = \"1.6.dev1\"\n\n\ndef autocomplete():\n \"\"\"Command and option completion for the main option parser (and options)\n and its subcommands (and options).\n\n Enable by sourcing one of the completion shell scripts (bash or zsh).\n \"\"\"\n # Don't complete if user hasn't sourced bash_completion file.\n if 'PIP_AUTO_COMPLETE' not in os.environ:\n return\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n try:\n current = cwords[cword - 1]\n except IndexError:\n current = ''\n\n subcommands = [cmd for cmd, summary in get_summaries()]\n options = []\n # subcommand\n try:\n subcommand_name = [w for w in cwords if w in subcommands][0]\n except IndexError:\n subcommand_name = None\n\n parser = create_main_parser()\n # subcommand options\n if subcommand_name:\n # special case: 'help' subcommand has no options\n if subcommand_name == 'help':\n sys.exit(1)\n # special case: list locally installed dists for uninstall command\n if subcommand_name == 'uninstall' and not current.startswith('-'):\n installed = []\n lc = current.lower()\n for dist in get_installed_distributions(local_only=True):\n if dist.key.startswith(lc) and dist.key not in cwords[1:]:\n installed.append(dist.key)\n # if there are no dists installed, fall back to option completion\n if installed:\n for dist in installed:\n print(dist)\n sys.exit(1)\n\n subcommand = commands[subcommand_name]()\n options += [(opt.get_opt_string(), opt.nargs)\n for opt in subcommand.parser.option_list_all\n if opt.help != optparse.SUPPRESS_HELP]\n\n # filter out previously specified options from available options\n prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]\n options = [(x, v) for (x, v) in options if x not in prev_opts]\n # filter options by current input\n options = [(k, v) for k, v in options if k.startswith(current)]\n for option in options:\n opt_label = option[0]\n # append '=' to options which require args\n if option[1]:\n opt_label += '='\n print(opt_label)\n else:\n # show main parser options only when necessary\n if current.startswith('-') or current.startswith('--'):\n opts = [i.option_list for i in parser.option_groups]\n opts.append(parser.option_list)\n opts = (o for it in opts for o in it)\n\n subcommands += [i.get_opt_string() for i in opts\n if i.help != optparse.SUPPRESS_HELP]\n\n print(' '.join([x for x in subcommands if x.startswith(current)]))\n sys.exit(1)\n\n\ndef create_main_parser():\n parser_kw = {\n 'usage': '\\n%prog <command> [options]',\n 'add_help_option': False,\n 'formatter': UpdatingDefaultsHelpFormatter(),\n 'name': 'global',\n 'prog': get_prog(),\n }\n\n parser = ConfigOptionParser(**parser_kw)\n parser.disable_interspersed_args()\n\n pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n parser.version = 'pip %s from %s (python %s)' % (\n __version__, pip_pkg_dir, sys.version[:3])\n\n # add the general options\n gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)\n parser.add_option_group(gen_opts)\n\n parser.main = True # so the help formatter knows\n\n # create command listing for description\n command_summaries = get_summaries()\n description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]\n parser.description = '\\n'.join(description)\n\n return parser\n\n\ndef parseopts(args):\n parser = create_main_parser()\n\n # Note: parser calls disable_interspersed_args(), so the result of this\n # call is to split the initial args into the general options before the\n # subcommand and everything else.\n # For example:\n # args: ['--timeout=5', 'install', '--user', 'INITools']\n # general_options: ['--timeout==5']\n # args_else: ['install', '--user', 'INITools']\n general_options, args_else = parser.parse_args(args)\n\n # --version\n if general_options.version:\n sys.stdout.write(parser.version)\n sys.stdout.write(os.linesep)\n sys.exit()\n\n # pip || pip help -> print_help()\n if not args_else or (args_else[0] == 'help' and len(args_else) == 1):\n parser.print_help()\n sys.exit()\n\n # the subcommand name\n cmd_name = args_else[0].lower()\n\n # all the args without the subcommand\n cmd_args = args[:]\n cmd_args.remove(args_else[0].lower())\n\n if cmd_name not in commands:\n guess = get_similar_commands(cmd_name)\n\n msg = ['unknown command \"%s\"' % cmd_name]\n if guess:\n msg.append('maybe you meant \"%s\"' % guess)\n\n raise CommandError(' - '.join(msg))\n\n return cmd_name, cmd_args\n\n\ndef main(args=None):\n if args is None:\n args = sys.argv[1:]\n\n autocomplete()\n\n try:\n cmd_name, cmd_args = parseopts(args)\n except PipError as exc:\n sys.stderr.write(\"ERROR: %s\" % exc)\n sys.stderr.write(os.linesep)\n sys.exit(1)\n\n command = commands[cmd_name]()\n return command.main(cmd_args)\n\n\n# ###########################################################\n# # Writing freeze files\n\nclass FrozenRequirement(object):\n\n def __init__(self, name, req, editable, comments=()):\n self.name = name\n self.req = req\n self.editable = editable\n self.comments = comments\n\n _rev_re = re.compile(r'-r(\\d+)$')\n _date_re = re.compile(r'-(20\\d\\d\\d\\d\\d\\d)$')\n\n @classmethod\n def from_dist(cls, dist, find_tags=False):\n location = os.path.normcase(os.path.abspath(dist.location))\n comments = []\n from pip.vcs import vcs, get_src_requirement\n if vcs.get_backend_name(location):\n editable = True\n try:\n req = get_src_requirement(dist, location, find_tags)\n except InstallationError as exc:\n logger.warn(\n \"Error when trying to get requirement for VCS system %s, \"\n \"falling back to uneditable format\" % exc\n )\n req = None\n if req is None:\n logger.warn(\n 'Could not determine repository location of %s' % location\n )\n comments.append(\n '## !! Could not determine repository location'\n )\n req = dist.as_requirement()\n editable = False\n else:\n editable = False\n req = dist.as_requirement()\n specs = req.specs\n assert len(specs) == 1 and specs[0][0] == '=='\n\n return cls(dist.project_name, req, editable, comments)\n\n @staticmethod\n def egg_name(dist):\n name = dist.egg_name()\n match = re.search(r'-py\\d\\.\\d$', name)\n if match:\n name = name[:match.start()]\n return name\n\n def __str__(self):\n req = self.req\n if self.editable:\n req = '-e %s' % req\n return '\\n'.join(list(self.comments) + [str(req)]) + '\\n'\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "pip/__init__.py"}], "after_files": [{"content": "\"\"\"\nPackage containing all pip commands\n\"\"\"\n\n\nfrom pip.commands.bundle import BundleCommand\nfrom pip.commands.completion import CompletionCommand\nfrom pip.commands.freeze import FreezeCommand\nfrom pip.commands.help import HelpCommand\nfrom pip.commands.list import ListCommand\nfrom pip.commands.search import SearchCommand\nfrom pip.commands.show import ShowCommand\nfrom pip.commands.install import InstallCommand\nfrom pip.commands.uninstall import UninstallCommand\nfrom pip.commands.unzip import UnzipCommand\nfrom pip.commands.zip import ZipCommand\nfrom pip.commands.wheel import WheelCommand\n\n\ncommands = {\n BundleCommand.name: BundleCommand,\n CompletionCommand.name: CompletionCommand,\n FreezeCommand.name: FreezeCommand,\n HelpCommand.name: HelpCommand,\n SearchCommand.name: SearchCommand,\n ShowCommand.name: ShowCommand,\n InstallCommand.name: InstallCommand,\n UninstallCommand.name: UninstallCommand,\n UnzipCommand.name: UnzipCommand,\n ZipCommand.name: ZipCommand,\n ListCommand.name: ListCommand,\n WheelCommand.name: WheelCommand,\n}\n\n\ncommands_order = [\n InstallCommand,\n UninstallCommand,\n FreezeCommand,\n ListCommand,\n ShowCommand,\n SearchCommand,\n WheelCommand,\n ZipCommand,\n UnzipCommand,\n BundleCommand,\n HelpCommand,\n]\n\n\ndef get_summaries(ignore_hidden=True, ordered=True):\n \"\"\"Yields sorted (command name, command summary) tuples.\"\"\"\n\n if ordered:\n cmditems = _sort_commands(commands, commands_order)\n else:\n cmditems = commands.items()\n\n for name, command_class in cmditems:\n if ignore_hidden and command_class.hidden:\n continue\n\n yield (name, command_class.summary)\n\n\ndef get_similar_commands(name):\n \"\"\"Command name auto-correct.\"\"\"\n from difflib import get_close_matches\n\n name = name.lower()\n\n close_commands = get_close_matches(name, commands.keys())\n\n if close_commands:\n return close_commands[0]\n else:\n return False\n\n\ndef _sort_commands(cmddict, order):\n def keyfn(key):\n try:\n return order.index(key[1])\n except ValueError:\n # unordered items should come last\n return 0xff\n\n return sorted(cmddict.items(), key=keyfn)\n", "path": "pip/commands/__init__.py"}, {"content": "#!/usr/bin/env python\nimport os\nimport optparse\n\nimport sys\nimport re\n\nfrom pip.exceptions import InstallationError, CommandError, PipError\nfrom pip.log import logger\nfrom pip.util import get_installed_distributions, get_prog\nfrom pip.vcs import git, mercurial, subversion, bazaar # noqa\nfrom pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter\nfrom pip.commands import commands, get_summaries, get_similar_commands\n\n# This fixes a peculiarity when importing via __import__ - as we are\n# initialising the pip module, \"from pip import cmdoptions\" is recursive\n# and appears not to work properly in that situation.\nimport pip.cmdoptions\ncmdoptions = pip.cmdoptions\n\n# The version as used in the setup.py and the docs conf.py\n__version__ = \"1.6.dev1\"\n\n\ndef autocomplete():\n \"\"\"Command and option completion for the main option parser (and options)\n and its subcommands (and options).\n\n Enable by sourcing one of the completion shell scripts (bash or zsh).\n \"\"\"\n # Don't complete if user hasn't sourced bash_completion file.\n if 'PIP_AUTO_COMPLETE' not in os.environ:\n return\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n try:\n current = cwords[cword - 1]\n except IndexError:\n current = ''\n\n subcommands = [cmd for cmd, summary in get_summaries()]\n options = []\n # subcommand\n try:\n subcommand_name = [w for w in cwords if w in subcommands][0]\n except IndexError:\n subcommand_name = None\n\n parser = create_main_parser()\n # subcommand options\n if subcommand_name:\n # special case: 'help' subcommand has no options\n if subcommand_name == 'help':\n sys.exit(1)\n # special case: list locally installed dists for uninstall command\n if subcommand_name == 'uninstall' and not current.startswith('-'):\n installed = []\n lc = current.lower()\n for dist in get_installed_distributions(local_only=True):\n if dist.key.startswith(lc) and dist.key not in cwords[1:]:\n installed.append(dist.key)\n # if there are no dists installed, fall back to option completion\n if installed:\n for dist in installed:\n print(dist)\n sys.exit(1)\n\n subcommand = commands[subcommand_name]()\n options += [(opt.get_opt_string(), opt.nargs)\n for opt in subcommand.parser.option_list_all\n if opt.help != optparse.SUPPRESS_HELP]\n\n # filter out previously specified options from available options\n prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]\n options = [(x, v) for (x, v) in options if x not in prev_opts]\n # filter options by current input\n options = [(k, v) for k, v in options if k.startswith(current)]\n for option in options:\n opt_label = option[0]\n # append '=' to options which require args\n if option[1]:\n opt_label += '='\n print(opt_label)\n else:\n # show main parser options only when necessary\n if current.startswith('-') or current.startswith('--'):\n opts = [i.option_list for i in parser.option_groups]\n opts.append(parser.option_list)\n opts = (o for it in opts for o in it)\n\n subcommands += [i.get_opt_string() for i in opts\n if i.help != optparse.SUPPRESS_HELP]\n\n print(' '.join([x for x in subcommands if x.startswith(current)]))\n sys.exit(1)\n\n\ndef create_main_parser():\n parser_kw = {\n 'usage': '\\n%prog <command> [options]',\n 'add_help_option': False,\n 'formatter': UpdatingDefaultsHelpFormatter(),\n 'name': 'global',\n 'prog': get_prog(),\n }\n\n parser = ConfigOptionParser(**parser_kw)\n parser.disable_interspersed_args()\n\n pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n parser.version = 'pip %s from %s (python %s)' % (\n __version__, pip_pkg_dir, sys.version[:3])\n\n # add the general options\n gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)\n parser.add_option_group(gen_opts)\n\n parser.main = True # so the help formatter knows\n\n # create command listing for description\n command_summaries = get_summaries()\n description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]\n parser.description = '\\n'.join(description)\n\n return parser\n\n\ndef parseopts(args):\n parser = create_main_parser()\n\n # Note: parser calls disable_interspersed_args(), so the result of this\n # call is to split the initial args into the general options before the\n # subcommand and everything else.\n # For example:\n # args: ['--timeout=5', 'install', '--user', 'INITools']\n # general_options: ['--timeout==5']\n # args_else: ['install', '--user', 'INITools']\n general_options, args_else = parser.parse_args(args)\n\n # --version\n if general_options.version:\n sys.stdout.write(parser.version)\n sys.stdout.write(os.linesep)\n sys.exit()\n\n # pip || pip help -> print_help()\n if not args_else or (args_else[0] == 'help' and len(args_else) == 1):\n parser.print_help()\n sys.exit()\n\n # the subcommand name\n cmd_name = args_else[0]\n\n # all the args without the subcommand\n cmd_args = args[:]\n cmd_args.remove(args_else[0])\n\n if cmd_name not in commands:\n guess = get_similar_commands(cmd_name)\n\n msg = ['unknown command \"%s\"' % cmd_name]\n if guess:\n msg.append('maybe you meant \"%s\"' % guess)\n\n raise CommandError(' - '.join(msg))\n\n return cmd_name, cmd_args\n\n\ndef main(args=None):\n if args is None:\n args = sys.argv[1:]\n\n autocomplete()\n\n try:\n cmd_name, cmd_args = parseopts(args)\n except PipError as exc:\n sys.stderr.write(\"ERROR: %s\" % exc)\n sys.stderr.write(os.linesep)\n sys.exit(1)\n\n command = commands[cmd_name]()\n return command.main(cmd_args)\n\n\n# ###########################################################\n# # Writing freeze files\n\nclass FrozenRequirement(object):\n\n def __init__(self, name, req, editable, comments=()):\n self.name = name\n self.req = req\n self.editable = editable\n self.comments = comments\n\n _rev_re = re.compile(r'-r(\\d+)$')\n _date_re = re.compile(r'-(20\\d\\d\\d\\d\\d\\d)$')\n\n @classmethod\n def from_dist(cls, dist, find_tags=False):\n location = os.path.normcase(os.path.abspath(dist.location))\n comments = []\n from pip.vcs import vcs, get_src_requirement\n if vcs.get_backend_name(location):\n editable = True\n try:\n req = get_src_requirement(dist, location, find_tags)\n except InstallationError as exc:\n logger.warn(\n \"Error when trying to get requirement for VCS system %s, \"\n \"falling back to uneditable format\" % exc\n )\n req = None\n if req is None:\n logger.warn(\n 'Could not determine repository location of %s' % location\n )\n comments.append(\n '## !! Could not determine repository location'\n )\n req = dist.as_requirement()\n editable = False\n else:\n editable = False\n req = dist.as_requirement()\n specs = req.specs\n assert len(specs) == 1 and specs[0][0] == '=='\n\n return cls(dist.project_name, req, editable, comments)\n\n @staticmethod\n def egg_name(dist):\n name = dist.egg_name()\n match = re.search(r'-py\\d\\.\\d$', name)\n if match:\n name = name[:match.start()]\n return name\n\n def __str__(self):\n req = self.req\n if self.editable:\n req = '-e %s' % req\n return '\\n'.join(list(self.comments) + [str(req)]) + '\\n'\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "pip/__init__.py"}]}
| 3,762 | 294 |
gh_patches_debug_31811
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__numpyro-342
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document behavior of primitive statements in README
Hello! I'll preface this issue by stating that I'm new to numpyro, so there's a significant chance that I'm not using `numpyro.sample` correctly.
When building models, I want to test draws from my random variables to make sure I'm passing parameters correctly, getting the kwargs correct, etc.; I'll lift a line out of a model function, [like this one](https://github.com/pyro-ppl/numpyro/blob/master/examples/baseball.py#L111), and run it in an IPython window to see if I get an error. It seems like I'm unable to do this by itself in an IPython console.
## Minimal example
```
In [1]: import numpyro
In [2]: import numpyro.distributions as d
In [3]: numpyro.__version__
Out[3]: '0.2.0'
In [4]: numpyro.sample("x", d.Normal(0, 1))
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-4-21ce96e72ad0> in <module>
----> 1 numpyro.sample("x", d.Normal(0, 1))
~/miniconda3/envs/fusion/lib/python3.7/site-packages/numpyro/primitives.py in sample(name, fn, obs, sample_shape)
45 # if there are no active Messengers, we just draw a sample and return it as expected:
46 if not _PYRO_STACK:
---> 47 return fn(sample_shape=sample_shape)
48
49 # Otherwise, we initialize a message...
~/miniconda3/envs/fusion/lib/python3.7/site-packages/numpyro/distributions/distribution.py in __call__(self, *args, **kwargs)
161
162 def __call__(self, *args, **kwargs):
--> 163 key = kwargs.pop('random_state')
164 sample_intermediates = kwargs.pop('sample_intermediates', False)
165 if sample_intermediates:
KeyError: 'random_state'
```
## Thoughts
I can see that in [numpyro.sample](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/primitives.py#L47), we return `fn(sample_shape=sample_shape)`, which in this case attempts to call `dist.Normal(0, 1)(sample_shape=sample_shape)`. However, looking at [distributions.py](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/distributions/distribution.py#L163), it seems that `Distribution.sample` expects a `random_state` kwarg that isn't getting passed.
When I do the following, everything is fine:
```
In [5]: import jax.random as random
In [6]: d.Normal(0, 1).sample(random.PRNGKey(12))
/Users/chtu8001/miniconda3/envs/fusion/lib/python3.7/site-packages/jax/lib/xla_bridge.py:114: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Out[6]: DeviceArray(-0.5135873, dtype=float32)
In [7]: d.Normal(0, 1).sample(random.PRNGKey(12), sample_shape=(4, ))
Out[7]:
DeviceArray([-1.31179953, -0.70821768, 0.18907626, -1.09385514],
dtype=float32)
```
so I expect that we can't use `numpyro.sample` outside a model (although it's not totally clear to me how that is defined, something to do with Messengers and PYRO_STACK). I'm wondering if this is by design and I should just use the second, working example, or if I'm misunderstanding how `numpyro.sample` should be used.
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpyro/primitives.py`
Content:
```
1 from collections import namedtuple
2 import functools
3
4 import jax
5 from jax import lax
6
7 import numpyro
8 from numpyro.distributions.discrete import PRNGIdentity
9
10 _PYRO_STACK = []
11
12
13 CondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])
14
15
16 def apply_stack(msg):
17 pointer = 0
18 for pointer, handler in enumerate(reversed(_PYRO_STACK)):
19 handler.process_message(msg)
20 # When a Messenger sets the "stop" field of a message,
21 # it prevents any Messengers above it on the stack from being applied.
22 if msg.get("stop"):
23 break
24 if msg['value'] is None:
25 if msg['type'] == 'sample':
26 msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],
27 sample_intermediates=True,
28 **msg['kwargs'])
29 else:
30 msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])
31
32 # A Messenger that sets msg["stop"] == True also prevents application
33 # of postprocess_message by Messengers above it on the stack
34 # via the pointer variable from the process_message loop
35 for handler in _PYRO_STACK[-pointer-1:]:
36 handler.postprocess_message(msg)
37 return msg
38
39
40 class Messenger(object):
41 def __init__(self, fn=None):
42 self.fn = fn
43 functools.update_wrapper(self, fn, updated=[])
44
45 def __enter__(self):
46 _PYRO_STACK.append(self)
47
48 def __exit__(self, *args, **kwargs):
49 assert _PYRO_STACK[-1] is self
50 _PYRO_STACK.pop()
51
52 def process_message(self, msg):
53 pass
54
55 def postprocess_message(self, msg):
56 pass
57
58 def __call__(self, *args, **kwargs):
59 with self:
60 return self.fn(*args, **kwargs)
61
62
63 def sample(name, fn, obs=None, sample_shape=()):
64 """
65 Returns a random sample from the stochastic function `fn`. This can have
66 additional side effects when wrapped inside effect handlers like
67 :class:`~numpyro.handlers.substitute`.
68
69 :param str name: name of the sample site
70 :param fn: Python callable
71 :param numpy.ndarray obs: observed value
72 :param sample_shape: Shape of samples to be drawn.
73 :return: sample from the stochastic `fn`.
74 """
75 # if there are no active Messengers, we just draw a sample and return it as expected:
76 if not _PYRO_STACK:
77 return fn(sample_shape=sample_shape)
78
79 # Otherwise, we initialize a message...
80 initial_msg = {
81 'type': 'sample',
82 'name': name,
83 'fn': fn,
84 'args': (),
85 'kwargs': {'sample_shape': sample_shape},
86 'value': obs,
87 'scale': 1.0,
88 'is_observed': obs is not None,
89 'intermediates': [],
90 'cond_indep_stack': [],
91 }
92
93 # ...and use apply_stack to send it to the Messengers
94 msg = apply_stack(initial_msg)
95 return msg['value']
96
97
98 def identity(x, *args, **kwargs):
99 return x
100
101
102 def param(name, init_value=None, **kwargs):
103 """
104 Annotate the given site as an optimizable parameter for use with
105 :mod:`jax.experimental.optimizers`. For an example of how `param` statements
106 can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.
107
108 :param str name: name of site.
109 :param numpy.ndarray init_value: initial value specified by the user. Note that
110 the onus of using this to initialize the optimizer is on the user /
111 inference algorithm, since there is no global parameter store in
112 NumPyro.
113 :return: value for the parameter. Unless wrapped inside a
114 handler like :class:`~numpyro.handlers.substitute`, this will simply
115 return the initial value.
116 """
117 # if there are no active Messengers, we just draw a sample and return it as expected:
118 if not _PYRO_STACK:
119 return init_value
120
121 # Otherwise, we initialize a message...
122 initial_msg = {
123 'type': 'param',
124 'name': name,
125 'fn': identity,
126 'args': (init_value,),
127 'kwargs': kwargs,
128 'value': None,
129 'scale': 1.0,
130 'cond_indep_stack': [],
131 }
132
133 # ...and use apply_stack to send it to the Messengers
134 msg = apply_stack(initial_msg)
135 return msg['value']
136
137
138 def module(name, nn, input_shape=None):
139 """
140 Declare a :mod:`~jax.experimental.stax` style neural network inside a
141 model so that its parameters are registered for optimization via
142 :func:`~numpyro.primitives.param` statements.
143
144 :param str name: name of the module to be registered.
145 :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`
146 constructor function.
147 :param tuple input_shape: shape of the input taken by the
148 neural network.
149 :return: a `apply_fn` with bound parameters that takes an array
150 as an input and returns the neural network transformed output
151 array.
152 """
153 module_key = name + '$params'
154 nn_init, nn_apply = nn
155 nn_params = param(module_key)
156 if nn_params is None:
157 if input_shape is None:
158 raise ValueError('Valid value for `input_size` needed to initialize.')
159 rng = numpyro.sample(name + '$rng', PRNGIdentity())
160 _, nn_params = nn_init(rng, input_shape)
161 param(module_key, nn_params)
162 return jax.partial(nn_apply, nn_params)
163
164
165 class plate(Messenger):
166 """
167 Construct for annotating conditionally independent variables. Within a
168 `plate` context manager, `sample` sites will be automatically broadcasted to
169 the size of the plate. Additionally, a scale factor might be applied by
170 certain inference algorithms if `subsample_size` is specified.
171
172 :param str name: Name of the plate.
173 :param int size: Size of the plate.
174 :param int subsample_size: Optional argument denoting the size of the mini-batch.
175 This can be used to apply a scaling factor by inference algorithms. e.g.
176 when computing ELBO using a mini-batch.
177 :param int dim: Optional argument to specify which dimension in the tensor
178 is used as the plate dim. If `None` (default), the leftmost available dim
179 is allocated.
180 """
181 def __init__(self, name, size, subsample_size=None, dim=None):
182 self.name = name
183 self.size = size
184 self.subsample_size = size if subsample_size is None else subsample_size
185 if dim is not None and dim >= 0:
186 raise ValueError('dim arg must be negative.')
187 self.dim = dim
188 self._validate_and_set_dim()
189 super(plate, self).__init__()
190
191 def _validate_and_set_dim(self):
192 msg = {
193 'type': 'plate',
194 'is_observed': False,
195 'fn': identity,
196 'name': self.name,
197 'args': (None,),
198 'kwargs': {},
199 'value': None,
200 'scale': 1.0,
201 'cond_indep_stack': [],
202 }
203 apply_stack(msg)
204 cond_indep_stack = msg['cond_indep_stack']
205 occupied_dims = {f.dim for f in cond_indep_stack}
206 dim = -1
207 while True:
208 if dim not in occupied_dims:
209 break
210 dim -= 1
211 if self.dim is None:
212 self.dim = dim
213 else:
214 assert self.dim not in occupied_dims
215
216 @staticmethod
217 def _get_batch_shape(cond_indep_stack):
218 n_dims = max(-f.dim for f in cond_indep_stack)
219 batch_shape = [1] * n_dims
220 for f in cond_indep_stack:
221 batch_shape[f.dim] = f.size
222 return tuple(batch_shape)
223
224 def process_message(self, msg):
225 cond_indep_stack = msg['cond_indep_stack']
226 frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)
227 cond_indep_stack.append(frame)
228 batch_shape = self._get_batch_shape(cond_indep_stack)
229 if 'sample_shape' in msg['kwargs']:
230 batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)
231 msg['kwargs']['sample_shape'] = batch_shape
232 msg['scale'] = msg['scale'] * self.size / self.subsample_size
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numpyro/primitives.py b/numpyro/primitives.py
--- a/numpyro/primitives.py
+++ b/numpyro/primitives.py
@@ -60,21 +60,28 @@
return self.fn(*args, **kwargs)
-def sample(name, fn, obs=None, sample_shape=()):
+def sample(name, fn, obs=None, random_state=None, sample_shape=()):
"""
Returns a random sample from the stochastic function `fn`. This can have
additional side effects when wrapped inside effect handlers like
:class:`~numpyro.handlers.substitute`.
+ .. note::
+ By design, `sample` primitive is meant to be used inside a NumPyro model.
+ Then :class:`~numpyro.handlers.seed` handler is used to inject a random
+ state to `fn`. In those situations, `random_state` keyword will take no
+ effect.
+
:param str name: name of the sample site
:param fn: Python callable
:param numpy.ndarray obs: observed value
+ :param jax.random.PRNGKey random_state: an optional random key for `fn`.
:param sample_shape: Shape of samples to be drawn.
:return: sample from the stochastic `fn`.
"""
# if there are no active Messengers, we just draw a sample and return it as expected:
if not _PYRO_STACK:
- return fn(sample_shape=sample_shape)
+ return fn(random_state=random_state, sample_shape=sample_shape)
# Otherwise, we initialize a message...
initial_msg = {
@@ -82,7 +89,7 @@
'name': name,
'fn': fn,
'args': (),
- 'kwargs': {'sample_shape': sample_shape},
+ 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},
'value': obs,
'scale': 1.0,
'is_observed': obs is not None,
|
{"golden_diff": "diff --git a/numpyro/primitives.py b/numpyro/primitives.py\n--- a/numpyro/primitives.py\n+++ b/numpyro/primitives.py\n@@ -60,21 +60,28 @@\n return self.fn(*args, **kwargs)\n \n \n-def sample(name, fn, obs=None, sample_shape=()):\n+def sample(name, fn, obs=None, random_state=None, sample_shape=()):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n \n+ .. note::\n+ By design, `sample` primitive is meant to be used inside a NumPyro model.\n+ Then :class:`~numpyro.handlers.seed` handler is used to inject a random\n+ state to `fn`. In those situations, `random_state` keyword will take no\n+ effect.\n+\n :param str name: name of the sample site\n :param fn: Python callable\n :param numpy.ndarray obs: observed value\n+ :param jax.random.PRNGKey random_state: an optional random key for `fn`.\n :param sample_shape: Shape of samples to be drawn.\n :return: sample from the stochastic `fn`.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n- return fn(sample_shape=sample_shape)\n+ return fn(random_state=random_state, sample_shape=sample_shape)\n \n # Otherwise, we initialize a message...\n initial_msg = {\n@@ -82,7 +89,7 @@\n 'name': name,\n 'fn': fn,\n 'args': (),\n- 'kwargs': {'sample_shape': sample_shape},\n+ 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},\n 'value': obs,\n 'scale': 1.0,\n 'is_observed': obs is not None,\n", "issue": "Document behavior of primitive statements in README\nHello! I'll preface this issue by stating that I'm new to numpyro, so there's a significant chance that I'm not using `numpyro.sample` correctly.\r\n\r\nWhen building models, I want to test draws from my random variables to make sure I'm passing parameters correctly, getting the kwargs correct, etc.; I'll lift a line out of a model function, [like this one](https://github.com/pyro-ppl/numpyro/blob/master/examples/baseball.py#L111), and run it in an IPython window to see if I get an error. It seems like I'm unable to do this by itself in an IPython console.\r\n\r\n## Minimal example\r\n\r\n```\r\nIn [1]: import numpyro\r\n\r\nIn [2]: import numpyro.distributions as d\r\n\r\nIn [3]: numpyro.__version__\r\nOut[3]: '0.2.0'\r\n\r\nIn [4]: numpyro.sample(\"x\", d.Normal(0, 1))\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-4-21ce96e72ad0> in <module>\r\n----> 1 numpyro.sample(\"x\", d.Normal(0, 1))\r\n\r\n~/miniconda3/envs/fusion/lib/python3.7/site-packages/numpyro/primitives.py in sample(name, fn, obs, sample_shape)\r\n 45 # if there are no active Messengers, we just draw a sample and return it as expected:\r\n 46 if not _PYRO_STACK:\r\n---> 47 return fn(sample_shape=sample_shape)\r\n 48\r\n 49 # Otherwise, we initialize a message...\r\n\r\n~/miniconda3/envs/fusion/lib/python3.7/site-packages/numpyro/distributions/distribution.py in __call__(self, *args, **kwargs)\r\n 161\r\n 162 def __call__(self, *args, **kwargs):\r\n--> 163 key = kwargs.pop('random_state')\r\n 164 sample_intermediates = kwargs.pop('sample_intermediates', False)\r\n 165 if sample_intermediates:\r\n\r\nKeyError: 'random_state'\r\n```\r\n\r\n## Thoughts\r\nI can see that in [numpyro.sample](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/primitives.py#L47), we return `fn(sample_shape=sample_shape)`, which in this case attempts to call `dist.Normal(0, 1)(sample_shape=sample_shape)`. However, looking at [distributions.py](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/distributions/distribution.py#L163), it seems that `Distribution.sample` expects a `random_state` kwarg that isn't getting passed.\r\n\r\nWhen I do the following, everything is fine:\r\n\r\n```\r\nIn [5]: import jax.random as random\r\n\r\nIn [6]: d.Normal(0, 1).sample(random.PRNGKey(12))\r\n/Users/chtu8001/miniconda3/envs/fusion/lib/python3.7/site-packages/jax/lib/xla_bridge.py:114: UserWarning: No GPU/TPU found, falling back to CPU.\r\n warnings.warn('No GPU/TPU found, falling back to CPU.')\r\nOut[6]: DeviceArray(-0.5135873, dtype=float32)\r\n\r\nIn [7]: d.Normal(0, 1).sample(random.PRNGKey(12), sample_shape=(4, ))\r\nOut[7]:\r\nDeviceArray([-1.31179953, -0.70821768, 0.18907626, -1.09385514],\r\n dtype=float32)\r\n```\r\n\r\nso I expect that we can't use `numpyro.sample` outside a model (although it's not totally clear to me how that is defined, something to do with Messengers and PYRO_STACK). I'm wondering if this is by design and I should just use the second, working example, or if I'm misunderstanding how `numpyro.sample` should be used.\r\n\r\nThanks!\n", "before_files": [{"content": "from collections import namedtuple\nimport functools\n\nimport jax\nfrom jax import lax\n\nimport numpyro\nfrom numpyro.distributions.discrete import PRNGIdentity\n\n_PYRO_STACK = []\n\n\nCondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])\n\n\ndef apply_stack(msg):\n pointer = 0\n for pointer, handler in enumerate(reversed(_PYRO_STACK)):\n handler.process_message(msg)\n # When a Messenger sets the \"stop\" field of a message,\n # it prevents any Messengers above it on the stack from being applied.\n if msg.get(\"stop\"):\n break\n if msg['value'] is None:\n if msg['type'] == 'sample':\n msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],\n sample_intermediates=True,\n **msg['kwargs'])\n else:\n msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])\n\n # A Messenger that sets msg[\"stop\"] == True also prevents application\n # of postprocess_message by Messengers above it on the stack\n # via the pointer variable from the process_message loop\n for handler in _PYRO_STACK[-pointer-1:]:\n handler.postprocess_message(msg)\n return msg\n\n\nclass Messenger(object):\n def __init__(self, fn=None):\n self.fn = fn\n functools.update_wrapper(self, fn, updated=[])\n\n def __enter__(self):\n _PYRO_STACK.append(self)\n\n def __exit__(self, *args, **kwargs):\n assert _PYRO_STACK[-1] is self\n _PYRO_STACK.pop()\n\n def process_message(self, msg):\n pass\n\n def postprocess_message(self, msg):\n pass\n\n def __call__(self, *args, **kwargs):\n with self:\n return self.fn(*args, **kwargs)\n\n\ndef sample(name, fn, obs=None, sample_shape=()):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n\n :param str name: name of the sample site\n :param fn: Python callable\n :param numpy.ndarray obs: observed value\n :param sample_shape: Shape of samples to be drawn.\n :return: sample from the stochastic `fn`.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return fn(sample_shape=sample_shape)\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'sample',\n 'name': name,\n 'fn': fn,\n 'args': (),\n 'kwargs': {'sample_shape': sample_shape},\n 'value': obs,\n 'scale': 1.0,\n 'is_observed': obs is not None,\n 'intermediates': [],\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef identity(x, *args, **kwargs):\n return x\n\n\ndef param(name, init_value=None, **kwargs):\n \"\"\"\n Annotate the given site as an optimizable parameter for use with\n :mod:`jax.experimental.optimizers`. For an example of how `param` statements\n can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.\n\n :param str name: name of site.\n :param numpy.ndarray init_value: initial value specified by the user. Note that\n the onus of using this to initialize the optimizer is on the user /\n inference algorithm, since there is no global parameter store in\n NumPyro.\n :return: value for the parameter. Unless wrapped inside a\n handler like :class:`~numpyro.handlers.substitute`, this will simply\n return the initial value.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return init_value\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'param',\n 'name': name,\n 'fn': identity,\n 'args': (init_value,),\n 'kwargs': kwargs,\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef module(name, nn, input_shape=None):\n \"\"\"\n Declare a :mod:`~jax.experimental.stax` style neural network inside a\n model so that its parameters are registered for optimization via\n :func:`~numpyro.primitives.param` statements.\n\n :param str name: name of the module to be registered.\n :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`\n constructor function.\n :param tuple input_shape: shape of the input taken by the\n neural network.\n :return: a `apply_fn` with bound parameters that takes an array\n as an input and returns the neural network transformed output\n array.\n \"\"\"\n module_key = name + '$params'\n nn_init, nn_apply = nn\n nn_params = param(module_key)\n if nn_params is None:\n if input_shape is None:\n raise ValueError('Valid value for `input_size` needed to initialize.')\n rng = numpyro.sample(name + '$rng', PRNGIdentity())\n _, nn_params = nn_init(rng, input_shape)\n param(module_key, nn_params)\n return jax.partial(nn_apply, nn_params)\n\n\nclass plate(Messenger):\n \"\"\"\n Construct for annotating conditionally independent variables. Within a\n `plate` context manager, `sample` sites will be automatically broadcasted to\n the size of the plate. Additionally, a scale factor might be applied by\n certain inference algorithms if `subsample_size` is specified.\n\n :param str name: Name of the plate.\n :param int size: Size of the plate.\n :param int subsample_size: Optional argument denoting the size of the mini-batch.\n This can be used to apply a scaling factor by inference algorithms. e.g.\n when computing ELBO using a mini-batch.\n :param int dim: Optional argument to specify which dimension in the tensor\n is used as the plate dim. If `None` (default), the leftmost available dim\n is allocated.\n \"\"\"\n def __init__(self, name, size, subsample_size=None, dim=None):\n self.name = name\n self.size = size\n self.subsample_size = size if subsample_size is None else subsample_size\n if dim is not None and dim >= 0:\n raise ValueError('dim arg must be negative.')\n self.dim = dim\n self._validate_and_set_dim()\n super(plate, self).__init__()\n\n def _validate_and_set_dim(self):\n msg = {\n 'type': 'plate',\n 'is_observed': False,\n 'fn': identity,\n 'name': self.name,\n 'args': (None,),\n 'kwargs': {},\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n apply_stack(msg)\n cond_indep_stack = msg['cond_indep_stack']\n occupied_dims = {f.dim for f in cond_indep_stack}\n dim = -1\n while True:\n if dim not in occupied_dims:\n break\n dim -= 1\n if self.dim is None:\n self.dim = dim\n else:\n assert self.dim not in occupied_dims\n\n @staticmethod\n def _get_batch_shape(cond_indep_stack):\n n_dims = max(-f.dim for f in cond_indep_stack)\n batch_shape = [1] * n_dims\n for f in cond_indep_stack:\n batch_shape[f.dim] = f.size\n return tuple(batch_shape)\n\n def process_message(self, msg):\n cond_indep_stack = msg['cond_indep_stack']\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n batch_shape = self._get_batch_shape(cond_indep_stack)\n if 'sample_shape' in msg['kwargs']:\n batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)\n msg['kwargs']['sample_shape'] = batch_shape\n msg['scale'] = msg['scale'] * self.size / self.subsample_size\n", "path": "numpyro/primitives.py"}], "after_files": [{"content": "from collections import namedtuple\nimport functools\n\nimport jax\nfrom jax import lax\n\nimport numpyro\nfrom numpyro.distributions.discrete import PRNGIdentity\n\n_PYRO_STACK = []\n\n\nCondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])\n\n\ndef apply_stack(msg):\n pointer = 0\n for pointer, handler in enumerate(reversed(_PYRO_STACK)):\n handler.process_message(msg)\n # When a Messenger sets the \"stop\" field of a message,\n # it prevents any Messengers above it on the stack from being applied.\n if msg.get(\"stop\"):\n break\n if msg['value'] is None:\n if msg['type'] == 'sample':\n msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],\n sample_intermediates=True,\n **msg['kwargs'])\n else:\n msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])\n\n # A Messenger that sets msg[\"stop\"] == True also prevents application\n # of postprocess_message by Messengers above it on the stack\n # via the pointer variable from the process_message loop\n for handler in _PYRO_STACK[-pointer-1:]:\n handler.postprocess_message(msg)\n return msg\n\n\nclass Messenger(object):\n def __init__(self, fn=None):\n self.fn = fn\n functools.update_wrapper(self, fn, updated=[])\n\n def __enter__(self):\n _PYRO_STACK.append(self)\n\n def __exit__(self, *args, **kwargs):\n assert _PYRO_STACK[-1] is self\n _PYRO_STACK.pop()\n\n def process_message(self, msg):\n pass\n\n def postprocess_message(self, msg):\n pass\n\n def __call__(self, *args, **kwargs):\n with self:\n return self.fn(*args, **kwargs)\n\n\ndef sample(name, fn, obs=None, random_state=None, sample_shape=()):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n\n .. note::\n By design, `sample` primitive is meant to be used inside a NumPyro model.\n Then :class:`~numpyro.handlers.seed` handler is used to inject a random\n state to `fn`. In those situations, `random_state` keyword will take no\n effect.\n\n :param str name: name of the sample site\n :param fn: Python callable\n :param numpy.ndarray obs: observed value\n :param jax.random.PRNGKey random_state: an optional random key for `fn`.\n :param sample_shape: Shape of samples to be drawn.\n :return: sample from the stochastic `fn`.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return fn(random_state=random_state, sample_shape=sample_shape)\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'sample',\n 'name': name,\n 'fn': fn,\n 'args': (),\n 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},\n 'value': obs,\n 'scale': 1.0,\n 'is_observed': obs is not None,\n 'intermediates': [],\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef identity(x, *args, **kwargs):\n return x\n\n\ndef param(name, init_value=None, **kwargs):\n \"\"\"\n Annotate the given site as an optimizable parameter for use with\n :mod:`jax.experimental.optimizers`. For an example of how `param` statements\n can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.\n\n :param str name: name of site.\n :param numpy.ndarray init_value: initial value specified by the user. Note that\n the onus of using this to initialize the optimizer is on the user /\n inference algorithm, since there is no global parameter store in\n NumPyro.\n :return: value for the parameter. Unless wrapped inside a\n handler like :class:`~numpyro.handlers.substitute`, this will simply\n return the initial value.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return init_value\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'param',\n 'name': name,\n 'fn': identity,\n 'args': (init_value,),\n 'kwargs': kwargs,\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef module(name, nn, input_shape=None):\n \"\"\"\n Declare a :mod:`~jax.experimental.stax` style neural network inside a\n model so that its parameters are registered for optimization via\n :func:`~numpyro.primitives.param` statements.\n\n :param str name: name of the module to be registered.\n :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`\n constructor function.\n :param tuple input_shape: shape of the input taken by the\n neural network.\n :return: a `apply_fn` with bound parameters that takes an array\n as an input and returns the neural network transformed output\n array.\n \"\"\"\n module_key = name + '$params'\n nn_init, nn_apply = nn\n nn_params = param(module_key)\n if nn_params is None:\n if input_shape is None:\n raise ValueError('Valid value for `input_size` needed to initialize.')\n rng = numpyro.sample(name + '$rng', PRNGIdentity())\n _, nn_params = nn_init(rng, input_shape)\n param(module_key, nn_params)\n return jax.partial(nn_apply, nn_params)\n\n\nclass plate(Messenger):\n \"\"\"\n Construct for annotating conditionally independent variables. Within a\n `plate` context manager, `sample` sites will be automatically broadcasted to\n the size of the plate. Additionally, a scale factor might be applied by\n certain inference algorithms if `subsample_size` is specified.\n\n :param str name: Name of the plate.\n :param int size: Size of the plate.\n :param int subsample_size: Optional argument denoting the size of the mini-batch.\n This can be used to apply a scaling factor by inference algorithms. e.g.\n when computing ELBO using a mini-batch.\n :param int dim: Optional argument to specify which dimension in the tensor\n is used as the plate dim. If `None` (default), the leftmost available dim\n is allocated.\n \"\"\"\n def __init__(self, name, size, subsample_size=None, dim=None):\n self.name = name\n self.size = size\n self.subsample_size = size if subsample_size is None else subsample_size\n if dim is not None and dim >= 0:\n raise ValueError('dim arg must be negative.')\n self.dim = dim\n self._validate_and_set_dim()\n super(plate, self).__init__()\n\n def _validate_and_set_dim(self):\n msg = {\n 'type': 'plate',\n 'is_observed': False,\n 'fn': identity,\n 'name': self.name,\n 'args': (None,),\n 'kwargs': {},\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n apply_stack(msg)\n cond_indep_stack = msg['cond_indep_stack']\n occupied_dims = {f.dim for f in cond_indep_stack}\n dim = -1\n while True:\n if dim not in occupied_dims:\n break\n dim -= 1\n if self.dim is None:\n self.dim = dim\n else:\n assert self.dim not in occupied_dims\n\n @staticmethod\n def _get_batch_shape(cond_indep_stack):\n n_dims = max(-f.dim for f in cond_indep_stack)\n batch_shape = [1] * n_dims\n for f in cond_indep_stack:\n batch_shape[f.dim] = f.size\n return tuple(batch_shape)\n\n def process_message(self, msg):\n cond_indep_stack = msg['cond_indep_stack']\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n batch_shape = self._get_batch_shape(cond_indep_stack)\n if 'sample_shape' in msg['kwargs']:\n batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)\n msg['kwargs']['sample_shape'] = batch_shape\n msg['scale'] = msg['scale'] * self.size / self.subsample_size\n", "path": "numpyro/primitives.py"}]}
| 3,664 | 434 |
gh_patches_debug_17140
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-nlp-997
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a temperature argument to the Contrastive Sampler
Similar to our other samplers, we should add the temperature argument to the `Contrastive Sampler` along with the necessary tests.
The approach for accomplishing the same can be seen in #951.
I would like to take this up. Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras_nlp/samplers/contrastive_sampler.py`
Content:
```
1 # Copyright 2023 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Contrastive Sampler."""
15
16 import tensorflow as tf
17 from tensorflow import keras
18 from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
19
20 from keras_nlp.api_export import keras_nlp_export
21 from keras_nlp.samplers.sampler import Sampler
22 from keras_nlp.samplers.sampler import call_args_docstring
23 from keras_nlp.utils.python_utils import format_docstring
24
25
26 @format_docstring(call_args=call_args_docstring)
27 @keras_nlp_export("keras_nlp.samplers.ContrastiveSampler")
28 class ContrastiveSampler(Sampler):
29 """Contrastive Sampler class.
30
31 This sampler implements contrastive search algorithm. In short, the sampler
32 chooses the token having the max "score" as the next token. The "score" is
33 a weighted sum between token's probability and max similarity against
34 previous tokens. By using this joint score, contrastive sampler reduces the
35 behavior of duplicating seen tokens.
36
37 Args:
38 k: int, the `k` value of top-k. Next token will be chosen from k tokens.
39 alpha: float, the weight of minus max similarity in joint score
40 computation. The larger the value of `alpha`, the score relies more
41 on the similarity than the token probability.
42 seed: int, defaults to None. The random seed.
43
44 Call Args:
45 {{call_args}}
46
47 Examples:
48 ```python
49 # Use a simple alphabet of lowercase characters to [0, 26).
50 int_lookup = {i: chr(i + ord("a")) for i in range(26)}
51 char_lookup = {v: k for k, v in int_lookup.items()}
52 batch_size, length, vocab_size = 1, 12, len(int_lookup)
53 hidden_size = 5
54 index = 5
55
56 def next(prompt, cache, index):
57 prompt_batch_size = tf.shape(prompt)[0]
58 hidden_states = tf.ones((prompt_batch_size, hidden_size))
59 # A uniform distribution over our alphabet.
60 logits = tf.ones((prompt_batch_size, vocab_size))
61 return logits, hidden_states, cache
62
63 output = keras_nlp.samplers.ContrastiveSampler()(
64 next=next,
65 prompt=tf.fill((batch_size, length), char_lookup["z"]),
66 index=index,
67 hidden_states=tf.ones([batch_size, index, hidden_size]),
68 )
69 print(["".join([int_lookup[i] for i in s]) for s in output.numpy()])
70 # >>> "zzzzzeeeeeee"
71 ```
72 """
73
74 def __init__(
75 self,
76 k=5,
77 alpha=0.6,
78 seed=None,
79 ):
80 super().__init__()
81 self.k = k
82 self.alpha = alpha
83 self.seed = seed
84
85 def __call__(
86 self,
87 next,
88 prompt,
89 cache=None,
90 index=0,
91 mask=None,
92 end_token_id=None,
93 hidden_states=None,
94 ):
95 if hidden_states is None:
96 raise ValueError(
97 "`ContrastiveSampler` requires passing a `hidden_states`, but"
98 "received `None`."
99 )
100 batch_size, max_length = tf.shape(prompt)[0], tf.shape(prompt)[1]
101 # Make sure max length and start index are the same dtype.
102 index = tf.cast(index, max_length.dtype)
103
104 def create_beams(x):
105 """Add initial beam state."""
106 x = tf.repeat(x, self.k, axis=0)
107 flat_shape = [batch_size * self.k] + x.shape.as_list()[1:]
108 return tf.reshape(x, shape=flat_shape)
109
110 def flatten_beams(x):
111 """Combine the beam dim and batch dim."""
112 flat_shape = [batch_size * self.k] + x.shape.as_list()[2:]
113 return tf.reshape(x, shape=flat_shape)
114
115 def unflatten_beams(x):
116 """Separate the beam dim and batch dim."""
117 unflat_shape = [batch_size, self.k] + x.shape.as_list()[1:]
118 return tf.reshape(x, shape=unflat_shape)
119
120 mask = tf.zeros_like(prompt, dtype=tf.bool) if mask is None else mask
121 # Compute initial logits.
122 logits, _, cache = next(prompt, cache, index)
123 # `tf.while_loop` will not accept `None` as a value for `loop_vars`.
124 cache = () if cache is None else cache
125
126 def cond(prompt, cache, index, logits, hidden_states):
127 if end_token_id is None:
128 return True
129 # Stop if all sequences have produced a *new* end_token_id.
130 end_tokens = (prompt == end_token_id) & (~mask)
131 prompt_done = tf.reduce_any(end_tokens, axis=-1)
132 return not tf.reduce_all(prompt_done)
133
134 def body(prompt, cache, index, logits, hidden_states):
135 # Compute the softmax distribution for the next token.
136 probabilities = keras.activations.softmax(logits)
137
138 # Replicate for `self.k` times to find the best token in top-k
139 # candidates.
140 prompt_beams = create_beams(prompt)
141 mask_beams = create_beams(mask)
142 hidden_states_beams = create_beams(hidden_states)
143 cache_beams = tf.nest.map_structure(create_beams, cache)
144
145 # Get top-k candidate tokens and their probabilities.
146 top_k_probabilities, top_k_indices = tf.math.top_k(
147 probabilities, k=self.k, sorted=False
148 )
149 next_token_probabilities = flatten_beams(top_k_probabilities)
150 next_token = flatten_beams(top_k_indices)
151 next_token = tf.cast(next_token, prompt.dtype)
152 next_token = tf.where(
153 mask_beams[:, index], prompt_beams[:, index], next_token
154 )
155
156 # Update the prompt with the next token.
157 next_token = next_token[:, tf.newaxis]
158 prompt_beams = dynamic_update_slice(
159 prompt_beams, next_token, [0, index]
160 )
161
162 # Compute the logits and hidden states for top-k candidate tokens.
163 next_logits, next_hidden_states_beams, cache_beams = next(
164 prompt_beams, cache_beams, index + 1
165 )
166
167 # Compute the max similarity score for top-k candidate tokens
168 # against previous tokens.
169 similarity_scores = self.similarity(
170 hidden_states_beams, next_hidden_states_beams
171 )
172 max_similarity_scores = tf.cast(
173 tf.reduce_max(similarity_scores[:, :index], axis=1),
174 dtype=next_token_probabilities.dtype,
175 )
176 if index == 0:
177 # If the index is 0, there is no previous states so we set
178 # `max_similarity_scores` the same for all beams.
179 max_similarity_scores = tf.zeros_like(max_similarity_scores)
180 # The final score of each candidate token is weighted sum of
181 # probability and similarity against previous tokens.
182 accumulated_scores = (
183 (1 - self.alpha) * next_token_probabilities
184 - self.alpha * max_similarity_scores
185 )
186 # Unflatten varibles to shape [batch_size, self.k, ...] for
187 # gather purpose.
188 unflat_score = unflatten_beams(accumulated_scores)
189 unflat_prompt = unflatten_beams(prompt_beams)
190 unflat_next_logits = unflatten_beams(next_logits)
191 unflat_next_hidden_states = unflatten_beams(
192 next_hidden_states_beams
193 )
194 unflat_cache = tf.nest.map_structure(unflatten_beams, cache_beams)
195 best_token_indices = tf.math.argmax(unflat_score, axis=1)
196
197 def gather_best_token(beams):
198 return tf.gather(
199 beams,
200 best_token_indices,
201 axis=1,
202 batch_dims=1,
203 )
204
205 prompt = gather_best_token(unflat_prompt)
206 # We avoid recomputing forward pass for each token by updating the
207 # cache/hidden_states using the output, and pass the logits to
208 # next iteration step.
209 logits = gather_best_token(unflat_next_logits)
210 next_hidden_states = gather_best_token(unflat_next_hidden_states)
211 cache = tf.nest.map_structure(gather_best_token, unflat_cache)
212
213 hidden_states = dynamic_update_slice(
214 hidden_states,
215 next_hidden_states[:, tf.newaxis, :],
216 [0, index, 0],
217 )
218 return (prompt, cache, index + 1, logits, hidden_states)
219
220 prompt, _, _, _, _ = tf.while_loop(
221 cond=cond,
222 body=body,
223 loop_vars=(prompt, cache, index, logits, hidden_states),
224 maximum_iterations=(max_length - index),
225 )
226 return prompt
227
228 def similarity(self, h1, h2):
229 h2 = h2[..., tf.newaxis]
230 return tf.squeeze(tf.matmul(h1, h2), axis=-1) / (
231 tf.norm(h1, axis=-1) * tf.norm(h2, axis=-2)
232 )
233
234 def get_config(self):
235 config = super().get_config()
236 config.update(
237 {
238 "k": self.k,
239 "alpha": self.alpha,
240 "seed": self.seed,
241 }
242 )
243 return config
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/keras_nlp/samplers/contrastive_sampler.py b/keras_nlp/samplers/contrastive_sampler.py
--- a/keras_nlp/samplers/contrastive_sampler.py
+++ b/keras_nlp/samplers/contrastive_sampler.py
@@ -76,8 +76,9 @@
k=5,
alpha=0.6,
seed=None,
+ **kwargs,
):
- super().__init__()
+ super().__init__(**kwargs)
self.k = k
self.alpha = alpha
self.seed = seed
@@ -133,7 +134,7 @@
def body(prompt, cache, index, logits, hidden_states):
# Compute the softmax distribution for the next token.
- probabilities = keras.activations.softmax(logits)
+ probabilities = keras.activations.softmax(logits / self.temperature)
# Replicate for `self.k` times to find the best token in top-k
# candidates.
|
{"golden_diff": "diff --git a/keras_nlp/samplers/contrastive_sampler.py b/keras_nlp/samplers/contrastive_sampler.py\n--- a/keras_nlp/samplers/contrastive_sampler.py\n+++ b/keras_nlp/samplers/contrastive_sampler.py\n@@ -76,8 +76,9 @@\n k=5,\n alpha=0.6,\n seed=None,\n+ **kwargs,\n ):\n- super().__init__()\n+ super().__init__(**kwargs)\n self.k = k\n self.alpha = alpha\n self.seed = seed\n@@ -133,7 +134,7 @@\n \n def body(prompt, cache, index, logits, hidden_states):\n # Compute the softmax distribution for the next token.\n- probabilities = keras.activations.softmax(logits)\n+ probabilities = keras.activations.softmax(logits / self.temperature)\n \n # Replicate for `self.k` times to find the best token in top-k\n # candidates.\n", "issue": "Add a temperature argument to the Contrastive Sampler \nSimilar to our other samplers, we should add the temperature argument to the `Contrastive Sampler` along with the necessary tests.\r\nThe approach for accomplishing the same can be seen in #951. \r\n\r\nI would like to take this up. Thanks!\r\n\n", "before_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Contrastive Sampler.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.samplers.sampler import Sampler\nfrom keras_nlp.samplers.sampler import call_args_docstring\nfrom keras_nlp.utils.python_utils import format_docstring\n\n\n@format_docstring(call_args=call_args_docstring)\n@keras_nlp_export(\"keras_nlp.samplers.ContrastiveSampler\")\nclass ContrastiveSampler(Sampler):\n \"\"\"Contrastive Sampler class.\n\n This sampler implements contrastive search algorithm. In short, the sampler\n chooses the token having the max \"score\" as the next token. The \"score\" is\n a weighted sum between token's probability and max similarity against\n previous tokens. By using this joint score, contrastive sampler reduces the\n behavior of duplicating seen tokens.\n\n Args:\n k: int, the `k` value of top-k. Next token will be chosen from k tokens.\n alpha: float, the weight of minus max similarity in joint score\n computation. The larger the value of `alpha`, the score relies more\n on the similarity than the token probability.\n seed: int, defaults to None. The random seed.\n\n Call Args:\n {{call_args}}\n\n Examples:\n ```python\n # Use a simple alphabet of lowercase characters to [0, 26).\n int_lookup = {i: chr(i + ord(\"a\")) for i in range(26)}\n char_lookup = {v: k for k, v in int_lookup.items()}\n batch_size, length, vocab_size = 1, 12, len(int_lookup)\n hidden_size = 5\n index = 5\n\n def next(prompt, cache, index):\n prompt_batch_size = tf.shape(prompt)[0]\n hidden_states = tf.ones((prompt_batch_size, hidden_size))\n # A uniform distribution over our alphabet.\n logits = tf.ones((prompt_batch_size, vocab_size))\n return logits, hidden_states, cache\n\n output = keras_nlp.samplers.ContrastiveSampler()(\n next=next,\n prompt=tf.fill((batch_size, length), char_lookup[\"z\"]),\n index=index,\n hidden_states=tf.ones([batch_size, index, hidden_size]),\n )\n print([\"\".join([int_lookup[i] for i in s]) for s in output.numpy()])\n # >>> \"zzzzzeeeeeee\"\n ```\n \"\"\"\n\n def __init__(\n self,\n k=5,\n alpha=0.6,\n seed=None,\n ):\n super().__init__()\n self.k = k\n self.alpha = alpha\n self.seed = seed\n\n def __call__(\n self,\n next,\n prompt,\n cache=None,\n index=0,\n mask=None,\n end_token_id=None,\n hidden_states=None,\n ):\n if hidden_states is None:\n raise ValueError(\n \"`ContrastiveSampler` requires passing a `hidden_states`, but\"\n \"received `None`.\"\n )\n batch_size, max_length = tf.shape(prompt)[0], tf.shape(prompt)[1]\n # Make sure max length and start index are the same dtype.\n index = tf.cast(index, max_length.dtype)\n\n def create_beams(x):\n \"\"\"Add initial beam state.\"\"\"\n x = tf.repeat(x, self.k, axis=0)\n flat_shape = [batch_size * self.k] + x.shape.as_list()[1:]\n return tf.reshape(x, shape=flat_shape)\n\n def flatten_beams(x):\n \"\"\"Combine the beam dim and batch dim.\"\"\"\n flat_shape = [batch_size * self.k] + x.shape.as_list()[2:]\n return tf.reshape(x, shape=flat_shape)\n\n def unflatten_beams(x):\n \"\"\"Separate the beam dim and batch dim.\"\"\"\n unflat_shape = [batch_size, self.k] + x.shape.as_list()[1:]\n return tf.reshape(x, shape=unflat_shape)\n\n mask = tf.zeros_like(prompt, dtype=tf.bool) if mask is None else mask\n # Compute initial logits.\n logits, _, cache = next(prompt, cache, index)\n # `tf.while_loop` will not accept `None` as a value for `loop_vars`.\n cache = () if cache is None else cache\n\n def cond(prompt, cache, index, logits, hidden_states):\n if end_token_id is None:\n return True\n # Stop if all sequences have produced a *new* end_token_id.\n end_tokens = (prompt == end_token_id) & (~mask)\n prompt_done = tf.reduce_any(end_tokens, axis=-1)\n return not tf.reduce_all(prompt_done)\n\n def body(prompt, cache, index, logits, hidden_states):\n # Compute the softmax distribution for the next token.\n probabilities = keras.activations.softmax(logits)\n\n # Replicate for `self.k` times to find the best token in top-k\n # candidates.\n prompt_beams = create_beams(prompt)\n mask_beams = create_beams(mask)\n hidden_states_beams = create_beams(hidden_states)\n cache_beams = tf.nest.map_structure(create_beams, cache)\n\n # Get top-k candidate tokens and their probabilities.\n top_k_probabilities, top_k_indices = tf.math.top_k(\n probabilities, k=self.k, sorted=False\n )\n next_token_probabilities = flatten_beams(top_k_probabilities)\n next_token = flatten_beams(top_k_indices)\n next_token = tf.cast(next_token, prompt.dtype)\n next_token = tf.where(\n mask_beams[:, index], prompt_beams[:, index], next_token\n )\n\n # Update the prompt with the next token.\n next_token = next_token[:, tf.newaxis]\n prompt_beams = dynamic_update_slice(\n prompt_beams, next_token, [0, index]\n )\n\n # Compute the logits and hidden states for top-k candidate tokens.\n next_logits, next_hidden_states_beams, cache_beams = next(\n prompt_beams, cache_beams, index + 1\n )\n\n # Compute the max similarity score for top-k candidate tokens\n # against previous tokens.\n similarity_scores = self.similarity(\n hidden_states_beams, next_hidden_states_beams\n )\n max_similarity_scores = tf.cast(\n tf.reduce_max(similarity_scores[:, :index], axis=1),\n dtype=next_token_probabilities.dtype,\n )\n if index == 0:\n # If the index is 0, there is no previous states so we set\n # `max_similarity_scores` the same for all beams.\n max_similarity_scores = tf.zeros_like(max_similarity_scores)\n # The final score of each candidate token is weighted sum of\n # probability and similarity against previous tokens.\n accumulated_scores = (\n (1 - self.alpha) * next_token_probabilities\n - self.alpha * max_similarity_scores\n )\n # Unflatten varibles to shape [batch_size, self.k, ...] for\n # gather purpose.\n unflat_score = unflatten_beams(accumulated_scores)\n unflat_prompt = unflatten_beams(prompt_beams)\n unflat_next_logits = unflatten_beams(next_logits)\n unflat_next_hidden_states = unflatten_beams(\n next_hidden_states_beams\n )\n unflat_cache = tf.nest.map_structure(unflatten_beams, cache_beams)\n best_token_indices = tf.math.argmax(unflat_score, axis=1)\n\n def gather_best_token(beams):\n return tf.gather(\n beams,\n best_token_indices,\n axis=1,\n batch_dims=1,\n )\n\n prompt = gather_best_token(unflat_prompt)\n # We avoid recomputing forward pass for each token by updating the\n # cache/hidden_states using the output, and pass the logits to\n # next iteration step.\n logits = gather_best_token(unflat_next_logits)\n next_hidden_states = gather_best_token(unflat_next_hidden_states)\n cache = tf.nest.map_structure(gather_best_token, unflat_cache)\n\n hidden_states = dynamic_update_slice(\n hidden_states,\n next_hidden_states[:, tf.newaxis, :],\n [0, index, 0],\n )\n return (prompt, cache, index + 1, logits, hidden_states)\n\n prompt, _, _, _, _ = tf.while_loop(\n cond=cond,\n body=body,\n loop_vars=(prompt, cache, index, logits, hidden_states),\n maximum_iterations=(max_length - index),\n )\n return prompt\n\n def similarity(self, h1, h2):\n h2 = h2[..., tf.newaxis]\n return tf.squeeze(tf.matmul(h1, h2), axis=-1) / (\n tf.norm(h1, axis=-1) * tf.norm(h2, axis=-2)\n )\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\n \"k\": self.k,\n \"alpha\": self.alpha,\n \"seed\": self.seed,\n }\n )\n return config\n", "path": "keras_nlp/samplers/contrastive_sampler.py"}], "after_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Contrastive Sampler.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.samplers.sampler import Sampler\nfrom keras_nlp.samplers.sampler import call_args_docstring\nfrom keras_nlp.utils.python_utils import format_docstring\n\n\n@format_docstring(call_args=call_args_docstring)\n@keras_nlp_export(\"keras_nlp.samplers.ContrastiveSampler\")\nclass ContrastiveSampler(Sampler):\n \"\"\"Contrastive Sampler class.\n\n This sampler implements contrastive search algorithm. In short, the sampler\n chooses the token having the max \"score\" as the next token. The \"score\" is\n a weighted sum between token's probability and max similarity against\n previous tokens. By using this joint score, contrastive sampler reduces the\n behavior of duplicating seen tokens.\n\n Args:\n k: int, the `k` value of top-k. Next token will be chosen from k tokens.\n alpha: float, the weight of minus max similarity in joint score\n computation. The larger the value of `alpha`, the score relies more\n on the similarity than the token probability.\n seed: int, defaults to None. The random seed.\n\n Call Args:\n {{call_args}}\n\n Examples:\n ```python\n # Use a simple alphabet of lowercase characters to [0, 26).\n int_lookup = {i: chr(i + ord(\"a\")) for i in range(26)}\n char_lookup = {v: k for k, v in int_lookup.items()}\n batch_size, length, vocab_size = 1, 12, len(int_lookup)\n hidden_size = 5\n index = 5\n\n def next(prompt, cache, index):\n prompt_batch_size = tf.shape(prompt)[0]\n hidden_states = tf.ones((prompt_batch_size, hidden_size))\n # A uniform distribution over our alphabet.\n logits = tf.ones((prompt_batch_size, vocab_size))\n return logits, hidden_states, cache\n\n output = keras_nlp.samplers.ContrastiveSampler()(\n next=next,\n prompt=tf.fill((batch_size, length), char_lookup[\"z\"]),\n index=index,\n hidden_states=tf.ones([batch_size, index, hidden_size]),\n )\n print([\"\".join([int_lookup[i] for i in s]) for s in output.numpy()])\n # >>> \"zzzzzeeeeeee\"\n ```\n \"\"\"\n\n def __init__(\n self,\n k=5,\n alpha=0.6,\n seed=None,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.k = k\n self.alpha = alpha\n self.seed = seed\n\n def __call__(\n self,\n next,\n prompt,\n cache=None,\n index=0,\n mask=None,\n end_token_id=None,\n hidden_states=None,\n ):\n if hidden_states is None:\n raise ValueError(\n \"`ContrastiveSampler` requires passing a `hidden_states`, but\"\n \"received `None`.\"\n )\n batch_size, max_length = tf.shape(prompt)[0], tf.shape(prompt)[1]\n # Make sure max length and start index are the same dtype.\n index = tf.cast(index, max_length.dtype)\n\n def create_beams(x):\n \"\"\"Add initial beam state.\"\"\"\n x = tf.repeat(x, self.k, axis=0)\n flat_shape = [batch_size * self.k] + x.shape.as_list()[1:]\n return tf.reshape(x, shape=flat_shape)\n\n def flatten_beams(x):\n \"\"\"Combine the beam dim and batch dim.\"\"\"\n flat_shape = [batch_size * self.k] + x.shape.as_list()[2:]\n return tf.reshape(x, shape=flat_shape)\n\n def unflatten_beams(x):\n \"\"\"Separate the beam dim and batch dim.\"\"\"\n unflat_shape = [batch_size, self.k] + x.shape.as_list()[1:]\n return tf.reshape(x, shape=unflat_shape)\n\n mask = tf.zeros_like(prompt, dtype=tf.bool) if mask is None else mask\n # Compute initial logits.\n logits, _, cache = next(prompt, cache, index)\n # `tf.while_loop` will not accept `None` as a value for `loop_vars`.\n cache = () if cache is None else cache\n\n def cond(prompt, cache, index, logits, hidden_states):\n if end_token_id is None:\n return True\n # Stop if all sequences have produced a *new* end_token_id.\n end_tokens = (prompt == end_token_id) & (~mask)\n prompt_done = tf.reduce_any(end_tokens, axis=-1)\n return not tf.reduce_all(prompt_done)\n\n def body(prompt, cache, index, logits, hidden_states):\n # Compute the softmax distribution for the next token.\n probabilities = keras.activations.softmax(logits / self.temperature)\n\n # Replicate for `self.k` times to find the best token in top-k\n # candidates.\n prompt_beams = create_beams(prompt)\n mask_beams = create_beams(mask)\n hidden_states_beams = create_beams(hidden_states)\n cache_beams = tf.nest.map_structure(create_beams, cache)\n\n # Get top-k candidate tokens and their probabilities.\n top_k_probabilities, top_k_indices = tf.math.top_k(\n probabilities, k=self.k, sorted=False\n )\n next_token_probabilities = flatten_beams(top_k_probabilities)\n next_token = flatten_beams(top_k_indices)\n next_token = tf.cast(next_token, prompt.dtype)\n next_token = tf.where(\n mask_beams[:, index], prompt_beams[:, index], next_token\n )\n\n # Update the prompt with the next token.\n next_token = next_token[:, tf.newaxis]\n prompt_beams = dynamic_update_slice(\n prompt_beams, next_token, [0, index]\n )\n\n # Compute the logits and hidden states for top-k candidate tokens.\n next_logits, next_hidden_states_beams, cache_beams = next(\n prompt_beams, cache_beams, index + 1\n )\n\n # Compute the max similarity score for top-k candidate tokens\n # against previous tokens.\n similarity_scores = self.similarity(\n hidden_states_beams, next_hidden_states_beams\n )\n max_similarity_scores = tf.cast(\n tf.reduce_max(similarity_scores[:, :index], axis=1),\n dtype=next_token_probabilities.dtype,\n )\n if index == 0:\n # If the index is 0, there is no previous states so we set\n # `max_similarity_scores` the same for all beams.\n max_similarity_scores = tf.zeros_like(max_similarity_scores)\n # The final score of each candidate token is weighted sum of\n # probability and similarity against previous tokens.\n accumulated_scores = (\n (1 - self.alpha) * next_token_probabilities\n - self.alpha * max_similarity_scores\n )\n # Unflatten varibles to shape [batch_size, self.k, ...] for\n # gather purpose.\n unflat_score = unflatten_beams(accumulated_scores)\n unflat_prompt = unflatten_beams(prompt_beams)\n unflat_next_logits = unflatten_beams(next_logits)\n unflat_next_hidden_states = unflatten_beams(\n next_hidden_states_beams\n )\n unflat_cache = tf.nest.map_structure(unflatten_beams, cache_beams)\n best_token_indices = tf.math.argmax(unflat_score, axis=1)\n\n def gather_best_token(beams):\n return tf.gather(\n beams,\n best_token_indices,\n axis=1,\n batch_dims=1,\n )\n\n prompt = gather_best_token(unflat_prompt)\n # We avoid recomputing forward pass for each token by updating the\n # cache/hidden_states using the output, and pass the logits to\n # next iteration step.\n logits = gather_best_token(unflat_next_logits)\n next_hidden_states = gather_best_token(unflat_next_hidden_states)\n cache = tf.nest.map_structure(gather_best_token, unflat_cache)\n\n hidden_states = dynamic_update_slice(\n hidden_states,\n next_hidden_states[:, tf.newaxis, :],\n [0, index, 0],\n )\n return (prompt, cache, index + 1, logits, hidden_states)\n\n prompt, _, _, _, _ = tf.while_loop(\n cond=cond,\n body=body,\n loop_vars=(prompt, cache, index, logits, hidden_states),\n maximum_iterations=(max_length - index),\n )\n return prompt\n\n def similarity(self, h1, h2):\n h2 = h2[..., tf.newaxis]\n return tf.squeeze(tf.matmul(h1, h2), axis=-1) / (\n tf.norm(h1, axis=-1) * tf.norm(h2, axis=-2)\n )\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\n \"k\": self.k,\n \"alpha\": self.alpha,\n \"seed\": self.seed,\n }\n )\n return config\n", "path": "keras_nlp/samplers/contrastive_sampler.py"}]}
| 3,063 | 219 |
gh_patches_debug_1704
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2972
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FTS doesn't perform OCR on attachments for keyword extraction
If one send a PDF composed of images, no keywords will be extracted/indexed.
To fix that, we could do some OCR:
- https://github.com/tesseract-ocr/tesseract
or if we want to get fancy, something like
- https://github.com/JaidedAI/EasyOCR
Or the built-in integration with
- https://tika.apache.org/
There is a security argument going for that option: it's written in java (memory safe language) and would remove the need for decode2text and friends.
This is a follow-up to #2184 and #2972
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/configuration.py`
Content:
```
1 import os
2
3 from datetime import timedelta
4 import ipaddress
5
6 DEFAULT_CONFIG = {
7 # Specific to the admin UI
8 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
9 'BABEL_DEFAULT_LOCALE': 'en',
10 'BABEL_DEFAULT_TIMEZONE': 'UTC',
11 'BOOTSTRAP_SERVE_LOCAL': True,
12 'RATELIMIT_STORAGE_URL': '',
13 'DEBUG': False,
14 'DEBUG_PROFILER': False,
15 'DEBUG_TB_INTERCEPT_REDIRECTS': False,
16 'DEBUG_ASSETS': '',
17 'DOMAIN_REGISTRATION': False,
18 'TEMPLATES_AUTO_RELOAD': True,
19 'MEMORY_SESSIONS': False,
20 'FETCHMAIL_ENABLED': True,
21 'MAILU_VERSION': 'unknown',
22 # Database settings
23 'DB_FLAVOR': None,
24 'DB_USER': 'mailu',
25 'DB_PW': None,
26 'DB_HOST': 'database',
27 'DB_NAME': 'mailu',
28 'SQLITE_DATABASE_FILE': 'data/main.db',
29 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
30 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',
31 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
32 # Statistics management
33 'INSTANCE_ID_PATH': '/data/instance',
34 'STATS_ENDPOINT': '20.{}.stats.mailu.io',
35 # Common configuration variables
36 'SECRET_KEY': 'changeMe',
37 'DOMAIN': 'mailu.io',
38 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
39 'POSTMASTER': 'postmaster',
40 'WILDCARD_SENDERS': '',
41 'TLS_FLAVOR': 'cert',
42 'INBOUND_TLS_ENFORCE': False,
43 'DEFER_ON_TLS_ERROR': True,
44 'AUTH_RATELIMIT_IP': '5/hour',
45 'AUTH_RATELIMIT_IP_V4_MASK': 24,
46 'AUTH_RATELIMIT_IP_V6_MASK': 48,
47 'AUTH_RATELIMIT_USER': '50/day',
48 'AUTH_RATELIMIT_EXEMPTION': '',
49 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
50 'DISABLE_STATISTICS': False,
51 # Mail settings
52 'DMARC_RUA': None,
53 'DMARC_RUF': None,
54 'WELCOME': False,
55 'WELCOME_SUBJECT': 'Dummy welcome topic',
56 'WELCOME_BODY': 'Dummy welcome body',
57 'DKIM_SELECTOR': 'dkim',
58 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
59 'DEFAULT_QUOTA': 1000000000,
60 'MESSAGE_RATELIMIT': '200/day',
61 'MESSAGE_RATELIMIT_EXEMPTION': '',
62 'RECIPIENT_DELIMITER': '',
63 # Web settings
64 'SITENAME': 'Mailu',
65 'WEBSITE': 'https://mailu.io',
66 'ADMIN': 'none',
67 'WEB_ADMIN': '/admin',
68 'WEB_WEBMAIL': '/webmail',
69 'WEBMAIL': 'none',
70 'RECAPTCHA_PUBLIC_KEY': '',
71 'RECAPTCHA_PRIVATE_KEY': '',
72 'LOGO_URL': None,
73 'LOGO_BACKGROUND': None,
74 # Advanced settings
75 'API': False,
76 'WEB_API': '/api',
77 'API_TOKEN': None,
78 'LOG_LEVEL': 'INFO',
79 'SESSION_KEY_BITS': 128,
80 'SESSION_TIMEOUT': 3600,
81 'PERMANENT_SESSION_LIFETIME': 30*24*3600,
82 'SESSION_COOKIE_SECURE': None,
83 'CREDENTIAL_ROUNDS': 12,
84 'TLS_PERMISSIVE': True,
85 'TZ': 'Etc/UTC',
86 'DEFAULT_SPAM_THRESHOLD': 80,
87 'PROXY_AUTH_WHITELIST': '',
88 'PROXY_AUTH_HEADER': 'X-Auth-Email',
89 'PROXY_AUTH_CREATE': False,
90 'PROXY_AUTH_LOGOUT_URL': None,
91 'SUBNET': '192.168.203.0/24',
92 'SUBNET6': None,
93 }
94
95 class ConfigManager:
96 """ Naive configuration manager that uses environment only
97 """
98
99 DB_TEMPLATES = {
100 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
101 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
102 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
103 }
104
105 def __init__(self):
106 self.config = dict()
107
108 def __get_env(self, key, value):
109 key_file = key + "_FILE"
110 if key_file in os.environ:
111 with open(os.environ.get(key_file)) as file:
112 value_from_file = file.read()
113 return value_from_file.strip()
114 else:
115 return os.environ.get(key, value)
116
117 def __coerce_value(self, value):
118 if isinstance(value, str) and value.lower() in ('true','yes'):
119 return True
120 elif isinstance(value, str) and value.lower() in ('false', 'no'):
121 return False
122 return value
123
124 def init_app(self, app):
125 # get current app config
126 self.config.update(app.config)
127 # get environment variables
128 for key in os.environ:
129 if key.endswith('_ADDRESS'):
130 self.config[key] = os.environ[key]
131
132 self.config.update({
133 key: self.__coerce_value(self.__get_env(key, value))
134 for key, value in DEFAULT_CONFIG.items()
135 })
136
137 # automatically set the sqlalchemy string
138 if self.config['DB_FLAVOR']:
139 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
140 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
141
142 if not self.config.get('RATELIMIT_STORAGE_URL'):
143 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2'
144
145 self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3'
146 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
147 self.config['SESSION_COOKIE_HTTPONLY'] = True
148 if self.config['SESSION_COOKIE_SECURE'] is None:
149 self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'
150 self.config['SESSION_PERMANENT'] = True
151 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])
152 self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])
153 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])
154 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])
155 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])
156 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
157 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])
158 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
159 self.config['HOSTNAMES'] = ','.join(hostnames)
160 self.config['HOSTNAME'] = hostnames[0]
161 self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])
162 self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)
163 try:
164 self.config['MAILU_VERSION'] = open('/version', 'r').read()
165 except FileNotFoundError:
166 pass
167
168 # update the app config
169 app.config.update(self.config)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -75,6 +75,8 @@
'API': False,
'WEB_API': '/api',
'API_TOKEN': None,
+ 'FULL_TEXT_SEARCH': 'en',
+ 'FULL_TEXT_SEARCH_ATTACHMENTS': False,
'LOG_LEVEL': 'INFO',
'SESSION_KEY_BITS': 128,
'SESSION_TIMEOUT': 3600,
|
{"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -75,6 +75,8 @@\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n+ 'FULL_TEXT_SEARCH': 'en',\n+ 'FULL_TEXT_SEARCH_ATTACHMENTS': False,\n 'LOG_LEVEL': 'INFO',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n", "issue": "FTS doesn't perform OCR on attachments for keyword extraction\nIf one send a PDF composed of images, no keywords will be extracted/indexed.\r\n\r\nTo fix that, we could do some OCR:\r\n- https://github.com/tesseract-ocr/tesseract\r\n\r\nor if we want to get fancy, something like\r\n- https://github.com/JaidedAI/EasyOCR\r\n\r\nOr the built-in integration with\r\n- https://tika.apache.org/\r\nThere is a security argument going for that option: it's written in java (memory safe language) and would remove the need for decode2text and friends.\r\n\r\nThis is a follow-up to #2184 and #2972\n", "before_files": [{"content": "import os\n\nfrom datetime import timedelta\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'DEBUG': False,\n 'DEBUG_PROFILER': False,\n 'DEBUG_TB_INTERCEPT_REDIRECTS': False,\n 'DEBUG_ASSETS': '',\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n 'FETCHMAIL_ENABLED': True,\n 'MAILU_VERSION': 'unknown',\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE': 'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '20.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '5/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 48,\n 'AUTH_RATELIMIT_USER': '50/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN': 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n 'LOG_LEVEL': 'INFO',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': None,\n 'CREDENTIAL_ROUNDS': 12,\n 'TLS_PERMISSIVE': True,\n 'TZ': 'Etc/UTC',\n 'DEFAULT_SPAM_THRESHOLD': 80,\n 'PROXY_AUTH_WHITELIST': '',\n 'PROXY_AUTH_HEADER': 'X-Auth-Email',\n 'PROXY_AUTH_CREATE': False,\n 'PROXY_AUTH_LOGOUT_URL': None,\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n }\n\n def __init__(self):\n self.config = dict()\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n for key in os.environ:\n if key.endswith('_ADDRESS'):\n self.config[key] = os.environ[key]\n\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n if not self.config.get('RATELIMIT_STORAGE_URL'):\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n if self.config['SESSION_COOKIE_SECURE'] is None:\n self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])\n self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)\n try:\n self.config['MAILU_VERSION'] = open('/version', 'r').read()\n except FileNotFoundError:\n pass\n\n # update the app config\n app.config.update(self.config)\n", "path": "core/admin/mailu/configuration.py"}], "after_files": [{"content": "import os\n\nfrom datetime import timedelta\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'DEBUG': False,\n 'DEBUG_PROFILER': False,\n 'DEBUG_TB_INTERCEPT_REDIRECTS': False,\n 'DEBUG_ASSETS': '',\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n 'FETCHMAIL_ENABLED': True,\n 'MAILU_VERSION': 'unknown',\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE': 'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '20.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '5/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 48,\n 'AUTH_RATELIMIT_USER': '50/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN': 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n 'FULL_TEXT_SEARCH': 'en',\n 'FULL_TEXT_SEARCH_ATTACHMENTS': False,\n 'LOG_LEVEL': 'INFO',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': None,\n 'CREDENTIAL_ROUNDS': 12,\n 'TLS_PERMISSIVE': True,\n 'TZ': 'Etc/UTC',\n 'DEFAULT_SPAM_THRESHOLD': 80,\n 'PROXY_AUTH_WHITELIST': '',\n 'PROXY_AUTH_HEADER': 'X-Auth-Email',\n 'PROXY_AUTH_CREATE': False,\n 'PROXY_AUTH_LOGOUT_URL': None,\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n }\n\n def __init__(self):\n self.config = dict()\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n for key in os.environ:\n if key.endswith('_ADDRESS'):\n self.config[key] = os.environ[key]\n\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n if not self.config.get('RATELIMIT_STORAGE_URL'):\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n if self.config['SESSION_COOKIE_SECURE'] is None:\n self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])\n self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)\n try:\n self.config['MAILU_VERSION'] = open('/version', 'r').read()\n except FileNotFoundError:\n pass\n\n # update the app config\n app.config.update(self.config)\n", "path": "core/admin/mailu/configuration.py"}]}
| 2,556 | 120 |
gh_patches_debug_4750
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-1567
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DefaultWorker crashes in non-deterministic mode
```sh
File "/home/rjulian/venv/lib/python3.5/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/rjulian/venv/lib/python3.5/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/rjulian/venv/lib/python3.5/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/rjulian/venv/lib/python3.5/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "examples/torch/sac_metaworldv2_test.py", line 102, in get_args
sac_metaworldv2_test()
File "/home/rjulian/garage/src/garage/experiment/experiment.py", line 553, in __call__
result = self.function(ctxt, **kwargs)
File "examples/torch/sac_metaworldv2_test.py", line 100, in sac_metaworldv2_test
runner.setup(algo=sac, env=env, sampler_cls=LocalSampler)
File "/home/rjulian/garage/src/garage/experiment/local_runner.py", line 268, in setup
worker_args=worker_args)
File "/home/rjulian/garage/src/garage/experiment/local_runner.py", line 215, in make_sampler
envs=self._env)
File "/home/rjulian/garage/src/garage/sampler/local_sampler.py", line 66, in from_worker_factory
return cls(worker_factory, agents, envs)
File "/home/rjulian/garage/src/garage/sampler/local_sampler.py", line 38, in __init__
worker_factory(i) for i in range(worker_factory.n_workers)
File "/home/rjulian/garage/src/garage/sampler/local_sampler.py", line 38, in <listcomp>
worker_factory(i) for i in range(worker_factory.n_workers)
File "/home/rjulian/garage/src/garage/sampler/worker_factory.py", line 109, in __call__
**self._worker_args)
File "/home/rjulian/garage/src/garage/sampler/default_worker.py", line 51, in __init__
self.worker_init()
File "/home/rjulian/garage/src/garage/sampler/default_worker.py", line 55, in worker_init
deterministic.set_seed(self._seed + self._worker_number)
TypeError: unsupported operand type(s) for +: 'NoneType' and 'int'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/garage/sampler/default_worker.py`
Content:
```
1 """Default Worker class."""
2 from collections import defaultdict
3
4 import gym
5 import numpy as np
6
7 from garage import TrajectoryBatch
8 from garage.experiment import deterministic
9 from garage.sampler.env_update import EnvUpdate
10 from garage.sampler.worker import Worker
11
12
13 class DefaultWorker(Worker):
14 """Initialize a worker.
15
16 Args:
17 seed(int): The seed to use to intialize random number generators.
18 max_path_length(int or float): The maximum length paths which will
19 be sampled. Can be (floating point) infinity.
20 worker_number(int): The number of the worker where this update is
21 occurring. This argument is used to set a different seed for each
22 worker.
23
24 Attributes:
25 agent(Policy or None): The worker's agent.
26 env(gym.Env or None): The worker's environment.
27
28 """
29
30 def __init__(
31 self,
32 *, # Require passing by keyword, since everything's an int.
33 seed,
34 max_path_length,
35 worker_number):
36 super().__init__(seed=seed,
37 max_path_length=max_path_length,
38 worker_number=worker_number)
39 self.agent = None
40 self.env = None
41 self._observations = []
42 self._last_observations = []
43 self._actions = []
44 self._rewards = []
45 self._terminals = []
46 self._lengths = []
47 self._agent_infos = defaultdict(list)
48 self._env_infos = defaultdict(list)
49 self._prev_obs = None
50 self._path_length = 0
51 self.worker_init()
52
53 def worker_init(self):
54 """Initialize a worker."""
55 deterministic.set_seed(self._seed + self._worker_number)
56
57 def update_agent(self, agent_update):
58 """Update an agent, assuming it implements garage.Policy.
59
60 Args:
61 agent_update (np.ndarray or dict or garage.Policy): If a
62 tuple, dict, or np.ndarray, these should be parameters to
63 agent, which should have been generated by calling
64 `policy.get_param_values`. Alternatively, a policy itself. Note
65 that other implementations of `Worker` may take different types
66 for this parameter.
67
68 """
69 if isinstance(agent_update, (dict, tuple, np.ndarray)):
70 self.agent.set_param_values(agent_update)
71 elif agent_update is not None:
72 self.agent = agent_update
73
74 def update_env(self, env_update):
75 """Use any non-None env_update as a new environment.
76
77 A simple env update function. If env_update is not None, it should be
78 the complete new environment.
79
80 This allows changing environments by passing the new environment as
81 `env_update` into `obtain_samples`.
82
83 Args:
84 env_update(gym.Env or EnvUpdate or None): The environment to
85 replace the existing env with. Note that other implementations
86 of `Worker` may take different types for this parameter.
87
88 Raises:
89 TypeError: If env_update is not one of the documented types.
90
91 """
92 if env_update is not None:
93 if isinstance(env_update, EnvUpdate):
94 self.env = env_update(self.env)
95 elif isinstance(env_update, gym.Env):
96 if self.env is not None:
97 self.env.close()
98 self.env = env_update
99 else:
100 raise TypeError('Uknown environment update type.')
101
102 def start_rollout(self):
103 """Begin a new rollout."""
104 self._path_length = 0
105 self._prev_obs = self.env.reset()
106 self.agent.reset()
107
108 def step_rollout(self):
109 """Take a single time-step in the current rollout.
110
111 Returns:
112 bool: True iff the path is done, either due to the environment
113 indicating termination of due to reaching `max_path_length`.
114
115 """
116 if self._path_length < self._max_path_length:
117 a, agent_info = self.agent.get_action(self._prev_obs)
118 next_o, r, d, env_info = self.env.step(a)
119 self._observations.append(self._prev_obs)
120 self._rewards.append(r)
121 self._actions.append(a)
122 for k, v in agent_info.items():
123 self._agent_infos[k].append(v)
124 for k, v in env_info.items():
125 self._env_infos[k].append(v)
126 self._path_length += 1
127 self._terminals.append(d)
128 if not d:
129 self._prev_obs = next_o
130 return False
131 self._lengths.append(self._path_length)
132 self._last_observations.append(self._prev_obs)
133 return True
134
135 def collect_rollout(self):
136 """Collect the current rollout, clearing the internal buffer.
137
138 Returns:
139 garage.TrajectoryBatch: A batch of the trajectories completed since
140 the last call to collect_rollout().
141
142 """
143 observations = self._observations
144 self._observations = []
145 last_observations = self._last_observations
146 self._last_observations = []
147 actions = self._actions
148 self._actions = []
149 rewards = self._rewards
150 self._rewards = []
151 terminals = self._terminals
152 self._terminals = []
153 env_infos = self._env_infos
154 self._env_infos = defaultdict(list)
155 agent_infos = self._agent_infos
156 self._agent_infos = defaultdict(list)
157 for k, v in agent_infos.items():
158 agent_infos[k] = np.asarray(v)
159 for k, v in env_infos.items():
160 env_infos[k] = np.asarray(v)
161 lengths = self._lengths
162 self._lengths = []
163 return TrajectoryBatch(self.env.spec, np.asarray(observations),
164 np.asarray(last_observations),
165 np.asarray(actions), np.asarray(rewards),
166 np.asarray(terminals), dict(env_infos),
167 dict(agent_infos), np.asarray(lengths,
168 dtype='i'))
169
170 def rollout(self):
171 """Sample a single rollout of the agent in the environment.
172
173 Returns:
174 garage.TrajectoryBatch: The collected trajectory.
175
176 """
177 self.start_rollout()
178 while not self.step_rollout():
179 pass
180 return self.collect_rollout()
181
182 def shutdown(self):
183 """Close the worker's environment."""
184 self.env.close()
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/garage/sampler/default_worker.py b/src/garage/sampler/default_worker.py
--- a/src/garage/sampler/default_worker.py
+++ b/src/garage/sampler/default_worker.py
@@ -52,7 +52,8 @@
def worker_init(self):
"""Initialize a worker."""
- deterministic.set_seed(self._seed + self._worker_number)
+ if self._seed is not None:
+ deterministic.set_seed(self._seed + self._worker_number)
def update_agent(self, agent_update):
"""Update an agent, assuming it implements garage.Policy.
|
{"golden_diff": "diff --git a/src/garage/sampler/default_worker.py b/src/garage/sampler/default_worker.py\n--- a/src/garage/sampler/default_worker.py\n+++ b/src/garage/sampler/default_worker.py\n@@ -52,7 +52,8 @@\n \n def worker_init(self):\n \"\"\"Initialize a worker.\"\"\"\n- deterministic.set_seed(self._seed + self._worker_number)\n+ if self._seed is not None:\n+ deterministic.set_seed(self._seed + self._worker_number)\n \n def update_agent(self, agent_update):\n \"\"\"Update an agent, assuming it implements garage.Policy.\n", "issue": "DefaultWorker crashes in non-deterministic mode\n```sh\r\n File \"/home/rjulian/venv/lib/python3.5/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/rjulian/venv/lib/python3.5/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/rjulian/venv/lib/python3.5/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/rjulian/venv/lib/python3.5/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"examples/torch/sac_metaworldv2_test.py\", line 102, in get_args\r\n sac_metaworldv2_test()\r\n File \"/home/rjulian/garage/src/garage/experiment/experiment.py\", line 553, in __call__\r\n result = self.function(ctxt, **kwargs)\r\n File \"examples/torch/sac_metaworldv2_test.py\", line 100, in sac_metaworldv2_test\r\n runner.setup(algo=sac, env=env, sampler_cls=LocalSampler)\r\n File \"/home/rjulian/garage/src/garage/experiment/local_runner.py\", line 268, in setup\r\n worker_args=worker_args)\r\n File \"/home/rjulian/garage/src/garage/experiment/local_runner.py\", line 215, in make_sampler\r\n envs=self._env)\r\n File \"/home/rjulian/garage/src/garage/sampler/local_sampler.py\", line 66, in from_worker_factory\r\n return cls(worker_factory, agents, envs)\r\n File \"/home/rjulian/garage/src/garage/sampler/local_sampler.py\", line 38, in __init__\r\n worker_factory(i) for i in range(worker_factory.n_workers)\r\n File \"/home/rjulian/garage/src/garage/sampler/local_sampler.py\", line 38, in <listcomp>\r\n worker_factory(i) for i in range(worker_factory.n_workers)\r\n File \"/home/rjulian/garage/src/garage/sampler/worker_factory.py\", line 109, in __call__\r\n **self._worker_args)\r\n File \"/home/rjulian/garage/src/garage/sampler/default_worker.py\", line 51, in __init__\r\n self.worker_init()\r\n File \"/home/rjulian/garage/src/garage/sampler/default_worker.py\", line 55, in worker_init\r\n deterministic.set_seed(self._seed + self._worker_number)\r\nTypeError: unsupported operand type(s) for +: 'NoneType' and 'int'\r\n```\n", "before_files": [{"content": "\"\"\"Default Worker class.\"\"\"\nfrom collections import defaultdict\n\nimport gym\nimport numpy as np\n\nfrom garage import TrajectoryBatch\nfrom garage.experiment import deterministic\nfrom garage.sampler.env_update import EnvUpdate\nfrom garage.sampler.worker import Worker\n\n\nclass DefaultWorker(Worker):\n \"\"\"Initialize a worker.\n\n Args:\n seed(int): The seed to use to intialize random number generators.\n max_path_length(int or float): The maximum length paths which will\n be sampled. Can be (floating point) infinity.\n worker_number(int): The number of the worker where this update is\n occurring. This argument is used to set a different seed for each\n worker.\n\n Attributes:\n agent(Policy or None): The worker's agent.\n env(gym.Env or None): The worker's environment.\n\n \"\"\"\n\n def __init__(\n self,\n *, # Require passing by keyword, since everything's an int.\n seed,\n max_path_length,\n worker_number):\n super().__init__(seed=seed,\n max_path_length=max_path_length,\n worker_number=worker_number)\n self.agent = None\n self.env = None\n self._observations = []\n self._last_observations = []\n self._actions = []\n self._rewards = []\n self._terminals = []\n self._lengths = []\n self._agent_infos = defaultdict(list)\n self._env_infos = defaultdict(list)\n self._prev_obs = None\n self._path_length = 0\n self.worker_init()\n\n def worker_init(self):\n \"\"\"Initialize a worker.\"\"\"\n deterministic.set_seed(self._seed + self._worker_number)\n\n def update_agent(self, agent_update):\n \"\"\"Update an agent, assuming it implements garage.Policy.\n\n Args:\n agent_update (np.ndarray or dict or garage.Policy): If a\n tuple, dict, or np.ndarray, these should be parameters to\n agent, which should have been generated by calling\n `policy.get_param_values`. Alternatively, a policy itself. Note\n that other implementations of `Worker` may take different types\n for this parameter.\n\n \"\"\"\n if isinstance(agent_update, (dict, tuple, np.ndarray)):\n self.agent.set_param_values(agent_update)\n elif agent_update is not None:\n self.agent = agent_update\n\n def update_env(self, env_update):\n \"\"\"Use any non-None env_update as a new environment.\n\n A simple env update function. If env_update is not None, it should be\n the complete new environment.\n\n This allows changing environments by passing the new environment as\n `env_update` into `obtain_samples`.\n\n Args:\n env_update(gym.Env or EnvUpdate or None): The environment to\n replace the existing env with. Note that other implementations\n of `Worker` may take different types for this parameter.\n\n Raises:\n TypeError: If env_update is not one of the documented types.\n\n \"\"\"\n if env_update is not None:\n if isinstance(env_update, EnvUpdate):\n self.env = env_update(self.env)\n elif isinstance(env_update, gym.Env):\n if self.env is not None:\n self.env.close()\n self.env = env_update\n else:\n raise TypeError('Uknown environment update type.')\n\n def start_rollout(self):\n \"\"\"Begin a new rollout.\"\"\"\n self._path_length = 0\n self._prev_obs = self.env.reset()\n self.agent.reset()\n\n def step_rollout(self):\n \"\"\"Take a single time-step in the current rollout.\n\n Returns:\n bool: True iff the path is done, either due to the environment\n indicating termination of due to reaching `max_path_length`.\n\n \"\"\"\n if self._path_length < self._max_path_length:\n a, agent_info = self.agent.get_action(self._prev_obs)\n next_o, r, d, env_info = self.env.step(a)\n self._observations.append(self._prev_obs)\n self._rewards.append(r)\n self._actions.append(a)\n for k, v in agent_info.items():\n self._agent_infos[k].append(v)\n for k, v in env_info.items():\n self._env_infos[k].append(v)\n self._path_length += 1\n self._terminals.append(d)\n if not d:\n self._prev_obs = next_o\n return False\n self._lengths.append(self._path_length)\n self._last_observations.append(self._prev_obs)\n return True\n\n def collect_rollout(self):\n \"\"\"Collect the current rollout, clearing the internal buffer.\n\n Returns:\n garage.TrajectoryBatch: A batch of the trajectories completed since\n the last call to collect_rollout().\n\n \"\"\"\n observations = self._observations\n self._observations = []\n last_observations = self._last_observations\n self._last_observations = []\n actions = self._actions\n self._actions = []\n rewards = self._rewards\n self._rewards = []\n terminals = self._terminals\n self._terminals = []\n env_infos = self._env_infos\n self._env_infos = defaultdict(list)\n agent_infos = self._agent_infos\n self._agent_infos = defaultdict(list)\n for k, v in agent_infos.items():\n agent_infos[k] = np.asarray(v)\n for k, v in env_infos.items():\n env_infos[k] = np.asarray(v)\n lengths = self._lengths\n self._lengths = []\n return TrajectoryBatch(self.env.spec, np.asarray(observations),\n np.asarray(last_observations),\n np.asarray(actions), np.asarray(rewards),\n np.asarray(terminals), dict(env_infos),\n dict(agent_infos), np.asarray(lengths,\n dtype='i'))\n\n def rollout(self):\n \"\"\"Sample a single rollout of the agent in the environment.\n\n Returns:\n garage.TrajectoryBatch: The collected trajectory.\n\n \"\"\"\n self.start_rollout()\n while not self.step_rollout():\n pass\n return self.collect_rollout()\n\n def shutdown(self):\n \"\"\"Close the worker's environment.\"\"\"\n self.env.close()\n", "path": "src/garage/sampler/default_worker.py"}], "after_files": [{"content": "\"\"\"Default Worker class.\"\"\"\nfrom collections import defaultdict\n\nimport gym\nimport numpy as np\n\nfrom garage import TrajectoryBatch\nfrom garage.experiment import deterministic\nfrom garage.sampler.env_update import EnvUpdate\nfrom garage.sampler.worker import Worker\n\n\nclass DefaultWorker(Worker):\n \"\"\"Initialize a worker.\n\n Args:\n seed(int): The seed to use to intialize random number generators.\n max_path_length(int or float): The maximum length paths which will\n be sampled. Can be (floating point) infinity.\n worker_number(int): The number of the worker where this update is\n occurring. This argument is used to set a different seed for each\n worker.\n\n Attributes:\n agent(Policy or None): The worker's agent.\n env(gym.Env or None): The worker's environment.\n\n \"\"\"\n\n def __init__(\n self,\n *, # Require passing by keyword, since everything's an int.\n seed,\n max_path_length,\n worker_number):\n super().__init__(seed=seed,\n max_path_length=max_path_length,\n worker_number=worker_number)\n self.agent = None\n self.env = None\n self._observations = []\n self._last_observations = []\n self._actions = []\n self._rewards = []\n self._terminals = []\n self._lengths = []\n self._agent_infos = defaultdict(list)\n self._env_infos = defaultdict(list)\n self._prev_obs = None\n self._path_length = 0\n self.worker_init()\n\n def worker_init(self):\n \"\"\"Initialize a worker.\"\"\"\n if self._seed is not None:\n deterministic.set_seed(self._seed + self._worker_number)\n\n def update_agent(self, agent_update):\n \"\"\"Update an agent, assuming it implements garage.Policy.\n\n Args:\n agent_update (np.ndarray or dict or garage.Policy): If a\n tuple, dict, or np.ndarray, these should be parameters to\n agent, which should have been generated by calling\n `policy.get_param_values`. Alternatively, a policy itself. Note\n that other implementations of `Worker` may take different types\n for this parameter.\n\n \"\"\"\n if isinstance(agent_update, (dict, tuple, np.ndarray)):\n self.agent.set_param_values(agent_update)\n elif agent_update is not None:\n self.agent = agent_update\n\n def update_env(self, env_update):\n \"\"\"Use any non-None env_update as a new environment.\n\n A simple env update function. If env_update is not None, it should be\n the complete new environment.\n\n This allows changing environments by passing the new environment as\n `env_update` into `obtain_samples`.\n\n Args:\n env_update(gym.Env or EnvUpdate or None): The environment to\n replace the existing env with. Note that other implementations\n of `Worker` may take different types for this parameter.\n\n Raises:\n TypeError: If env_update is not one of the documented types.\n\n \"\"\"\n if env_update is not None:\n if isinstance(env_update, EnvUpdate):\n self.env = env_update(self.env)\n elif isinstance(env_update, gym.Env):\n if self.env is not None:\n self.env.close()\n self.env = env_update\n else:\n raise TypeError('Uknown environment update type.')\n\n def start_rollout(self):\n \"\"\"Begin a new rollout.\"\"\"\n self._path_length = 0\n self._prev_obs = self.env.reset()\n self.agent.reset()\n\n def step_rollout(self):\n \"\"\"Take a single time-step in the current rollout.\n\n Returns:\n bool: True iff the path is done, either due to the environment\n indicating termination of due to reaching `max_path_length`.\n\n \"\"\"\n if self._path_length < self._max_path_length:\n a, agent_info = self.agent.get_action(self._prev_obs)\n next_o, r, d, env_info = self.env.step(a)\n self._observations.append(self._prev_obs)\n self._rewards.append(r)\n self._actions.append(a)\n for k, v in agent_info.items():\n self._agent_infos[k].append(v)\n for k, v in env_info.items():\n self._env_infos[k].append(v)\n self._path_length += 1\n self._terminals.append(d)\n if not d:\n self._prev_obs = next_o\n return False\n self._lengths.append(self._path_length)\n self._last_observations.append(self._prev_obs)\n return True\n\n def collect_rollout(self):\n \"\"\"Collect the current rollout, clearing the internal buffer.\n\n Returns:\n garage.TrajectoryBatch: A batch of the trajectories completed since\n the last call to collect_rollout().\n\n \"\"\"\n observations = self._observations\n self._observations = []\n last_observations = self._last_observations\n self._last_observations = []\n actions = self._actions\n self._actions = []\n rewards = self._rewards\n self._rewards = []\n terminals = self._terminals\n self._terminals = []\n env_infos = self._env_infos\n self._env_infos = defaultdict(list)\n agent_infos = self._agent_infos\n self._agent_infos = defaultdict(list)\n for k, v in agent_infos.items():\n agent_infos[k] = np.asarray(v)\n for k, v in env_infos.items():\n env_infos[k] = np.asarray(v)\n lengths = self._lengths\n self._lengths = []\n return TrajectoryBatch(self.env.spec, np.asarray(observations),\n np.asarray(last_observations),\n np.asarray(actions), np.asarray(rewards),\n np.asarray(terminals), dict(env_infos),\n dict(agent_infos), np.asarray(lengths,\n dtype='i'))\n\n def rollout(self):\n \"\"\"Sample a single rollout of the agent in the environment.\n\n Returns:\n garage.TrajectoryBatch: The collected trajectory.\n\n \"\"\"\n self.start_rollout()\n while not self.step_rollout():\n pass\n return self.collect_rollout()\n\n def shutdown(self):\n \"\"\"Close the worker's environment.\"\"\"\n self.env.close()\n", "path": "src/garage/sampler/default_worker.py"}]}
| 2,684 | 134 |
gh_patches_debug_11199
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-1937
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When attempting to generate a Finnish ssn of a specific age the provider crashes
* Faker version: 19.11.0
* OS: Windows 10
When attempting to generate a Finnish ssn of a certain age the provider crashes because ``datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))`` is always trying to generate a time within range. The ssn provider should handle situations where min_age and max_age are the same.
For example:
```
if min_age == max_age:
age = datetime.timedelta(days=min_age * 365)
else:
age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
```
### Steps to reproduce
1. Create a faker with fi_FI locale: ``fake = Faker('fi_FI')``
2. Call the ssn provider with the same minimum and maximum age: ``fake.ssn(min_age=50, max_age=50, artificial=True)``
### Expected behavior
The faker generates an artificial Finnish ssn that is 50 years old.
### Actual behavior
``ValueError: empty range for randrange() (18250, 18250, 0)``
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/ssn/fi_FI/__init__.py`
Content:
```
1 import datetime
2
3 from .. import Provider as SsnProvider
4
5
6 class Provider(SsnProvider):
7 def ssn(self, min_age: int = 0, max_age: int = 105, artificial: bool = False) -> str:
8 """
9 Returns 11 character Finnish personal identity code (Henkilötunnus,
10 HETU, Swedish: Personbeteckning). This function assigns random
11 gender to person.
12
13 HETU consists of eleven characters of the form DDMMYYCZZZQ, where
14 DDMMYY is the date of birth, C the century sign, ZZZ the individual
15 number and Q the control character (checksum). The sign for the
16 century is either + (1800–1899), - (1900–1999), or A (2000–2099).
17 The individual number ZZZ is odd for males and even for females.
18 For people born in Finland its range is 002-899
19 (larger numbers may be used in special cases).
20 An example of a valid code is 311280-888Y.
21
22 https://en.wikipedia.org/wiki/National_identification_number#Finland
23 """
24
25 def _checksum(hetu):
26 checksum_characters = "0123456789ABCDEFHJKLMNPRSTUVWXY"
27 return checksum_characters[int(hetu) % 31]
28
29 age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
30 birthday = datetime.date.today() - age
31 hetu_date = "%02d%02d%s" % (
32 birthday.day,
33 birthday.month,
34 str(birthday.year)[-2:],
35 )
36 range = (900, 999) if artificial is True else (2, 899)
37 suffix = str(self.generator.random.randrange(*range)).zfill(3)
38 checksum = _checksum(hetu_date + suffix)
39 separator = self._get_century_code(birthday.year)
40 hetu = "".join([hetu_date, separator, suffix, checksum])
41 return hetu
42
43 @staticmethod
44 def _get_century_code(year: int) -> str:
45 """Returns the century code for a given year"""
46 if 2000 <= year < 3000:
47 separator = "A"
48 elif 1900 <= year < 2000:
49 separator = "-"
50 elif 1800 <= year < 1900:
51 separator = "+"
52 else:
53 raise ValueError("Finnish SSN do not support people born before the year 1800 or after the year 2999")
54 return separator
55
56 vat_id_formats = ("FI########",)
57
58 def vat_id(self) -> str:
59 """
60 http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
61 :return: A random Finnish VAT ID
62 """
63 return self.bothify(self.random_element(self.vat_id_formats))
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/ssn/fi_FI/__init__.py b/faker/providers/ssn/fi_FI/__init__.py
--- a/faker/providers/ssn/fi_FI/__init__.py
+++ b/faker/providers/ssn/fi_FI/__init__.py
@@ -26,7 +26,10 @@
checksum_characters = "0123456789ABCDEFHJKLMNPRSTUVWXY"
return checksum_characters[int(hetu) % 31]
- age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
+ if min_age == max_age:
+ age = datetime.timedelta(days=min_age * 365)
+ else:
+ age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
birthday = datetime.date.today() - age
hetu_date = "%02d%02d%s" % (
birthday.day,
|
{"golden_diff": "diff --git a/faker/providers/ssn/fi_FI/__init__.py b/faker/providers/ssn/fi_FI/__init__.py\n--- a/faker/providers/ssn/fi_FI/__init__.py\n+++ b/faker/providers/ssn/fi_FI/__init__.py\n@@ -26,7 +26,10 @@\n checksum_characters = \"0123456789ABCDEFHJKLMNPRSTUVWXY\"\n return checksum_characters[int(hetu) % 31]\n \n- age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n+ if min_age == max_age:\n+ age = datetime.timedelta(days=min_age * 365)\n+ else:\n+ age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n birthday = datetime.date.today() - age\n hetu_date = \"%02d%02d%s\" % (\n birthday.day,\n", "issue": "When attempting to generate a Finnish ssn of a specific age the provider crashes\n* Faker version: 19.11.0\r\n* OS: Windows 10\r\n\r\nWhen attempting to generate a Finnish ssn of a certain age the provider crashes because ``datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))`` is always trying to generate a time within range. The ssn provider should handle situations where min_age and max_age are the same.\r\nFor example:\r\n```\r\nif min_age == max_age:\r\n age = datetime.timedelta(days=min_age * 365)\r\nelse:\r\n age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. Create a faker with fi_FI locale: ``fake = Faker('fi_FI')``\r\n2. Call the ssn provider with the same minimum and maximum age: ``fake.ssn(min_age=50, max_age=50, artificial=True)``\r\n\r\n### Expected behavior\r\n\r\nThe faker generates an artificial Finnish ssn that is 50 years old. \r\n\r\n### Actual behavior\r\n\r\n``ValueError: empty range for randrange() (18250, 18250, 0)``\r\n\n", "before_files": [{"content": "import datetime\n\nfrom .. import Provider as SsnProvider\n\n\nclass Provider(SsnProvider):\n def ssn(self, min_age: int = 0, max_age: int = 105, artificial: bool = False) -> str:\n \"\"\"\n Returns 11 character Finnish personal identity code (Henkil\u00f6tunnus,\n HETU, Swedish: Personbeteckning). This function assigns random\n gender to person.\n\n HETU consists of eleven characters of the form DDMMYYCZZZQ, where\n DDMMYY is the date of birth, C the century sign, ZZZ the individual\n number and Q the control character (checksum). The sign for the\n century is either + (1800\u20131899), - (1900\u20131999), or A (2000\u20132099).\n The individual number ZZZ is odd for males and even for females.\n For people born in Finland its range is 002-899\n (larger numbers may be used in special cases).\n An example of a valid code is 311280-888Y.\n\n https://en.wikipedia.org/wiki/National_identification_number#Finland\n \"\"\"\n\n def _checksum(hetu):\n checksum_characters = \"0123456789ABCDEFHJKLMNPRSTUVWXY\"\n return checksum_characters[int(hetu) % 31]\n\n age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n birthday = datetime.date.today() - age\n hetu_date = \"%02d%02d%s\" % (\n birthday.day,\n birthday.month,\n str(birthday.year)[-2:],\n )\n range = (900, 999) if artificial is True else (2, 899)\n suffix = str(self.generator.random.randrange(*range)).zfill(3)\n checksum = _checksum(hetu_date + suffix)\n separator = self._get_century_code(birthday.year)\n hetu = \"\".join([hetu_date, separator, suffix, checksum])\n return hetu\n\n @staticmethod\n def _get_century_code(year: int) -> str:\n \"\"\"Returns the century code for a given year\"\"\"\n if 2000 <= year < 3000:\n separator = \"A\"\n elif 1900 <= year < 2000:\n separator = \"-\"\n elif 1800 <= year < 1900:\n separator = \"+\"\n else:\n raise ValueError(\"Finnish SSN do not support people born before the year 1800 or after the year 2999\")\n return separator\n\n vat_id_formats = (\"FI########\",)\n\n def vat_id(self) -> str:\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n :return: A random Finnish VAT ID\n \"\"\"\n return self.bothify(self.random_element(self.vat_id_formats))\n", "path": "faker/providers/ssn/fi_FI/__init__.py"}], "after_files": [{"content": "import datetime\n\nfrom .. import Provider as SsnProvider\n\n\nclass Provider(SsnProvider):\n def ssn(self, min_age: int = 0, max_age: int = 105, artificial: bool = False) -> str:\n \"\"\"\n Returns 11 character Finnish personal identity code (Henkil\u00f6tunnus,\n HETU, Swedish: Personbeteckning). This function assigns random\n gender to person.\n\n HETU consists of eleven characters of the form DDMMYYCZZZQ, where\n DDMMYY is the date of birth, C the century sign, ZZZ the individual\n number and Q the control character (checksum). The sign for the\n century is either + (1800\u20131899), - (1900\u20131999), or A (2000\u20132099).\n The individual number ZZZ is odd for males and even for females.\n For people born in Finland its range is 002-899\n (larger numbers may be used in special cases).\n An example of a valid code is 311280-888Y.\n\n https://en.wikipedia.org/wiki/National_identification_number#Finland\n \"\"\"\n\n def _checksum(hetu):\n checksum_characters = \"0123456789ABCDEFHJKLMNPRSTUVWXY\"\n return checksum_characters[int(hetu) % 31]\n\n if min_age == max_age:\n age = datetime.timedelta(days=min_age * 365)\n else:\n age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n birthday = datetime.date.today() - age\n hetu_date = \"%02d%02d%s\" % (\n birthday.day,\n birthday.month,\n str(birthday.year)[-2:],\n )\n range = (900, 999) if artificial is True else (2, 899)\n suffix = str(self.generator.random.randrange(*range)).zfill(3)\n checksum = _checksum(hetu_date + suffix)\n separator = self._get_century_code(birthday.year)\n hetu = \"\".join([hetu_date, separator, suffix, checksum])\n return hetu\n\n @staticmethod\n def _get_century_code(year: int) -> str:\n \"\"\"Returns the century code for a given year\"\"\"\n if 2000 <= year < 3000:\n separator = \"A\"\n elif 1900 <= year < 2000:\n separator = \"-\"\n elif 1800 <= year < 1900:\n separator = \"+\"\n else:\n raise ValueError(\"Finnish SSN do not support people born before the year 1800 or after the year 2999\")\n return separator\n\n vat_id_formats = (\"FI########\",)\n\n def vat_id(self) -> str:\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n :return: A random Finnish VAT ID\n \"\"\"\n return self.bothify(self.random_element(self.vat_id_formats))\n", "path": "faker/providers/ssn/fi_FI/__init__.py"}]}
| 1,356 | 230 |
gh_patches_debug_25749
|
rasdani/github-patches
|
git_diff
|
kymatio__kymatio-822
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scattering 2D doesn't work when using 2^J == image size (TensorFlow backend)
Similar issues have been reported before (#284, #363) and also fixed (#412) for Torch backend. However, still not working in the tensorflow backend:
test:
```python
import numpy as np
from kymatio.tensorflow import Scattering2D
scattering = Scattering2D(J=5, shape=(32, 32))
test_im = np.ones((1,1,32,32))
test = scattering.scattering(test_im)
```
Gives error:
```
7 scattering = Scattering2D(J=5, shape=(32, 32))
8 test_im = np.ones((1,1,32,32))
----> 9 test = scattering.scattering(test_im)
~/.local/lib/python3.9/site-packages/kymatio/scattering2d/frontend/tensorflow_frontend.py in scattering(self, input)
48 input = tf.reshape(input, tf.concat(((-1,), signal_shape), 0))
49
---> 50 S = scattering2d(input, self.pad, self.unpad, self.backend, self.J, self.L, self.phi, self.psi,
51 self.max_order, self.out_type)
52
~/.local/lib/python3.9/site-packages/kymatio/scattering2d/core/scattering2d.py in scattering2d(x, pad, unpad, backend, J, L, phi, psi, max_order, out_type)
13 out_S_0, out_S_1, out_S_2 = [], [], []
14
---> 15 U_r = pad(x)
16
17 U_0_c = fft(U_r, 'C2C')
~/.local/lib/python3.9/site-packages/kymatio/scattering2d/backend/tensorflow_backend.py in __call__(self, x)
27 paddings = [[0, 0]] * len(x.shape[:-2])
28 paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]
---> 29 return tf.cast(tf.pad(x, paddings, mode="REFLECT"), tf.complex64)
30
31 def unpad(in_):
~/.local/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
~/.local/lib/python3.9/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
56 try:
57 ctx.ensure_initialized()
---> 58 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
59 inputs, attrs, num_outputs)
60 except core._NotOkStatusException as e:
InvalidArgumentError: paddings must be less than the dimension size: 32, 32 not less than 32 [Op:MirrorPad]
```
(speculation) So possibly problems with the order of the padding being different in tensorflow from torch.
Should also probably include some tests for these types of problems like the tests for implemented for Torch in #346
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kymatio/scattering2d/backend/tensorflow_backend.py`
Content:
```
1 import tensorflow as tf
2
3 from ...backend.tensorflow_backend import TensorFlowBackend
4
5
6 class Pad(object):
7 def __init__(self, pad_size, input_size):
8 """
9 Padding which allows to simultaneously pad in a reflection fashion
10 and map to complex.
11 Parameters
12 ----------
13 pad_size : list of 4 integers
14 size of padding to apply.
15 input_size : list of 2 integers
16 size of the original signal
17 """
18 self.pad_size = pad_size
19
20 def __call__(self, x):
21 paddings = [[0, 0]] * len(x.shape[:-2])
22 paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]
23 return tf.pad(x, paddings, mode="REFLECT")
24
25
26 class TensorFlowBackend2D(TensorFlowBackend):
27 Pad = Pad
28
29 @staticmethod
30 def unpad(in_):
31 """
32 Slices the input tensor at indices between 1::-1
33 Parameters
34 ----------
35 in_ : tensor_like
36 input tensor
37 Returns
38 -------
39 in_[..., 1:-1, 1:-1]
40 """
41 return in_[..., 1:-1, 1:-1]
42
43
44 @classmethod
45 def rfft(cls, x):
46 cls.real_check(x)
47 return tf.signal.fft2d(tf.cast(x, tf.complex64), name='rfft2d')
48
49 @classmethod
50 def irfft(cls, x):
51 cls.complex_check(x)
52 return tf.math.real(tf.signal.ifft2d(x, name='irfft2d'))
53
54
55 @classmethod
56 def ifft(cls, x):
57 cls.complex_check(x)
58 return tf.signal.ifft2d(x, name='ifft2d')
59
60 @classmethod
61 def subsample_fourier(cls, x, k):
62 """ Subsampling of a 2D image performed in the Fourier domain.
63
64 Subsampling in the spatial domain amounts to periodization
65 in the Fourier domain, hence the formula.
66
67 Parameters
68 ----------
69 x : tensor_like
70 input tensor with at least three dimensions.
71 k : int
72 integer such that x is subsampled by k along the spatial variables.
73
74 Returns
75 -------
76 out : tensor_like
77 Tensor such that its Fourier transform is the Fourier
78 transform of a subsampled version of x, i.e. in
79 F^{-1}(out)[u1, u2] = F^{-1}(x)[u1 * k, u2 * k]
80
81 """
82 cls.complex_check(x)
83
84 y = tf.reshape(x, (-1, k, x.shape[1] // k, k, x.shape[2] // k))
85
86 out = tf.reduce_mean(y, axis=(1, 3))
87 return out
88
89
90 backend = TensorFlowBackend2D
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kymatio/scattering2d/backend/tensorflow_backend.py b/kymatio/scattering2d/backend/tensorflow_backend.py
--- a/kymatio/scattering2d/backend/tensorflow_backend.py
+++ b/kymatio/scattering2d/backend/tensorflow_backend.py
@@ -16,11 +16,34 @@
size of the original signal
"""
self.pad_size = pad_size
+ self.input_size = input_size
def __call__(self, x):
+ pad_size = list(self.pad_size)
+
+ # Clone to avoid passing on modifications.
+ new_pad_size = list(pad_size)
+
+ # This handles the case where the padding is equal to the image size.
+ if pad_size[0] == self.input_size[0]:
+ new_pad_size[0] -= 1
+ new_pad_size[1] -= 1
+ if pad_size[2] == self.input_size[1]:
+ new_pad_size[2] -= 1
+ new_pad_size[3] -= 1
+
paddings = [[0, 0]] * len(x.shape[:-2])
- paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]
- return tf.pad(x, paddings, mode="REFLECT")
+ paddings += [[new_pad_size[0], new_pad_size[1]], [new_pad_size[2], new_pad_size[3]]]
+
+ x_padded = tf.pad(x, paddings, mode="REFLECT")
+
+ # Again, special handling for when padding is the same as image size.
+ if pad_size[0] == self.input_size[0]:
+ x_padded = tf.concat([tf.expand_dims(x_padded[..., 1, :], axis=-2), x_padded, tf.expand_dims(x_padded[..., x_padded.shape[-2] -2, :], axis=-2)], axis=-2)
+ if pad_size[2] == self.input_size[1]:
+ x_padded = tf.concat([tf.expand_dims(x_padded[..., :, 1], axis=-1), x_padded, tf.expand_dims(x_padded[..., :, x_padded.shape[-1]-2], axis=-1)], axis=-1)
+
+ return x_padded
class TensorFlowBackend2D(TensorFlowBackend):
|
{"golden_diff": "diff --git a/kymatio/scattering2d/backend/tensorflow_backend.py b/kymatio/scattering2d/backend/tensorflow_backend.py\n--- a/kymatio/scattering2d/backend/tensorflow_backend.py\n+++ b/kymatio/scattering2d/backend/tensorflow_backend.py\n@@ -16,11 +16,34 @@\n size of the original signal\n \"\"\"\n self.pad_size = pad_size\n+ self.input_size = input_size\n \n def __call__(self, x):\n+ pad_size = list(self.pad_size)\n+\n+ # Clone to avoid passing on modifications.\n+ new_pad_size = list(pad_size)\n+\n+ # This handles the case where the padding is equal to the image size.\n+ if pad_size[0] == self.input_size[0]:\n+ new_pad_size[0] -= 1\n+ new_pad_size[1] -= 1\n+ if pad_size[2] == self.input_size[1]:\n+ new_pad_size[2] -= 1\n+ new_pad_size[3] -= 1\n+\n paddings = [[0, 0]] * len(x.shape[:-2])\n- paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]\n- return tf.pad(x, paddings, mode=\"REFLECT\")\n+ paddings += [[new_pad_size[0], new_pad_size[1]], [new_pad_size[2], new_pad_size[3]]]\n+\n+ x_padded = tf.pad(x, paddings, mode=\"REFLECT\")\n+\n+ # Again, special handling for when padding is the same as image size.\n+ if pad_size[0] == self.input_size[0]:\n+ x_padded = tf.concat([tf.expand_dims(x_padded[..., 1, :], axis=-2), x_padded, tf.expand_dims(x_padded[..., x_padded.shape[-2] -2, :], axis=-2)], axis=-2)\n+ if pad_size[2] == self.input_size[1]:\n+ x_padded = tf.concat([tf.expand_dims(x_padded[..., :, 1], axis=-1), x_padded, tf.expand_dims(x_padded[..., :, x_padded.shape[-1]-2], axis=-1)], axis=-1)\n+\n+ return x_padded\n \n \n class TensorFlowBackend2D(TensorFlowBackend):\n", "issue": "Scattering 2D doesn't work when using 2^J == image size (TensorFlow backend)\nSimilar issues have been reported before (#284, #363) and also fixed (#412) for Torch backend. However, still not working in the tensorflow backend:\r\n\r\ntest:\r\n```python \r\nimport numpy as np\r\nfrom kymatio.tensorflow import Scattering2D\r\n\r\nscattering = Scattering2D(J=5, shape=(32, 32))\r\ntest_im = np.ones((1,1,32,32))\r\ntest = scattering.scattering(test_im)\r\n```\r\n\r\nGives error:\r\n```\r\n 7 scattering = Scattering2D(J=5, shape=(32, 32))\r\n 8 test_im = np.ones((1,1,32,32))\r\n----> 9 test = scattering.scattering(test_im)\r\n\r\n~/.local/lib/python3.9/site-packages/kymatio/scattering2d/frontend/tensorflow_frontend.py in scattering(self, input)\r\n 48 input = tf.reshape(input, tf.concat(((-1,), signal_shape), 0))\r\n 49 \r\n---> 50 S = scattering2d(input, self.pad, self.unpad, self.backend, self.J, self.L, self.phi, self.psi,\r\n 51 self.max_order, self.out_type)\r\n 52 \r\n\r\n~/.local/lib/python3.9/site-packages/kymatio/scattering2d/core/scattering2d.py in scattering2d(x, pad, unpad, backend, J, L, phi, psi, max_order, out_type)\r\n 13 out_S_0, out_S_1, out_S_2 = [], [], []\r\n 14 \r\n---> 15 U_r = pad(x)\r\n 16 \r\n 17 U_0_c = fft(U_r, 'C2C')\r\n\r\n~/.local/lib/python3.9/site-packages/kymatio/scattering2d/backend/tensorflow_backend.py in __call__(self, x)\r\n 27 paddings = [[0, 0]] * len(x.shape[:-2])\r\n 28 paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]\r\n---> 29 return tf.cast(tf.pad(x, paddings, mode=\"REFLECT\"), tf.complex64)\r\n 30 \r\n 31 def unpad(in_):\r\n\r\n~/.local/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)\r\n 151 except Exception as e:\r\n 152 filtered_tb = _process_traceback_frames(e.__traceback__)\r\n--> 153 raise e.with_traceback(filtered_tb) from None\r\n 154 finally:\r\n 155 del filtered_tb\r\n\r\n~/.local/lib/python3.9/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)\r\n 56 try:\r\n 57 ctx.ensure_initialized()\r\n---> 58 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\r\n 59 inputs, attrs, num_outputs)\r\n 60 except core._NotOkStatusException as e:\r\n\r\nInvalidArgumentError: paddings must be less than the dimension size: 32, 32 not less than 32 [Op:MirrorPad]\r\n```\r\n\r\n(speculation) So possibly problems with the order of the padding being different in tensorflow from torch. \r\n\r\nShould also probably include some tests for these types of problems like the tests for implemented for Torch in #346 \n", "before_files": [{"content": "import tensorflow as tf\n\nfrom ...backend.tensorflow_backend import TensorFlowBackend\n\n\nclass Pad(object):\n def __init__(self, pad_size, input_size):\n \"\"\"\n Padding which allows to simultaneously pad in a reflection fashion\n and map to complex.\n Parameters\n ----------\n pad_size : list of 4 integers\n size of padding to apply.\n input_size : list of 2 integers\n size of the original signal\n \"\"\"\n self.pad_size = pad_size\n\n def __call__(self, x):\n paddings = [[0, 0]] * len(x.shape[:-2])\n paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]\n return tf.pad(x, paddings, mode=\"REFLECT\")\n\n\nclass TensorFlowBackend2D(TensorFlowBackend):\n Pad = Pad\n\n @staticmethod\n def unpad(in_):\n \"\"\"\n Slices the input tensor at indices between 1::-1\n Parameters\n ----------\n in_ : tensor_like\n input tensor\n Returns\n -------\n in_[..., 1:-1, 1:-1]\n \"\"\"\n return in_[..., 1:-1, 1:-1]\n\n\n @classmethod\n def rfft(cls, x):\n cls.real_check(x)\n return tf.signal.fft2d(tf.cast(x, tf.complex64), name='rfft2d')\n\n @classmethod\n def irfft(cls, x):\n cls.complex_check(x)\n return tf.math.real(tf.signal.ifft2d(x, name='irfft2d'))\n\n\n @classmethod\n def ifft(cls, x):\n cls.complex_check(x)\n return tf.signal.ifft2d(x, name='ifft2d')\n\n @classmethod\n def subsample_fourier(cls, x, k):\n \"\"\" Subsampling of a 2D image performed in the Fourier domain.\n\n Subsampling in the spatial domain amounts to periodization\n in the Fourier domain, hence the formula.\n\n Parameters\n ----------\n x : tensor_like\n input tensor with at least three dimensions.\n k : int\n integer such that x is subsampled by k along the spatial variables.\n\n Returns\n -------\n out : tensor_like\n Tensor such that its Fourier transform is the Fourier\n transform of a subsampled version of x, i.e. in\n F^{-1}(out)[u1, u2] = F^{-1}(x)[u1 * k, u2 * k]\n\n \"\"\"\n cls.complex_check(x)\n\n y = tf.reshape(x, (-1, k, x.shape[1] // k, k, x.shape[2] // k))\n\n out = tf.reduce_mean(y, axis=(1, 3))\n return out\n\n\nbackend = TensorFlowBackend2D\n", "path": "kymatio/scattering2d/backend/tensorflow_backend.py"}], "after_files": [{"content": "import tensorflow as tf\n\nfrom ...backend.tensorflow_backend import TensorFlowBackend\n\n\nclass Pad(object):\n def __init__(self, pad_size, input_size):\n \"\"\"\n Padding which allows to simultaneously pad in a reflection fashion\n and map to complex.\n Parameters\n ----------\n pad_size : list of 4 integers\n size of padding to apply.\n input_size : list of 2 integers\n size of the original signal\n \"\"\"\n self.pad_size = pad_size\n self.input_size = input_size\n\n def __call__(self, x):\n pad_size = list(self.pad_size)\n\n # Clone to avoid passing on modifications.\n new_pad_size = list(pad_size)\n\n # This handles the case where the padding is equal to the image size.\n if pad_size[0] == self.input_size[0]:\n new_pad_size[0] -= 1\n new_pad_size[1] -= 1\n if pad_size[2] == self.input_size[1]:\n new_pad_size[2] -= 1\n new_pad_size[3] -= 1\n\n paddings = [[0, 0]] * len(x.shape[:-2])\n paddings += [[new_pad_size[0], new_pad_size[1]], [new_pad_size[2], new_pad_size[3]]]\n\n x_padded = tf.pad(x, paddings, mode=\"REFLECT\")\n\n # Again, special handling for when padding is the same as image size.\n if pad_size[0] == self.input_size[0]:\n x_padded = tf.concat([tf.expand_dims(x_padded[..., 1, :], axis=-2), x_padded, tf.expand_dims(x_padded[..., x_padded.shape[-2] -2, :], axis=-2)], axis=-2)\n if pad_size[2] == self.input_size[1]:\n x_padded = tf.concat([tf.expand_dims(x_padded[..., :, 1], axis=-1), x_padded, tf.expand_dims(x_padded[..., :, x_padded.shape[-1]-2], axis=-1)], axis=-1)\n\n return x_padded\n\n\nclass TensorFlowBackend2D(TensorFlowBackend):\n Pad = Pad\n\n @staticmethod\n def unpad(in_):\n \"\"\"\n Slices the input tensor at indices between 1::-1\n Parameters\n ----------\n in_ : tensor_like\n input tensor\n Returns\n -------\n in_[..., 1:-1, 1:-1]\n \"\"\"\n return in_[..., 1:-1, 1:-1]\n\n\n @classmethod\n def rfft(cls, x):\n cls.real_check(x)\n return tf.signal.fft2d(tf.cast(x, tf.complex64), name='rfft2d')\n\n @classmethod\n def irfft(cls, x):\n cls.complex_check(x)\n return tf.math.real(tf.signal.ifft2d(x, name='irfft2d'))\n\n\n @classmethod\n def ifft(cls, x):\n cls.complex_check(x)\n return tf.signal.ifft2d(x, name='ifft2d')\n\n @classmethod\n def subsample_fourier(cls, x, k):\n \"\"\" Subsampling of a 2D image performed in the Fourier domain.\n\n Subsampling in the spatial domain amounts to periodization\n in the Fourier domain, hence the formula.\n\n Parameters\n ----------\n x : tensor_like\n input tensor with at least three dimensions.\n k : int\n integer such that x is subsampled by k along the spatial variables.\n\n Returns\n -------\n out : tensor_like\n Tensor such that its Fourier transform is the Fourier\n transform of a subsampled version of x, i.e. in\n F^{-1}(out)[u1, u2] = F^{-1}(x)[u1 * k, u2 * k]\n\n \"\"\"\n cls.complex_check(x)\n\n y = tf.reshape(x, (-1, k, x.shape[1] // k, k, x.shape[2] // k))\n\n out = tf.reduce_mean(y, axis=(1, 3))\n return out\n\n\nbackend = TensorFlowBackend2D\n", "path": "kymatio/scattering2d/backend/tensorflow_backend.py"}]}
| 1,890 | 531 |
gh_patches_debug_17618
|
rasdani/github-patches
|
git_diff
|
CiviWiki__OpenCiviWiki-28
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix CSRF middleware
There is a [TODO in `settings.py` to 'fix the CSRF middleware'](https://github.com/CiviWiki/OpenCiviWiki/blob/dev/civiwiki/settings.py#L60). This issue is a placeholder to make sure we resolve the issue with CSRF middleware.
What is the issue that prevents us from using CSRF?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `civiwiki/settings.py`
Content:
```
1 """
2 Django settings for civiwiki project.
3 Darius Calliet May 12, 2016
4
5 Production settings file to select proper environment variables.
6 """
7 import os
8
9 from django.core.exceptions import ImproperlyConfigured
10 import dj_database_url
11
12
13 def get_env_variable(environment_variable, optional=False):
14 """Get the environment variable or return exception"""
15 try:
16 return os.environ[environment_variable]
17 except KeyError:
18 if optional:
19 return ''
20 else:
21 error = "Environment variable '{ev}' not found.".format(ev=environment_variable)
22 raise ImproperlyConfigured(error)
23
24
25 # Devlopment Environment Control
26 DEBUG = 'DEBUG' in os.environ
27
28 if 'DJANGO_HOST' in os.environ:
29 DJANGO_HOST = get_env_variable("DJANGO_HOST")
30 else:
31 DJANGO_HOST = 'LOCALHOST'
32
33
34 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
35 SECRET_KEY = get_env_variable("DJANGO_SECRET_KEY")
36 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost"]
37
38
39 INSTALLED_APPS = (
40 'django.contrib.admin',
41 'django.contrib.auth',
42 'django.contrib.contenttypes',
43 'django.contrib.sessions',
44 'django.contrib.messages',
45 'django.contrib.staticfiles',
46 'storages',
47 'channels',
48 'civiwiki',
49 'api',
50 'authentication',
51 'frontend_views',
52 'notifications',
53 'legislation',
54 )
55
56
57 MIDDLEWARE_CLASSES = (
58 'django.contrib.sessions.middleware.SessionMiddleware',
59 'django.middleware.common.CommonMiddleware',
60 # 'django.middleware.csrf.CsrfViewMiddleware', TODO: fix eventually
61 'django.contrib.auth.middleware.AuthenticationMiddleware',
62 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
63 'django.contrib.messages.middleware.MessageMiddleware',
64 'django.middleware.clickjacking.XFrameOptionsMiddleware',
65 'django.middleware.security.SecurityMiddleware',
66 )
67
68
69 CORS_ORIGIN_ALLOW_ALL = True
70 ROOT_URLCONF = 'civiwiki.urls'
71 LOGIN_URL = '/login'
72
73
74 # SSL Setup
75 if DJANGO_HOST is not 'LOCALHOST':
76 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
77 SECURE_SSL_REDIRECT = True
78 SESSION_COOKIE_SECURE = True
79 CSRF_COOKIE_SECURE = True
80
81
82 # Internationalization & Localization
83 LANGUAGE_CODE = 'en-us'
84 TIME_ZONE = 'UTC'
85 USE_I18N = True
86 USE_L10N = True
87 USE_TZ = True
88
89
90 TEMPLATES = [
91 {
92 'BACKEND': 'django.template.backends.django.DjangoTemplates',
93 'DIRS': [os.path.join(BASE_DIR, "webapp/templates")], #TODO: Add non-webapp template directory
94 'APP_DIRS': True,
95 'OPTIONS': {
96 'context_processors': [
97 'django.template.context_processors.debug',
98 'django.template.context_processors.request',
99 'django.contrib.auth.context_processors.auth',
100 'django.contrib.messages.context_processors.messages',
101 ],
102 },
103 },
104 ]
105
106
107 WSGI_APPLICATION = 'civiwiki.wsgi.application'
108
109
110 # Apex Contact for Production Errors
111 ADMINS = [('Development Team', 'dev@civiwiki.org')]
112
113
114 # API keys
115 SUNLIGHT_API_KEY = get_env_variable("SUNLIGHT_API_KEY")
116 GOOGLE_API_KEY = get_env_variable("GOOGLE_MAP_API_KEY")
117
118 # Channels Setup
119 if 'REDIS_URL' in os.environ:
120 REDIS_URL = get_env_variable("REDIS_URL")
121 else:
122 REDIS_URL = 'redis://localhost:6379'
123 CHANNEL_LAYERS = {
124 "default": {
125 "BACKEND": "asgi_redis.RedisChannelLayer",
126 "CONFIG": {
127 "hosts": [REDIS_URL],
128 },
129 "ROUTING": "civiwiki.routing.channel_routing",
130 },
131 }
132
133
134 # Celery Task Runner Setup
135 CELERY_BROKER_URL = REDIS_URL + '/0'
136 CELERY_RESULT_BACKEND = CELERY_BROKER_URL
137 CELERY_ACCEPT_CONTENT = ['application/json']
138 CELERY_TASK_SERIALIZER = 'json'
139 CELERY_RESULT_SERIALIZER = 'json'
140 CELERY_TIME_ZONE = TIME_ZONE
141
142
143 # AWS S3 Setup
144 if 'AWS_STORAGE_BUCKET_NAME' not in os.environ:
145 MEDIA_URL = '/media/'
146 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
147 else:
148 AWS_STORAGE_BUCKET_NAME = get_env_variable("AWS_STORAGE_BUCKET_NAME")
149 AWS_S3_ACCESS_KEY_ID = get_env_variable("AWS_S3_ACCESS_KEY_ID")
150 AWS_S3_SECRET_ACCESS_KEY = get_env_variable("AWS_S3_SECRET_ACCESS_KEY")
151 DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
152 AWS_S3_SECURE_URLS = False
153 AWS_QUERYSTRING_AUTH = False
154
155 STATIC_URL = '/static/'
156 STATICFILES_DIRS = (
157 os.path.join(BASE_DIR, 'webapp/static'),
158 )
159 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
160
161
162 # Database
163 if 'CIVIWIKI_LOCAL_NAME' not in os.environ:
164 STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
165
166 DATABASES = {
167 'default': dj_database_url.parse(get_env_variable("DATABASE_URL"))
168 }
169 else:
170 DATABASES = {
171 'default': {
172 'HOST': 'localhost',
173 'PORT': '5432',
174 'NAME': get_env_variable("CIVIWIKI_LOCAL_NAME"),
175 'ENGINE': 'django.db.backends.postgresql_psycopg2',
176 'USER': get_env_variable("CIVIWIKI_LOCAL_USERNAME"),
177 'PASSWORD': get_env_variable("CIVIWIKI_LOCAL_PASSWORD"),
178 },
179 }
180
181
182 # Email Backend Setup
183 if 'EMAIL_HOST' not in os.environ:
184 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
185 EMAIL_HOST_USER = "test@civiwiki.org"
186 else:
187 EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
188 EMAIL_HOST = get_env_variable("EMAIL_HOST")
189 EMAIL_PORT = get_env_variable("EMAIL_PORT")
190 EMAIL_HOST_USER = get_env_variable("EMAIL_HOST_USER")
191 EMAIL_HOST_PASSWORD = get_env_variable("EMAIL_HOST_PASSWORD")
192 EMAIL_USE_SSL = True
193 DEFAULT_FROM_EMAIL = EMAIL_HOST
194
195
196 # Notification API Settings
197 NOTIFICATIONS_SOFT_DELETE = True
198 NOTIFICATIONS_USE_JSONFIELD = True
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/civiwiki/settings.py b/civiwiki/settings.py
--- a/civiwiki/settings.py
+++ b/civiwiki/settings.py
@@ -57,7 +57,7 @@
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
- # 'django.middleware.csrf.CsrfViewMiddleware', TODO: fix eventually
+ 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
@@ -66,6 +66,8 @@
)
+CSRF_USE_SESSIONS = True # Store the CSRF token in the users session instead of in a cookie
+
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'civiwiki.urls'
LOGIN_URL = '/login'
|
{"golden_diff": "diff --git a/civiwiki/settings.py b/civiwiki/settings.py\n--- a/civiwiki/settings.py\n+++ b/civiwiki/settings.py\n@@ -57,7 +57,7 @@\n MIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n- # 'django.middleware.csrf.CsrfViewMiddleware', TODO: fix eventually\n+ 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n@@ -66,6 +66,8 @@\n )\n \n \n+CSRF_USE_SESSIONS = True # Store the CSRF token in the users session instead of in a cookie\n+\n CORS_ORIGIN_ALLOW_ALL = True\n ROOT_URLCONF = 'civiwiki.urls'\n LOGIN_URL = '/login'\n", "issue": " Fix CSRF middleware\nThere is a [TODO in `settings.py` to 'fix the CSRF middleware'](https://github.com/CiviWiki/OpenCiviWiki/blob/dev/civiwiki/settings.py#L60). This issue is a placeholder to make sure we resolve the issue with CSRF middleware.\r\n\r\nWhat is the issue that prevents us from using CSRF?\n", "before_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\nfrom django.core.exceptions import ImproperlyConfigured\nimport dj_database_url\n\n\ndef get_env_variable(environment_variable, optional=False):\n \"\"\"Get the environment variable or return exception\"\"\"\n try:\n return os.environ[environment_variable]\n except KeyError:\n if optional:\n return ''\n else:\n error = \"Environment variable '{ev}' not found.\".format(ev=environment_variable)\n raise ImproperlyConfigured(error)\n\n\n# Devlopment Environment Control\nDEBUG = 'DEBUG' in os.environ\n\nif 'DJANGO_HOST' in os.environ:\n DJANGO_HOST = get_env_variable(\"DJANGO_HOST\")\nelse:\n DJANGO_HOST = 'LOCALHOST'\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = get_env_variable(\"DJANGO_SECRET_KEY\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\"]\n\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'storages',\n 'channels',\n 'civiwiki',\n 'api',\n 'authentication',\n 'frontend_views',\n 'notifications',\n 'legislation',\n)\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # 'django.middleware.csrf.CsrfViewMiddleware', TODO: fix eventually\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = 'civiwiki.urls'\nLOGIN_URL = '/login'\n\n\n# SSL Setup\nif DJANGO_HOST is not 'LOCALHOST':\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n\n# Internationalization & Localization\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, \"webapp/templates\")], #TODO: Add non-webapp template directory\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = 'civiwiki.wsgi.application'\n\n\n# Apex Contact for Production Errors\nADMINS = [('Development Team', 'dev@civiwiki.org')]\n\n\n# API keys\nSUNLIGHT_API_KEY = get_env_variable(\"SUNLIGHT_API_KEY\")\nGOOGLE_API_KEY = get_env_variable(\"GOOGLE_MAP_API_KEY\")\n\n# Channels Setup\nif 'REDIS_URL' in os.environ:\n REDIS_URL = get_env_variable(\"REDIS_URL\")\nelse:\n REDIS_URL = 'redis://localhost:6379'\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"asgi_redis.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [REDIS_URL],\n },\n \"ROUTING\": \"civiwiki.routing.channel_routing\",\n },\n}\n\n\n# Celery Task Runner Setup\nCELERY_BROKER_URL = REDIS_URL + '/0'\nCELERY_RESULT_BACKEND = CELERY_BROKER_URL\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIME_ZONE = TIME_ZONE\n\n\n# AWS S3 Setup\nif 'AWS_STORAGE_BUCKET_NAME' not in os.environ:\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nelse:\n AWS_STORAGE_BUCKET_NAME = get_env_variable(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_ACCESS_KEY_ID = get_env_variable(\"AWS_S3_ACCESS_KEY_ID\")\n AWS_S3_SECRET_ACCESS_KEY = get_env_variable(\"AWS_S3_SECRET_ACCESS_KEY\")\n DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n AWS_S3_SECURE_URLS = False\n AWS_QUERYSTRING_AUTH = False\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'webapp/static'),\n)\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\n\n# Database\nif 'CIVIWIKI_LOCAL_NAME' not in os.environ:\n STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'\n\n DATABASES = {\n 'default': dj_database_url.parse(get_env_variable(\"DATABASE_URL\"))\n }\nelse:\n DATABASES = {\n 'default': {\n 'HOST': 'localhost',\n 'PORT': '5432',\n 'NAME': get_env_variable(\"CIVIWIKI_LOCAL_NAME\"),\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'USER': get_env_variable(\"CIVIWIKI_LOCAL_USERNAME\"),\n 'PASSWORD': get_env_variable(\"CIVIWIKI_LOCAL_PASSWORD\"),\n },\n }\n\n\n# Email Backend Setup\nif 'EMAIL_HOST' not in os.environ:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n EMAIL_HOST_USER = \"test@civiwiki.org\"\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n EMAIL_HOST = get_env_variable(\"EMAIL_HOST\")\n EMAIL_PORT = get_env_variable(\"EMAIL_PORT\")\n EMAIL_HOST_USER = get_env_variable(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = get_env_variable(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n", "path": "civiwiki/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\nfrom django.core.exceptions import ImproperlyConfigured\nimport dj_database_url\n\n\ndef get_env_variable(environment_variable, optional=False):\n \"\"\"Get the environment variable or return exception\"\"\"\n try:\n return os.environ[environment_variable]\n except KeyError:\n if optional:\n return ''\n else:\n error = \"Environment variable '{ev}' not found.\".format(ev=environment_variable)\n raise ImproperlyConfigured(error)\n\n\n# Devlopment Environment Control\nDEBUG = 'DEBUG' in os.environ\n\nif 'DJANGO_HOST' in os.environ:\n DJANGO_HOST = get_env_variable(\"DJANGO_HOST\")\nelse:\n DJANGO_HOST = 'LOCALHOST'\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = get_env_variable(\"DJANGO_SECRET_KEY\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\"]\n\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'storages',\n 'channels',\n 'civiwiki',\n 'api',\n 'authentication',\n 'frontend_views',\n 'notifications',\n 'legislation',\n)\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\n\nCSRF_USE_SESSIONS = True # Store the CSRF token in the users session instead of in a cookie\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = 'civiwiki.urls'\nLOGIN_URL = '/login'\n\n\n# SSL Setup\nif DJANGO_HOST is not 'LOCALHOST':\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n\n# Internationalization & Localization\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, \"webapp/templates\")], #TODO: Add non-webapp template directory\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = 'civiwiki.wsgi.application'\n\n\n# Apex Contact for Production Errors\nADMINS = [('Development Team', 'dev@civiwiki.org')]\n\n\n# API keys\nSUNLIGHT_API_KEY = get_env_variable(\"SUNLIGHT_API_KEY\")\nGOOGLE_API_KEY = get_env_variable(\"GOOGLE_MAP_API_KEY\")\n\n# Channels Setup\nif 'REDIS_URL' in os.environ:\n REDIS_URL = get_env_variable(\"REDIS_URL\")\nelse:\n REDIS_URL = 'redis://localhost:6379'\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"asgi_redis.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [REDIS_URL],\n },\n \"ROUTING\": \"civiwiki.routing.channel_routing\",\n },\n}\n\n\n# Celery Task Runner Setup\nCELERY_BROKER_URL = REDIS_URL + '/0'\nCELERY_RESULT_BACKEND = CELERY_BROKER_URL\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIME_ZONE = TIME_ZONE\n\n\n# AWS S3 Setup\nif 'AWS_STORAGE_BUCKET_NAME' not in os.environ:\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nelse:\n AWS_STORAGE_BUCKET_NAME = get_env_variable(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_ACCESS_KEY_ID = get_env_variable(\"AWS_S3_ACCESS_KEY_ID\")\n AWS_S3_SECRET_ACCESS_KEY = get_env_variable(\"AWS_S3_SECRET_ACCESS_KEY\")\n DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n AWS_S3_SECURE_URLS = False\n AWS_QUERYSTRING_AUTH = False\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'webapp/static'),\n)\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\n\n# Database\nif 'CIVIWIKI_LOCAL_NAME' not in os.environ:\n STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'\n\n DATABASES = {\n 'default': dj_database_url.parse(get_env_variable(\"DATABASE_URL\"))\n }\nelse:\n DATABASES = {\n 'default': {\n 'HOST': 'localhost',\n 'PORT': '5432',\n 'NAME': get_env_variable(\"CIVIWIKI_LOCAL_NAME\"),\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'USER': get_env_variable(\"CIVIWIKI_LOCAL_USERNAME\"),\n 'PASSWORD': get_env_variable(\"CIVIWIKI_LOCAL_PASSWORD\"),\n },\n }\n\n\n# Email Backend Setup\nif 'EMAIL_HOST' not in os.environ:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n EMAIL_HOST_USER = \"test@civiwiki.org\"\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n EMAIL_HOST = get_env_variable(\"EMAIL_HOST\")\n EMAIL_PORT = get_env_variable(\"EMAIL_PORT\")\n EMAIL_HOST_USER = get_env_variable(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = get_env_variable(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n", "path": "civiwiki/settings.py"}]}
| 2,183 | 186 |
gh_patches_debug_5859
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-11755
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GraphQL returns all objects if tag doesn't exist
### NetBox version
v3.4.2
### Python version
3.10
### Steps to Reproduce
1. query GraphQL for device/interface with a tag that doesn't exist
### Expected Behavior
Resulting list of devices/interfaces only contains devices matching the tags or none if no device exists with matching tags.
### Observed Behavior
All devices/interfaces are returned no matter what tag filter has been applied. Same seems to apply for all kinds of filters that can be defined in GraphQL. No errors are returned.
Example below
query:
`{
device_list(tag: "blablalba") {
id
name
tags {
name
}
}
interface_list(tag: "not-ipmi_exporter") {
id
device {
id
}
tags {
name
}
}
}
`
result:
`{
"data": {
"device_list": [
{
"id": "1",
"name": "device-A",
"tags": [
{
"name": "node_exporter"
}
]
},
{
"id": "2",
"name": "device-B",
"tags": [
{
"name": "node_exporter"
}
]
}
],
"interface_list": [
{
"id": "1",
"device": {
"id": "1"
},
"tags": [
{
"name": "ipmi_exporter"
}
]
},
{
"id": "2",
"device": {
"id": "2"
},
"tags": [
{
"name": "ipmi_exporter"
}
]
}
]
}
}`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/graphql/fields.py`
Content:
```
1 from functools import partial
2
3 import graphene
4 from graphene_django import DjangoListField
5
6 from .utils import get_graphene_type
7
8 __all__ = (
9 'ObjectField',
10 'ObjectListField',
11 )
12
13
14 class ObjectField(graphene.Field):
15 """
16 Retrieve a single object, identified by its numeric ID.
17 """
18 def __init__(self, *args, **kwargs):
19
20 if 'id' not in kwargs:
21 kwargs['id'] = graphene.Int(required=True)
22
23 super().__init__(*args, **kwargs)
24
25 @staticmethod
26 def object_resolver(django_object_type, root, info, **args):
27 """
28 Return an object given its numeric ID.
29 """
30 manager = django_object_type._meta.model._default_manager
31 queryset = django_object_type.get_queryset(manager, info)
32
33 return queryset.get(**args)
34
35 def get_resolver(self, parent_resolver):
36 return partial(self.object_resolver, self._type)
37
38
39 class ObjectListField(DjangoListField):
40 """
41 Retrieve a list of objects, optionally filtered by one or more FilterSet filters.
42 """
43 def __init__(self, _type, *args, **kwargs):
44 filter_kwargs = {}
45
46 # Get FilterSet kwargs
47 filterset_class = getattr(_type._meta, 'filterset_class', None)
48 if filterset_class:
49 for filter_name, filter_field in filterset_class.get_filters().items():
50 field_type = get_graphene_type(type(filter_field))
51 filter_kwargs[filter_name] = graphene.Argument(field_type)
52
53 super().__init__(_type, args=filter_kwargs, *args, **kwargs)
54
55 @staticmethod
56 def list_resolver(django_object_type, resolver, default_manager, root, info, **args):
57 queryset = super(ObjectListField, ObjectListField).list_resolver(django_object_type, resolver, default_manager, root, info, **args)
58
59 # Instantiate and apply the FilterSet, if defined
60 filterset_class = django_object_type._meta.filterset_class
61 if filterset_class:
62 filterset = filterset_class(data=args, queryset=queryset, request=info.context)
63 return filterset.qs
64
65 return queryset
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/netbox/graphql/fields.py b/netbox/netbox/graphql/fields.py
--- a/netbox/netbox/graphql/fields.py
+++ b/netbox/netbox/graphql/fields.py
@@ -60,6 +60,8 @@
filterset_class = django_object_type._meta.filterset_class
if filterset_class:
filterset = filterset_class(data=args, queryset=queryset, request=info.context)
+ if not filterset.is_valid():
+ return queryset.none()
return filterset.qs
return queryset
|
{"golden_diff": "diff --git a/netbox/netbox/graphql/fields.py b/netbox/netbox/graphql/fields.py\n--- a/netbox/netbox/graphql/fields.py\n+++ b/netbox/netbox/graphql/fields.py\n@@ -60,6 +60,8 @@\n filterset_class = django_object_type._meta.filterset_class\n if filterset_class:\n filterset = filterset_class(data=args, queryset=queryset, request=info.context)\n+ if not filterset.is_valid():\n+ return queryset.none()\n return filterset.qs\n \n return queryset\n", "issue": "GraphQL returns all objects if tag doesn't exist\n### NetBox version\n\nv3.4.2\n\n### Python version\n\n3.10\n\n### Steps to Reproduce\n\n1. query GraphQL for device/interface with a tag that doesn't exist\n\n### Expected Behavior\n\nResulting list of devices/interfaces only contains devices matching the tags or none if no device exists with matching tags.\n\n### Observed Behavior\n\nAll devices/interfaces are returned no matter what tag filter has been applied. Same seems to apply for all kinds of filters that can be defined in GraphQL. No errors are returned.\r\n\r\nExample below\r\n\r\nquery:\r\n`{\r\n device_list(tag: \"blablalba\") {\r\n id\r\n name\r\n tags {\r\n name\r\n }\r\n }\r\n interface_list(tag: \"not-ipmi_exporter\") {\r\n id\r\n device {\r\n id\r\n }\r\n tags {\r\n name\r\n }\r\n }\r\n}\r\n`\r\n\r\nresult:\r\n`{\r\n \"data\": {\r\n \"device_list\": [\r\n {\r\n \"id\": \"1\",\r\n \"name\": \"device-A\",\r\n \"tags\": [\r\n {\r\n \"name\": \"node_exporter\"\r\n }\r\n ]\r\n },\r\n {\r\n \"id\": \"2\",\r\n \"name\": \"device-B\",\r\n \"tags\": [\r\n {\r\n \"name\": \"node_exporter\"\r\n }\r\n ]\r\n }\r\n ],\r\n \"interface_list\": [\r\n {\r\n \"id\": \"1\",\r\n \"device\": {\r\n \"id\": \"1\"\r\n },\r\n \"tags\": [\r\n {\r\n \"name\": \"ipmi_exporter\"\r\n }\r\n ]\r\n },\r\n {\r\n \"id\": \"2\",\r\n \"device\": {\r\n \"id\": \"2\"\r\n },\r\n \"tags\": [\r\n {\r\n \"name\": \"ipmi_exporter\"\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n}`\n", "before_files": [{"content": "from functools import partial\n\nimport graphene\nfrom graphene_django import DjangoListField\n\nfrom .utils import get_graphene_type\n\n__all__ = (\n 'ObjectField',\n 'ObjectListField',\n)\n\n\nclass ObjectField(graphene.Field):\n \"\"\"\n Retrieve a single object, identified by its numeric ID.\n \"\"\"\n def __init__(self, *args, **kwargs):\n\n if 'id' not in kwargs:\n kwargs['id'] = graphene.Int(required=True)\n\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def object_resolver(django_object_type, root, info, **args):\n \"\"\"\n Return an object given its numeric ID.\n \"\"\"\n manager = django_object_type._meta.model._default_manager\n queryset = django_object_type.get_queryset(manager, info)\n\n return queryset.get(**args)\n\n def get_resolver(self, parent_resolver):\n return partial(self.object_resolver, self._type)\n\n\nclass ObjectListField(DjangoListField):\n \"\"\"\n Retrieve a list of objects, optionally filtered by one or more FilterSet filters.\n \"\"\"\n def __init__(self, _type, *args, **kwargs):\n filter_kwargs = {}\n\n # Get FilterSet kwargs\n filterset_class = getattr(_type._meta, 'filterset_class', None)\n if filterset_class:\n for filter_name, filter_field in filterset_class.get_filters().items():\n field_type = get_graphene_type(type(filter_field))\n filter_kwargs[filter_name] = graphene.Argument(field_type)\n\n super().__init__(_type, args=filter_kwargs, *args, **kwargs)\n\n @staticmethod\n def list_resolver(django_object_type, resolver, default_manager, root, info, **args):\n queryset = super(ObjectListField, ObjectListField).list_resolver(django_object_type, resolver, default_manager, root, info, **args)\n\n # Instantiate and apply the FilterSet, if defined\n filterset_class = django_object_type._meta.filterset_class\n if filterset_class:\n filterset = filterset_class(data=args, queryset=queryset, request=info.context)\n return filterset.qs\n\n return queryset\n", "path": "netbox/netbox/graphql/fields.py"}], "after_files": [{"content": "from functools import partial\n\nimport graphene\nfrom graphene_django import DjangoListField\n\nfrom .utils import get_graphene_type\n\n__all__ = (\n 'ObjectField',\n 'ObjectListField',\n)\n\n\nclass ObjectField(graphene.Field):\n \"\"\"\n Retrieve a single object, identified by its numeric ID.\n \"\"\"\n def __init__(self, *args, **kwargs):\n\n if 'id' not in kwargs:\n kwargs['id'] = graphene.Int(required=True)\n\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def object_resolver(django_object_type, root, info, **args):\n \"\"\"\n Return an object given its numeric ID.\n \"\"\"\n manager = django_object_type._meta.model._default_manager\n queryset = django_object_type.get_queryset(manager, info)\n\n return queryset.get(**args)\n\n def get_resolver(self, parent_resolver):\n return partial(self.object_resolver, self._type)\n\n\nclass ObjectListField(DjangoListField):\n \"\"\"\n Retrieve a list of objects, optionally filtered by one or more FilterSet filters.\n \"\"\"\n def __init__(self, _type, *args, **kwargs):\n filter_kwargs = {}\n\n # Get FilterSet kwargs\n filterset_class = getattr(_type._meta, 'filterset_class', None)\n if filterset_class:\n for filter_name, filter_field in filterset_class.get_filters().items():\n field_type = get_graphene_type(type(filter_field))\n filter_kwargs[filter_name] = graphene.Argument(field_type)\n\n super().__init__(_type, args=filter_kwargs, *args, **kwargs)\n\n @staticmethod\n def list_resolver(django_object_type, resolver, default_manager, root, info, **args):\n queryset = super(ObjectListField, ObjectListField).list_resolver(django_object_type, resolver, default_manager, root, info, **args)\n\n # Instantiate and apply the FilterSet, if defined\n filterset_class = django_object_type._meta.filterset_class\n if filterset_class:\n filterset = filterset_class(data=args, queryset=queryset, request=info.context)\n if not filterset.is_valid():\n return queryset.none()\n return filterset.qs\n\n return queryset\n", "path": "netbox/netbox/graphql/fields.py"}]}
| 1,245 | 121 |
gh_patches_debug_2601
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-2126
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Community Roles: Date range validation
Currently, an end date earlier than start date is allowed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/communityroles/forms.py`
Content:
```
1 from collections import defaultdict
2 from typing import Any, Optional
3
4 from django import forms
5 from django.core.exceptions import ObjectDoesNotExist, ValidationError
6
7 from workshops.fields import HeavySelect2Widget, ModelSelect2Widget
8 from workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin
9
10 from .models import CommunityRole, CommunityRoleConfig
11
12
13 class CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):
14 class Meta:
15 model = CommunityRole
16 fields = (
17 "config",
18 "person",
19 "award",
20 "start",
21 "end",
22 "inactivation",
23 "membership",
24 "url",
25 "generic_relation_content_type",
26 "generic_relation_pk",
27 )
28 widgets = {
29 "config": HeavySelect2Widget(
30 data_view="api:communityroleconfig-list", attrs=SELECT2_SIDEBAR
31 ),
32 "person": ModelSelect2Widget(
33 data_view="person-lookup", attrs=SELECT2_SIDEBAR
34 ),
35 "award": ModelSelect2Widget(
36 data_view="award-lookup", attrs=SELECT2_SIDEBAR
37 ),
38 "membership": ModelSelect2Widget(
39 data_view="membership-lookup", attrs=SELECT2_SIDEBAR
40 ),
41 "generic_relation_content_type": forms.Select(
42 # "disabled" means the browsers will not send the field during POST.
43 # See how it's handled in `clean()` method below.
44 attrs={"disabled": ""},
45 ),
46 "generic_relation_pk": HeavySelect2Widget(
47 data_view="generic-object-lookup", attrs=SELECT2_SIDEBAR
48 ),
49 }
50 labels = {
51 "generic_relation_content_type": "Generic relation object type",
52 "generic_relation_pk": "Generic relation object",
53 }
54
55 class Media:
56 js = ("communityrole_form.js",)
57
58 def __init__(self, *args, **kwargs):
59 form_tag = kwargs.pop("form_tag", True)
60 super().__init__(*args, **kwargs)
61 bootstrap_kwargs = {
62 "add_cancel_button": False,
63 "form_tag": form_tag,
64 }
65 self.helper = BootstrapHelper(**bootstrap_kwargs)
66
67 def clean(self) -> dict[str, Any]:
68 """Validate form according to rules set up in related Community Role
69 configuration."""
70 cleaned_data = super().clean()
71 errors: defaultdict[str, list[ValidationError]] = defaultdict(list)
72 config: Optional[CommunityRoleConfig] = cleaned_data.get("config")
73
74 # Config is required, but field validation for 'config' should raise
75 # validation error first.
76 if not config:
77 return cleaned_data
78
79 # Award required?
80 if config.link_to_award and not cleaned_data.get("award"):
81 errors["award"].append(
82 ValidationError(f"Award is required with community role {config}")
83 )
84
85 # Specific award badge required?
86 if (badge := config.award_badge_limit) and (award := cleaned_data.get("award")):
87 if award.badge != badge:
88 errors["award"].append(
89 ValidationError(
90 f"Award badge must be {badge} for community role {config}"
91 )
92 )
93
94 # Membership required?
95 if config.link_to_membership and not cleaned_data.get("membership"):
96 errors["membership"].append(
97 ValidationError(f"Membership is required with community role {config}")
98 )
99
100 # Additional URL supported?
101 if not config.additional_url and cleaned_data.get("url"):
102 errors["url"].append(
103 ValidationError(f"URL is not supported for community role {config}")
104 )
105
106 # Widget for `generic_relation_content_type` is disabled in HTML, which
107 # makes browsers not send it. The code below sets the default value to
108 # the same value as in related config.
109 generic_relation_content_type = config.generic_relation_content_type
110
111 # Generic relation object must exist
112 if config.generic_relation_content_type and generic_relation_content_type:
113 model_class = generic_relation_content_type.model_class()
114 try:
115 model_class._base_manager.get(
116 pk=cleaned_data.get("generic_relation_pk")
117 )
118 except ObjectDoesNotExist:
119 errors["generic_relation_pk"].append(
120 ValidationError(
121 f"Generic relation object of model {model_class.__name__} "
122 "doesn't exist"
123 )
124 )
125
126 if errors:
127 raise ValidationError(errors)
128
129 return cleaned_data
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/amy/communityroles/forms.py b/amy/communityroles/forms.py
--- a/amy/communityroles/forms.py
+++ b/amy/communityroles/forms.py
@@ -127,3 +127,11 @@
raise ValidationError(errors)
return cleaned_data
+
+ def clean_end(self):
+ """Validate that end >= start"""
+ start = self.cleaned_data.get("start")
+ end = self.cleaned_data.get("end")
+ if start and end and end < start:
+ raise ValidationError("Must not be earlier than start date.")
+ return end
|
{"golden_diff": "diff --git a/amy/communityroles/forms.py b/amy/communityroles/forms.py\n--- a/amy/communityroles/forms.py\n+++ b/amy/communityroles/forms.py\n@@ -127,3 +127,11 @@\n raise ValidationError(errors)\n \n return cleaned_data\n+\n+ def clean_end(self):\n+ \"\"\"Validate that end >= start\"\"\"\n+ start = self.cleaned_data.get(\"start\")\n+ end = self.cleaned_data.get(\"end\")\n+ if start and end and end < start:\n+ raise ValidationError(\"Must not be earlier than start date.\")\n+ return end\n", "issue": "Community Roles: Date range validation\nCurrently, an end date earlier than start date is allowed. \n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import Any, Optional\n\nfrom django import forms\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\n\nfrom workshops.fields import HeavySelect2Widget, ModelSelect2Widget\nfrom workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin\n\nfrom .models import CommunityRole, CommunityRoleConfig\n\n\nclass CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):\n class Meta:\n model = CommunityRole\n fields = (\n \"config\",\n \"person\",\n \"award\",\n \"start\",\n \"end\",\n \"inactivation\",\n \"membership\",\n \"url\",\n \"generic_relation_content_type\",\n \"generic_relation_pk\",\n )\n widgets = {\n \"config\": HeavySelect2Widget(\n data_view=\"api:communityroleconfig-list\", attrs=SELECT2_SIDEBAR\n ),\n \"person\": ModelSelect2Widget(\n data_view=\"person-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"award\": ModelSelect2Widget(\n data_view=\"award-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"membership\": ModelSelect2Widget(\n data_view=\"membership-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"generic_relation_content_type\": forms.Select(\n # \"disabled\" means the browsers will not send the field during POST.\n # See how it's handled in `clean()` method below.\n attrs={\"disabled\": \"\"},\n ),\n \"generic_relation_pk\": HeavySelect2Widget(\n data_view=\"generic-object-lookup\", attrs=SELECT2_SIDEBAR\n ),\n }\n labels = {\n \"generic_relation_content_type\": \"Generic relation object type\",\n \"generic_relation_pk\": \"Generic relation object\",\n }\n\n class Media:\n js = (\"communityrole_form.js\",)\n\n def __init__(self, *args, **kwargs):\n form_tag = kwargs.pop(\"form_tag\", True)\n super().__init__(*args, **kwargs)\n bootstrap_kwargs = {\n \"add_cancel_button\": False,\n \"form_tag\": form_tag,\n }\n self.helper = BootstrapHelper(**bootstrap_kwargs)\n\n def clean(self) -> dict[str, Any]:\n \"\"\"Validate form according to rules set up in related Community Role\n configuration.\"\"\"\n cleaned_data = super().clean()\n errors: defaultdict[str, list[ValidationError]] = defaultdict(list)\n config: Optional[CommunityRoleConfig] = cleaned_data.get(\"config\")\n\n # Config is required, but field validation for 'config' should raise\n # validation error first.\n if not config:\n return cleaned_data\n\n # Award required?\n if config.link_to_award and not cleaned_data.get(\"award\"):\n errors[\"award\"].append(\n ValidationError(f\"Award is required with community role {config}\")\n )\n\n # Specific award badge required?\n if (badge := config.award_badge_limit) and (award := cleaned_data.get(\"award\")):\n if award.badge != badge:\n errors[\"award\"].append(\n ValidationError(\n f\"Award badge must be {badge} for community role {config}\"\n )\n )\n\n # Membership required?\n if config.link_to_membership and not cleaned_data.get(\"membership\"):\n errors[\"membership\"].append(\n ValidationError(f\"Membership is required with community role {config}\")\n )\n\n # Additional URL supported?\n if not config.additional_url and cleaned_data.get(\"url\"):\n errors[\"url\"].append(\n ValidationError(f\"URL is not supported for community role {config}\")\n )\n\n # Widget for `generic_relation_content_type` is disabled in HTML, which\n # makes browsers not send it. The code below sets the default value to\n # the same value as in related config.\n generic_relation_content_type = config.generic_relation_content_type\n\n # Generic relation object must exist\n if config.generic_relation_content_type and generic_relation_content_type:\n model_class = generic_relation_content_type.model_class()\n try:\n model_class._base_manager.get(\n pk=cleaned_data.get(\"generic_relation_pk\")\n )\n except ObjectDoesNotExist:\n errors[\"generic_relation_pk\"].append(\n ValidationError(\n f\"Generic relation object of model {model_class.__name__} \"\n \"doesn't exist\"\n )\n )\n\n if errors:\n raise ValidationError(errors)\n\n return cleaned_data\n", "path": "amy/communityroles/forms.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom typing import Any, Optional\n\nfrom django import forms\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\n\nfrom workshops.fields import HeavySelect2Widget, ModelSelect2Widget\nfrom workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin\n\nfrom .models import CommunityRole, CommunityRoleConfig\n\n\nclass CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):\n class Meta:\n model = CommunityRole\n fields = (\n \"config\",\n \"person\",\n \"award\",\n \"start\",\n \"end\",\n \"inactivation\",\n \"membership\",\n \"url\",\n \"generic_relation_content_type\",\n \"generic_relation_pk\",\n )\n widgets = {\n \"config\": HeavySelect2Widget(\n data_view=\"api:communityroleconfig-list\", attrs=SELECT2_SIDEBAR\n ),\n \"person\": ModelSelect2Widget(\n data_view=\"person-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"award\": ModelSelect2Widget(\n data_view=\"award-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"membership\": ModelSelect2Widget(\n data_view=\"membership-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"generic_relation_content_type\": forms.Select(\n # \"disabled\" means the browsers will not send the field during POST.\n # See how it's handled in `clean()` method below.\n attrs={\"disabled\": \"\"},\n ),\n \"generic_relation_pk\": HeavySelect2Widget(\n data_view=\"generic-object-lookup\", attrs=SELECT2_SIDEBAR\n ),\n }\n labels = {\n \"generic_relation_content_type\": \"Generic relation object type\",\n \"generic_relation_pk\": \"Generic relation object\",\n }\n\n class Media:\n js = (\"communityrole_form.js\",)\n\n def __init__(self, *args, **kwargs):\n form_tag = kwargs.pop(\"form_tag\", True)\n super().__init__(*args, **kwargs)\n bootstrap_kwargs = {\n \"add_cancel_button\": False,\n \"form_tag\": form_tag,\n }\n self.helper = BootstrapHelper(**bootstrap_kwargs)\n\n def clean(self) -> dict[str, Any]:\n \"\"\"Validate form according to rules set up in related Community Role\n configuration.\"\"\"\n cleaned_data = super().clean()\n errors: defaultdict[str, list[ValidationError]] = defaultdict(list)\n config: Optional[CommunityRoleConfig] = cleaned_data.get(\"config\")\n\n # Config is required, but field validation for 'config' should raise\n # validation error first.\n if not config:\n return cleaned_data\n\n # Award required?\n if config.link_to_award and not cleaned_data.get(\"award\"):\n errors[\"award\"].append(\n ValidationError(f\"Award is required with community role {config}\")\n )\n\n # Specific award badge required?\n if (badge := config.award_badge_limit) and (award := cleaned_data.get(\"award\")):\n if award.badge != badge:\n errors[\"award\"].append(\n ValidationError(\n f\"Award badge must be {badge} for community role {config}\"\n )\n )\n\n # Membership required?\n if config.link_to_membership and not cleaned_data.get(\"membership\"):\n errors[\"membership\"].append(\n ValidationError(f\"Membership is required with community role {config}\")\n )\n\n # Additional URL supported?\n if not config.additional_url and cleaned_data.get(\"url\"):\n errors[\"url\"].append(\n ValidationError(f\"URL is not supported for community role {config}\")\n )\n\n # Widget for `generic_relation_content_type` is disabled in HTML, which\n # makes browsers not send it. The code below sets the default value to\n # the same value as in related config.\n generic_relation_content_type = config.generic_relation_content_type\n\n # Generic relation object must exist\n if config.generic_relation_content_type and generic_relation_content_type:\n model_class = generic_relation_content_type.model_class()\n try:\n model_class._base_manager.get(\n pk=cleaned_data.get(\"generic_relation_pk\")\n )\n except ObjectDoesNotExist:\n errors[\"generic_relation_pk\"].append(\n ValidationError(\n f\"Generic relation object of model {model_class.__name__} \"\n \"doesn't exist\"\n )\n )\n\n if errors:\n raise ValidationError(errors)\n\n return cleaned_data\n\n def clean_end(self):\n \"\"\"Validate that end >= start\"\"\"\n start = self.cleaned_data.get(\"start\")\n end = self.cleaned_data.get(\"end\")\n if start and end and end < start:\n raise ValidationError(\"Must not be earlier than start date.\")\n return end\n", "path": "amy/communityroles/forms.py"}]}
| 1,491 | 130 |
gh_patches_debug_9438
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-945
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[AC] AC not showing up with a "decorated" trigger
Note: check if only DYNAMIC_COMPLETIONS are affected.
When I supplement the completion by typing more characters, the completion widget goes away.

<hr>
I don't get a completion widget at all sometimes. In the gif below I expect to at least see a completion for the "time" variable:

I'm guessing this is due to incomplete completions. I'm not sure what the dynamic is here.
_Originally posted by @rwols in https://github.com/sublimelsp/LSP/pull/866#issuecomment-605441312_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/completion.py`
Content:
```
1 import sublime
2 import sublime_plugin
3
4 from .core.configurations import is_supported_syntax
5 from .core.edit import parse_text_edit
6 from .core.logging import debug
7 from .core.protocol import Request, Range, InsertTextFormat
8 from .core.registry import session_for_view, client_from_session, LSPViewEventListener
9 from .core.sessions import Session
10 from .core.settings import settings, client_configs
11 from .core.typing import Any, List, Dict, Optional, Union
12 from .core.views import range_to_region
13 from .core.views import text_document_position_params
14
15
16 completion_kinds = {
17 1: (sublime.KIND_ID_MARKUP, "Ξ", "Text"),
18 2: (sublime.KIND_ID_FUNCTION, "λ", "Method"),
19 3: (sublime.KIND_ID_FUNCTION, "λ", "Function"),
20 4: (sublime.KIND_ID_FUNCTION, "c", "Constructor"),
21 5: (sublime.KIND_ID_VARIABLE, "f", "Field"),
22 6: (sublime.KIND_ID_VARIABLE, "v", "Variable"),
23 7: (sublime.KIND_ID_TYPE, "c", "Class"),
24 8: (sublime.KIND_ID_TYPE, "i", "Interface"),
25 9: (sublime.KIND_ID_NAMESPACE, "◪", "Module"),
26 10: (sublime.KIND_ID_VARIABLE, "ρ", "Property"),
27 11: (sublime.KIND_ID_VARIABLE, "u", "Unit"),
28 12: (sublime.KIND_ID_VARIABLE, "ν", "Value"),
29 13: (sublime.KIND_ID_TYPE, "ε", "Enum"),
30 14: (sublime.KIND_ID_KEYWORD, "κ", "Keyword"),
31 15: (sublime.KIND_ID_SNIPPET, "s", "Snippet"),
32 16: (sublime.KIND_ID_AMBIGUOUS, "c", "Color"),
33 17: (sublime.KIND_ID_AMBIGUOUS, "#", "File"),
34 18: (sublime.KIND_ID_AMBIGUOUS, "⇢", "Reference"),
35 19: (sublime.KIND_ID_AMBIGUOUS, "ƒ", "Folder"),
36 20: (sublime.KIND_ID_TYPE, "ε", "EnumMember"),
37 21: (sublime.KIND_ID_VARIABLE, "π", "Constant"),
38 22: (sublime.KIND_ID_TYPE, "s", "Struct"),
39 23: (sublime.KIND_ID_FUNCTION, "e", "Event"),
40 24: (sublime.KIND_ID_KEYWORD, "ο", "Operator"),
41 25: (sublime.KIND_ID_TYPE, "τ", "Type Parameter")
42 }
43
44
45 class RestoreLines:
46 def __init__(self) -> None:
47 self.saved_lines = [] # type: List[dict]
48
49 def save_lines(self, locations: List[int], view: sublime.View) -> None:
50 change_id = view.change_id()
51
52 for point in locations:
53 line = view.line(point)
54 change_region = (line.begin(), line.end())
55 text = view.substr(line)
56
57 self.saved_lines.append({
58 "change_id": change_id,
59 "change_region": change_region,
60 "text": text,
61 # cursor will be use retore the cursor the te exact position
62 "cursor": point
63 })
64
65 def to_dict(self) -> Dict[str, Any]:
66 return {
67 "saved_lines": self.saved_lines
68 }
69
70 @staticmethod
71 def from_dict(dictionary: Dict[str, Any]) -> 'RestoreLines':
72 restore_lines = RestoreLines()
73 restore_lines.saved_lines = dictionary["saved_lines"]
74 return restore_lines
75
76 def restore_lines(self, edit: sublime.Edit, view: sublime.View) -> None:
77 # restore lines contents
78 # insert back lines from the bottom to top
79 for saved_line in reversed(self.saved_lines):
80 change_id = saved_line['change_id']
81 begin, end = saved_line['change_region']
82 change_region = sublime.Region(begin, end)
83
84 transform_region = view.transform_region_from(change_region, change_id)
85 view.erase(edit, transform_region)
86 view.insert(edit, transform_region.begin(), saved_line['text'])
87
88 # restore old cursor position
89 view.sel().clear()
90 for saved_line in self.saved_lines:
91 view.sel().add(saved_line["cursor"])
92
93
94 def format_completion(item: dict, restore_lines: RestoreLines) -> sublime.CompletionItem:
95 kind = sublime.KIND_AMBIGUOUS
96
97 item_kind = item.get("kind")
98 if item_kind:
99 kind = completion_kinds.get(item_kind, sublime.KIND_AMBIGUOUS)
100
101 is_deprecated = item.get("deprecated", False)
102 if is_deprecated:
103 list_kind = list(kind)
104 list_kind[1] = '⚠'
105 list_kind[2] = "⚠ {} - Deprecated".format(list_kind[2])
106 kind = tuple(list_kind) # type: ignore
107
108 return sublime.CompletionItem.command_completion(
109 trigger=item["label"],
110 command="lsp_select_completion_item",
111 args={
112 "item": item,
113 "restore_lines_dict": restore_lines.to_dict()
114 },
115 annotation=item.get('detail') or "",
116 kind=kind
117 )
118
119
120 class LspSelectCompletionItemCommand(sublime_plugin.TextCommand):
121 def run(self, edit: sublime.Edit, item: Any, restore_lines_dict: dict) -> None:
122 insert_text_format = item.get("insertTextFormat")
123
124 text_edit = item.get('textEdit')
125 if text_edit:
126 # restore the lines
127 # so we don't have to calculate the offset for the textEdit range
128 restore_lines = RestoreLines.from_dict(restore_lines_dict)
129 restore_lines.restore_lines(edit, self.view)
130
131 new_text = text_edit.get('newText')
132
133 range = Range.from_lsp(text_edit['range'])
134 edit_region = range_to_region(range, self.view)
135
136 # calculate offset by comparing cursor position with edit_region.begin.
137 # by applying the offset to all selections
138 # the TextEdit becomes valid for all selections
139 cursor = self.view.sel()[0].begin() # type: int
140
141 offset_start = cursor - edit_region.begin()
142 offset_length = edit_region.end() - edit_region.begin()
143
144 # erease regions from bottom to top
145 for sel in reversed(self.view.sel()):
146 begin = sel.begin() - offset_start
147 end = begin + offset_length
148 r = sublime.Region(begin, end)
149 self.view.erase(edit, r)
150
151 if insert_text_format == InsertTextFormat.Snippet:
152 self.view.run_command("insert_snippet", {"contents": new_text})
153 else:
154 # insert text from bottom to top
155 for sel in reversed(self.view.sel()):
156 self.view.insert(edit, sel.begin(), new_text)
157 else:
158 completion = item.get('insertText') or item.get('label') or ""
159 if insert_text_format == InsertTextFormat.Snippet:
160 self.view.run_command("insert_snippet", {"contents": completion})
161 else:
162 for sel in self.view.sel():
163 self.view.insert(edit, sel.begin(), completion)
164
165 # import statements, etc. some servers only return these after a resolve.
166 additional_edits = item.get('additionalTextEdits')
167 if additional_edits:
168 self.apply_additional_edits(additional_edits)
169 else:
170 self.do_resolve(item)
171
172 def do_resolve(self, item: dict) -> None:
173 session = session_for_view(self.view, 'completionProvider', self.view.sel()[0].begin())
174 if not session:
175 return
176
177 client = client_from_session(session)
178 if not client:
179 return
180
181 completion_provider = session.get_capability('completionProvider')
182 has_resolve_provider = completion_provider and completion_provider.get('resolveProvider', False)
183 if has_resolve_provider:
184 client.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)
185
186 def handle_resolve_response(self, response: Optional[dict]) -> None:
187 if response:
188 additional_edits = response.get('additionalTextEdits')
189 if additional_edits:
190 self.apply_additional_edits(additional_edits)
191
192 def apply_additional_edits(self, additional_edits: List[dict]) -> None:
193 edits = list(parse_text_edit(additional_edit) for additional_edit in additional_edits)
194 debug('applying additional edits:', edits)
195 self.view.run_command("lsp_apply_document_edit", {'changes': edits})
196 sublime.status_message('Applied additional edits for completion')
197
198
199 class CompletionHandler(LSPViewEventListener):
200 def __init__(self, view: sublime.View) -> None:
201 super().__init__(view)
202 self.initialized = False
203 self.enabled = False
204
205 @classmethod
206 def is_applicable(cls, view_settings: dict) -> bool:
207 if 'completion' in settings.disabled_capabilities:
208 return False
209
210 syntax = view_settings.get('syntax')
211 return is_supported_syntax(syntax, client_configs.all) if syntax else False
212
213 def initialize(self) -> None:
214 self.initialized = True
215 session = session_for_view(self.view, 'completionProvider')
216 if session:
217 completionProvider = session.get_capability('completionProvider') or dict() # type: dict
218 # A language server may have an empty dict as CompletionOptions. In that case,
219 # no trigger characters will be registered but we'll still respond to Sublime's
220 # usual query for completions. So the explicit check for None is necessary.
221 self.enabled = True
222
223 trigger_chars = completionProvider.get(
224 'triggerCharacters') or []
225 if trigger_chars:
226 self.register_trigger_chars(session, trigger_chars)
227
228 def _view_language(self, config_name: str) -> Optional[str]:
229 languages = self.view.settings().get('lsp_language')
230 return languages.get(config_name) if languages else None
231
232 def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:
233 completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]
234 view_language = self._view_language(session.config.name)
235 if view_language:
236 for language in session.config.languages:
237 if language.id == view_language:
238 for scope in language.scopes:
239 # debug("registering", trigger_chars, "for", scope)
240 scope_trigger = next(
241 (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),
242 None
243 )
244 if not scope_trigger: # do not override user's trigger settings.
245 completion_triggers.append({
246 'characters': "".join(trigger_chars),
247 'selector': scope
248 })
249
250 self.view.settings().set('auto_complete_triggers', completion_triggers)
251
252 def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:
253 if not self.initialized:
254 self.initialize()
255 if not self.enabled:
256 return None
257 client = client_from_session(session_for_view(self.view, 'completionProvider', locations[0]))
258 if not client:
259 return None
260 restore_lines = RestoreLines()
261 restore_lines.save_lines(locations, self.view)
262 self.manager.documents.purge_changes(self.view)
263 completion_list = sublime.CompletionList()
264 client.send_request(
265 Request.complete(text_document_position_params(self.view, locations[0])),
266 lambda res: self.handle_response(res, completion_list, restore_lines),
267 lambda res: self.handle_error(res, completion_list))
268 return completion_list
269
270 def handle_response(self, response: Optional[Union[dict, List]],
271 completion_list: sublime.CompletionList, restore_lines: RestoreLines) -> None:
272 response_items = [] # type: List[Dict]
273 incomplete = False
274 if isinstance(response, dict):
275 response_items = response["items"] or []
276 incomplete = response.get("isIncomplete", False)
277 elif isinstance(response, list):
278 response_items = response
279 response_items = sorted(response_items, key=lambda item: item.get("sortText") or item["label"])
280
281 flags = 0
282 if settings.only_show_lsp_completions:
283 flags |= sublime.INHIBIT_WORD_COMPLETIONS
284 flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS
285
286 if incomplete:
287 flags |= sublime.DYNAMIC_COMPLETIONS
288 completion_list.set_completions([format_completion(i, restore_lines) for i in response_items], flags)
289
290 def handle_error(self, error: dict, completion_list: sublime.CompletionList) -> None:
291 completion_list.set_completions([])
292 sublime.status_message('Completion error: ' + str(error.get('message')))
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugin/completion.py b/plugin/completion.py
--- a/plugin/completion.py
+++ b/plugin/completion.py
@@ -224,6 +224,8 @@
'triggerCharacters') or []
if trigger_chars:
self.register_trigger_chars(session, trigger_chars)
+ # This is to make ST match with labels that have a weird prefix like a space character.
+ self.view.settings().set("auto_complete_preserve_order", "none")
def _view_language(self, config_name: str) -> Optional[str]:
languages = self.view.settings().get('lsp_language')
|
{"golden_diff": "diff --git a/plugin/completion.py b/plugin/completion.py\n--- a/plugin/completion.py\n+++ b/plugin/completion.py\n@@ -224,6 +224,8 @@\n 'triggerCharacters') or []\n if trigger_chars:\n self.register_trigger_chars(session, trigger_chars)\n+ # This is to make ST match with labels that have a weird prefix like a space character.\n+ self.view.settings().set(\"auto_complete_preserve_order\", \"none\")\n \n def _view_language(self, config_name: str) -> Optional[str]:\n languages = self.view.settings().get('lsp_language')\n", "issue": "[AC] AC not showing up with a \"decorated\" trigger\nNote: check if only DYNAMIC_COMPLETIONS are affected.\r\n\r\nWhen I supplement the completion by typing more characters, the completion widget goes away.\r\n\r\n\r\n<hr>\r\nI don't get a completion widget at all sometimes. In the gif below I expect to at least see a completion for the \"time\" variable:\r\n\r\n\r\n\r\nI'm guessing this is due to incomplete completions. I'm not sure what the dynamic is here.\r\n\r\n_Originally posted by @rwols in https://github.com/sublimelsp/LSP/pull/866#issuecomment-605441312_\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\n\nfrom .core.configurations import is_supported_syntax\nfrom .core.edit import parse_text_edit\nfrom .core.logging import debug\nfrom .core.protocol import Request, Range, InsertTextFormat\nfrom .core.registry import session_for_view, client_from_session, LSPViewEventListener\nfrom .core.sessions import Session\nfrom .core.settings import settings, client_configs\nfrom .core.typing import Any, List, Dict, Optional, Union\nfrom .core.views import range_to_region\nfrom .core.views import text_document_position_params\n\n\ncompletion_kinds = {\n 1: (sublime.KIND_ID_MARKUP, \"\u039e\", \"Text\"),\n 2: (sublime.KIND_ID_FUNCTION, \"\u03bb\", \"Method\"),\n 3: (sublime.KIND_ID_FUNCTION, \"\u03bb\", \"Function\"),\n 4: (sublime.KIND_ID_FUNCTION, \"c\", \"Constructor\"),\n 5: (sublime.KIND_ID_VARIABLE, \"f\", \"Field\"),\n 6: (sublime.KIND_ID_VARIABLE, \"v\", \"Variable\"),\n 7: (sublime.KIND_ID_TYPE, \"c\", \"Class\"),\n 8: (sublime.KIND_ID_TYPE, \"i\", \"Interface\"),\n 9: (sublime.KIND_ID_NAMESPACE, \"\u25ea\", \"Module\"),\n 10: (sublime.KIND_ID_VARIABLE, \"\u03c1\", \"Property\"),\n 11: (sublime.KIND_ID_VARIABLE, \"u\", \"Unit\"),\n 12: (sublime.KIND_ID_VARIABLE, \"\u03bd\", \"Value\"),\n 13: (sublime.KIND_ID_TYPE, \"\u03b5\", \"Enum\"),\n 14: (sublime.KIND_ID_KEYWORD, \"\u03ba\", \"Keyword\"),\n 15: (sublime.KIND_ID_SNIPPET, \"s\", \"Snippet\"),\n 16: (sublime.KIND_ID_AMBIGUOUS, \"c\", \"Color\"),\n 17: (sublime.KIND_ID_AMBIGUOUS, \"#\", \"File\"),\n 18: (sublime.KIND_ID_AMBIGUOUS, \"\u21e2\", \"Reference\"),\n 19: (sublime.KIND_ID_AMBIGUOUS, \"\u0192\", \"Folder\"),\n 20: (sublime.KIND_ID_TYPE, \"\u03b5\", \"EnumMember\"),\n 21: (sublime.KIND_ID_VARIABLE, \"\u03c0\", \"Constant\"),\n 22: (sublime.KIND_ID_TYPE, \"s\", \"Struct\"),\n 23: (sublime.KIND_ID_FUNCTION, \"e\", \"Event\"),\n 24: (sublime.KIND_ID_KEYWORD, \"\u03bf\", \"Operator\"),\n 25: (sublime.KIND_ID_TYPE, \"\u03c4\", \"Type Parameter\")\n}\n\n\nclass RestoreLines:\n def __init__(self) -> None:\n self.saved_lines = [] # type: List[dict]\n\n def save_lines(self, locations: List[int], view: sublime.View) -> None:\n change_id = view.change_id()\n\n for point in locations:\n line = view.line(point)\n change_region = (line.begin(), line.end())\n text = view.substr(line)\n\n self.saved_lines.append({\n \"change_id\": change_id,\n \"change_region\": change_region,\n \"text\": text,\n # cursor will be use retore the cursor the te exact position\n \"cursor\": point\n })\n\n def to_dict(self) -> Dict[str, Any]:\n return {\n \"saved_lines\": self.saved_lines\n }\n\n @staticmethod\n def from_dict(dictionary: Dict[str, Any]) -> 'RestoreLines':\n restore_lines = RestoreLines()\n restore_lines.saved_lines = dictionary[\"saved_lines\"]\n return restore_lines\n\n def restore_lines(self, edit: sublime.Edit, view: sublime.View) -> None:\n # restore lines contents\n # insert back lines from the bottom to top\n for saved_line in reversed(self.saved_lines):\n change_id = saved_line['change_id']\n begin, end = saved_line['change_region']\n change_region = sublime.Region(begin, end)\n\n transform_region = view.transform_region_from(change_region, change_id)\n view.erase(edit, transform_region)\n view.insert(edit, transform_region.begin(), saved_line['text'])\n\n # restore old cursor position\n view.sel().clear()\n for saved_line in self.saved_lines:\n view.sel().add(saved_line[\"cursor\"])\n\n\ndef format_completion(item: dict, restore_lines: RestoreLines) -> sublime.CompletionItem:\n kind = sublime.KIND_AMBIGUOUS\n\n item_kind = item.get(\"kind\")\n if item_kind:\n kind = completion_kinds.get(item_kind, sublime.KIND_AMBIGUOUS)\n\n is_deprecated = item.get(\"deprecated\", False)\n if is_deprecated:\n list_kind = list(kind)\n list_kind[1] = '\u26a0'\n list_kind[2] = \"\u26a0 {} - Deprecated\".format(list_kind[2])\n kind = tuple(list_kind) # type: ignore\n\n return sublime.CompletionItem.command_completion(\n trigger=item[\"label\"],\n command=\"lsp_select_completion_item\",\n args={\n \"item\": item,\n \"restore_lines_dict\": restore_lines.to_dict()\n },\n annotation=item.get('detail') or \"\",\n kind=kind\n )\n\n\nclass LspSelectCompletionItemCommand(sublime_plugin.TextCommand):\n def run(self, edit: sublime.Edit, item: Any, restore_lines_dict: dict) -> None:\n insert_text_format = item.get(\"insertTextFormat\")\n\n text_edit = item.get('textEdit')\n if text_edit:\n # restore the lines\n # so we don't have to calculate the offset for the textEdit range\n restore_lines = RestoreLines.from_dict(restore_lines_dict)\n restore_lines.restore_lines(edit, self.view)\n\n new_text = text_edit.get('newText')\n\n range = Range.from_lsp(text_edit['range'])\n edit_region = range_to_region(range, self.view)\n\n # calculate offset by comparing cursor position with edit_region.begin.\n # by applying the offset to all selections\n # the TextEdit becomes valid for all selections\n cursor = self.view.sel()[0].begin() # type: int\n\n offset_start = cursor - edit_region.begin()\n offset_length = edit_region.end() - edit_region.begin()\n\n # erease regions from bottom to top\n for sel in reversed(self.view.sel()):\n begin = sel.begin() - offset_start\n end = begin + offset_length\n r = sublime.Region(begin, end)\n self.view.erase(edit, r)\n\n if insert_text_format == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": new_text})\n else:\n # insert text from bottom to top\n for sel in reversed(self.view.sel()):\n self.view.insert(edit, sel.begin(), new_text)\n else:\n completion = item.get('insertText') or item.get('label') or \"\"\n if insert_text_format == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": completion})\n else:\n for sel in self.view.sel():\n self.view.insert(edit, sel.begin(), completion)\n\n # import statements, etc. some servers only return these after a resolve.\n additional_edits = item.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n else:\n self.do_resolve(item)\n\n def do_resolve(self, item: dict) -> None:\n session = session_for_view(self.view, 'completionProvider', self.view.sel()[0].begin())\n if not session:\n return\n\n client = client_from_session(session)\n if not client:\n return\n\n completion_provider = session.get_capability('completionProvider')\n has_resolve_provider = completion_provider and completion_provider.get('resolveProvider', False)\n if has_resolve_provider:\n client.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)\n\n def handle_resolve_response(self, response: Optional[dict]) -> None:\n if response:\n additional_edits = response.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n\n def apply_additional_edits(self, additional_edits: List[dict]) -> None:\n edits = list(parse_text_edit(additional_edit) for additional_edit in additional_edits)\n debug('applying additional edits:', edits)\n self.view.run_command(\"lsp_apply_document_edit\", {'changes': edits})\n sublime.status_message('Applied additional edits for completion')\n\n\nclass CompletionHandler(LSPViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.initialized = False\n self.enabled = False\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'completion' in settings.disabled_capabilities:\n return False\n\n syntax = view_settings.get('syntax')\n return is_supported_syntax(syntax, client_configs.all) if syntax else False\n\n def initialize(self) -> None:\n self.initialized = True\n session = session_for_view(self.view, 'completionProvider')\n if session:\n completionProvider = session.get_capability('completionProvider') or dict() # type: dict\n # A language server may have an empty dict as CompletionOptions. In that case,\n # no trigger characters will be registered but we'll still respond to Sublime's\n # usual query for completions. So the explicit check for None is necessary.\n self.enabled = True\n\n trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n if trigger_chars:\n self.register_trigger_chars(session, trigger_chars)\n\n def _view_language(self, config_name: str) -> Optional[str]:\n languages = self.view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:\n completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]\n view_language = self._view_language(session.config.name)\n if view_language:\n for language in session.config.languages:\n if language.id == view_language:\n for scope in language.scopes:\n # debug(\"registering\", trigger_chars, \"for\", scope)\n scope_trigger = next(\n (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n None\n )\n if not scope_trigger: # do not override user's trigger settings.\n completion_triggers.append({\n 'characters': \"\".join(trigger_chars),\n 'selector': scope\n })\n\n self.view.settings().set('auto_complete_triggers', completion_triggers)\n\n def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:\n if not self.initialized:\n self.initialize()\n if not self.enabled:\n return None\n client = client_from_session(session_for_view(self.view, 'completionProvider', locations[0]))\n if not client:\n return None\n restore_lines = RestoreLines()\n restore_lines.save_lines(locations, self.view)\n self.manager.documents.purge_changes(self.view)\n completion_list = sublime.CompletionList()\n client.send_request(\n Request.complete(text_document_position_params(self.view, locations[0])),\n lambda res: self.handle_response(res, completion_list, restore_lines),\n lambda res: self.handle_error(res, completion_list))\n return completion_list\n\n def handle_response(self, response: Optional[Union[dict, List]],\n completion_list: sublime.CompletionList, restore_lines: RestoreLines) -> None:\n response_items = [] # type: List[Dict]\n incomplete = False\n if isinstance(response, dict):\n response_items = response[\"items\"] or []\n incomplete = response.get(\"isIncomplete\", False)\n elif isinstance(response, list):\n response_items = response\n response_items = sorted(response_items, key=lambda item: item.get(\"sortText\") or item[\"label\"])\n\n flags = 0\n if settings.only_show_lsp_completions:\n flags |= sublime.INHIBIT_WORD_COMPLETIONS\n flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS\n\n if incomplete:\n flags |= sublime.DYNAMIC_COMPLETIONS\n completion_list.set_completions([format_completion(i, restore_lines) for i in response_items], flags)\n\n def handle_error(self, error: dict, completion_list: sublime.CompletionList) -> None:\n completion_list.set_completions([])\n sublime.status_message('Completion error: ' + str(error.get('message')))\n", "path": "plugin/completion.py"}], "after_files": [{"content": "import sublime\nimport sublime_plugin\n\nfrom .core.configurations import is_supported_syntax\nfrom .core.edit import parse_text_edit\nfrom .core.logging import debug\nfrom .core.protocol import Request, Range, InsertTextFormat\nfrom .core.registry import session_for_view, client_from_session, LSPViewEventListener\nfrom .core.sessions import Session\nfrom .core.settings import settings, client_configs\nfrom .core.typing import Any, List, Dict, Optional, Union\nfrom .core.views import range_to_region\nfrom .core.views import text_document_position_params\n\n\ncompletion_kinds = {\n 1: (sublime.KIND_ID_MARKUP, \"\u039e\", \"Text\"),\n 2: (sublime.KIND_ID_FUNCTION, \"\u03bb\", \"Method\"),\n 3: (sublime.KIND_ID_FUNCTION, \"\u03bb\", \"Function\"),\n 4: (sublime.KIND_ID_FUNCTION, \"c\", \"Constructor\"),\n 5: (sublime.KIND_ID_VARIABLE, \"f\", \"Field\"),\n 6: (sublime.KIND_ID_VARIABLE, \"v\", \"Variable\"),\n 7: (sublime.KIND_ID_TYPE, \"c\", \"Class\"),\n 8: (sublime.KIND_ID_TYPE, \"i\", \"Interface\"),\n 9: (sublime.KIND_ID_NAMESPACE, \"\u25ea\", \"Module\"),\n 10: (sublime.KIND_ID_VARIABLE, \"\u03c1\", \"Property\"),\n 11: (sublime.KIND_ID_VARIABLE, \"u\", \"Unit\"),\n 12: (sublime.KIND_ID_VARIABLE, \"\u03bd\", \"Value\"),\n 13: (sublime.KIND_ID_TYPE, \"\u03b5\", \"Enum\"),\n 14: (sublime.KIND_ID_KEYWORD, \"\u03ba\", \"Keyword\"),\n 15: (sublime.KIND_ID_SNIPPET, \"s\", \"Snippet\"),\n 16: (sublime.KIND_ID_AMBIGUOUS, \"c\", \"Color\"),\n 17: (sublime.KIND_ID_AMBIGUOUS, \"#\", \"File\"),\n 18: (sublime.KIND_ID_AMBIGUOUS, \"\u21e2\", \"Reference\"),\n 19: (sublime.KIND_ID_AMBIGUOUS, \"\u0192\", \"Folder\"),\n 20: (sublime.KIND_ID_TYPE, \"\u03b5\", \"EnumMember\"),\n 21: (sublime.KIND_ID_VARIABLE, \"\u03c0\", \"Constant\"),\n 22: (sublime.KIND_ID_TYPE, \"s\", \"Struct\"),\n 23: (sublime.KIND_ID_FUNCTION, \"e\", \"Event\"),\n 24: (sublime.KIND_ID_KEYWORD, \"\u03bf\", \"Operator\"),\n 25: (sublime.KIND_ID_TYPE, \"\u03c4\", \"Type Parameter\")\n}\n\n\nclass RestoreLines:\n def __init__(self) -> None:\n self.saved_lines = [] # type: List[dict]\n\n def save_lines(self, locations: List[int], view: sublime.View) -> None:\n change_id = view.change_id()\n\n for point in locations:\n line = view.line(point)\n change_region = (line.begin(), line.end())\n text = view.substr(line)\n\n self.saved_lines.append({\n \"change_id\": change_id,\n \"change_region\": change_region,\n \"text\": text,\n # cursor will be use retore the cursor the te exact position\n \"cursor\": point\n })\n\n def to_dict(self) -> Dict[str, Any]:\n return {\n \"saved_lines\": self.saved_lines\n }\n\n @staticmethod\n def from_dict(dictionary: Dict[str, Any]) -> 'RestoreLines':\n restore_lines = RestoreLines()\n restore_lines.saved_lines = dictionary[\"saved_lines\"]\n return restore_lines\n\n def restore_lines(self, edit: sublime.Edit, view: sublime.View) -> None:\n # restore lines contents\n # insert back lines from the bottom to top\n for saved_line in reversed(self.saved_lines):\n change_id = saved_line['change_id']\n begin, end = saved_line['change_region']\n change_region = sublime.Region(begin, end)\n\n transform_region = view.transform_region_from(change_region, change_id)\n view.erase(edit, transform_region)\n view.insert(edit, transform_region.begin(), saved_line['text'])\n\n # restore old cursor position\n view.sel().clear()\n for saved_line in self.saved_lines:\n view.sel().add(saved_line[\"cursor\"])\n\n\ndef format_completion(item: dict, restore_lines: RestoreLines) -> sublime.CompletionItem:\n kind = sublime.KIND_AMBIGUOUS\n\n item_kind = item.get(\"kind\")\n if item_kind:\n kind = completion_kinds.get(item_kind, sublime.KIND_AMBIGUOUS)\n\n is_deprecated = item.get(\"deprecated\", False)\n if is_deprecated:\n list_kind = list(kind)\n list_kind[1] = '\u26a0'\n list_kind[2] = \"\u26a0 {} - Deprecated\".format(list_kind[2])\n kind = tuple(list_kind) # type: ignore\n\n return sublime.CompletionItem.command_completion(\n trigger=item[\"label\"],\n command=\"lsp_select_completion_item\",\n args={\n \"item\": item,\n \"restore_lines_dict\": restore_lines.to_dict()\n },\n annotation=item.get('detail') or \"\",\n kind=kind\n )\n\n\nclass LspSelectCompletionItemCommand(sublime_plugin.TextCommand):\n def run(self, edit: sublime.Edit, item: Any, restore_lines_dict: dict) -> None:\n insert_text_format = item.get(\"insertTextFormat\")\n\n text_edit = item.get('textEdit')\n if text_edit:\n # restore the lines\n # so we don't have to calculate the offset for the textEdit range\n restore_lines = RestoreLines.from_dict(restore_lines_dict)\n restore_lines.restore_lines(edit, self.view)\n\n new_text = text_edit.get('newText')\n\n range = Range.from_lsp(text_edit['range'])\n edit_region = range_to_region(range, self.view)\n\n # calculate offset by comparing cursor position with edit_region.begin.\n # by applying the offset to all selections\n # the TextEdit becomes valid for all selections\n cursor = self.view.sel()[0].begin() # type: int\n\n offset_start = cursor - edit_region.begin()\n offset_length = edit_region.end() - edit_region.begin()\n\n # erease regions from bottom to top\n for sel in reversed(self.view.sel()):\n begin = sel.begin() - offset_start\n end = begin + offset_length\n r = sublime.Region(begin, end)\n self.view.erase(edit, r)\n\n if insert_text_format == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": new_text})\n else:\n # insert text from bottom to top\n for sel in reversed(self.view.sel()):\n self.view.insert(edit, sel.begin(), new_text)\n else:\n completion = item.get('insertText') or item.get('label') or \"\"\n if insert_text_format == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": completion})\n else:\n for sel in self.view.sel():\n self.view.insert(edit, sel.begin(), completion)\n\n # import statements, etc. some servers only return these after a resolve.\n additional_edits = item.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n else:\n self.do_resolve(item)\n\n def do_resolve(self, item: dict) -> None:\n session = session_for_view(self.view, 'completionProvider', self.view.sel()[0].begin())\n if not session:\n return\n\n client = client_from_session(session)\n if not client:\n return\n\n completion_provider = session.get_capability('completionProvider')\n has_resolve_provider = completion_provider and completion_provider.get('resolveProvider', False)\n if has_resolve_provider:\n client.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)\n\n def handle_resolve_response(self, response: Optional[dict]) -> None:\n if response:\n additional_edits = response.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n\n def apply_additional_edits(self, additional_edits: List[dict]) -> None:\n edits = list(parse_text_edit(additional_edit) for additional_edit in additional_edits)\n debug('applying additional edits:', edits)\n self.view.run_command(\"lsp_apply_document_edit\", {'changes': edits})\n sublime.status_message('Applied additional edits for completion')\n\n\nclass CompletionHandler(LSPViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.initialized = False\n self.enabled = False\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'completion' in settings.disabled_capabilities:\n return False\n\n syntax = view_settings.get('syntax')\n return is_supported_syntax(syntax, client_configs.all) if syntax else False\n\n def initialize(self) -> None:\n self.initialized = True\n session = session_for_view(self.view, 'completionProvider')\n if session:\n completionProvider = session.get_capability('completionProvider') or dict() # type: dict\n # A language server may have an empty dict as CompletionOptions. In that case,\n # no trigger characters will be registered but we'll still respond to Sublime's\n # usual query for completions. So the explicit check for None is necessary.\n self.enabled = True\n\n trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n if trigger_chars:\n self.register_trigger_chars(session, trigger_chars)\n # This is to make ST match with labels that have a weird prefix like a space character.\n self.view.settings().set(\"auto_complete_preserve_order\", \"none\")\n\n def _view_language(self, config_name: str) -> Optional[str]:\n languages = self.view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:\n completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]\n view_language = self._view_language(session.config.name)\n if view_language:\n for language in session.config.languages:\n if language.id == view_language:\n for scope in language.scopes:\n # debug(\"registering\", trigger_chars, \"for\", scope)\n scope_trigger = next(\n (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n None\n )\n if not scope_trigger: # do not override user's trigger settings.\n completion_triggers.append({\n 'characters': \"\".join(trigger_chars),\n 'selector': scope\n })\n\n self.view.settings().set('auto_complete_triggers', completion_triggers)\n\n def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:\n if not self.initialized:\n self.initialize()\n if not self.enabled:\n return None\n client = client_from_session(session_for_view(self.view, 'completionProvider', locations[0]))\n if not client:\n return None\n restore_lines = RestoreLines()\n restore_lines.save_lines(locations, self.view)\n self.manager.documents.purge_changes(self.view)\n completion_list = sublime.CompletionList()\n client.send_request(\n Request.complete(text_document_position_params(self.view, locations[0])),\n lambda res: self.handle_response(res, completion_list, restore_lines),\n lambda res: self.handle_error(res, completion_list))\n return completion_list\n\n def handle_response(self, response: Optional[Union[dict, List]],\n completion_list: sublime.CompletionList, restore_lines: RestoreLines) -> None:\n response_items = [] # type: List[Dict]\n incomplete = False\n if isinstance(response, dict):\n response_items = response[\"items\"] or []\n incomplete = response.get(\"isIncomplete\", False)\n elif isinstance(response, list):\n response_items = response\n response_items = sorted(response_items, key=lambda item: item.get(\"sortText\") or item[\"label\"])\n\n flags = 0\n if settings.only_show_lsp_completions:\n flags |= sublime.INHIBIT_WORD_COMPLETIONS\n flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS\n\n if incomplete:\n flags |= sublime.DYNAMIC_COMPLETIONS\n completion_list.set_completions([format_completion(i, restore_lines) for i in response_items], flags)\n\n def handle_error(self, error: dict, completion_list: sublime.CompletionList) -> None:\n completion_list.set_completions([])\n sublime.status_message('Completion error: ' + str(error.get('message')))\n", "path": "plugin/completion.py"}]}
| 4,091 | 132 |
gh_patches_debug_12625
|
rasdani/github-patches
|
git_diff
|
HypothesisWorks__hypothesis-1530
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
License file in pypi archive
The pypi source archive isn't including the `LICENSE.txt` file. Would it be possible to add it? It is very helpful when packaging this for Linux distributions. The official python packaging guidelines also say [each package should have one](https://packaging.python.org/guides/distributing-packages-using-setuptools/#license-txt). Thank you!
Add keywords to setup.py
Testing with pyroma package showed keywords are missing from setup.py
```
willo@macbook ~/g/m/h/hypothesis-python> pyroma .
------------------------------
Checking .
Found hypothesis
------------------------------
Your package does not have keywords data.
------------------------------
Final rating: 9/10
Cottage Cheese
------------------------------
```
Discussed with @Zac-HD and keywords from the github repo below would be good to use.
python testing fuzzing property-based-testing
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hypothesis-python/setup.py`
Content:
```
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # (david@drmaciver.com), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import os
21 import sys
22 import warnings
23
24 import setuptools
25
26
27 def local_file(name):
28 return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
29
30
31 SOURCE = local_file('src')
32 README = local_file('README.rst')
33
34 setuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))
35
36 if setuptools_version < (36, 2):
37 # Warning only - very bad if uploading bdist but fine if installing sdist.
38 warnings.warn(
39 'This version of setuptools is too old to correctly store '
40 'conditional dependencies in binary wheels. For more info, see: '
41 'https://hynek.me/articles/conditional-python-dependencies/'
42 )
43
44
45 # Assignment to placate pyflakes. The actual version is from the exec that
46 # follows.
47 __version__ = None
48
49 with open(local_file('src/hypothesis/version.py')) as o:
50 exec(o.read())
51
52 assert __version__ is not None
53
54
55 extras = {
56 'datetime': ['pytz'],
57 'pytz': ['pytz'],
58 'dateutil': ['python-dateutil'],
59 'fakefactory': ['Faker>=0.7'],
60 'numpy': ['numpy>=1.9.0'],
61 'pytest': ['pytest>=3.0'],
62 # We only support Django versions with upstream support - see
63 # https://www.djangoproject.com/download/#supported-versions
64 'django': ['pytz', 'django>=1.11'],
65 }
66
67 extras['faker'] = extras['fakefactory']
68 extras['all'] = sorted(sum(extras.values(), []))
69
70
71 install_requires = ['attrs>=16.0.0', 'coverage>=4.0']
72 # Using an environment marker on enum34 makes the dependency condition
73 # independent of the build environemnt, which is important for wheels.
74 # https://www.python.org/dev/peps/pep-0345/#environment-markers
75 if sys.version_info[0] < 3 and setuptools_version < (8, 0):
76 # Except really old systems, where we give up and install unconditionally
77 install_requires.append('enum34')
78 else:
79 install_requires.append('enum34; python_version=="2.7"')
80
81
82 setuptools.setup(
83 name='hypothesis',
84 version=__version__,
85 author='David R. MacIver',
86 author_email='david@drmaciver.com',
87 packages=setuptools.find_packages(SOURCE),
88 package_dir={'': SOURCE},
89 package_data={'hypothesis': ['py.typed']},
90 url=(
91 'https://github.com/HypothesisWorks/hypothesis/'
92 'tree/master/hypothesis-python'
93 ),
94 license='MPL v2',
95 description='A library for property based testing',
96 zip_safe=False,
97 extras_require=extras,
98 install_requires=install_requires,
99 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
100 classifiers=[
101 'Development Status :: 5 - Production/Stable',
102 'Intended Audience :: Developers',
103 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
104 'Operating System :: Unix',
105 'Operating System :: POSIX',
106 'Operating System :: Microsoft :: Windows',
107 'Programming Language :: Python',
108 'Programming Language :: Python :: 2.7',
109 'Programming Language :: Python :: 3',
110 'Programming Language :: Python :: 3.4',
111 'Programming Language :: Python :: 3.5',
112 'Programming Language :: Python :: 3.6',
113 'Programming Language :: Python :: 3.7',
114 'Programming Language :: Python :: Implementation :: CPython',
115 'Programming Language :: Python :: Implementation :: PyPy',
116 'Topic :: Software Development :: Testing',
117 'Framework :: Pytest',
118 ],
119 entry_points={
120 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],
121 },
122 long_description=open(README).read(),
123 )
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py
--- a/hypothesis-python/setup.py
+++ b/hypothesis-python/setup.py
@@ -53,9 +53,9 @@
extras = {
- 'datetime': ['pytz'],
- 'pytz': ['pytz'],
- 'dateutil': ['python-dateutil'],
+ 'datetime': ['pytz>=2014.1'],
+ 'pytz': ['pytz>=2014.1'],
+ 'dateutil': ['python-dateutil>=1.4'],
'fakefactory': ['Faker>=0.7'],
'numpy': ['numpy>=1.9.0'],
'pytest': ['pytest>=3.0'],
@@ -120,4 +120,5 @@
'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],
},
long_description=open(README).read(),
+ keywords='python testing fuzzing property-based-testing',
)
|
{"golden_diff": "diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py\n--- a/hypothesis-python/setup.py\n+++ b/hypothesis-python/setup.py\n@@ -53,9 +53,9 @@\n \n \n extras = {\n- 'datetime': ['pytz'],\n- 'pytz': ['pytz'],\n- 'dateutil': ['python-dateutil'],\n+ 'datetime': ['pytz>=2014.1'],\n+ 'pytz': ['pytz>=2014.1'],\n+ 'dateutil': ['python-dateutil>=1.4'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=3.0'],\n@@ -120,4 +120,5 @@\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n+ keywords='python testing fuzzing property-based-testing',\n )\n", "issue": "License file in pypi archive\nThe pypi source archive isn't including the `LICENSE.txt` file. Would it be possible to add it? It is very helpful when packaging this for Linux distributions. The official python packaging guidelines also say [each package should have one](https://packaging.python.org/guides/distributing-packages-using-setuptools/#license-txt). Thank you!\nAdd keywords to setup.py\nTesting with pyroma package showed keywords are missing from setup.py\r\n\r\n```\r\nwillo@macbook ~/g/m/h/hypothesis-python> pyroma .\r\n------------------------------\r\nChecking .\r\nFound hypothesis\r\n------------------------------\r\nYour package does not have keywords data.\r\n------------------------------\r\nFinal rating: 9/10\r\nCottage Cheese\r\n------------------------------\r\n```\r\nDiscussed with @Zac-HD and keywords from the github repo below would be good to use.\r\n\r\npython testing fuzzing property-based-testing\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# (david@drmaciver.com), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport warnings\n\nimport setuptools\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file('src')\nREADME = local_file('README.rst')\n\nsetuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))\n\nif setuptools_version < (36, 2):\n # Warning only - very bad if uploading bdist but fine if installing sdist.\n warnings.warn(\n 'This version of setuptools is too old to correctly store '\n 'conditional dependencies in binary wheels. For more info, see: '\n 'https://hynek.me/articles/conditional-python-dependencies/'\n )\n\n\n# Assignment to placate pyflakes. The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file('src/hypothesis/version.py')) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n 'datetime': ['pytz'],\n 'pytz': ['pytz'],\n 'dateutil': ['python-dateutil'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=3.0'],\n # We only support Django versions with upstream support - see\n # https://www.djangoproject.com/download/#supported-versions\n 'django': ['pytz', 'django>=1.11'],\n}\n\nextras['faker'] = extras['fakefactory']\nextras['all'] = sorted(sum(extras.values(), []))\n\n\ninstall_requires = ['attrs>=16.0.0', 'coverage>=4.0']\n# Using an environment marker on enum34 makes the dependency condition\n# independent of the build environemnt, which is important for wheels.\n# https://www.python.org/dev/peps/pep-0345/#environment-markers\nif sys.version_info[0] < 3 and setuptools_version < (8, 0):\n # Except really old systems, where we give up and install unconditionally\n install_requires.append('enum34')\nelse:\n install_requires.append('enum34; python_version==\"2.7\"')\n\n\nsetuptools.setup(\n name='hypothesis',\n version=__version__,\n author='David R. MacIver',\n author_email='david@drmaciver.com',\n packages=setuptools.find_packages(SOURCE),\n package_dir={'': SOURCE},\n package_data={'hypothesis': ['py.typed']},\n url=(\n 'https://github.com/HypothesisWorks/hypothesis/'\n 'tree/master/hypothesis-python'\n ),\n license='MPL v2',\n description='A library for property based testing',\n zip_safe=False,\n extras_require=extras,\n install_requires=install_requires,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',\n 'Operating System :: Unix',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Testing',\n 'Framework :: Pytest',\n ],\n entry_points={\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n)\n", "path": "hypothesis-python/setup.py"}], "after_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# (david@drmaciver.com), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport warnings\n\nimport setuptools\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file('src')\nREADME = local_file('README.rst')\n\nsetuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))\n\nif setuptools_version < (36, 2):\n # Warning only - very bad if uploading bdist but fine if installing sdist.\n warnings.warn(\n 'This version of setuptools is too old to correctly store '\n 'conditional dependencies in binary wheels. For more info, see: '\n 'https://hynek.me/articles/conditional-python-dependencies/'\n )\n\n\n# Assignment to placate pyflakes. The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file('src/hypothesis/version.py')) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n 'datetime': ['pytz>=2014.1'],\n 'pytz': ['pytz>=2014.1'],\n 'dateutil': ['python-dateutil>=1.4'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=3.0'],\n # We only support Django versions with upstream support - see\n # https://www.djangoproject.com/download/#supported-versions\n 'django': ['pytz', 'django>=1.11'],\n}\n\nextras['faker'] = extras['fakefactory']\nextras['all'] = sorted(sum(extras.values(), []))\n\n\ninstall_requires = ['attrs>=16.0.0', 'coverage>=4.0']\n# Using an environment marker on enum34 makes the dependency condition\n# independent of the build environemnt, which is important for wheels.\n# https://www.python.org/dev/peps/pep-0345/#environment-markers\nif sys.version_info[0] < 3 and setuptools_version < (8, 0):\n # Except really old systems, where we give up and install unconditionally\n install_requires.append('enum34')\nelse:\n install_requires.append('enum34; python_version==\"2.7\"')\n\n\nsetuptools.setup(\n name='hypothesis',\n version=__version__,\n author='David R. MacIver',\n author_email='david@drmaciver.com',\n packages=setuptools.find_packages(SOURCE),\n package_dir={'': SOURCE},\n package_data={'hypothesis': ['py.typed']},\n url=(\n 'https://github.com/HypothesisWorks/hypothesis/'\n 'tree/master/hypothesis-python'\n ),\n license='MPL v2',\n description='A library for property based testing',\n zip_safe=False,\n extras_require=extras,\n install_requires=install_requires,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',\n 'Operating System :: Unix',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Testing',\n 'Framework :: Pytest',\n ],\n entry_points={\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n keywords='python testing fuzzing property-based-testing',\n)\n", "path": "hypothesis-python/setup.py"}]}
| 1,768 | 223 |
gh_patches_debug_3972
|
rasdani/github-patches
|
git_diff
|
pytorch__audio-635
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multiple definitions of `SoxEffect`.
`SoxEffect` is defined both in
https://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.h#L50-L54
and
https://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.cpp#L73-L77
-----
-----
`torch_sox.h` can be included into `torch_sox.cpp` so that `SoxEffect` needs to be defined
only once in the header.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import platform
4 import sys
5 import subprocess
6
7 from setuptools import setup, find_packages
8 from torch.utils.cpp_extension import BuildExtension, CppExtension
9
10
11 def check_env_flag(name, default=''):
12 return os.getenv(name, default).upper() in set(['ON', '1', 'YES', 'TRUE', 'Y'])
13
14 DEBUG = check_env_flag('DEBUG')
15 IS_WHEEL = check_env_flag('IS_WHEEL')
16 IS_CONDA = check_env_flag('IS_CONDA')
17
18 print('DEBUG:', DEBUG, 'IS_WHEEL:', IS_WHEEL, 'IS_CONDA:', IS_CONDA)
19
20 eca = []
21 ela = []
22 if DEBUG:
23 if platform.system() == 'Windows':
24 ela += ['/DEBUG:FULL']
25 else:
26 eca += ['-O0', '-g']
27 ela += ['-O0', '-g']
28
29
30 libraries = []
31 include_dirs = []
32 extra_objects = []
33
34 # Hypothetically, the conda distribution could rely on an external sox,
35 # but the library is pretty small and it is not available on the default
36 # anaconda channel. So we statically link it in, just as we do with wheels.
37 if IS_WHEEL or IS_CONDA:
38 audio_path = os.path.dirname(os.path.abspath(__file__))
39
40 include_dirs += [os.path.join(audio_path, 'third_party/flac/include')]
41 include_dirs += [os.path.join(audio_path, 'third_party/lame/include')]
42 include_dirs += [os.path.join(audio_path, 'third_party/sox/include')]
43 include_dirs += [os.path.join(audio_path, 'third_party/mad/include')]
44
45 # proper link order (sox, mad, flac, lame)
46 # (the most important thing is that dependencies come after a libraryl
47 # e.g., sox comes first)
48 extra_objects += [os.path.join(audio_path, 'third_party/sox/lib/libsox.a')]
49 extra_objects += [os.path.join(audio_path, 'third_party/mad/lib/libmad.a')]
50 extra_objects += [os.path.join(audio_path, 'third_party/flac/lib/libFLAC.a')]
51 extra_objects += [os.path.join(audio_path, 'third_party/lame/lib/libmp3lame.a')]
52 else:
53 libraries += ['sox']
54
55
56 # Creating the version file
57 cwd = os.path.dirname(os.path.abspath(__file__))
58 version = '0.6.0a0'
59 sha = 'Unknown'
60
61 try:
62 sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
63 except Exception:
64 pass
65
66 if os.getenv('BUILD_VERSION'):
67 version = os.getenv('BUILD_VERSION')
68 elif sha != 'Unknown':
69 version += '+' + sha[:7]
70 print('-- Building version ' + version)
71
72 version_path = os.path.join(cwd, 'torchaudio', 'version.py')
73 with open(version_path, 'w') as f:
74 f.write("__version__ = '{}'\n".format(version))
75 f.write("git_version = {}\n".format(repr(sha)))
76
77 pytorch_package_version = os.getenv('PYTORCH_VERSION')
78
79 pytorch_package_dep = 'torch'
80 if pytorch_package_version is not None:
81 pytorch_package_dep += "==" + pytorch_package_version
82
83 if platform.system() == 'Windows':
84 ext_modules = None
85 else:
86 ext_modules = [
87 CppExtension(
88 '_torch_sox',
89 ['torchaudio/torch_sox.cpp'],
90 libraries=libraries,
91 include_dirs=include_dirs,
92 extra_compile_args=eca,
93 extra_objects=extra_objects,
94 extra_link_args=ela),
95 ]
96
97 setup(
98 name="torchaudio",
99 version=version,
100 description="An audio package for PyTorch",
101 url="https://github.com/pytorch/audio",
102 author="Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough",
103 author_email="soumith@pytorch.org",
104 classifiers=[
105 "Environment :: Plugins",
106 "Intended Audience :: Developers",
107 "Intended Audience :: Science/Research",
108 "License :: OSI Approved :: BSD License",
109 "Operating System :: MacOS :: MacOS X",
110 "Operating System :: Microsoft :: Windows",
111 "Operating System :: POSIX",
112 "Programming Language :: C++",
113 "Programming Language :: Python :: 2.7",
114 "Programming Language :: Python :: 3",
115 "Programming Language :: Python :: Implementation :: CPython",
116 "Topic :: Multimedia :: Sound/Audio",
117 "Topic :: Scientific/Engineering :: Artificial Intelligence"
118 ],
119 # Exclude the build files.
120 packages=find_packages(exclude=["build"]),
121 ext_modules=ext_modules,
122 cmdclass={'build_ext': BuildExtension},
123 install_requires=[pytorch_package_dep]
124 )
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -88,7 +88,7 @@
'_torch_sox',
['torchaudio/torch_sox.cpp'],
libraries=libraries,
- include_dirs=include_dirs,
+ include_dirs=include_dirs + [cwd],
extra_compile_args=eca,
extra_objects=extra_objects,
extra_link_args=ela),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -88,7 +88,7 @@\n '_torch_sox',\n ['torchaudio/torch_sox.cpp'],\n libraries=libraries,\n- include_dirs=include_dirs,\n+ include_dirs=include_dirs + [cwd],\n extra_compile_args=eca,\n extra_objects=extra_objects,\n extra_link_args=ela),\n", "issue": "multiple definitions of `SoxEffect`.\n`SoxEffect` is defined both in\r\n\r\nhttps://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.h#L50-L54\r\n\r\nand\r\n\r\nhttps://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.cpp#L73-L77\r\n\r\n\r\n-----\r\n\r\n-----\r\n\r\n`torch_sox.h` can be included into `torch_sox.cpp` so that `SoxEffect` needs to be defined\r\nonly once in the header.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport platform\nimport sys\nimport subprocess\n\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CppExtension\n\n\ndef check_env_flag(name, default=''):\n return os.getenv(name, default).upper() in set(['ON', '1', 'YES', 'TRUE', 'Y'])\n\nDEBUG = check_env_flag('DEBUG')\nIS_WHEEL = check_env_flag('IS_WHEEL')\nIS_CONDA = check_env_flag('IS_CONDA')\n\nprint('DEBUG:', DEBUG, 'IS_WHEEL:', IS_WHEEL, 'IS_CONDA:', IS_CONDA)\n\neca = []\nela = []\nif DEBUG:\n if platform.system() == 'Windows':\n ela += ['/DEBUG:FULL']\n else:\n eca += ['-O0', '-g']\n ela += ['-O0', '-g']\n\n\nlibraries = []\ninclude_dirs = []\nextra_objects = []\n\n# Hypothetically, the conda distribution could rely on an external sox,\n# but the library is pretty small and it is not available on the default\n# anaconda channel. So we statically link it in, just as we do with wheels.\nif IS_WHEEL or IS_CONDA:\n audio_path = os.path.dirname(os.path.abspath(__file__))\n\n include_dirs += [os.path.join(audio_path, 'third_party/flac/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/lame/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/sox/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/mad/include')]\n\n # proper link order (sox, mad, flac, lame)\n # (the most important thing is that dependencies come after a libraryl\n # e.g., sox comes first)\n extra_objects += [os.path.join(audio_path, 'third_party/sox/lib/libsox.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/mad/lib/libmad.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/flac/lib/libFLAC.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/lame/lib/libmp3lame.a')]\nelse:\n libraries += ['sox']\n\n\n# Creating the version file\ncwd = os.path.dirname(os.path.abspath(__file__))\nversion = '0.6.0a0'\nsha = 'Unknown'\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('BUILD_VERSION'):\n version = os.getenv('BUILD_VERSION')\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint('-- Building version ' + version)\n\nversion_path = os.path.join(cwd, 'torchaudio', 'version.py')\nwith open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\npytorch_package_version = os.getenv('PYTORCH_VERSION')\n\npytorch_package_dep = 'torch'\nif pytorch_package_version is not None:\n pytorch_package_dep += \"==\" + pytorch_package_version\n\nif platform.system() == 'Windows':\n ext_modules = None\nelse:\n ext_modules = [\n CppExtension(\n '_torch_sox',\n ['torchaudio/torch_sox.cpp'],\n libraries=libraries,\n include_dirs=include_dirs,\n extra_compile_args=eca,\n extra_objects=extra_objects,\n extra_link_args=ela),\n ]\n\nsetup(\n name=\"torchaudio\",\n version=version,\n description=\"An audio package for PyTorch\",\n url=\"https://github.com/pytorch/audio\",\n author=\"Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough\",\n author_email=\"soumith@pytorch.org\",\n classifiers=[\n \"Environment :: Plugins\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\"\n ],\n # Exclude the build files.\n packages=find_packages(exclude=[\"build\"]),\n ext_modules=ext_modules,\n cmdclass={'build_ext': BuildExtension},\n install_requires=[pytorch_package_dep]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport platform\nimport sys\nimport subprocess\n\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CppExtension\n\n\ndef check_env_flag(name, default=''):\n return os.getenv(name, default).upper() in set(['ON', '1', 'YES', 'TRUE', 'Y'])\n\nDEBUG = check_env_flag('DEBUG')\nIS_WHEEL = check_env_flag('IS_WHEEL')\nIS_CONDA = check_env_flag('IS_CONDA')\n\nprint('DEBUG:', DEBUG, 'IS_WHEEL:', IS_WHEEL, 'IS_CONDA:', IS_CONDA)\n\neca = []\nela = []\nif DEBUG:\n if platform.system() == 'Windows':\n ela += ['/DEBUG:FULL']\n else:\n eca += ['-O0', '-g']\n ela += ['-O0', '-g']\n\n\nlibraries = []\ninclude_dirs = []\nextra_objects = []\n\n# Hypothetically, the conda distribution could rely on an external sox,\n# but the library is pretty small and it is not available on the default\n# anaconda channel. So we statically link it in, just as we do with wheels.\nif IS_WHEEL or IS_CONDA:\n audio_path = os.path.dirname(os.path.abspath(__file__))\n\n include_dirs += [os.path.join(audio_path, 'third_party/flac/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/lame/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/sox/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/mad/include')]\n\n # proper link order (sox, mad, flac, lame)\n # (the most important thing is that dependencies come after a libraryl\n # e.g., sox comes first)\n extra_objects += [os.path.join(audio_path, 'third_party/sox/lib/libsox.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/mad/lib/libmad.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/flac/lib/libFLAC.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/lame/lib/libmp3lame.a')]\nelse:\n libraries += ['sox']\n\n\n# Creating the version file\ncwd = os.path.dirname(os.path.abspath(__file__))\nversion = '0.6.0a0'\nsha = 'Unknown'\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('BUILD_VERSION'):\n version = os.getenv('BUILD_VERSION')\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint('-- Building version ' + version)\n\nversion_path = os.path.join(cwd, 'torchaudio', 'version.py')\nwith open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\npytorch_package_version = os.getenv('PYTORCH_VERSION')\n\npytorch_package_dep = 'torch'\nif pytorch_package_version is not None:\n pytorch_package_dep += \"==\" + pytorch_package_version\n\nif platform.system() == 'Windows':\n ext_modules = None\nelse:\n ext_modules = [\n CppExtension(\n '_torch_sox',\n ['torchaudio/torch_sox.cpp'],\n libraries=libraries,\n include_dirs=include_dirs + [cwd],\n extra_compile_args=eca,\n extra_objects=extra_objects,\n extra_link_args=ela),\n ]\n\nsetup(\n name=\"torchaudio\",\n version=version,\n description=\"An audio package for PyTorch\",\n url=\"https://github.com/pytorch/audio\",\n author=\"Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough\",\n author_email=\"soumith@pytorch.org\",\n classifiers=[\n \"Environment :: Plugins\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\"\n ],\n # Exclude the build files.\n packages=find_packages(exclude=[\"build\"]),\n ext_modules=ext_modules,\n cmdclass={'build_ext': BuildExtension},\n install_requires=[pytorch_package_dep]\n)\n", "path": "setup.py"}]}
| 1,741 | 94 |
gh_patches_debug_9080
|
rasdani/github-patches
|
git_diff
|
ARM-DOE__ACT-634
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Valid Min in act.utils.decode_present_weather
* ACT version: 1.3.4
* Python version:3.9.15
* Operating System: Windows 10
### Description
I am receiving an error for "del data.attrs['valid_min'] when reading in data trying to decode present weather data.
### What I Did
```
act.utils.decode_present_weather(ds_fd70,'present_wx2',precip_codes)
decode_present_weather
del data.attrs['valid_min']
KeyError: 'valid_min'
When outputting time I have:
ds_fd70.time
array(['2023-03-09T16:45:00.000000000', '2023-03-09T17:00:00.000000000',
'2023-03-09T17:15:00.000000000', '2023-03-09T17:30:00.000000000',
'2023-03-09T17:45:00.000000000', '2023-03-09T18:00:00.000000000',
'2023-03-09T18:15:00.000000000', '2023-03-09T18:30:00.000000000',
'2023-03-09T18:45:00.000000000', '2023-03-09T19:00:00.000000000',
'2023-03-09T19:15:00.000000000', '2023-03-09T19:30:00.000000000',
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/utils/inst_utils.py`
Content:
```
1 """
2 Functions containing utilities for instruments.
3
4 """
5
6
7 def decode_present_weather(ds, variable=None, decoded_name=None):
8 """
9 This function is to decode codes reported from automatic weather stations suchas the PWD22.
10 This is based on WMO Table 4680.
11
12 Parameters
13 ----------
14 ds : xarray.Dataset
15 ACT or Xarray dataset from which to convert codes
16 variable : string
17 Variable to decode
18 decoded_name : string
19 New variable name to store updated labels
20
21 Returns
22 -------
23 ds : xarray.Dataset
24 Returns dataset with new decoded data
25
26 References
27 ----------
28 WMO Manual on Code Volume I.1
29 https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf
30
31 """
32
33 # Check to ensure that a variable name is passed
34 if variable is None:
35 raise ValueError('You must specify a variable')
36
37 if variable not in ds:
38 raise ValueError('Variable not in the dataset')
39
40 # Define the weather hash
41 weather = {
42 0: 'No significant weather observed',
43 1: 'Clouds generally dissolving or becoming less developed during the past hour',
44 2: 'State of the sky on the whole unchanged during the past hour',
45 3: 'Clouds generally forming or developing during the past hour',
46 4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
47 5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
48 10: 'Mist',
49 11: 'Diamond dust',
50 12: 'Distant lightning',
51 18: 'Squalls',
52 20: 'Fog',
53 21: 'Precipitation',
54 22: 'Drizzle (not freezing) or snow grains',
55 23: 'Rain (not freezing)',
56 24: 'Snow',
57 25: 'Freezing drizzle or freezing rain',
58 26: 'Thunderstorm (with or without precipitation)',
59 27: 'Blowing or drifting snow or sand',
60 28: 'Blowing or drifting snow or sand, visibility >= 1 km',
61 29: 'Blowing or drifting snow or sand, visibility < 1 km',
62 30: 'Fog',
63 31: 'Fog or ice fog in patches',
64 32: 'Fog or ice fog, has become thinner during the past hour',
65 33: 'Fog or ice fog, no appreciable change during the past hour',
66 34: 'Fog or ice fog, has begun or become thicker during the past hour',
67 35: 'Fog, depositing rime',
68 40: 'Precipitation',
69 41: 'Precipitation, slight or moderate',
70 42: 'Precipitation, heavy',
71 43: 'Liquid precipitation, slight or moderate',
72 44: 'Liquid precipitation, heavy',
73 45: 'Solid precipitation, slight or moderate',
74 46: 'Solid precipitation, heavy',
75 47: 'Freezing precipitation, slight or moderate',
76 48: 'Freezing precipitation, heavy',
77 50: 'Drizzle',
78 51: 'Drizzle, not freezing, slight',
79 52: 'Drizzle, not freezing, moderate',
80 53: 'Drizzle, not freezing, heavy',
81 54: 'Drizzle, freezing, slight',
82 55: 'Drizzle, freezing, moderate',
83 56: 'Drizzle, freezing, heavy',
84 57: 'Drizzle and rain, slight',
85 58: 'Drizzle and rain, moderate or heavy',
86 60: 'Rain',
87 61: 'Rain, not freezing, slight',
88 62: 'Rain, not freezing, moderate',
89 63: 'Rain, not freezing, heavy',
90 64: 'Rain, freezing, slight',
91 65: 'Rain, freezing, moderate',
92 66: 'Rain, freezing, heavy',
93 67: 'Rain (or drizzle) and snow, slight',
94 68: 'Rain (or drizzle) and snow, moderate or heavy',
95 70: 'Snow',
96 71: 'Snow, light',
97 72: 'Snow, moderate',
98 73: 'Snow, heavy',
99 74: 'Ice pellets, slight',
100 75: 'Ice pellets, moderate',
101 76: 'Ice pellets, heavy',
102 77: 'Snow grains',
103 78: 'Ice crystals',
104 80: 'Shower(s) or Intermittent Precipitation',
105 81: 'Rain shower(s) or intermittent rain, slight',
106 82: 'Rain shower(s) or intermittent rain, moderate',
107 83: 'Rain shower(s) or intermittent rain, heavy',
108 84: 'Rain shower(s) or intermittent rain, violent',
109 85: 'Snow shower(s) or intermittent snow, slight',
110 86: 'Snow shower(s) or intermittent snow, moderate',
111 87: 'Snow shower(s) or intermittent snow, heavy',
112 89: 'Hail',
113 90: 'Thunderstorm',
114 91: 'Thunderstorm, slight or moderate, with no precipitation',
115 92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
116 93: 'Thunderstorm, slight or moderate, with hail',
117 94: 'Thunderstorm, heavy, with no precipitation',
118 95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
119 96: 'Thunderstorm, heavy, with hail',
120 99: 'Tornado',
121 -9999: 'Missing',
122 }
123
124 # If a decoded name is not passed, make one
125 if decoded_name is None:
126 decoded_name = variable + '_decoded'
127
128 # Get data and fill nans with -9999
129 data = ds[variable]
130 data = data.fillna(-9999)
131
132 # Get the weather type for each code
133 wx_type = [weather[d] for d in data.values]
134
135 # Massage the data array to set back in the dataset
136 data.values = wx_type
137 data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
138 del data.attrs['valid_min']
139 del data.attrs['valid_max']
140
141 ds[decoded_name] = data
142
143 return ds
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/act/utils/inst_utils.py b/act/utils/inst_utils.py
--- a/act/utils/inst_utils.py
+++ b/act/utils/inst_utils.py
@@ -134,9 +134,14 @@
# Massage the data array to set back in the dataset
data.values = wx_type
- data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
- del data.attrs['valid_min']
- del data.attrs['valid_max']
+ if 'long_name' in data.attrs:
+ data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
+ else:
+ data.attrs['long_name'] = 'Decoded present weather values'
+ if 'valid_min' in data.attrs:
+ del data.attrs['valid_min']
+ if 'valid_max' in data.attrs:
+ del data.attrs['valid_max']
ds[decoded_name] = data
|
{"golden_diff": "diff --git a/act/utils/inst_utils.py b/act/utils/inst_utils.py\n--- a/act/utils/inst_utils.py\n+++ b/act/utils/inst_utils.py\n@@ -134,9 +134,14 @@\n \n # Massage the data array to set back in the dataset\n data.values = wx_type\n- data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'\n- del data.attrs['valid_min']\n- del data.attrs['valid_max']\n+ if 'long_name' in data.attrs:\n+ data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'\n+ else:\n+ data.attrs['long_name'] = 'Decoded present weather values'\n+ if 'valid_min' in data.attrs:\n+ del data.attrs['valid_min']\n+ if 'valid_max' in data.attrs:\n+ del data.attrs['valid_max']\n \n ds[decoded_name] = data\n", "issue": "Valid Min in act.utils.decode_present_weather\n* ACT version: 1.3.4\r\n* Python version:3.9.15\r\n* Operating System: Windows 10\r\n\r\n### Description\r\n\r\nI am receiving an error for \"del data.attrs['valid_min'] when reading in data trying to decode present weather data. \r\n\r\n### What I Did\r\n\r\n```\r\nact.utils.decode_present_weather(ds_fd70,'present_wx2',precip_codes)\r\ndecode_present_weather\r\n del data.attrs['valid_min']\r\n\r\nKeyError: 'valid_min'\r\n\r\nWhen outputting time I have:\r\nds_fd70.time\r\n\r\narray(['2023-03-09T16:45:00.000000000', '2023-03-09T17:00:00.000000000',\r\n '2023-03-09T17:15:00.000000000', '2023-03-09T17:30:00.000000000',\r\n '2023-03-09T17:45:00.000000000', '2023-03-09T18:00:00.000000000',\r\n '2023-03-09T18:15:00.000000000', '2023-03-09T18:30:00.000000000',\r\n '2023-03-09T18:45:00.000000000', '2023-03-09T19:00:00.000000000',\r\n '2023-03-09T19:15:00.000000000', '2023-03-09T19:30:00.000000000',\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nFunctions containing utilities for instruments.\n\n\"\"\"\n\n\ndef decode_present_weather(ds, variable=None, decoded_name=None):\n \"\"\"\n This function is to decode codes reported from automatic weather stations suchas the PWD22.\n This is based on WMO Table 4680.\n\n Parameters\n ----------\n ds : xarray.Dataset\n ACT or Xarray dataset from which to convert codes\n variable : string\n Variable to decode\n decoded_name : string\n New variable name to store updated labels\n\n Returns\n -------\n ds : xarray.Dataset\n Returns dataset with new decoded data\n\n References\n ----------\n WMO Manual on Code Volume I.1\n https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf\n\n \"\"\"\n\n # Check to ensure that a variable name is passed\n if variable is None:\n raise ValueError('You must specify a variable')\n\n if variable not in ds:\n raise ValueError('Variable not in the dataset')\n\n # Define the weather hash\n weather = {\n 0: 'No significant weather observed',\n 1: 'Clouds generally dissolving or becoming less developed during the past hour',\n 2: 'State of the sky on the whole unchanged during the past hour',\n 3: 'Clouds generally forming or developing during the past hour',\n 4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',\n 5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',\n 10: 'Mist',\n 11: 'Diamond dust',\n 12: 'Distant lightning',\n 18: 'Squalls',\n 20: 'Fog',\n 21: 'Precipitation',\n 22: 'Drizzle (not freezing) or snow grains',\n 23: 'Rain (not freezing)',\n 24: 'Snow',\n 25: 'Freezing drizzle or freezing rain',\n 26: 'Thunderstorm (with or without precipitation)',\n 27: 'Blowing or drifting snow or sand',\n 28: 'Blowing or drifting snow or sand, visibility >= 1 km',\n 29: 'Blowing or drifting snow or sand, visibility < 1 km',\n 30: 'Fog',\n 31: 'Fog or ice fog in patches',\n 32: 'Fog or ice fog, has become thinner during the past hour',\n 33: 'Fog or ice fog, no appreciable change during the past hour',\n 34: 'Fog or ice fog, has begun or become thicker during the past hour',\n 35: 'Fog, depositing rime',\n 40: 'Precipitation',\n 41: 'Precipitation, slight or moderate',\n 42: 'Precipitation, heavy',\n 43: 'Liquid precipitation, slight or moderate',\n 44: 'Liquid precipitation, heavy',\n 45: 'Solid precipitation, slight or moderate',\n 46: 'Solid precipitation, heavy',\n 47: 'Freezing precipitation, slight or moderate',\n 48: 'Freezing precipitation, heavy',\n 50: 'Drizzle',\n 51: 'Drizzle, not freezing, slight',\n 52: 'Drizzle, not freezing, moderate',\n 53: 'Drizzle, not freezing, heavy',\n 54: 'Drizzle, freezing, slight',\n 55: 'Drizzle, freezing, moderate',\n 56: 'Drizzle, freezing, heavy',\n 57: 'Drizzle and rain, slight',\n 58: 'Drizzle and rain, moderate or heavy',\n 60: 'Rain',\n 61: 'Rain, not freezing, slight',\n 62: 'Rain, not freezing, moderate',\n 63: 'Rain, not freezing, heavy',\n 64: 'Rain, freezing, slight',\n 65: 'Rain, freezing, moderate',\n 66: 'Rain, freezing, heavy',\n 67: 'Rain (or drizzle) and snow, slight',\n 68: 'Rain (or drizzle) and snow, moderate or heavy',\n 70: 'Snow',\n 71: 'Snow, light',\n 72: 'Snow, moderate',\n 73: 'Snow, heavy',\n 74: 'Ice pellets, slight',\n 75: 'Ice pellets, moderate',\n 76: 'Ice pellets, heavy',\n 77: 'Snow grains',\n 78: 'Ice crystals',\n 80: 'Shower(s) or Intermittent Precipitation',\n 81: 'Rain shower(s) or intermittent rain, slight',\n 82: 'Rain shower(s) or intermittent rain, moderate',\n 83: 'Rain shower(s) or intermittent rain, heavy',\n 84: 'Rain shower(s) or intermittent rain, violent',\n 85: 'Snow shower(s) or intermittent snow, slight',\n 86: 'Snow shower(s) or intermittent snow, moderate',\n 87: 'Snow shower(s) or intermittent snow, heavy',\n 89: 'Hail',\n 90: 'Thunderstorm',\n 91: 'Thunderstorm, slight or moderate, with no precipitation',\n 92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',\n 93: 'Thunderstorm, slight or moderate, with hail',\n 94: 'Thunderstorm, heavy, with no precipitation',\n 95: 'Thunderstorm, heavy, with rain showers and/or snow showers',\n 96: 'Thunderstorm, heavy, with hail',\n 99: 'Tornado',\n -9999: 'Missing',\n }\n\n # If a decoded name is not passed, make one\n if decoded_name is None:\n decoded_name = variable + '_decoded'\n\n # Get data and fill nans with -9999\n data = ds[variable]\n data = data.fillna(-9999)\n\n # Get the weather type for each code\n wx_type = [weather[d] for d in data.values]\n\n # Massage the data array to set back in the dataset\n data.values = wx_type\n data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'\n del data.attrs['valid_min']\n del data.attrs['valid_max']\n\n ds[decoded_name] = data\n\n return ds\n", "path": "act/utils/inst_utils.py"}], "after_files": [{"content": "\"\"\"\nFunctions containing utilities for instruments.\n\n\"\"\"\n\n\ndef decode_present_weather(ds, variable=None, decoded_name=None):\n \"\"\"\n This function is to decode codes reported from automatic weather stations suchas the PWD22.\n This is based on WMO Table 4680.\n\n Parameters\n ----------\n ds : xarray.Dataset\n ACT or Xarray dataset from which to convert codes\n variable : string\n Variable to decode\n decoded_name : string\n New variable name to store updated labels\n\n Returns\n -------\n ds : xarray.Dataset\n Returns dataset with new decoded data\n\n References\n ----------\n WMO Manual on Code Volume I.1\n https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf\n\n \"\"\"\n\n # Check to ensure that a variable name is passed\n if variable is None:\n raise ValueError('You must specify a variable')\n\n if variable not in ds:\n raise ValueError('Variable not in the dataset')\n\n # Define the weather hash\n weather = {\n 0: 'No significant weather observed',\n 1: 'Clouds generally dissolving or becoming less developed during the past hour',\n 2: 'State of the sky on the whole unchanged during the past hour',\n 3: 'Clouds generally forming or developing during the past hour',\n 4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',\n 5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',\n 10: 'Mist',\n 11: 'Diamond dust',\n 12: 'Distant lightning',\n 18: 'Squalls',\n 20: 'Fog',\n 21: 'Precipitation',\n 22: 'Drizzle (not freezing) or snow grains',\n 23: 'Rain (not freezing)',\n 24: 'Snow',\n 25: 'Freezing drizzle or freezing rain',\n 26: 'Thunderstorm (with or without precipitation)',\n 27: 'Blowing or drifting snow or sand',\n 28: 'Blowing or drifting snow or sand, visibility >= 1 km',\n 29: 'Blowing or drifting snow or sand, visibility < 1 km',\n 30: 'Fog',\n 31: 'Fog or ice fog in patches',\n 32: 'Fog or ice fog, has become thinner during the past hour',\n 33: 'Fog or ice fog, no appreciable change during the past hour',\n 34: 'Fog or ice fog, has begun or become thicker during the past hour',\n 35: 'Fog, depositing rime',\n 40: 'Precipitation',\n 41: 'Precipitation, slight or moderate',\n 42: 'Precipitation, heavy',\n 43: 'Liquid precipitation, slight or moderate',\n 44: 'Liquid precipitation, heavy',\n 45: 'Solid precipitation, slight or moderate',\n 46: 'Solid precipitation, heavy',\n 47: 'Freezing precipitation, slight or moderate',\n 48: 'Freezing precipitation, heavy',\n 50: 'Drizzle',\n 51: 'Drizzle, not freezing, slight',\n 52: 'Drizzle, not freezing, moderate',\n 53: 'Drizzle, not freezing, heavy',\n 54: 'Drizzle, freezing, slight',\n 55: 'Drizzle, freezing, moderate',\n 56: 'Drizzle, freezing, heavy',\n 57: 'Drizzle and rain, slight',\n 58: 'Drizzle and rain, moderate or heavy',\n 60: 'Rain',\n 61: 'Rain, not freezing, slight',\n 62: 'Rain, not freezing, moderate',\n 63: 'Rain, not freezing, heavy',\n 64: 'Rain, freezing, slight',\n 65: 'Rain, freezing, moderate',\n 66: 'Rain, freezing, heavy',\n 67: 'Rain (or drizzle) and snow, slight',\n 68: 'Rain (or drizzle) and snow, moderate or heavy',\n 70: 'Snow',\n 71: 'Snow, light',\n 72: 'Snow, moderate',\n 73: 'Snow, heavy',\n 74: 'Ice pellets, slight',\n 75: 'Ice pellets, moderate',\n 76: 'Ice pellets, heavy',\n 77: 'Snow grains',\n 78: 'Ice crystals',\n 80: 'Shower(s) or Intermittent Precipitation',\n 81: 'Rain shower(s) or intermittent rain, slight',\n 82: 'Rain shower(s) or intermittent rain, moderate',\n 83: 'Rain shower(s) or intermittent rain, heavy',\n 84: 'Rain shower(s) or intermittent rain, violent',\n 85: 'Snow shower(s) or intermittent snow, slight',\n 86: 'Snow shower(s) or intermittent snow, moderate',\n 87: 'Snow shower(s) or intermittent snow, heavy',\n 89: 'Hail',\n 90: 'Thunderstorm',\n 91: 'Thunderstorm, slight or moderate, with no precipitation',\n 92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',\n 93: 'Thunderstorm, slight or moderate, with hail',\n 94: 'Thunderstorm, heavy, with no precipitation',\n 95: 'Thunderstorm, heavy, with rain showers and/or snow showers',\n 96: 'Thunderstorm, heavy, with hail',\n 99: 'Tornado',\n -9999: 'Missing',\n }\n\n # If a decoded name is not passed, make one\n if decoded_name is None:\n decoded_name = variable + '_decoded'\n\n # Get data and fill nans with -9999\n data = ds[variable]\n data = data.fillna(-9999)\n\n # Get the weather type for each code\n wx_type = [weather[d] for d in data.values]\n\n # Massage the data array to set back in the dataset\n data.values = wx_type\n if 'long_name' in data.attrs:\n data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'\n else:\n data.attrs['long_name'] = 'Decoded present weather values'\n if 'valid_min' in data.attrs:\n del data.attrs['valid_min']\n if 'valid_max' in data.attrs:\n del data.attrs['valid_max']\n\n ds[decoded_name] = data\n\n return ds\n", "path": "act/utils/inst_utils.py"}]}
| 2,592 | 215 |
gh_patches_debug_42912
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3537
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix symlinks not copied during export
- [x] Refer to the issue that supports this Pull Request: fixes #3258
- [x] If the issue has missing info, explain the purpose/use case/pain/need that covers this Pull Request.
- [x] I've read the [Contributing guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've followed the PEP8 style guides for Python code.
- [ ] I've opened another PR in the Conan docs repo to the ``develop`` branch, documenting this one. Also adding a description of the changes in the ``changelog.rst`` file. https://github.com/conan-io/docs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/file_copier.py`
Content:
```
1 import os
2 import fnmatch
3 import shutil
4 from collections import defaultdict
5
6 from conans.util.files import mkdir
7
8
9 def report_copied_files(copied, output):
10 ext_files = defaultdict(list)
11 for f in copied:
12 _, ext = os.path.splitext(f)
13 ext_files[ext].append(os.path.basename(f))
14
15 if not ext_files:
16 return False
17
18 for ext, files in ext_files.items():
19 files_str = (", ".join(files)) if len(files) < 5 else ""
20 file_or_files = "file" if len(files) == 1 else "files"
21 if not ext:
22 output.info("Copied %d %s: %s" % (len(files), file_or_files, files_str))
23 else:
24 output.info("Copied %d '%s' %s: %s" % (len(files), ext, file_or_files, files_str))
25 return True
26
27
28 class FileCopier(object):
29 """ main responsible of copying files from place to place:
30 package: build folder -> package folder
31 imports: package folder -> user folder
32 export: user folder -> store "export" folder
33 """
34 def __init__(self, root_source_folder, root_destination_folder, excluded=None):
35 """
36 Takes the base folders to copy resources src -> dst. These folders names
37 will not be used in the relative names while copying
38 param root_source_folder: The base folder to copy things from, typically the
39 store build folder
40 param root_destination_folder: The base folder to copy things to, typicall the
41 store package folder
42 """
43 self._base_src = root_source_folder
44 self._base_dst = root_destination_folder
45 self._copied = []
46 self._excluded = [root_destination_folder]
47 if excluded:
48 self._excluded.append(excluded)
49
50 def report(self, output):
51 return report_copied_files(self._copied, output)
52
53 def __call__(self, pattern, dst="", src="", keep_path=True, links=False, symlinks=None,
54 excludes=None, ignore_case=False):
55 """
56 param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll
57 param dst: the destination local folder, wrt to current conanfile dir, to which
58 the files will be copied. Eg: "bin"
59 param src: the source folder in which those files will be searched. This folder
60 will be stripped from the dst name. Eg.: lib/Debug/x86
61 param keep_path: False if you want the relative paths to be maintained from
62 src to dst folders, or just drop. False is useful if you want
63 to collect e.g. many *.libs among many dirs into a single
64 lib dir
65 return: list of copied files
66 """
67 if symlinks is not None:
68 links = symlinks
69 # Check for ../ patterns and allow them
70 if pattern.startswith(".."):
71 rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))
72 base_src = os.path.dirname(rel_dir)
73 pattern = os.path.basename(rel_dir)
74 else:
75 base_src = self._base_src
76
77 src = os.path.join(base_src, src)
78 dst = os.path.join(self._base_dst, dst)
79
80 files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,
81 ignore_case)
82 copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)
83 self._link_folders(src, dst, link_folders)
84 self._copied.extend(files_to_copy)
85 return copied_files
86
87 def _filter_files(self, src, pattern, links, excludes, ignore_case):
88
89 """ return a list of the files matching the patterns
90 The list will be relative path names wrt to the root src folder
91 """
92 filenames = []
93 linked_folders = []
94 for root, subfolders, files in os.walk(src, followlinks=True):
95 if root in self._excluded:
96 subfolders[:] = []
97 continue
98
99 if links and os.path.islink(root):
100 linked_folders.append(os.path.relpath(root, src))
101 subfolders[:] = []
102 continue
103 basename = os.path.basename(root)
104 # Skip git or svn subfolders
105 if basename in [".git", ".svn"]:
106 subfolders[:] = []
107 continue
108 if basename == "test_package": # DO NOT export test_package/build folder
109 try:
110 subfolders.remove("build")
111 except:
112 pass
113
114 relative_path = os.path.relpath(root, src)
115 for f in files:
116 relative_name = os.path.normpath(os.path.join(relative_path, f))
117 filenames.append(relative_name)
118
119 if ignore_case:
120 filenames = {f.lower(): f for f in filenames}
121 pattern = pattern.lower()
122
123 files_to_copy = fnmatch.filter(filenames, pattern)
124 if excludes:
125 if not isinstance(excludes, (tuple, list)):
126 excludes = (excludes, )
127 if ignore_case:
128 excludes = [e.lower() for e in excludes]
129 for exclude in excludes:
130 files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
131
132 if ignore_case:
133 files_to_copy = [filenames[f] for f in files_to_copy]
134
135 return files_to_copy, linked_folders
136
137 @staticmethod
138 def _link_folders(src, dst, linked_folders):
139 for linked_folder in linked_folders:
140 link = os.readlink(os.path.join(src, linked_folder))
141 dst_link = os.path.join(dst, linked_folder)
142 try:
143 # Remove the previous symlink
144 os.remove(dst_link)
145 except OSError:
146 pass
147 # link is a string relative to linked_folder
148 # e.g.: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
149 mkdir(os.path.dirname(dst_link))
150 os.symlink(link, dst_link)
151 # Remove empty links
152 for linked_folder in linked_folders:
153 dst_link = os.path.join(dst, linked_folder)
154 abs_path = os.path.realpath(dst_link)
155 if not os.path.exists(abs_path):
156 os.remove(dst_link)
157
158 @staticmethod
159 def _copy_files(files, src, dst, keep_path, symlinks):
160 """ executes a multiple file copy from [(src_file, dst_file), (..)]
161 managing symlinks if necessary
162 """
163 copied_files = []
164 for filename in files:
165 abs_src_name = os.path.join(src, filename)
166 filename = filename if keep_path else os.path.basename(filename)
167 abs_dst_name = os.path.normpath(os.path.join(dst, filename))
168 try:
169 os.makedirs(os.path.dirname(abs_dst_name))
170 except:
171 pass
172 if symlinks and os.path.islink(abs_src_name):
173 linkto = os.readlink(abs_src_name) # @UndefinedVariable
174 try:
175 os.remove(abs_dst_name)
176 except OSError:
177 pass
178 os.symlink(linkto, abs_dst_name) # @UndefinedVariable
179 else:
180 shutil.copy2(abs_src_name, abs_dst_name)
181 copied_files.append(abs_dst_name)
182 return copied_files
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/file_copier.py b/conans/client/file_copier.py
--- a/conans/client/file_copier.py
+++ b/conans/client/file_copier.py
@@ -91,6 +91,14 @@
"""
filenames = []
linked_folders = []
+ if excludes:
+ if not isinstance(excludes, (tuple, list)):
+ excludes = (excludes, )
+ if ignore_case:
+ excludes = [e.lower() for e in excludes]
+ else:
+ excludes = []
+
for root, subfolders, files in os.walk(src, followlinks=True):
if root in self._excluded:
subfolders[:] = []
@@ -112,6 +120,11 @@
pass
relative_path = os.path.relpath(root, src)
+ for exclude in excludes:
+ if fnmatch.fnmatch(relative_path, exclude):
+ subfolders[:] = []
+ files = []
+ break
for f in files:
relative_name = os.path.normpath(os.path.join(relative_path, f))
filenames.append(relative_name)
@@ -121,13 +134,8 @@
pattern = pattern.lower()
files_to_copy = fnmatch.filter(filenames, pattern)
- if excludes:
- if not isinstance(excludes, (tuple, list)):
- excludes = (excludes, )
- if ignore_case:
- excludes = [e.lower() for e in excludes]
- for exclude in excludes:
- files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
+ for exclude in excludes:
+ files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
if ignore_case:
files_to_copy = [filenames[f] for f in files_to_copy]
@@ -136,8 +144,20 @@
@staticmethod
def _link_folders(src, dst, linked_folders):
+ created_links = []
for linked_folder in linked_folders:
- link = os.readlink(os.path.join(src, linked_folder))
+ src_link = os.path.join(src, linked_folder)
+ # Discard symlinks that go out of the src folder
+ abs_path = os.path.realpath(src_link)
+ relpath = os.path.relpath(abs_path, src)
+ if relpath.startswith("."):
+ continue
+
+ link = os.readlink(src_link)
+ # Absoluted path symlinks are a problem, convert it to relative
+ if os.path.isabs(link):
+ link = os.path.relpath(link, os.path.dirname(src_link))
+
dst_link = os.path.join(dst, linked_folder)
try:
# Remove the previous symlink
@@ -148,12 +168,19 @@
# e.g.: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
mkdir(os.path.dirname(dst_link))
os.symlink(link, dst_link)
+ created_links.append(dst_link)
# Remove empty links
- for linked_folder in linked_folders:
- dst_link = os.path.join(dst, linked_folder)
+ for dst_link in created_links:
abs_path = os.path.realpath(dst_link)
if not os.path.exists(abs_path):
+ base_path = os.path.dirname(dst_link)
os.remove(dst_link)
+ while base_path.startswith(dst):
+ try: # Take advantage that os.rmdir does not delete non-empty dirs
+ os.rmdir(base_path)
+ except OSError:
+ break # not empty
+ base_path = os.path.dirname(base_path)
@staticmethod
def _copy_files(files, src, dst, keep_path, symlinks):
|
{"golden_diff": "diff --git a/conans/client/file_copier.py b/conans/client/file_copier.py\n--- a/conans/client/file_copier.py\n+++ b/conans/client/file_copier.py\n@@ -91,6 +91,14 @@\n \"\"\"\n filenames = []\n linked_folders = []\n+ if excludes:\n+ if not isinstance(excludes, (tuple, list)):\n+ excludes = (excludes, )\n+ if ignore_case:\n+ excludes = [e.lower() for e in excludes]\n+ else:\n+ excludes = []\n+\n for root, subfolders, files in os.walk(src, followlinks=True):\n if root in self._excluded:\n subfolders[:] = []\n@@ -112,6 +120,11 @@\n pass\n \n relative_path = os.path.relpath(root, src)\n+ for exclude in excludes:\n+ if fnmatch.fnmatch(relative_path, exclude):\n+ subfolders[:] = []\n+ files = []\n+ break\n for f in files:\n relative_name = os.path.normpath(os.path.join(relative_path, f))\n filenames.append(relative_name)\n@@ -121,13 +134,8 @@\n pattern = pattern.lower()\n \n files_to_copy = fnmatch.filter(filenames, pattern)\n- if excludes:\n- if not isinstance(excludes, (tuple, list)):\n- excludes = (excludes, )\n- if ignore_case:\n- excludes = [e.lower() for e in excludes]\n- for exclude in excludes:\n- files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n+ for exclude in excludes:\n+ files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n \n if ignore_case:\n files_to_copy = [filenames[f] for f in files_to_copy]\n@@ -136,8 +144,20 @@\n \n @staticmethod\n def _link_folders(src, dst, linked_folders):\n+ created_links = []\n for linked_folder in linked_folders:\n- link = os.readlink(os.path.join(src, linked_folder))\n+ src_link = os.path.join(src, linked_folder)\n+ # Discard symlinks that go out of the src folder\n+ abs_path = os.path.realpath(src_link)\n+ relpath = os.path.relpath(abs_path, src)\n+ if relpath.startswith(\".\"):\n+ continue\n+ \n+ link = os.readlink(src_link)\n+ # Absoluted path symlinks are a problem, convert it to relative\n+ if os.path.isabs(link):\n+ link = os.path.relpath(link, os.path.dirname(src_link))\n+\n dst_link = os.path.join(dst, linked_folder)\n try:\n # Remove the previous symlink\n@@ -148,12 +168,19 @@\n # e.g.: os.symlink(\"test/bar\", \"./foo/test_link\") will create a link to foo/test/bar in ./foo/test_link\n mkdir(os.path.dirname(dst_link))\n os.symlink(link, dst_link)\n+ created_links.append(dst_link)\n # Remove empty links\n- for linked_folder in linked_folders:\n- dst_link = os.path.join(dst, linked_folder)\n+ for dst_link in created_links:\n abs_path = os.path.realpath(dst_link)\n if not os.path.exists(abs_path):\n+ base_path = os.path.dirname(dst_link)\n os.remove(dst_link)\n+ while base_path.startswith(dst):\n+ try: # Take advantage that os.rmdir does not delete non-empty dirs\n+ os.rmdir(base_path)\n+ except OSError:\n+ break # not empty\n+ base_path = os.path.dirname(base_path)\n \n @staticmethod\n def _copy_files(files, src, dst, keep_path, symlinks):\n", "issue": "Fix symlinks not copied during export\n- [x] Refer to the issue that supports this Pull Request: fixes #3258\r\n- [x] If the issue has missing info, explain the purpose/use case/pain/need that covers this Pull Request.\r\n- [x] I've read the [Contributing guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've followed the PEP8 style guides for Python code.\r\n- [ ] I've opened another PR in the Conan docs repo to the ``develop`` branch, documenting this one. Also adding a description of the changes in the ``changelog.rst`` file. https://github.com/conan-io/docs\r\n\n", "before_files": [{"content": "import os\nimport fnmatch\nimport shutil\nfrom collections import defaultdict\n\nfrom conans.util.files import mkdir\n\n\ndef report_copied_files(copied, output):\n ext_files = defaultdict(list)\n for f in copied:\n _, ext = os.path.splitext(f)\n ext_files[ext].append(os.path.basename(f))\n\n if not ext_files:\n return False\n\n for ext, files in ext_files.items():\n files_str = (\", \".join(files)) if len(files) < 5 else \"\"\n file_or_files = \"file\" if len(files) == 1 else \"files\"\n if not ext:\n output.info(\"Copied %d %s: %s\" % (len(files), file_or_files, files_str))\n else:\n output.info(\"Copied %d '%s' %s: %s\" % (len(files), ext, file_or_files, files_str))\n return True\n\n\nclass FileCopier(object):\n \"\"\" main responsible of copying files from place to place:\n package: build folder -> package folder\n imports: package folder -> user folder\n export: user folder -> store \"export\" folder\n \"\"\"\n def __init__(self, root_source_folder, root_destination_folder, excluded=None):\n \"\"\"\n Takes the base folders to copy resources src -> dst. These folders names\n will not be used in the relative names while copying\n param root_source_folder: The base folder to copy things from, typically the\n store build folder\n param root_destination_folder: The base folder to copy things to, typicall the\n store package folder\n \"\"\"\n self._base_src = root_source_folder\n self._base_dst = root_destination_folder\n self._copied = []\n self._excluded = [root_destination_folder]\n if excluded:\n self._excluded.append(excluded)\n\n def report(self, output):\n return report_copied_files(self._copied, output)\n\n def __call__(self, pattern, dst=\"\", src=\"\", keep_path=True, links=False, symlinks=None,\n excludes=None, ignore_case=False):\n \"\"\"\n param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll\n param dst: the destination local folder, wrt to current conanfile dir, to which\n the files will be copied. Eg: \"bin\"\n param src: the source folder in which those files will be searched. This folder\n will be stripped from the dst name. Eg.: lib/Debug/x86\n param keep_path: False if you want the relative paths to be maintained from\n src to dst folders, or just drop. False is useful if you want\n to collect e.g. many *.libs among many dirs into a single\n lib dir\n return: list of copied files\n \"\"\"\n if symlinks is not None:\n links = symlinks\n # Check for ../ patterns and allow them\n if pattern.startswith(\"..\"):\n rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))\n base_src = os.path.dirname(rel_dir)\n pattern = os.path.basename(rel_dir)\n else:\n base_src = self._base_src\n\n src = os.path.join(base_src, src)\n dst = os.path.join(self._base_dst, dst)\n\n files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,\n ignore_case)\n copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)\n self._link_folders(src, dst, link_folders)\n self._copied.extend(files_to_copy)\n return copied_files\n\n def _filter_files(self, src, pattern, links, excludes, ignore_case):\n\n \"\"\" return a list of the files matching the patterns\n The list will be relative path names wrt to the root src folder\n \"\"\"\n filenames = []\n linked_folders = []\n for root, subfolders, files in os.walk(src, followlinks=True):\n if root in self._excluded:\n subfolders[:] = []\n continue\n\n if links and os.path.islink(root):\n linked_folders.append(os.path.relpath(root, src))\n subfolders[:] = []\n continue\n basename = os.path.basename(root)\n # Skip git or svn subfolders\n if basename in [\".git\", \".svn\"]:\n subfolders[:] = []\n continue\n if basename == \"test_package\": # DO NOT export test_package/build folder\n try:\n subfolders.remove(\"build\")\n except:\n pass\n\n relative_path = os.path.relpath(root, src)\n for f in files:\n relative_name = os.path.normpath(os.path.join(relative_path, f))\n filenames.append(relative_name)\n\n if ignore_case:\n filenames = {f.lower(): f for f in filenames}\n pattern = pattern.lower()\n\n files_to_copy = fnmatch.filter(filenames, pattern)\n if excludes:\n if not isinstance(excludes, (tuple, list)):\n excludes = (excludes, )\n if ignore_case:\n excludes = [e.lower() for e in excludes]\n for exclude in excludes:\n files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n\n if ignore_case:\n files_to_copy = [filenames[f] for f in files_to_copy]\n\n return files_to_copy, linked_folders\n\n @staticmethod\n def _link_folders(src, dst, linked_folders):\n for linked_folder in linked_folders:\n link = os.readlink(os.path.join(src, linked_folder))\n dst_link = os.path.join(dst, linked_folder)\n try:\n # Remove the previous symlink\n os.remove(dst_link)\n except OSError:\n pass\n # link is a string relative to linked_folder\n # e.g.: os.symlink(\"test/bar\", \"./foo/test_link\") will create a link to foo/test/bar in ./foo/test_link\n mkdir(os.path.dirname(dst_link))\n os.symlink(link, dst_link)\n # Remove empty links\n for linked_folder in linked_folders:\n dst_link = os.path.join(dst, linked_folder)\n abs_path = os.path.realpath(dst_link)\n if not os.path.exists(abs_path):\n os.remove(dst_link)\n\n @staticmethod\n def _copy_files(files, src, dst, keep_path, symlinks):\n \"\"\" executes a multiple file copy from [(src_file, dst_file), (..)]\n managing symlinks if necessary\n \"\"\"\n copied_files = []\n for filename in files:\n abs_src_name = os.path.join(src, filename)\n filename = filename if keep_path else os.path.basename(filename)\n abs_dst_name = os.path.normpath(os.path.join(dst, filename))\n try:\n os.makedirs(os.path.dirname(abs_dst_name))\n except:\n pass\n if symlinks and os.path.islink(abs_src_name):\n linkto = os.readlink(abs_src_name) # @UndefinedVariable\n try:\n os.remove(abs_dst_name)\n except OSError:\n pass\n os.symlink(linkto, abs_dst_name) # @UndefinedVariable\n else:\n shutil.copy2(abs_src_name, abs_dst_name)\n copied_files.append(abs_dst_name)\n return copied_files\n", "path": "conans/client/file_copier.py"}], "after_files": [{"content": "import os\nimport fnmatch\nimport shutil\nfrom collections import defaultdict\n\nfrom conans.util.files import mkdir\n\n\ndef report_copied_files(copied, output):\n ext_files = defaultdict(list)\n for f in copied:\n _, ext = os.path.splitext(f)\n ext_files[ext].append(os.path.basename(f))\n\n if not ext_files:\n return False\n\n for ext, files in ext_files.items():\n files_str = (\", \".join(files)) if len(files) < 5 else \"\"\n file_or_files = \"file\" if len(files) == 1 else \"files\"\n if not ext:\n output.info(\"Copied %d %s: %s\" % (len(files), file_or_files, files_str))\n else:\n output.info(\"Copied %d '%s' %s: %s\" % (len(files), ext, file_or_files, files_str))\n return True\n\n\nclass FileCopier(object):\n \"\"\" main responsible of copying files from place to place:\n package: build folder -> package folder\n imports: package folder -> user folder\n export: user folder -> store \"export\" folder\n \"\"\"\n def __init__(self, root_source_folder, root_destination_folder, excluded=None):\n \"\"\"\n Takes the base folders to copy resources src -> dst. These folders names\n will not be used in the relative names while copying\n param root_source_folder: The base folder to copy things from, typically the\n store build folder\n param root_destination_folder: The base folder to copy things to, typicall the\n store package folder\n \"\"\"\n self._base_src = root_source_folder\n self._base_dst = root_destination_folder\n self._copied = []\n self._excluded = [root_destination_folder]\n if excluded:\n self._excluded.append(excluded)\n\n def report(self, output):\n return report_copied_files(self._copied, output)\n\n def __call__(self, pattern, dst=\"\", src=\"\", keep_path=True, links=False, symlinks=None,\n excludes=None, ignore_case=False):\n \"\"\"\n param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll\n param dst: the destination local folder, wrt to current conanfile dir, to which\n the files will be copied. Eg: \"bin\"\n param src: the source folder in which those files will be searched. This folder\n will be stripped from the dst name. Eg.: lib/Debug/x86\n param keep_path: False if you want the relative paths to be maintained from\n src to dst folders, or just drop. False is useful if you want\n to collect e.g. many *.libs among many dirs into a single\n lib dir\n return: list of copied files\n \"\"\"\n if symlinks is not None:\n links = symlinks\n # Check for ../ patterns and allow them\n if pattern.startswith(\"..\"):\n rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))\n base_src = os.path.dirname(rel_dir)\n pattern = os.path.basename(rel_dir)\n else:\n base_src = self._base_src\n\n src = os.path.join(base_src, src)\n dst = os.path.join(self._base_dst, dst)\n\n files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,\n ignore_case)\n copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)\n self._link_folders(src, dst, link_folders)\n self._copied.extend(files_to_copy)\n return copied_files\n\n def _filter_files(self, src, pattern, links, excludes, ignore_case):\n\n \"\"\" return a list of the files matching the patterns\n The list will be relative path names wrt to the root src folder\n \"\"\"\n filenames = []\n linked_folders = []\n if excludes:\n if not isinstance(excludes, (tuple, list)):\n excludes = (excludes, )\n if ignore_case:\n excludes = [e.lower() for e in excludes]\n else:\n excludes = []\n\n for root, subfolders, files in os.walk(src, followlinks=True):\n if root in self._excluded:\n subfolders[:] = []\n continue\n\n if links and os.path.islink(root):\n linked_folders.append(os.path.relpath(root, src))\n subfolders[:] = []\n continue\n basename = os.path.basename(root)\n # Skip git or svn subfolders\n if basename in [\".git\", \".svn\"]:\n subfolders[:] = []\n continue\n if basename == \"test_package\": # DO NOT export test_package/build folder\n try:\n subfolders.remove(\"build\")\n except:\n pass\n\n relative_path = os.path.relpath(root, src)\n for exclude in excludes:\n if fnmatch.fnmatch(relative_path, exclude):\n subfolders[:] = []\n files = []\n break\n for f in files:\n relative_name = os.path.normpath(os.path.join(relative_path, f))\n filenames.append(relative_name)\n\n if ignore_case:\n filenames = {f.lower(): f for f in filenames}\n pattern = pattern.lower()\n\n files_to_copy = fnmatch.filter(filenames, pattern)\n for exclude in excludes:\n files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n\n if ignore_case:\n files_to_copy = [filenames[f] for f in files_to_copy]\n\n return files_to_copy, linked_folders\n\n @staticmethod\n def _link_folders(src, dst, linked_folders):\n created_links = []\n for linked_folder in linked_folders:\n src_link = os.path.join(src, linked_folder)\n # Discard symlinks that go out of the src folder\n abs_path = os.path.realpath(src_link)\n relpath = os.path.relpath(abs_path, src)\n if relpath.startswith(\".\"):\n continue\n \n link = os.readlink(src_link)\n # Absoluted path symlinks are a problem, convert it to relative\n if os.path.isabs(link):\n link = os.path.relpath(link, os.path.dirname(src_link))\n\n dst_link = os.path.join(dst, linked_folder)\n try:\n # Remove the previous symlink\n os.remove(dst_link)\n except OSError:\n pass\n # link is a string relative to linked_folder\n # e.g.: os.symlink(\"test/bar\", \"./foo/test_link\") will create a link to foo/test/bar in ./foo/test_link\n mkdir(os.path.dirname(dst_link))\n os.symlink(link, dst_link)\n created_links.append(dst_link)\n # Remove empty links\n for dst_link in created_links:\n abs_path = os.path.realpath(dst_link)\n if not os.path.exists(abs_path):\n base_path = os.path.dirname(dst_link)\n os.remove(dst_link)\n while base_path.startswith(dst):\n try: # Take advantage that os.rmdir does not delete non-empty dirs\n os.rmdir(base_path)\n except OSError:\n break # not empty\n base_path = os.path.dirname(base_path)\n\n @staticmethod\n def _copy_files(files, src, dst, keep_path, symlinks):\n \"\"\" executes a multiple file copy from [(src_file, dst_file), (..)]\n managing symlinks if necessary\n \"\"\"\n copied_files = []\n for filename in files:\n abs_src_name = os.path.join(src, filename)\n filename = filename if keep_path else os.path.basename(filename)\n abs_dst_name = os.path.normpath(os.path.join(dst, filename))\n try:\n os.makedirs(os.path.dirname(abs_dst_name))\n except:\n pass\n if symlinks and os.path.islink(abs_src_name):\n linkto = os.readlink(abs_src_name) # @UndefinedVariable\n try:\n os.remove(abs_dst_name)\n except OSError:\n pass\n os.symlink(linkto, abs_dst_name) # @UndefinedVariable\n else:\n shutil.copy2(abs_src_name, abs_dst_name)\n copied_files.append(abs_dst_name)\n return copied_files\n", "path": "conans/client/file_copier.py"}]}
| 2,431 | 848 |
gh_patches_debug_18014
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-1606
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tf1 plugin better stream
Hi,
Are you sure there isn't better stream than 360p for TF1 ?
[cli][info] Found matching plugin tf1 for URL https://www.tf1.fr/tf1/direct
[cli][info] Available streams: 496k (worst), 234p_alt, 234p, 896k, 360p_alt, 360p (best)
I have a far better quality with a web browser than best.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/tf1.py`
Content:
```
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http, useragents
6 from streamlink.stream import HDSStream
7 from streamlink.stream import HLSStream
8
9
10 class TF1(Plugin):
11 url_re = re.compile(r"https?://(?:www\.)?(?:tf1\.fr/(tf1|tmc|tfx|tf1-series-films)/direct|(lci).fr/direct)/?")
12 embed_url = "http://www.wat.tv/embedframe/live{0}"
13 embed_re = re.compile(r"urlLive.*?:.*?\"(http.*?)\"", re.MULTILINE)
14 api_url = "http://www.wat.tv/get/{0}/591997"
15 swf_url = "http://www.wat.tv/images/v70/PlayerLite.swf"
16 hds_channel_remap = {"tf1": "androidliveconnect", "lci": "androidlivelci", "tfx" : "nt1live", "tf1-series-films" : "hd1live" }
17 hls_channel_remap = {"lci": "LCI", "tf1": "V4", "tfx" : "nt1", "tf1-series-films" : "hd1" }
18
19 @classmethod
20 def can_handle_url(cls, url):
21 return cls.url_re.match(url) is not None
22
23 def _get_hds_streams(self, channel):
24 channel = self.hds_channel_remap.get(channel, "{0}live".format(channel))
25 manifest_url = http.get(self.api_url.format(channel),
26 params={"getURL": 1},
27 headers={"User-Agent": useragents.FIREFOX}).text
28
29 for s in HDSStream.parse_manifest(self.session,
30 manifest_url,
31 pvswf=self.swf_url,
32 headers={"User-Agent": useragents.FIREFOX}).items():
33 yield s
34
35 def _get_hls_streams(self, channel):
36 channel = self.hls_channel_remap.get(channel, channel)
37 embed_url = self.embed_url.format(channel)
38 self.logger.debug("Found embed URL: {0}", embed_url)
39 # page needs to have a mobile user agent
40 embed_page = http.get(embed_url, headers={"User-Agent": useragents.ANDROID})
41
42 m = self.embed_re.search(embed_page.text)
43 if m:
44 hls_stream_url = m.group(1)
45
46 try:
47 for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
48 yield s
49 except Exception:
50 self.logger.error("Failed to load the HLS playlist for {0}", channel)
51
52 def _get_streams(self):
53 m = self.url_re.match(self.url)
54 if m:
55 channel = m.group(1) or m.group(2)
56 self.logger.debug("Found channel {0}", channel)
57 for s in self._get_hds_streams(channel):
58 yield s
59
60 for s in self._get_hls_streams(channel):
61 yield s
62
63
64 __plugin__ = TF1
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/tf1.py b/src/streamlink/plugins/tf1.py
--- a/src/streamlink/plugins/tf1.py
+++ b/src/streamlink/plugins/tf1.py
@@ -1,6 +1,7 @@
from __future__ import print_function
import re
+from streamlink.compat import urlparse, parse_qsl
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import HDSStream
@@ -41,8 +42,9 @@
m = self.embed_re.search(embed_page.text)
if m:
- hls_stream_url = m.group(1)
-
+ o = urlparse(m.group(1))
+ prms = dict(parse_qsl(o.query))
+ hls_stream_url = "{0}://{1}{2}?hdnea={3}".format(o.scheme, o.netloc, o.path, prms["hdnea"])
try:
for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
yield s
|
{"golden_diff": "diff --git a/src/streamlink/plugins/tf1.py b/src/streamlink/plugins/tf1.py\n--- a/src/streamlink/plugins/tf1.py\n+++ b/src/streamlink/plugins/tf1.py\n@@ -1,6 +1,7 @@\n from __future__ import print_function\n import re\n \n+from streamlink.compat import urlparse, parse_qsl\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http, useragents\n from streamlink.stream import HDSStream\n@@ -41,8 +42,9 @@\n \n m = self.embed_re.search(embed_page.text)\n if m:\n- hls_stream_url = m.group(1)\n-\n+ o = urlparse(m.group(1))\n+ prms = dict(parse_qsl(o.query))\n+ hls_stream_url = \"{0}://{1}{2}?hdnea={3}\".format(o.scheme, o.netloc, o.path, prms[\"hdnea\"])\n try:\n for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n yield s\n", "issue": "tf1 plugin better stream\nHi,\r\n\r\nAre you sure there isn't better stream than 360p for TF1 ?\r\n[cli][info] Found matching plugin tf1 for URL https://www.tf1.fr/tf1/direct\r\n[cli][info] Available streams: 496k (worst), 234p_alt, 234p, 896k, 360p_alt, 360p (best)\r\n\r\nI have a far better quality with a web browser than best.\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, useragents\nfrom streamlink.stream import HDSStream\nfrom streamlink.stream import HLSStream\n\n\nclass TF1(Plugin):\n url_re = re.compile(r\"https?://(?:www\\.)?(?:tf1\\.fr/(tf1|tmc|tfx|tf1-series-films)/direct|(lci).fr/direct)/?\")\n embed_url = \"http://www.wat.tv/embedframe/live{0}\"\n embed_re = re.compile(r\"urlLive.*?:.*?\\\"(http.*?)\\\"\", re.MULTILINE)\n api_url = \"http://www.wat.tv/get/{0}/591997\"\n swf_url = \"http://www.wat.tv/images/v70/PlayerLite.swf\"\n hds_channel_remap = {\"tf1\": \"androidliveconnect\", \"lci\": \"androidlivelci\", \"tfx\" : \"nt1live\", \"tf1-series-films\" : \"hd1live\" }\n hls_channel_remap = {\"lci\": \"LCI\", \"tf1\": \"V4\", \"tfx\" : \"nt1\", \"tf1-series-films\" : \"hd1\" }\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_hds_streams(self, channel):\n channel = self.hds_channel_remap.get(channel, \"{0}live\".format(channel))\n manifest_url = http.get(self.api_url.format(channel),\n params={\"getURL\": 1},\n headers={\"User-Agent\": useragents.FIREFOX}).text\n\n for s in HDSStream.parse_manifest(self.session,\n manifest_url,\n pvswf=self.swf_url,\n headers={\"User-Agent\": useragents.FIREFOX}).items():\n yield s\n\n def _get_hls_streams(self, channel):\n channel = self.hls_channel_remap.get(channel, channel)\n embed_url = self.embed_url.format(channel)\n self.logger.debug(\"Found embed URL: {0}\", embed_url)\n # page needs to have a mobile user agent\n embed_page = http.get(embed_url, headers={\"User-Agent\": useragents.ANDROID})\n\n m = self.embed_re.search(embed_page.text)\n if m:\n hls_stream_url = m.group(1)\n\n try:\n for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n yield s\n except Exception:\n self.logger.error(\"Failed to load the HLS playlist for {0}\", channel)\n\n def _get_streams(self):\n m = self.url_re.match(self.url)\n if m:\n channel = m.group(1) or m.group(2)\n self.logger.debug(\"Found channel {0}\", channel)\n for s in self._get_hds_streams(channel):\n yield s\n\n for s in self._get_hls_streams(channel):\n yield s\n\n\n__plugin__ = TF1\n", "path": "src/streamlink/plugins/tf1.py"}], "after_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.compat import urlparse, parse_qsl\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, useragents\nfrom streamlink.stream import HDSStream\nfrom streamlink.stream import HLSStream\n\n\nclass TF1(Plugin):\n url_re = re.compile(r\"https?://(?:www\\.)?(?:tf1\\.fr/(tf1|tmc|tfx|tf1-series-films)/direct|(lci).fr/direct)/?\")\n embed_url = \"http://www.wat.tv/embedframe/live{0}\"\n embed_re = re.compile(r\"urlLive.*?:.*?\\\"(http.*?)\\\"\", re.MULTILINE)\n api_url = \"http://www.wat.tv/get/{0}/591997\"\n swf_url = \"http://www.wat.tv/images/v70/PlayerLite.swf\"\n hds_channel_remap = {\"tf1\": \"androidliveconnect\", \"lci\": \"androidlivelci\", \"tfx\" : \"nt1live\", \"tf1-series-films\" : \"hd1live\" }\n hls_channel_remap = {\"lci\": \"LCI\", \"tf1\": \"V4\", \"tfx\" : \"nt1\", \"tf1-series-films\" : \"hd1\" }\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_hds_streams(self, channel):\n channel = self.hds_channel_remap.get(channel, \"{0}live\".format(channel))\n manifest_url = http.get(self.api_url.format(channel),\n params={\"getURL\": 1},\n headers={\"User-Agent\": useragents.FIREFOX}).text\n\n for s in HDSStream.parse_manifest(self.session,\n manifest_url,\n pvswf=self.swf_url,\n headers={\"User-Agent\": useragents.FIREFOX}).items():\n yield s\n\n def _get_hls_streams(self, channel):\n channel = self.hls_channel_remap.get(channel, channel)\n embed_url = self.embed_url.format(channel)\n self.logger.debug(\"Found embed URL: {0}\", embed_url)\n # page needs to have a mobile user agent\n embed_page = http.get(embed_url, headers={\"User-Agent\": useragents.ANDROID})\n\n m = self.embed_re.search(embed_page.text)\n if m:\n o = urlparse(m.group(1))\n prms = dict(parse_qsl(o.query))\n hls_stream_url = \"{0}://{1}{2}?hdnea={3}\".format(o.scheme, o.netloc, o.path, prms[\"hdnea\"])\n try:\n for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n yield s\n except Exception:\n self.logger.error(\"Failed to load the HLS playlist for {0}\", channel)\n\n def _get_streams(self):\n m = self.url_re.match(self.url)\n if m:\n channel = m.group(1) or m.group(2)\n self.logger.debug(\"Found channel {0}\", channel)\n for s in self._get_hds_streams(channel):\n yield s\n\n for s in self._get_hls_streams(channel):\n yield s\n\n\n__plugin__ = TF1\n", "path": "src/streamlink/plugins/tf1.py"}]}
| 1,164 | 230 |
gh_patches_debug_1466
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-1813
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Red input fields on first page load
When loading some pages, input fields are redish, suggesting there where validation errors, but there was not input submit yet.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/users/fields.py`
Content:
```
1 import re
2
3 from django import forms
4 from django.core.exceptions import ValidationError
5 from django.core.validators import EmailValidator
6 from django.forms import widgets
7 from django.utils.translation import ugettext_lazy as _
8
9
10 class CommaSeparatedEmailField(forms.Field):
11 email_validator = EmailValidator(
12 message=_('Please enter correct email addresses, separated by '
13 'commas.')
14 )
15
16 widget = widgets.TextInput(attrs={
17 'placeholder': 'maria@example.com, peter@example.com, '
18 'nicola@example.com,…'
19 })
20
21 def to_python(self, value):
22 if not value:
23 return []
24
25 emails = []
26 for email in value.split(','):
27 email = email.strip()
28 self.email_validator(email)
29 emails.append(email)
30
31 return emails
32
33
34 class EmailFileField(forms.FileField):
35 """Extract emails from uploaded text files."""
36
37 widget = widgets.FileInput
38 # Find possible email strings. Emails may be quoted and separated by
39 # whitespaces, commas, semicolons or < and >.
40 email_regex = re.compile(r'[^\s;,"\'<]+@[^\s;,"\'>]+\.[a-z]{2,}')
41 email_validator = EmailValidator()
42
43 def clean(self, data, initial=None):
44 file = super().clean(data, initial)
45 return self._extract_emails(file)
46
47 def _extract_emails(self, file):
48 if not file:
49 return []
50
51 emails = []
52 for byteline in file:
53 # As it is difficult to guess the correct encoding of a file,
54 # email addresses are restricted to contain only ascii letters.
55 # This works for every encoding which is a superset of ascii like
56 # utf-8 and latin-1. Non ascii chars are simply ignored.
57 line = byteline.decode('ascii', 'ignore')
58 for match in self.email_regex.finditer(line):
59 email = match.group(0)
60 if self.is_valid_email(email):
61 emails.append(email)
62 return emails
63
64 def is_valid_email(self, email):
65 try:
66 self.email_validator(email)
67 return True
68 except ValidationError:
69 return False
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/users/fields.py b/meinberlin/apps/users/fields.py
--- a/meinberlin/apps/users/fields.py
+++ b/meinberlin/apps/users/fields.py
@@ -14,8 +14,7 @@
)
widget = widgets.TextInput(attrs={
- 'placeholder': 'maria@example.com, peter@example.com, '
- 'nicola@example.com,…'
+ 'placeholder': 'maria@example.com, peter@example.com,…'
})
def to_python(self, value):
|
{"golden_diff": "diff --git a/meinberlin/apps/users/fields.py b/meinberlin/apps/users/fields.py\n--- a/meinberlin/apps/users/fields.py\n+++ b/meinberlin/apps/users/fields.py\n@@ -14,8 +14,7 @@\n )\n \n widget = widgets.TextInput(attrs={\n- 'placeholder': 'maria@example.com, peter@example.com, '\n- 'nicola@example.com,\u2026'\n+ 'placeholder': 'maria@example.com, peter@example.com,\u2026'\n })\n \n def to_python(self, value):\n", "issue": "Red input fields on first page load\nWhen loading some pages, input fields are redish, suggesting there where validation errors, but there was not input submit yet.\r\n\r\n\n", "before_files": [{"content": "import re\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.forms import widgets\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CommaSeparatedEmailField(forms.Field):\n email_validator = EmailValidator(\n message=_('Please enter correct email addresses, separated by '\n 'commas.')\n )\n\n widget = widgets.TextInput(attrs={\n 'placeholder': 'maria@example.com, peter@example.com, '\n 'nicola@example.com,\u2026'\n })\n\n def to_python(self, value):\n if not value:\n return []\n\n emails = []\n for email in value.split(','):\n email = email.strip()\n self.email_validator(email)\n emails.append(email)\n\n return emails\n\n\nclass EmailFileField(forms.FileField):\n \"\"\"Extract emails from uploaded text files.\"\"\"\n\n widget = widgets.FileInput\n # Find possible email strings. Emails may be quoted and separated by\n # whitespaces, commas, semicolons or < and >.\n email_regex = re.compile(r'[^\\s;,\"\\'<]+@[^\\s;,\"\\'>]+\\.[a-z]{2,}')\n email_validator = EmailValidator()\n\n def clean(self, data, initial=None):\n file = super().clean(data, initial)\n return self._extract_emails(file)\n\n def _extract_emails(self, file):\n if not file:\n return []\n\n emails = []\n for byteline in file:\n # As it is difficult to guess the correct encoding of a file,\n # email addresses are restricted to contain only ascii letters.\n # This works for every encoding which is a superset of ascii like\n # utf-8 and latin-1. Non ascii chars are simply ignored.\n line = byteline.decode('ascii', 'ignore')\n for match in self.email_regex.finditer(line):\n email = match.group(0)\n if self.is_valid_email(email):\n emails.append(email)\n return emails\n\n def is_valid_email(self, email):\n try:\n self.email_validator(email)\n return True\n except ValidationError:\n return False\n", "path": "meinberlin/apps/users/fields.py"}], "after_files": [{"content": "import re\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.forms import widgets\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CommaSeparatedEmailField(forms.Field):\n email_validator = EmailValidator(\n message=_('Please enter correct email addresses, separated by '\n 'commas.')\n )\n\n widget = widgets.TextInput(attrs={\n 'placeholder': 'maria@example.com, peter@example.com,\u2026'\n })\n\n def to_python(self, value):\n if not value:\n return []\n\n emails = []\n for email in value.split(','):\n email = email.strip()\n self.email_validator(email)\n emails.append(email)\n\n return emails\n\n\nclass EmailFileField(forms.FileField):\n \"\"\"Extract emails from uploaded text files.\"\"\"\n\n widget = widgets.FileInput\n # Find possible email strings. Emails may be quoted and separated by\n # whitespaces, commas, semicolons or < and >.\n email_regex = re.compile(r'[^\\s;,\"\\'<]+@[^\\s;,\"\\'>]+\\.[a-z]{2,}')\n email_validator = EmailValidator()\n\n def clean(self, data, initial=None):\n file = super().clean(data, initial)\n return self._extract_emails(file)\n\n def _extract_emails(self, file):\n if not file:\n return []\n\n emails = []\n for byteline in file:\n # As it is difficult to guess the correct encoding of a file,\n # email addresses are restricted to contain only ascii letters.\n # This works for every encoding which is a superset of ascii like\n # utf-8 and latin-1. Non ascii chars are simply ignored.\n line = byteline.decode('ascii', 'ignore')\n for match in self.email_regex.finditer(line):\n email = match.group(0)\n if self.is_valid_email(email):\n emails.append(email)\n return emails\n\n def is_valid_email(self, email):\n try:\n self.email_validator(email)\n return True\n except ValidationError:\n return False\n", "path": "meinberlin/apps/users/fields.py"}]}
| 966 | 122 |
gh_patches_debug_14599
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2939
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Integrate DRF Spectacular with Mathesar's backend codebase
## Why DRF Spectacular
-DRF Spectacular automatically generates comprehensive documentation based on OpenAPI specification for Mathesar's API.
-It is compatible with Mathesar's Django REST Framework
## Proposed solution
-Add the "drf-spectacular" package to the project's dependencies
-Configure the SPECTACULAR_SETTINGS in the Mathesar's settings file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings/common_settings.py`
Content:
```
1 """
2 Base settings to build other settings files upon.
3
4 Generated by 'django-admin startproject' using Django 3.1.7.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.1/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.1/ref/settings/
11 """
12
13 import os
14 from pathlib import Path
15
16 from decouple import Csv, config as decouple_config
17 from dj_database_url import parse as db_url
18
19
20 # We use a 'tuple' with pipes as delimiters as decople naively splits the global
21 # variables on commas when casting to Csv()
22 def pipe_delim(pipe_string):
23 # Remove opening and closing brackets
24 pipe_string = pipe_string[1:-1]
25 # Split on pipe delim
26 return pipe_string.split("|")
27
28
29 # Build paths inside the project like this: BASE_DIR / 'subdir'.
30 BASE_DIR = Path(__file__).resolve().parent.parent.parent
31
32 # Application definition
33
34 INSTALLED_APPS = [
35 "django.contrib.admin",
36 "django.contrib.auth",
37 "django.contrib.contenttypes",
38 "django.contrib.sessions",
39 "django.contrib.messages",
40 "whitenoise.runserver_nostatic",
41 "django.contrib.staticfiles",
42 "rest_framework",
43 "django_filters",
44 "django_property_filter",
45 "mathesar",
46 ]
47
48 MIDDLEWARE = [
49 "django.middleware.security.SecurityMiddleware",
50 "whitenoise.middleware.WhiteNoiseMiddleware",
51 "django.contrib.sessions.middleware.SessionMiddleware",
52 "django.middleware.common.CommonMiddleware",
53 "django.middleware.csrf.CsrfViewMiddleware",
54 "django.contrib.auth.middleware.AuthenticationMiddleware",
55 "django.contrib.messages.middleware.MessageMiddleware",
56 "django.middleware.clickjacking.XFrameOptionsMiddleware",
57 "mathesar.middleware.CursorClosedHandlerMiddleware",
58 "mathesar.middleware.PasswordChangeNeededMiddleware",
59 'django_userforeignkey.middleware.UserForeignKeyMiddleware',
60 'django_request_cache.middleware.RequestCacheMiddleware',
61 ]
62
63 ROOT_URLCONF = "config.urls"
64
65 TEMPLATES = [
66 {
67 "BACKEND": "django.template.backends.django.DjangoTemplates",
68 "DIRS": [],
69 "APP_DIRS": True,
70 "OPTIONS": {
71 "context_processors": [
72 "config.context_processors.frontend_settings",
73 "django.template.context_processors.debug",
74 "django.template.context_processors.request",
75 "django.contrib.auth.context_processors.auth",
76 "django.contrib.messages.context_processors.messages",
77 "mathesar.template_context_processors.base_template_extensions.script_extension_templates"
78 ],
79 },
80 },
81 ]
82
83 WSGI_APPLICATION = "config.wsgi.application"
84
85 # Database
86 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases
87
88 # TODO: Add to documentation that database keys should not be than 128 characters.
89
90 # MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'
91 # See pipe_delim above for why we use pipes as delimiters
92 DATABASES = {
93 db_key: db_url(url_string)
94 for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))
95 }
96 DATABASES[decouple_config('DJANGO_DATABASE_KEY', default="default")] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)
97
98 for db_key, db_dict in DATABASES.items():
99 # Engine can be '.postgresql' or '.postgresql_psycopg2'
100 if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):
101 raise ValueError(
102 f"{db_key} is not a PostgreSQL database. "
103 f"{db_dict['ENGINE']} found for {db_key}'s engine."
104 )
105
106 # pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'
107 # and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']
108 TEST = decouple_config('TEST', default=False, cast=bool)
109 if TEST:
110 for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):
111 DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}
112
113
114 # SECURITY WARNING: keep the secret key used in production secret!
115 SECRET_KEY = decouple_config('SECRET_KEY')
116
117 # SECURITY WARNING: don't run with debug turned on in production!
118 DEBUG = decouple_config('DEBUG', default=False, cast=bool)
119
120 ALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())
121
122 # Password validation
123 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
124
125 AUTH_PASSWORD_VALIDATORS = [
126 {
127 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
134 },
135 {
136 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
137 },
138 ]
139
140 # Internationalization
141 # https://docs.djangoproject.com/en/3.1/topics/i18n/
142
143 LANGUAGE_CODE = "en-us"
144
145 TIME_ZONE = "UTC"
146
147 USE_I18N = True
148
149 USE_L10N = True
150
151 USE_TZ = True
152
153 # Static files (CSS, JavaScript, Images)
154 # https://docs.djangoproject.com/en/3.1/howto/static-files/
155 # https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/
156
157 STATIC_URL = "/static/"
158
159 # When running with DEBUG=False, the webserver needs to serve files from this location
160 # python manage.py collectstatic has to be run to collect all static files into this location
161 # The files need to served in brotli or gzip compressed format
162 STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
163
164 # Media files (uploaded by the user)
165
166 MEDIA_ROOT = os.path.join(BASE_DIR, '.media/')
167
168 MEDIA_URL = "/media/"
169
170 # Update Authentication classes, removed BasicAuthentication
171 # Defaults: https://www.django-rest-framework.org/api-guide/settings/
172 REST_FRAMEWORK = {
173 'DEFAULT_AUTHENTICATION_CLASSES': [
174 'rest_framework.authentication.TokenAuthentication',
175 'rest_framework.authentication.SessionAuthentication'
176 ],
177 'DEFAULT_PERMISSION_CLASSES': [
178 'rest_framework.permissions.IsAuthenticated',
179 ],
180 'DEFAULT_FILTER_BACKENDS': (
181 'django_filters.rest_framework.DjangoFilterBackend',
182 'rest_framework.filters.OrderingFilter',
183 ),
184 'TEST_REQUEST_DEFAULT_FORMAT': 'json',
185 'EXCEPTION_HANDLER':
186 'mathesar.exception_handlers.mathesar_exception_handler'
187 }
188 FRIENDLY_ERRORS = {
189 'FIELD_ERRORS': {
190 # By default drf-friendly-errors does contain error codes for ListSerializer type
191 'ListSerializer': {
192 'required': 2007,
193 'null': 2027,
194 'invalid_choice': 2083,
195 'not_a_list': 2123,
196 'empty': 2093
197 },
198 'PermittedPkRelatedField': {
199 'required': 2007,
200 'null': 2027,
201 'does_not_exist': 2151,
202 'incorrect_type': 2161
203 },
204 'PermittedSlugRelatedField': {
205 'required': 2007, 'invalid': 2002, 'null': 2027,
206 'does_not_exist': 2151, 'incorrect_type': 2161
207 },
208 },
209 'EXCEPTION_DICT': {
210 'Http404': 4005
211 }
212 }
213 # Mathesar settings
214 MATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')
215 MATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')
216 MATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')
217 MATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'
218 MATHESAR_UI_SOURCE_LOCATION = os.path.join(BASE_DIR, 'mathesar_ui/')
219 MATHESAR_CAPTURE_UNHANDLED_EXCEPTION = decouple_config('CAPTURE_UNHANDLED_EXCEPTION', default=False)
220 MATHESAR_STATIC_NON_CODE_FILES_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/non-code/')
221
222 # UI source files have to be served by Django in order for static assets to be included during dev mode
223 # https://vitejs.dev/guide/assets.html
224 # https://vitejs.dev/guide/backend-integration.html
225 STATICFILES_DIRS = [MATHESAR_UI_SOURCE_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION] if MATHESAR_MODE == 'DEVELOPMENT' else [MATHESAR_UI_BUILD_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION]
226 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
227
228 # Accounts
229 AUTH_USER_MODEL = 'mathesar.User'
230 LOGIN_URL = '/auth/login/'
231 LOGIN_REDIRECT_URL = '/'
232 LOGOUT_REDIRECT_URL = LOGIN_URL
233 DRF_ACCESS_POLICY = {
234 'reusable_conditions': ['mathesar.api.permission_conditions']
235 }
236 # List of Template names that contains additional script tags to be added to the base template
237 BASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES = []
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/config/settings/common_settings.py b/config/settings/common_settings.py
--- a/config/settings/common_settings.py
+++ b/config/settings/common_settings.py
@@ -42,6 +42,7 @@
"rest_framework",
"django_filters",
"django_property_filter",
+ "drf_spectacular",
"mathesar",
]
@@ -183,7 +184,15 @@
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'EXCEPTION_HANDLER':
- 'mathesar.exception_handlers.mathesar_exception_handler'
+ 'mathesar.exception_handlers.mathesar_exception_handler',
+ 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema'
+}
+SPECTACULAR_SETTINGS = {
+ 'TITLE': 'Mathesar API',
+ 'DESCRIPTION': '',
+ 'VERSION': '1.0.0',
+ 'SERVE_INCLUDE_SCHEMA': False,
+ # OTHER SETTINGS
}
FRIENDLY_ERRORS = {
'FIELD_ERRORS': {
|
{"golden_diff": "diff --git a/config/settings/common_settings.py b/config/settings/common_settings.py\n--- a/config/settings/common_settings.py\n+++ b/config/settings/common_settings.py\n@@ -42,6 +42,7 @@\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n+ \"drf_spectacular\",\n \"mathesar\",\n ]\n \n@@ -183,7 +184,15 @@\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'EXCEPTION_HANDLER':\n- 'mathesar.exception_handlers.mathesar_exception_handler'\n+ 'mathesar.exception_handlers.mathesar_exception_handler',\n+ 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema'\n+}\n+SPECTACULAR_SETTINGS = {\n+ 'TITLE': 'Mathesar API',\n+ 'DESCRIPTION': '',\n+ 'VERSION': '1.0.0',\n+ 'SERVE_INCLUDE_SCHEMA': False,\n+ # OTHER SETTINGS\n }\n FRIENDLY_ERRORS = {\n 'FIELD_ERRORS': {\n", "issue": "Integrate DRF Spectacular with Mathesar's backend codebase\n## Why DRF Spectacular\r\n-DRF Spectacular automatically generates comprehensive documentation based on OpenAPI specification for Mathesar's API. \r\n-It is compatible with Mathesar's Django REST Framework\r\n## Proposed solution\r\n-Add the \"drf-spectacular\" package to the project's dependencies\r\n-Configure the SPECTACULAR_SETTINGS in the Mathesar's settings file \r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nBase settings to build other settings files upon.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"mathesar.middleware.CursorClosedHandlerMiddleware\",\n \"mathesar.middleware.PasswordChangeNeededMiddleware\",\n 'django_userforeignkey.middleware.UserForeignKeyMiddleware',\n 'django_request_cache.middleware.RequestCacheMiddleware',\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.frontend_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"mathesar.template_context_processors.base_template_extensions.script_extension_templates\"\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY', default=\"default\")] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\nfor db_key, db_dict in DATABASES.items():\n # Engine can be '.postgresql' or '.postgresql_psycopg2'\n if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):\n raise ValueError(\n f\"{db_key} is not a PostgreSQL database. \"\n f\"{db_dict['ENGINE']} found for {db_key}'s engine.\"\n )\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nTEST = decouple_config('TEST', default=False, cast=bool)\nif TEST:\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/\n\nSTATIC_URL = \"/static/\"\n\n# When running with DEBUG=False, the webserver needs to serve files from this location\n# python manage.py collectstatic has to be run to collect all static files into this location\n# The files need to served in brotli or gzip compressed format\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n\n# Update Authentication classes, removed BasicAuthentication\n# Defaults: https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n 'rest_framework.authentication.SessionAuthentication'\n ],\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'EXCEPTION_HANDLER':\n 'mathesar.exception_handlers.mathesar_exception_handler'\n}\nFRIENDLY_ERRORS = {\n 'FIELD_ERRORS': {\n # By default drf-friendly-errors does contain error codes for ListSerializer type\n 'ListSerializer': {\n 'required': 2007,\n 'null': 2027,\n 'invalid_choice': 2083,\n 'not_a_list': 2123,\n 'empty': 2093\n },\n 'PermittedPkRelatedField': {\n 'required': 2007,\n 'null': 2027,\n 'does_not_exist': 2151,\n 'incorrect_type': 2161\n },\n 'PermittedSlugRelatedField': {\n 'required': 2007, 'invalid': 2002, 'null': 2027,\n 'does_not_exist': 2151, 'incorrect_type': 2161\n },\n },\n 'EXCEPTION_DICT': {\n 'Http404': 4005\n }\n}\n# Mathesar settings\nMATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')\nMATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')\nMATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')\nMATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'\nMATHESAR_UI_SOURCE_LOCATION = os.path.join(BASE_DIR, 'mathesar_ui/')\nMATHESAR_CAPTURE_UNHANDLED_EXCEPTION = decouple_config('CAPTURE_UNHANDLED_EXCEPTION', default=False)\nMATHESAR_STATIC_NON_CODE_FILES_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/non-code/')\n\n# UI source files have to be served by Django in order for static assets to be included during dev mode\n# https://vitejs.dev/guide/assets.html\n# https://vitejs.dev/guide/backend-integration.html\nSTATICFILES_DIRS = [MATHESAR_UI_SOURCE_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION] if MATHESAR_MODE == 'DEVELOPMENT' else [MATHESAR_UI_BUILD_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION]\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Accounts\nAUTH_USER_MODEL = 'mathesar.User'\nLOGIN_URL = '/auth/login/'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = LOGIN_URL\nDRF_ACCESS_POLICY = {\n 'reusable_conditions': ['mathesar.api.permission_conditions']\n}\n# List of Template names that contains additional script tags to be added to the base template\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES = []\n", "path": "config/settings/common_settings.py"}], "after_files": [{"content": "\"\"\"\nBase settings to build other settings files upon.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"drf_spectacular\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"mathesar.middleware.CursorClosedHandlerMiddleware\",\n \"mathesar.middleware.PasswordChangeNeededMiddleware\",\n 'django_userforeignkey.middleware.UserForeignKeyMiddleware',\n 'django_request_cache.middleware.RequestCacheMiddleware',\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.frontend_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"mathesar.template_context_processors.base_template_extensions.script_extension_templates\"\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY', default=\"default\")] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\nfor db_key, db_dict in DATABASES.items():\n # Engine can be '.postgresql' or '.postgresql_psycopg2'\n if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):\n raise ValueError(\n f\"{db_key} is not a PostgreSQL database. \"\n f\"{db_dict['ENGINE']} found for {db_key}'s engine.\"\n )\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nTEST = decouple_config('TEST', default=False, cast=bool)\nif TEST:\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/\n\nSTATIC_URL = \"/static/\"\n\n# When running with DEBUG=False, the webserver needs to serve files from this location\n# python manage.py collectstatic has to be run to collect all static files into this location\n# The files need to served in brotli or gzip compressed format\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n\n# Update Authentication classes, removed BasicAuthentication\n# Defaults: https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n 'rest_framework.authentication.SessionAuthentication'\n ],\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'EXCEPTION_HANDLER':\n 'mathesar.exception_handlers.mathesar_exception_handler',\n 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema'\n}\nSPECTACULAR_SETTINGS = {\n 'TITLE': 'Mathesar API',\n 'DESCRIPTION': '',\n 'VERSION': '1.0.0',\n 'SERVE_INCLUDE_SCHEMA': False,\n # OTHER SETTINGS\n}\nFRIENDLY_ERRORS = {\n 'FIELD_ERRORS': {\n # By default drf-friendly-errors does contain error codes for ListSerializer type\n 'ListSerializer': {\n 'required': 2007,\n 'null': 2027,\n 'invalid_choice': 2083,\n 'not_a_list': 2123,\n 'empty': 2093\n },\n 'PermittedPkRelatedField': {\n 'required': 2007,\n 'null': 2027,\n 'does_not_exist': 2151,\n 'incorrect_type': 2161\n },\n 'PermittedSlugRelatedField': {\n 'required': 2007, 'invalid': 2002, 'null': 2027,\n 'does_not_exist': 2151, 'incorrect_type': 2161\n },\n },\n 'EXCEPTION_DICT': {\n 'Http404': 4005\n }\n}\n# Mathesar settings\nMATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')\nMATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')\nMATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')\nMATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'\nMATHESAR_UI_SOURCE_LOCATION = os.path.join(BASE_DIR, 'mathesar_ui/')\nMATHESAR_CAPTURE_UNHANDLED_EXCEPTION = decouple_config('CAPTURE_UNHANDLED_EXCEPTION', default=False)\nMATHESAR_STATIC_NON_CODE_FILES_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/non-code/')\n\n# UI source files have to be served by Django in order for static assets to be included during dev mode\n# https://vitejs.dev/guide/assets.html\n# https://vitejs.dev/guide/backend-integration.html\nSTATICFILES_DIRS = [MATHESAR_UI_SOURCE_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION] if MATHESAR_MODE == 'DEVELOPMENT' else [MATHESAR_UI_BUILD_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION]\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Accounts\nAUTH_USER_MODEL = 'mathesar.User'\nLOGIN_URL = '/auth/login/'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = LOGIN_URL\nDRF_ACCESS_POLICY = {\n 'reusable_conditions': ['mathesar.api.permission_conditions']\n}\n# List of Template names that contains additional script tags to be added to the base template\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES = []\n", "path": "config/settings/common_settings.py"}]}
| 2,954 | 215 |
gh_patches_debug_7117
|
rasdani/github-patches
|
git_diff
|
napari__napari-413
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Massive traceback when deleting a (markers) layer
## 🐛 Bug
Sorry, no time to investigate now, but here is the traceback I saw when deleting a markers layer (using the trash icon):
<details>
```pytb
In [15]: WARNING: Traceback (most recent call last):
File "/Users/jni/conda/envs/36/bin/ipython", line 11, in <module>
sys.exit(start_ipython())
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/__init__.py", line 125, in start_ipython
return launch_new_instance(argv=argv, **kwargs)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/ipapp.py", line 356, in start
self.shell.mainloop()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py", line 480, in mainloop
self.interact()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py", line 463, in interact
code = self.prompt_for_code()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py", line 376, in prompt_for_code
pre_run=self.pre_prompt, reset_current_buffer=True)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/interface.py", line 415, in run
self.eventloop.run(self.input, self.create_eventloop_callbacks())
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/eventloop/posix.py", line 102, in run
self._inputhook_context.call_inputhook(ready)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/eventloop/inputhook.py", line 74, in call_inputhook
self.inputhook(self)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py", line 495, in inputhook
self._inputhook(context)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/pt_inputhooks/qt.py", line 35, in inputhook
event_loop.exec_()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/components/_layers_list/model.py", line 182, in remove_selected
self.pop(i)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/list/_model.py", line 55, in pop
self.changed.removed(item=obj)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/event.py", line 489, in __call__
self._invoke_callback(cb, event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/event.py", line 504, in _invoke_callback
cb(event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/components/_layers_list/model.py", line 29, in _remove
layer.viewer = None
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/layers/_base_layer/model.py", line 162, in viewer
self._parent = parent
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/layers/_base_layer/_visual_wrapper.py", line 82, in _parent
self._node.parent = parent
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/frozen.py", line 17, in __setattr__
object.__setattr__(self, key, value)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/scene/node.py", line 209, in parent
self._update_trsys(None)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/_vispy/scene/visuals.py", line 82, in _update_trsys
self.transforms.scene_transform = scene.node_transform(doc)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/transform_system.py", line 276, in scene_transform
self._scene_transform.transforms = tr
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py", line 96, in transforms
self.update()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/base_transform.py", line 153, in update
self.changed(*args)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 455, in __call__
self._invoke_callback(cb, event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 471, in _invoke_callback
cb(event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py", line 212, in _subtr_changed
self.update(ev)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/base_transform.py", line 153, in update
self.changed(*args)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 455, in __call__
self._invoke_callback(cb, event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 475, in _invoke_callback
self, cb_event=(cb, event))
<< caught exception here: >>
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 471, in _invoke_callback
cb(event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py", line 281, in source_changed
new_tr = [tr[0]]
IndexError: list index out of range
ERROR: Invoking <bound method SimplifiedChainTransform.source_changed of <ChainTransform [<STTransform scale=[1. 1. 1. 1.] translate=[0. 0. 0. 0.] at 0x5071730616>,
<STTransform scale=[2.6181054e+00 2.6181054e+00 1.0000000e-06 1.0000000e+00] translate=[-558.83954 -403.84387 0. 0. ] at 0x5319047544>] at 0x12e27aba8>> for Event
```
</details>
## To Reproduce
Approximately: (not sure which bits are important, maybe only the final step matters)
- add a (3D) image
- add a second image (segmentation in this case)
- decrease opacity of second image
- add a markers layer (with the + button)
- set annotation mode
- pan and zoom while holding down space
- hit the delete button.
## Expected behavior
Console should stay nice and clean when layers are deleted. ;)
## Environment
- napari Version (e.g., 1.0): 0.0.6 (from PyPI)
- OS (e.g., Linux): macOS Mojave
- Python version: 3.6
- Any other relevant information:
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/components/layerlist.py`
Content:
```
1 from ..layers import Layer
2
3 from ..util.naming import inc_name_count
4 from ..util.list import ListModel
5
6
7 def _add(event):
8 """When a layer is added, set its name and order."""
9 layers = event.source
10 layer = event.item
11 layer.name = layers._coerce_name(layer.name, layer)
12 layer._order = -len(layers)
13 layer.events.name.connect(lambda e: layers._update_name(e))
14 layers.unselect_all(ignore=layer)
15
16
17 def _remove(event):
18 """When a layer is removed, remove its viewer."""
19 layers = event.source
20 layer = event.item
21 layer._order = 0
22 layer._node.parent = None
23
24
25 def _reorder(event):
26 """When the list is reordered, propagate those changes to draw order."""
27 layers = event.source
28 for i in range(len(layers)):
29 layers[i]._order = -i
30
31
32 class LayerList(ListModel):
33 """List-like layer collection with built-in reordering and callback hooks.
34
35 Attributes
36 ----------
37 events : vispy.util.event.EmitterGroup
38 Event hooks:
39 * added(item, index): whenever an item is added
40 * removed(item): whenever an item is removed
41 * reordered(): whenever the list is reordered
42 """
43
44 def __init__(self):
45 super().__init__(
46 basetype=Layer, lookup={str: lambda q, e: q == e.name}
47 )
48
49 self.events.added.connect(_add)
50 self.events.removed.connect(_remove)
51 self.events.reordered.connect(_reorder)
52
53 def __newlike__(self, iterable):
54 return ListModel(self._basetype, iterable, self._lookup)
55
56 def _coerce_name(self, name, layer=None):
57 """Coerce a name into a unique equivalent.
58
59 Parameters
60 ----------
61 name : str
62 Original name.
63 layer : Layer, optional
64 Layer for which name is generated.
65
66 Returns
67 -------
68 new_name : str
69 Coerced, unique name.
70 """
71 for l in self:
72 if l is layer:
73 continue
74 if l.name == name:
75 name = inc_name_count(name)
76
77 return name
78
79 def _update_name(self, event):
80 """Coerce name of the layer in `event.layer`."""
81 layer = event.source
82 layer.name = self._coerce_name(layer.name, layer)
83
84 def move_selected(self, index, insert):
85 """Reorder list by moving the item at index and inserting it
86 at the insert index. If additional items are selected these will
87 get inserted at the insert index too. This allows for rearranging
88 the list based on dragging and dropping a selection of items, where
89 index is the index of the primary item being dragged, and insert is
90 the index of the drop location, and the selection indicates if
91 multiple items are being dragged. If the moved layer is not selected
92 select it.
93
94 Parameters
95 ----------
96 index : int
97 Index of primary item to be moved
98 insert : int
99 Index that item(s) will be inserted at
100 """
101 total = len(self)
102 indices = list(range(total))
103 if not self[index].selected:
104 self.unselect_all()
105 self[index].selected = True
106 selected = [i for i in range(total) if self[i].selected]
107
108 # remove all indices to be moved
109 for i in selected:
110 indices.remove(i)
111 # adjust offset based on selected indices to move
112 offset = sum([i < insert and i != index for i in selected])
113 # insert indices to be moved at correct start
114 for insert_idx, elem_idx in enumerate(selected, start=insert - offset):
115 indices.insert(insert_idx, elem_idx)
116 # reorder list
117 self[:] = self[tuple(indices)]
118
119 def unselect_all(self, ignore=None):
120 """Unselects all layers expect any specified in ignore.
121
122 Parameters
123 ----------
124 ignore : Layer | None
125 Layer that should not be unselected if specified.
126 """
127 for layer in self:
128 if layer.selected and layer != ignore:
129 layer.selected = False
130
131 def remove_selected(self):
132 """Removes selected items from list.
133 """
134 to_delete = []
135 for i in range(len(self)):
136 if self[i].selected:
137 to_delete.append(i)
138 to_delete.reverse()
139 for i in to_delete:
140 self.pop(i)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/napari/components/layerlist.py b/napari/components/layerlist.py
--- a/napari/components/layerlist.py
+++ b/napari/components/layerlist.py
@@ -2,6 +2,7 @@
from ..util.naming import inc_name_count
from ..util.list import ListModel
+from vispy.visuals.transforms import ChainTransform
def _add(event):
@@ -19,6 +20,7 @@
layers = event.source
layer = event.item
layer._order = 0
+ layer._node.transforms = ChainTransform()
layer._node.parent = None
|
{"golden_diff": "diff --git a/napari/components/layerlist.py b/napari/components/layerlist.py\n--- a/napari/components/layerlist.py\n+++ b/napari/components/layerlist.py\n@@ -2,6 +2,7 @@\n \n from ..util.naming import inc_name_count\n from ..util.list import ListModel\n+from vispy.visuals.transforms import ChainTransform\n \n \n def _add(event):\n@@ -19,6 +20,7 @@\n layers = event.source\n layer = event.item\n layer._order = 0\n+ layer._node.transforms = ChainTransform()\n layer._node.parent = None\n", "issue": "Massive traceback when deleting a (markers) layer\n## \ud83d\udc1b Bug\r\n\r\nSorry, no time to investigate now, but here is the traceback I saw when deleting a markers layer (using the trash icon):\r\n\r\n<details>\r\n\r\n```pytb\r\nIn [15]: WARNING: Traceback (most recent call last):\r\n File \"/Users/jni/conda/envs/36/bin/ipython\", line 11, in <module>\r\n sys.exit(start_ipython())\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/__init__.py\", line 125, in start_ipython\r\n return launch_new_instance(argv=argv, **kwargs)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\r\n app.start()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/ipapp.py\", line 356, in start\r\n self.shell.mainloop()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py\", line 480, in mainloop\r\n self.interact()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py\", line 463, in interact\r\n code = self.prompt_for_code()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py\", line 376, in prompt_for_code\r\n pre_run=self.pre_prompt, reset_current_buffer=True)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/interface.py\", line 415, in run\r\n self.eventloop.run(self.input, self.create_eventloop_callbacks())\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/eventloop/posix.py\", line 102, in run\r\n self._inputhook_context.call_inputhook(ready)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/eventloop/inputhook.py\", line 74, in call_inputhook\r\n self.inputhook(self)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py\", line 495, in inputhook\r\n self._inputhook(context)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/pt_inputhooks/qt.py\", line 35, in inputhook\r\n event_loop.exec_()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/components/_layers_list/model.py\", line 182, in remove_selected\r\n self.pop(i)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/list/_model.py\", line 55, in pop\r\n self.changed.removed(item=obj)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/event.py\", line 489, in __call__\r\n self._invoke_callback(cb, event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/event.py\", line 504, in _invoke_callback\r\n cb(event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/components/_layers_list/model.py\", line 29, in _remove\r\n layer.viewer = None\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/layers/_base_layer/model.py\", line 162, in viewer\r\n self._parent = parent\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/layers/_base_layer/_visual_wrapper.py\", line 82, in _parent\r\n self._node.parent = parent\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/frozen.py\", line 17, in __setattr__\r\n object.__setattr__(self, key, value)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/scene/node.py\", line 209, in parent\r\n self._update_trsys(None)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/_vispy/scene/visuals.py\", line 82, in _update_trsys\r\n self.transforms.scene_transform = scene.node_transform(doc)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/transform_system.py\", line 276, in scene_transform\r\n self._scene_transform.transforms = tr\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py\", line 96, in transforms\r\n self.update()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/base_transform.py\", line 153, in update\r\n self.changed(*args)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 455, in __call__\r\n self._invoke_callback(cb, event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 471, in _invoke_callback\r\n cb(event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py\", line 212, in _subtr_changed\r\n self.update(ev)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/base_transform.py\", line 153, in update\r\n self.changed(*args)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 455, in __call__\r\n self._invoke_callback(cb, event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 475, in _invoke_callback\r\n self, cb_event=(cb, event))\r\n << caught exception here: >>\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 471, in _invoke_callback\r\n cb(event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py\", line 281, in source_changed\r\n new_tr = [tr[0]]\r\nIndexError: list index out of range\r\nERROR: Invoking <bound method SimplifiedChainTransform.source_changed of <ChainTransform [<STTransform scale=[1. 1. 1. 1.] translate=[0. 0. 0. 0.] at 0x5071730616>,\r\n <STTransform scale=[2.6181054e+00 2.6181054e+00 1.0000000e-06 1.0000000e+00] translate=[-558.83954 -403.84387 0. 0. ] at 0x5319047544>] at 0x12e27aba8>> for Event\r\n```\r\n\r\n</details>\r\n\r\n## To Reproduce\r\n\r\nApproximately: (not sure which bits are important, maybe only the final step matters)\r\n- add a (3D) image\r\n- add a second image (segmentation in this case)\r\n- decrease opacity of second image\r\n- add a markers layer (with the + button)\r\n- set annotation mode\r\n- pan and zoom while holding down space\r\n- hit the delete button.\r\n\r\n## Expected behavior\r\n\r\nConsole should stay nice and clean when layers are deleted. ;)\r\n\r\n## Environment\r\n\r\n - napari Version (e.g., 1.0): 0.0.6 (from PyPI)\r\n - OS (e.g., Linux): macOS Mojave\r\n - Python version: 3.6\r\n - Any other relevant information:\r\n\n", "before_files": [{"content": "from ..layers import Layer\n\nfrom ..util.naming import inc_name_count\nfrom ..util.list import ListModel\n\n\ndef _add(event):\n \"\"\"When a layer is added, set its name and order.\"\"\"\n layers = event.source\n layer = event.item\n layer.name = layers._coerce_name(layer.name, layer)\n layer._order = -len(layers)\n layer.events.name.connect(lambda e: layers._update_name(e))\n layers.unselect_all(ignore=layer)\n\n\ndef _remove(event):\n \"\"\"When a layer is removed, remove its viewer.\"\"\"\n layers = event.source\n layer = event.item\n layer._order = 0\n layer._node.parent = None\n\n\ndef _reorder(event):\n \"\"\"When the list is reordered, propagate those changes to draw order.\"\"\"\n layers = event.source\n for i in range(len(layers)):\n layers[i]._order = -i\n\n\nclass LayerList(ListModel):\n \"\"\"List-like layer collection with built-in reordering and callback hooks.\n\n Attributes\n ----------\n events : vispy.util.event.EmitterGroup\n Event hooks:\n * added(item, index): whenever an item is added\n * removed(item): whenever an item is removed\n * reordered(): whenever the list is reordered\n \"\"\"\n\n def __init__(self):\n super().__init__(\n basetype=Layer, lookup={str: lambda q, e: q == e.name}\n )\n\n self.events.added.connect(_add)\n self.events.removed.connect(_remove)\n self.events.reordered.connect(_reorder)\n\n def __newlike__(self, iterable):\n return ListModel(self._basetype, iterable, self._lookup)\n\n def _coerce_name(self, name, layer=None):\n \"\"\"Coerce a name into a unique equivalent.\n\n Parameters\n ----------\n name : str\n Original name.\n layer : Layer, optional\n Layer for which name is generated.\n\n Returns\n -------\n new_name : str\n Coerced, unique name.\n \"\"\"\n for l in self:\n if l is layer:\n continue\n if l.name == name:\n name = inc_name_count(name)\n\n return name\n\n def _update_name(self, event):\n \"\"\"Coerce name of the layer in `event.layer`.\"\"\"\n layer = event.source\n layer.name = self._coerce_name(layer.name, layer)\n\n def move_selected(self, index, insert):\n \"\"\"Reorder list by moving the item at index and inserting it\n at the insert index. If additional items are selected these will\n get inserted at the insert index too. This allows for rearranging\n the list based on dragging and dropping a selection of items, where\n index is the index of the primary item being dragged, and insert is\n the index of the drop location, and the selection indicates if\n multiple items are being dragged. If the moved layer is not selected\n select it.\n\n Parameters\n ----------\n index : int\n Index of primary item to be moved\n insert : int\n Index that item(s) will be inserted at\n \"\"\"\n total = len(self)\n indices = list(range(total))\n if not self[index].selected:\n self.unselect_all()\n self[index].selected = True\n selected = [i for i in range(total) if self[i].selected]\n\n # remove all indices to be moved\n for i in selected:\n indices.remove(i)\n # adjust offset based on selected indices to move\n offset = sum([i < insert and i != index for i in selected])\n # insert indices to be moved at correct start\n for insert_idx, elem_idx in enumerate(selected, start=insert - offset):\n indices.insert(insert_idx, elem_idx)\n # reorder list\n self[:] = self[tuple(indices)]\n\n def unselect_all(self, ignore=None):\n \"\"\"Unselects all layers expect any specified in ignore.\n\n Parameters\n ----------\n ignore : Layer | None\n Layer that should not be unselected if specified.\n \"\"\"\n for layer in self:\n if layer.selected and layer != ignore:\n layer.selected = False\n\n def remove_selected(self):\n \"\"\"Removes selected items from list.\n \"\"\"\n to_delete = []\n for i in range(len(self)):\n if self[i].selected:\n to_delete.append(i)\n to_delete.reverse()\n for i in to_delete:\n self.pop(i)\n", "path": "napari/components/layerlist.py"}], "after_files": [{"content": "from ..layers import Layer\n\nfrom ..util.naming import inc_name_count\nfrom ..util.list import ListModel\nfrom vispy.visuals.transforms import ChainTransform\n\n\ndef _add(event):\n \"\"\"When a layer is added, set its name and order.\"\"\"\n layers = event.source\n layer = event.item\n layer.name = layers._coerce_name(layer.name, layer)\n layer._order = -len(layers)\n layer.events.name.connect(lambda e: layers._update_name(e))\n layers.unselect_all(ignore=layer)\n\n\ndef _remove(event):\n \"\"\"When a layer is removed, remove its viewer.\"\"\"\n layers = event.source\n layer = event.item\n layer._order = 0\n layer._node.transforms = ChainTransform()\n layer._node.parent = None\n\n\ndef _reorder(event):\n \"\"\"When the list is reordered, propagate those changes to draw order.\"\"\"\n layers = event.source\n for i in range(len(layers)):\n layers[i]._order = -i\n\n\nclass LayerList(ListModel):\n \"\"\"List-like layer collection with built-in reordering and callback hooks.\n\n Attributes\n ----------\n events : vispy.util.event.EmitterGroup\n Event hooks:\n * added(item, index): whenever an item is added\n * removed(item): whenever an item is removed\n * reordered(): whenever the list is reordered\n \"\"\"\n\n def __init__(self):\n super().__init__(\n basetype=Layer, lookup={str: lambda q, e: q == e.name}\n )\n\n self.events.added.connect(_add)\n self.events.removed.connect(_remove)\n self.events.reordered.connect(_reorder)\n\n def __newlike__(self, iterable):\n return ListModel(self._basetype, iterable, self._lookup)\n\n def _coerce_name(self, name, layer=None):\n \"\"\"Coerce a name into a unique equivalent.\n\n Parameters\n ----------\n name : str\n Original name.\n layer : Layer, optional\n Layer for which name is generated.\n\n Returns\n -------\n new_name : str\n Coerced, unique name.\n \"\"\"\n for l in self:\n if l is layer:\n continue\n if l.name == name:\n name = inc_name_count(name)\n\n return name\n\n def _update_name(self, event):\n \"\"\"Coerce name of the layer in `event.layer`.\"\"\"\n layer = event.source\n layer.name = self._coerce_name(layer.name, layer)\n\n def move_selected(self, index, insert):\n \"\"\"Reorder list by moving the item at index and inserting it\n at the insert index. If additional items are selected these will\n get inserted at the insert index too. This allows for rearranging\n the list based on dragging and dropping a selection of items, where\n index is the index of the primary item being dragged, and insert is\n the index of the drop location, and the selection indicates if\n multiple items are being dragged. If the moved layer is not selected\n select it.\n\n Parameters\n ----------\n index : int\n Index of primary item to be moved\n insert : int\n Index that item(s) will be inserted at\n \"\"\"\n total = len(self)\n indices = list(range(total))\n if not self[index].selected:\n self.unselect_all()\n self[index].selected = True\n selected = [i for i in range(total) if self[i].selected]\n\n # remove all indices to be moved\n for i in selected:\n indices.remove(i)\n # adjust offset based on selected indices to move\n offset = sum([i < insert and i != index for i in selected])\n # insert indices to be moved at correct start\n for insert_idx, elem_idx in enumerate(selected, start=insert - offset):\n indices.insert(insert_idx, elem_idx)\n # reorder list\n self[:] = self[tuple(indices)]\n\n def unselect_all(self, ignore=None):\n \"\"\"Unselects all layers expect any specified in ignore.\n\n Parameters\n ----------\n ignore : Layer | None\n Layer that should not be unselected if specified.\n \"\"\"\n for layer in self:\n if layer.selected and layer != ignore:\n layer.selected = False\n\n def remove_selected(self):\n \"\"\"Removes selected items from list.\n \"\"\"\n to_delete = []\n for i in range(len(self)):\n if self[i].selected:\n to_delete.append(i)\n to_delete.reverse()\n for i in to_delete:\n self.pop(i)\n", "path": "napari/components/layerlist.py"}]}
| 3,508 | 140 |
gh_patches_debug_7715
|
rasdani/github-patches
|
git_diff
|
pallets__click-787
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
make_pass_decorator strips argument
The inner most wrapper of make_pass_decorator invokes a function with `*args[1:]` which strips an argument.
A smaller example could be made but the following is the [invoking other commands](http://click.pocoo.org/6/advanced/#invoking-other-commands) example from the docs modified to also take in a custom config object.
The only difference between `dist_working` and `dist_broken` is the order of the decorators and consequently the parameters they take.
In my opinion they should both be valid and work. What happens in `dist_broken` is, like the title says, the `make_pass_decorator` wrapper invokes the function with `*args[1:]` which removes the context object put there by the previous decorator.
```python
import click
class MyConfig(object):
def __init__(self, verbose):
self.verbose = verbose
pass_config = click.make_pass_decorator(MyConfig)
@click.group()
@click.option('--verbose', '-v', is_flag=True)
@click.pass_context
def cli(ctx, verbose):
ctx.obj = MyConfig(verbose=verbose)
@cli.command()
@click.option('--count', default=1)
def test(count):
click.echo('Count: %d' % count)
@cli.command('dist-working')
@click.option('--count', default=1)
@pass_config
@click.pass_context
def dist_working(ctx, cfg, count):
if cfg.verbose:
click.echo('about to forward')
ctx.forward(test)
if cfg.verbose:
click.echo('about to invoke')
ctx.invoke(test, count=42)
@cli.command('dist-broken')
@click.option('--count', default=1)
@click.pass_context
@pass_config
def dist_broken(cfg, ctx, count):
if cfg.verbose:
click.echo('about to forward')
ctx.forward(test)
if cfg.verbose:
click.echo('about to invoke')
ctx.invoke(test, count=42)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `click/decorators.py`
Content:
```
1 import sys
2 import inspect
3
4 from functools import update_wrapper
5
6 from ._compat import iteritems
7 from ._unicodefun import _check_for_unicode_literals
8 from .utils import echo
9 from .globals import get_current_context
10
11
12 def pass_context(f):
13 """Marks a callback as wanting to receive the current context
14 object as first argument.
15 """
16 def new_func(*args, **kwargs):
17 return f(get_current_context(), *args, **kwargs)
18 return update_wrapper(new_func, f)
19
20
21 def pass_obj(f):
22 """Similar to :func:`pass_context`, but only pass the object on the
23 context onwards (:attr:`Context.obj`). This is useful if that object
24 represents the state of a nested system.
25 """
26 def new_func(*args, **kwargs):
27 return f(get_current_context().obj, *args, **kwargs)
28 return update_wrapper(new_func, f)
29
30
31 def make_pass_decorator(object_type, ensure=False):
32 """Given an object type this creates a decorator that will work
33 similar to :func:`pass_obj` but instead of passing the object of the
34 current context, it will find the innermost context of type
35 :func:`object_type`.
36
37 This generates a decorator that works roughly like this::
38
39 from functools import update_wrapper
40
41 def decorator(f):
42 @pass_context
43 def new_func(ctx, *args, **kwargs):
44 obj = ctx.find_object(object_type)
45 return ctx.invoke(f, obj, *args, **kwargs)
46 return update_wrapper(new_func, f)
47 return decorator
48
49 :param object_type: the type of the object to pass.
50 :param ensure: if set to `True`, a new object will be created and
51 remembered on the context if it's not there yet.
52 """
53 def decorator(f):
54 def new_func(*args, **kwargs):
55 ctx = get_current_context()
56 if ensure:
57 obj = ctx.ensure_object(object_type)
58 else:
59 obj = ctx.find_object(object_type)
60 if obj is None:
61 raise RuntimeError('Managed to invoke callback without a '
62 'context object of type %r existing'
63 % object_type.__name__)
64 return ctx.invoke(f, obj, *args[1:], **kwargs)
65 return update_wrapper(new_func, f)
66 return decorator
67
68
69 def _make_command(f, name, attrs, cls):
70 if isinstance(f, Command):
71 raise TypeError('Attempted to convert a callback into a '
72 'command twice.')
73 try:
74 params = f.__click_params__
75 params.reverse()
76 del f.__click_params__
77 except AttributeError:
78 params = []
79 help = attrs.get('help')
80 if help is None:
81 help = inspect.getdoc(f)
82 if isinstance(help, bytes):
83 help = help.decode('utf-8')
84 else:
85 help = inspect.cleandoc(help)
86 attrs['help'] = help
87 _check_for_unicode_literals()
88 return cls(name=name or f.__name__.lower().replace('_', '-'),
89 callback=f, params=params, **attrs)
90
91
92 def command(name=None, cls=None, **attrs):
93 r"""Creates a new :class:`Command` and uses the decorated function as
94 callback. This will also automatically attach all decorated
95 :func:`option`\s and :func:`argument`\s as parameters to the command.
96
97 The name of the command defaults to the name of the function. If you
98 want to change that, you can pass the intended name as the first
99 argument.
100
101 All keyword arguments are forwarded to the underlying command class.
102
103 Once decorated the function turns into a :class:`Command` instance
104 that can be invoked as a command line utility or be attached to a
105 command :class:`Group`.
106
107 :param name: the name of the command. This defaults to the function
108 name with underscores replaced by dashes.
109 :param cls: the command class to instantiate. This defaults to
110 :class:`Command`.
111 """
112 if cls is None:
113 cls = Command
114 def decorator(f):
115 cmd = _make_command(f, name, attrs, cls)
116 cmd.__doc__ = f.__doc__
117 return cmd
118 return decorator
119
120
121 def group(name=None, **attrs):
122 """Creates a new :class:`Group` with a function as callback. This
123 works otherwise the same as :func:`command` just that the `cls`
124 parameter is set to :class:`Group`.
125 """
126 attrs.setdefault('cls', Group)
127 return command(name, **attrs)
128
129
130 def _param_memo(f, param):
131 if isinstance(f, Command):
132 f.params.append(param)
133 else:
134 if not hasattr(f, '__click_params__'):
135 f.__click_params__ = []
136 f.__click_params__.append(param)
137
138
139 def argument(*param_decls, **attrs):
140 """Attaches an argument to the command. All positional arguments are
141 passed as parameter declarations to :class:`Argument`; all keyword
142 arguments are forwarded unchanged (except ``cls``).
143 This is equivalent to creating an :class:`Argument` instance manually
144 and attaching it to the :attr:`Command.params` list.
145
146 :param cls: the argument class to instantiate. This defaults to
147 :class:`Argument`.
148 """
149 def decorator(f):
150 ArgumentClass = attrs.pop('cls', Argument)
151 _param_memo(f, ArgumentClass(param_decls, **attrs))
152 return f
153 return decorator
154
155
156 def option(*param_decls, **attrs):
157 """Attaches an option to the command. All positional arguments are
158 passed as parameter declarations to :class:`Option`; all keyword
159 arguments are forwarded unchanged (except ``cls``).
160 This is equivalent to creating an :class:`Option` instance manually
161 and attaching it to the :attr:`Command.params` list.
162
163 :param cls: the option class to instantiate. This defaults to
164 :class:`Option`.
165 """
166 def decorator(f):
167 # Issue 926, copy attrs, so pre-defined options can re-use the same cls=
168 option_attrs = attrs.copy()
169
170 if 'help' in option_attrs:
171 option_attrs['help'] = inspect.cleandoc(option_attrs['help'])
172 OptionClass = option_attrs.pop('cls', Option)
173 _param_memo(f, OptionClass(param_decls, **option_attrs))
174 return f
175 return decorator
176
177
178 def confirmation_option(*param_decls, **attrs):
179 """Shortcut for confirmation prompts that can be ignored by passing
180 ``--yes`` as parameter.
181
182 This is equivalent to decorating a function with :func:`option` with
183 the following parameters::
184
185 def callback(ctx, param, value):
186 if not value:
187 ctx.abort()
188
189 @click.command()
190 @click.option('--yes', is_flag=True, callback=callback,
191 expose_value=False, prompt='Do you want to continue?')
192 def dropdb():
193 pass
194 """
195 def decorator(f):
196 def callback(ctx, param, value):
197 if not value:
198 ctx.abort()
199 attrs.setdefault('is_flag', True)
200 attrs.setdefault('callback', callback)
201 attrs.setdefault('expose_value', False)
202 attrs.setdefault('prompt', 'Do you want to continue?')
203 attrs.setdefault('help', 'Confirm the action without prompting.')
204 return option(*(param_decls or ('--yes',)), **attrs)(f)
205 return decorator
206
207
208 def password_option(*param_decls, **attrs):
209 """Shortcut for password prompts.
210
211 This is equivalent to decorating a function with :func:`option` with
212 the following parameters::
213
214 @click.command()
215 @click.option('--password', prompt=True, confirmation_prompt=True,
216 hide_input=True)
217 def changeadmin(password):
218 pass
219 """
220 def decorator(f):
221 attrs.setdefault('prompt', True)
222 attrs.setdefault('confirmation_prompt', True)
223 attrs.setdefault('hide_input', True)
224 return option(*(param_decls or ('--password',)), **attrs)(f)
225 return decorator
226
227
228 def version_option(version=None, *param_decls, **attrs):
229 """Adds a ``--version`` option which immediately ends the program
230 printing out the version number. This is implemented as an eager
231 option that prints the version and exits the program in the callback.
232
233 :param version: the version number to show. If not provided Click
234 attempts an auto discovery via setuptools.
235 :param prog_name: the name of the program (defaults to autodetection)
236 :param message: custom message to show instead of the default
237 (``'%(prog)s, version %(version)s'``)
238 :param others: everything else is forwarded to :func:`option`.
239 """
240 if version is None:
241 if hasattr(sys, '_getframe'):
242 module = sys._getframe(1).f_globals.get('__name__')
243 else:
244 module = ''
245
246 def decorator(f):
247 prog_name = attrs.pop('prog_name', None)
248 message = attrs.pop('message', '%(prog)s, version %(version)s')
249
250 def callback(ctx, param, value):
251 if not value or ctx.resilient_parsing:
252 return
253 prog = prog_name
254 if prog is None:
255 prog = ctx.find_root().info_name
256 ver = version
257 if ver is None:
258 try:
259 import pkg_resources
260 except ImportError:
261 pass
262 else:
263 for dist in pkg_resources.working_set:
264 scripts = dist.get_entry_map().get('console_scripts') or {}
265 for script_name, entry_point in iteritems(scripts):
266 if entry_point.module_name == module:
267 ver = dist.version
268 break
269 if ver is None:
270 raise RuntimeError('Could not determine version')
271 echo(message % {
272 'prog': prog,
273 'version': ver,
274 }, color=ctx.color)
275 ctx.exit()
276
277 attrs.setdefault('is_flag', True)
278 attrs.setdefault('expose_value', False)
279 attrs.setdefault('is_eager', True)
280 attrs.setdefault('help', 'Show the version and exit.')
281 attrs['callback'] = callback
282 return option(*(param_decls or ('--version',)), **attrs)(f)
283 return decorator
284
285
286 def help_option(*param_decls, **attrs):
287 """Adds a ``--help`` option which immediately ends the program
288 printing out the help page. This is usually unnecessary to add as
289 this is added by default to all commands unless suppressed.
290
291 Like :func:`version_option`, this is implemented as eager option that
292 prints in the callback and exits.
293
294 All arguments are forwarded to :func:`option`.
295 """
296 def decorator(f):
297 def callback(ctx, param, value):
298 if value and not ctx.resilient_parsing:
299 echo(ctx.get_help(), color=ctx.color)
300 ctx.exit()
301 attrs.setdefault('is_flag', True)
302 attrs.setdefault('expose_value', False)
303 attrs.setdefault('help', 'Show this message and exit.')
304 attrs.setdefault('is_eager', True)
305 attrs['callback'] = callback
306 return option(*(param_decls or ('--help',)), **attrs)(f)
307 return decorator
308
309
310 # Circular dependencies between core and decorators
311 from .core import Command, Group, Argument, Option
312
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/click/decorators.py b/click/decorators.py
--- a/click/decorators.py
+++ b/click/decorators.py
@@ -61,7 +61,7 @@
raise RuntimeError('Managed to invoke callback without a '
'context object of type %r existing'
% object_type.__name__)
- return ctx.invoke(f, obj, *args[1:], **kwargs)
+ return ctx.invoke(f, obj, *args, **kwargs)
return update_wrapper(new_func, f)
return decorator
|
{"golden_diff": "diff --git a/click/decorators.py b/click/decorators.py\n--- a/click/decorators.py\n+++ b/click/decorators.py\n@@ -61,7 +61,7 @@\n raise RuntimeError('Managed to invoke callback without a '\n 'context object of type %r existing'\n % object_type.__name__)\n- return ctx.invoke(f, obj, *args[1:], **kwargs)\n+ return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n", "issue": "make_pass_decorator strips argument\nThe inner most wrapper of make_pass_decorator invokes a function with `*args[1:]` which strips an argument.\r\n\r\nA smaller example could be made but the following is the [invoking other commands](http://click.pocoo.org/6/advanced/#invoking-other-commands) example from the docs modified to also take in a custom config object.\r\n\r\nThe only difference between `dist_working` and `dist_broken` is the order of the decorators and consequently the parameters they take.\r\n\r\nIn my opinion they should both be valid and work. What happens in `dist_broken` is, like the title says, the `make_pass_decorator` wrapper invokes the function with `*args[1:]` which removes the context object put there by the previous decorator.\r\n\r\n```python\r\nimport click\r\n\r\n\r\nclass MyConfig(object):\r\n def __init__(self, verbose):\r\n self.verbose = verbose\r\n\r\n\r\npass_config = click.make_pass_decorator(MyConfig)\r\n\r\n\r\n@click.group()\r\n@click.option('--verbose', '-v', is_flag=True)\r\n@click.pass_context\r\ndef cli(ctx, verbose):\r\n ctx.obj = MyConfig(verbose=verbose)\r\n\r\n\r\n@cli.command()\r\n@click.option('--count', default=1)\r\ndef test(count):\r\n click.echo('Count: %d' % count)\r\n\r\n\r\n@cli.command('dist-working')\r\n@click.option('--count', default=1)\r\n@pass_config\r\n@click.pass_context\r\ndef dist_working(ctx, cfg, count):\r\n if cfg.verbose:\r\n click.echo('about to forward')\r\n ctx.forward(test)\r\n if cfg.verbose:\r\n click.echo('about to invoke')\r\n ctx.invoke(test, count=42)\r\n\r\n\r\n@cli.command('dist-broken')\r\n@click.option('--count', default=1)\r\n@click.pass_context\r\n@pass_config\r\ndef dist_broken(cfg, ctx, count):\r\n if cfg.verbose:\r\n click.echo('about to forward')\r\n ctx.forward(test)\r\n if cfg.verbose:\r\n click.echo('about to invoke')\r\n ctx.invoke(test, count=42)\r\n```\r\n\n", "before_files": [{"content": "import sys\nimport inspect\n\nfrom functools import update_wrapper\n\nfrom ._compat import iteritems\nfrom ._unicodefun import _check_for_unicode_literals\nfrom .utils import echo\nfrom .globals import get_current_context\n\n\ndef pass_context(f):\n \"\"\"Marks a callback as wanting to receive the current context\n object as first argument.\n \"\"\"\n def new_func(*args, **kwargs):\n return f(get_current_context(), *args, **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef pass_obj(f):\n \"\"\"Similar to :func:`pass_context`, but only pass the object on the\n context onwards (:attr:`Context.obj`). This is useful if that object\n represents the state of a nested system.\n \"\"\"\n def new_func(*args, **kwargs):\n return f(get_current_context().obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef make_pass_decorator(object_type, ensure=False):\n \"\"\"Given an object type this creates a decorator that will work\n similar to :func:`pass_obj` but instead of passing the object of the\n current context, it will find the innermost context of type\n :func:`object_type`.\n\n This generates a decorator that works roughly like this::\n\n from functools import update_wrapper\n\n def decorator(f):\n @pass_context\n def new_func(ctx, *args, **kwargs):\n obj = ctx.find_object(object_type)\n return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n :param object_type: the type of the object to pass.\n :param ensure: if set to `True`, a new object will be created and\n remembered on the context if it's not there yet.\n \"\"\"\n def decorator(f):\n def new_func(*args, **kwargs):\n ctx = get_current_context()\n if ensure:\n obj = ctx.ensure_object(object_type)\n else:\n obj = ctx.find_object(object_type)\n if obj is None:\n raise RuntimeError('Managed to invoke callback without a '\n 'context object of type %r existing'\n % object_type.__name__)\n return ctx.invoke(f, obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n\ndef _make_command(f, name, attrs, cls):\n if isinstance(f, Command):\n raise TypeError('Attempted to convert a callback into a '\n 'command twice.')\n try:\n params = f.__click_params__\n params.reverse()\n del f.__click_params__\n except AttributeError:\n params = []\n help = attrs.get('help')\n if help is None:\n help = inspect.getdoc(f)\n if isinstance(help, bytes):\n help = help.decode('utf-8')\n else:\n help = inspect.cleandoc(help)\n attrs['help'] = help\n _check_for_unicode_literals()\n return cls(name=name or f.__name__.lower().replace('_', '-'),\n callback=f, params=params, **attrs)\n\n\ndef command(name=None, cls=None, **attrs):\n r\"\"\"Creates a new :class:`Command` and uses the decorated function as\n callback. This will also automatically attach all decorated\n :func:`option`\\s and :func:`argument`\\s as parameters to the command.\n\n The name of the command defaults to the name of the function. If you\n want to change that, you can pass the intended name as the first\n argument.\n\n All keyword arguments are forwarded to the underlying command class.\n\n Once decorated the function turns into a :class:`Command` instance\n that can be invoked as a command line utility or be attached to a\n command :class:`Group`.\n\n :param name: the name of the command. This defaults to the function\n name with underscores replaced by dashes.\n :param cls: the command class to instantiate. This defaults to\n :class:`Command`.\n \"\"\"\n if cls is None:\n cls = Command\n def decorator(f):\n cmd = _make_command(f, name, attrs, cls)\n cmd.__doc__ = f.__doc__\n return cmd\n return decorator\n\n\ndef group(name=None, **attrs):\n \"\"\"Creates a new :class:`Group` with a function as callback. This\n works otherwise the same as :func:`command` just that the `cls`\n parameter is set to :class:`Group`.\n \"\"\"\n attrs.setdefault('cls', Group)\n return command(name, **attrs)\n\n\ndef _param_memo(f, param):\n if isinstance(f, Command):\n f.params.append(param)\n else:\n if not hasattr(f, '__click_params__'):\n f.__click_params__ = []\n f.__click_params__.append(param)\n\n\ndef argument(*param_decls, **attrs):\n \"\"\"Attaches an argument to the command. All positional arguments are\n passed as parameter declarations to :class:`Argument`; all keyword\n arguments are forwarded unchanged (except ``cls``).\n This is equivalent to creating an :class:`Argument` instance manually\n and attaching it to the :attr:`Command.params` list.\n\n :param cls: the argument class to instantiate. This defaults to\n :class:`Argument`.\n \"\"\"\n def decorator(f):\n ArgumentClass = attrs.pop('cls', Argument)\n _param_memo(f, ArgumentClass(param_decls, **attrs))\n return f\n return decorator\n\n\ndef option(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Option`; all keyword\n arguments are forwarded unchanged (except ``cls``).\n This is equivalent to creating an :class:`Option` instance manually\n and attaching it to the :attr:`Command.params` list.\n\n :param cls: the option class to instantiate. This defaults to\n :class:`Option`.\n \"\"\"\n def decorator(f):\n # Issue 926, copy attrs, so pre-defined options can re-use the same cls=\n option_attrs = attrs.copy()\n\n if 'help' in option_attrs:\n option_attrs['help'] = inspect.cleandoc(option_attrs['help'])\n OptionClass = option_attrs.pop('cls', Option)\n _param_memo(f, OptionClass(param_decls, **option_attrs))\n return f\n return decorator\n\n\ndef confirmation_option(*param_decls, **attrs):\n \"\"\"Shortcut for confirmation prompts that can be ignored by passing\n ``--yes`` as parameter.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n\n @click.command()\n @click.option('--yes', is_flag=True, callback=callback,\n expose_value=False, prompt='Do you want to continue?')\n def dropdb():\n pass\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('callback', callback)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('prompt', 'Do you want to continue?')\n attrs.setdefault('help', 'Confirm the action without prompting.')\n return option(*(param_decls or ('--yes',)), **attrs)(f)\n return decorator\n\n\ndef password_option(*param_decls, **attrs):\n \"\"\"Shortcut for password prompts.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n @click.command()\n @click.option('--password', prompt=True, confirmation_prompt=True,\n hide_input=True)\n def changeadmin(password):\n pass\n \"\"\"\n def decorator(f):\n attrs.setdefault('prompt', True)\n attrs.setdefault('confirmation_prompt', True)\n attrs.setdefault('hide_input', True)\n return option(*(param_decls or ('--password',)), **attrs)(f)\n return decorator\n\n\ndef version_option(version=None, *param_decls, **attrs):\n \"\"\"Adds a ``--version`` option which immediately ends the program\n printing out the version number. This is implemented as an eager\n option that prints the version and exits the program in the callback.\n\n :param version: the version number to show. If not provided Click\n attempts an auto discovery via setuptools.\n :param prog_name: the name of the program (defaults to autodetection)\n :param message: custom message to show instead of the default\n (``'%(prog)s, version %(version)s'``)\n :param others: everything else is forwarded to :func:`option`.\n \"\"\"\n if version is None:\n if hasattr(sys, '_getframe'):\n module = sys._getframe(1).f_globals.get('__name__')\n else:\n module = ''\n\n def decorator(f):\n prog_name = attrs.pop('prog_name', None)\n message = attrs.pop('message', '%(prog)s, version %(version)s')\n\n def callback(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n prog = prog_name\n if prog is None:\n prog = ctx.find_root().info_name\n ver = version\n if ver is None:\n try:\n import pkg_resources\n except ImportError:\n pass\n else:\n for dist in pkg_resources.working_set:\n scripts = dist.get_entry_map().get('console_scripts') or {}\n for script_name, entry_point in iteritems(scripts):\n if entry_point.module_name == module:\n ver = dist.version\n break\n if ver is None:\n raise RuntimeError('Could not determine version')\n echo(message % {\n 'prog': prog,\n 'version': ver,\n }, color=ctx.color)\n ctx.exit()\n\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('is_eager', True)\n attrs.setdefault('help', 'Show the version and exit.')\n attrs['callback'] = callback\n return option(*(param_decls or ('--version',)), **attrs)(f)\n return decorator\n\n\ndef help_option(*param_decls, **attrs):\n \"\"\"Adds a ``--help`` option which immediately ends the program\n printing out the help page. This is usually unnecessary to add as\n this is added by default to all commands unless suppressed.\n\n Like :func:`version_option`, this is implemented as eager option that\n prints in the callback and exits.\n\n All arguments are forwarded to :func:`option`.\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if value and not ctx.resilient_parsing:\n echo(ctx.get_help(), color=ctx.color)\n ctx.exit()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('help', 'Show this message and exit.')\n attrs.setdefault('is_eager', True)\n attrs['callback'] = callback\n return option(*(param_decls or ('--help',)), **attrs)(f)\n return decorator\n\n\n# Circular dependencies between core and decorators\nfrom .core import Command, Group, Argument, Option\n", "path": "click/decorators.py"}], "after_files": [{"content": "import sys\nimport inspect\n\nfrom functools import update_wrapper\n\nfrom ._compat import iteritems\nfrom ._unicodefun import _check_for_unicode_literals\nfrom .utils import echo\nfrom .globals import get_current_context\n\n\ndef pass_context(f):\n \"\"\"Marks a callback as wanting to receive the current context\n object as first argument.\n \"\"\"\n def new_func(*args, **kwargs):\n return f(get_current_context(), *args, **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef pass_obj(f):\n \"\"\"Similar to :func:`pass_context`, but only pass the object on the\n context onwards (:attr:`Context.obj`). This is useful if that object\n represents the state of a nested system.\n \"\"\"\n def new_func(*args, **kwargs):\n return f(get_current_context().obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef make_pass_decorator(object_type, ensure=False):\n \"\"\"Given an object type this creates a decorator that will work\n similar to :func:`pass_obj` but instead of passing the object of the\n current context, it will find the innermost context of type\n :func:`object_type`.\n\n This generates a decorator that works roughly like this::\n\n from functools import update_wrapper\n\n def decorator(f):\n @pass_context\n def new_func(ctx, *args, **kwargs):\n obj = ctx.find_object(object_type)\n return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n :param object_type: the type of the object to pass.\n :param ensure: if set to `True`, a new object will be created and\n remembered on the context if it's not there yet.\n \"\"\"\n def decorator(f):\n def new_func(*args, **kwargs):\n ctx = get_current_context()\n if ensure:\n obj = ctx.ensure_object(object_type)\n else:\n obj = ctx.find_object(object_type)\n if obj is None:\n raise RuntimeError('Managed to invoke callback without a '\n 'context object of type %r existing'\n % object_type.__name__)\n return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n\ndef _make_command(f, name, attrs, cls):\n if isinstance(f, Command):\n raise TypeError('Attempted to convert a callback into a '\n 'command twice.')\n try:\n params = f.__click_params__\n params.reverse()\n del f.__click_params__\n except AttributeError:\n params = []\n help = attrs.get('help')\n if help is None:\n help = inspect.getdoc(f)\n if isinstance(help, bytes):\n help = help.decode('utf-8')\n else:\n help = inspect.cleandoc(help)\n attrs['help'] = help\n _check_for_unicode_literals()\n return cls(name=name or f.__name__.lower().replace('_', '-'),\n callback=f, params=params, **attrs)\n\n\ndef command(name=None, cls=None, **attrs):\n r\"\"\"Creates a new :class:`Command` and uses the decorated function as\n callback. This will also automatically attach all decorated\n :func:`option`\\s and :func:`argument`\\s as parameters to the command.\n\n The name of the command defaults to the name of the function. If you\n want to change that, you can pass the intended name as the first\n argument.\n\n All keyword arguments are forwarded to the underlying command class.\n\n Once decorated the function turns into a :class:`Command` instance\n that can be invoked as a command line utility or be attached to a\n command :class:`Group`.\n\n :param name: the name of the command. This defaults to the function\n name with underscores replaced by dashes.\n :param cls: the command class to instantiate. This defaults to\n :class:`Command`.\n \"\"\"\n if cls is None:\n cls = Command\n def decorator(f):\n cmd = _make_command(f, name, attrs, cls)\n cmd.__doc__ = f.__doc__\n return cmd\n return decorator\n\n\ndef group(name=None, **attrs):\n \"\"\"Creates a new :class:`Group` with a function as callback. This\n works otherwise the same as :func:`command` just that the `cls`\n parameter is set to :class:`Group`.\n \"\"\"\n attrs.setdefault('cls', Group)\n return command(name, **attrs)\n\n\ndef _param_memo(f, param):\n if isinstance(f, Command):\n f.params.append(param)\n else:\n if not hasattr(f, '__click_params__'):\n f.__click_params__ = []\n f.__click_params__.append(param)\n\n\ndef argument(*param_decls, **attrs):\n \"\"\"Attaches an argument to the command. All positional arguments are\n passed as parameter declarations to :class:`Argument`; all keyword\n arguments are forwarded unchanged (except ``cls``).\n This is equivalent to creating an :class:`Argument` instance manually\n and attaching it to the :attr:`Command.params` list.\n\n :param cls: the argument class to instantiate. This defaults to\n :class:`Argument`.\n \"\"\"\n def decorator(f):\n ArgumentClass = attrs.pop('cls', Argument)\n _param_memo(f, ArgumentClass(param_decls, **attrs))\n return f\n return decorator\n\n\ndef option(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Option`; all keyword\n arguments are forwarded unchanged (except ``cls``).\n This is equivalent to creating an :class:`Option` instance manually\n and attaching it to the :attr:`Command.params` list.\n\n :param cls: the option class to instantiate. This defaults to\n :class:`Option`.\n \"\"\"\n def decorator(f):\n # Issue 926, copy attrs, so pre-defined options can re-use the same cls=\n option_attrs = attrs.copy()\n\n if 'help' in option_attrs:\n option_attrs['help'] = inspect.cleandoc(option_attrs['help'])\n OptionClass = option_attrs.pop('cls', Option)\n _param_memo(f, OptionClass(param_decls, **option_attrs))\n return f\n return decorator\n\n\ndef confirmation_option(*param_decls, **attrs):\n \"\"\"Shortcut for confirmation prompts that can be ignored by passing\n ``--yes`` as parameter.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n\n @click.command()\n @click.option('--yes', is_flag=True, callback=callback,\n expose_value=False, prompt='Do you want to continue?')\n def dropdb():\n pass\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('callback', callback)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('prompt', 'Do you want to continue?')\n attrs.setdefault('help', 'Confirm the action without prompting.')\n return option(*(param_decls or ('--yes',)), **attrs)(f)\n return decorator\n\n\ndef password_option(*param_decls, **attrs):\n \"\"\"Shortcut for password prompts.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n @click.command()\n @click.option('--password', prompt=True, confirmation_prompt=True,\n hide_input=True)\n def changeadmin(password):\n pass\n \"\"\"\n def decorator(f):\n attrs.setdefault('prompt', True)\n attrs.setdefault('confirmation_prompt', True)\n attrs.setdefault('hide_input', True)\n return option(*(param_decls or ('--password',)), **attrs)(f)\n return decorator\n\n\ndef version_option(version=None, *param_decls, **attrs):\n \"\"\"Adds a ``--version`` option which immediately ends the program\n printing out the version number. This is implemented as an eager\n option that prints the version and exits the program in the callback.\n\n :param version: the version number to show. If not provided Click\n attempts an auto discovery via setuptools.\n :param prog_name: the name of the program (defaults to autodetection)\n :param message: custom message to show instead of the default\n (``'%(prog)s, version %(version)s'``)\n :param others: everything else is forwarded to :func:`option`.\n \"\"\"\n if version is None:\n if hasattr(sys, '_getframe'):\n module = sys._getframe(1).f_globals.get('__name__')\n else:\n module = ''\n\n def decorator(f):\n prog_name = attrs.pop('prog_name', None)\n message = attrs.pop('message', '%(prog)s, version %(version)s')\n\n def callback(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n prog = prog_name\n if prog is None:\n prog = ctx.find_root().info_name\n ver = version\n if ver is None:\n try:\n import pkg_resources\n except ImportError:\n pass\n else:\n for dist in pkg_resources.working_set:\n scripts = dist.get_entry_map().get('console_scripts') or {}\n for script_name, entry_point in iteritems(scripts):\n if entry_point.module_name == module:\n ver = dist.version\n break\n if ver is None:\n raise RuntimeError('Could not determine version')\n echo(message % {\n 'prog': prog,\n 'version': ver,\n }, color=ctx.color)\n ctx.exit()\n\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('is_eager', True)\n attrs.setdefault('help', 'Show the version and exit.')\n attrs['callback'] = callback\n return option(*(param_decls or ('--version',)), **attrs)(f)\n return decorator\n\n\ndef help_option(*param_decls, **attrs):\n \"\"\"Adds a ``--help`` option which immediately ends the program\n printing out the help page. This is usually unnecessary to add as\n this is added by default to all commands unless suppressed.\n\n Like :func:`version_option`, this is implemented as eager option that\n prints in the callback and exits.\n\n All arguments are forwarded to :func:`option`.\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if value and not ctx.resilient_parsing:\n echo(ctx.get_help(), color=ctx.color)\n ctx.exit()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('help', 'Show this message and exit.')\n attrs.setdefault('is_eager', True)\n attrs['callback'] = callback\n return option(*(param_decls or ('--help',)), **attrs)(f)\n return decorator\n\n\n# Circular dependencies between core and decorators\nfrom .core import Command, Group, Argument, Option\n", "path": "click/decorators.py"}]}
| 3,945 | 121 |
gh_patches_debug_7470
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-661
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Engineering: Back-end - Combine logic for Agency Index and Getting Started page
_As a MST Senior Rider, when I go to benefits.calitp.org, I should see the new Introduction page design and content._
- Change the app logic to the following -
1. When there is only 1 transit agency, go directly to the agency index page (/mst)
2. From the agency index page, go directly to the verifier selection page (/eligibility/)
3. When there is only 1 eligibility verifier, go directly to the only start page (Login.gov verifier)
- Write tests for this new logic
- Update sample data for this new logic (??) so we can adequately test this in Dev, Staging
## How it is now
```mermaid
graph LR
A[core:index] -->|redirect| B(core:agency_index)
B -->|user clicks button| C(eligiblity:index)
C -->|redirect| D(eligibility:start)
```
## After this is implemented
```mermaid
graph LR
A[core:index] -->|redirect| B(core:agency_index)
B -->|*redirect*| C(eligiblity:index)
C -->|redirect| D(eligibility:start)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/views.py`
Content:
```
1 """
2 The core application: view definition for the root of the webapp.
3 """
4 from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
5 from django.shortcuts import redirect
6 from django.template import loader
7 from django.template.response import TemplateResponse
8 from django.urls import reverse
9 from django.utils.translation import gettext as _
10
11 from . import models, session, viewmodels
12 from .middleware import pageview_decorator
13
14
15 def PageTemplateResponse(request, page_vm):
16 """Helper returns a TemplateResponse using the common page template."""
17 return TemplateResponse(request, "core/page.html", page_vm.context_dict())
18
19
20 def _index_content_title():
21 """Helper returns the content title for the common index page."""
22 return _("core.pages.index.content_title")
23
24
25 def _index_url():
26 """Helper computes the index url path."""
27 return reverse("core:index")
28
29
30 @pageview_decorator
31 def index(request):
32 """View handler for the main entry page."""
33 session.reset(request)
34
35 agencies = models.TransitAgency.all_active()
36
37 if len(agencies) == 1:
38 agency = agencies[0]
39 return redirect(agency.index_url)
40
41 # generate a button to the landing page for each active agency
42 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]
43 buttons[0].classes.append("mt-3")
44 buttons[0].label = _("core.pages.index.chooseprovider")
45
46 page = viewmodels.Page(
47 title=_("core.pages.index.title"),
48 content_title=_index_content_title(),
49 buttons=buttons,
50 classes="home",
51 )
52
53 return PageTemplateResponse(request, page)
54
55
56 @pageview_decorator
57 def agency_index(request, agency):
58 """View handler for an agency entry page."""
59 session.reset(request)
60 session.update(request, agency=agency, origin=agency.index_url)
61
62 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index"))
63 button.label = _("core.pages.agency_index.button.label")
64
65 page = viewmodels.Page(
66 title=_("core.pages.agency_index.title"),
67 content_title=_("core.pages.agency_index.content_title"),
68 button=button,
69 classes="home",
70 )
71
72 help_page = reverse("core:help")
73 context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}}
74
75 return TemplateResponse(request, "core/agency_index.html", context_dict)
76
77
78 @pageview_decorator
79 def help(request):
80 """View handler for the help page."""
81 if session.active_agency(request):
82 agency = session.agency(request)
83 buttons = viewmodels.Button.agency_contact_links(agency)
84 else:
85 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]
86
87 buttons.append(viewmodels.Button.home(request, _("core.buttons.back")))
88
89 page = viewmodels.Page(
90 title=_("core.buttons.help"),
91 content_title=_("core.buttons.help"),
92 buttons=buttons,
93 noimage=True,
94 )
95
96 return TemplateResponse(request, "core/help.html", page.context_dict())
97
98
99 @pageview_decorator
100 def bad_request(request, exception, template_name="400.html"):
101 """View handler for HTTP 400 Bad Request responses."""
102 if session.active_agency(request):
103 session.update(request, origin=session.agency(request).index_url)
104 else:
105 session.update(request, origin=_index_url())
106
107 home = viewmodels.Button.home(request)
108 page = viewmodels.ErrorPage.error(button=home)
109 t = loader.get_template(template_name)
110
111 return HttpResponseBadRequest(t.render(page.context_dict()))
112
113
114 @pageview_decorator
115 def csrf_failure(request, reason):
116 """
117 View handler for CSRF_FAILURE_VIEW with custom data.
118 """
119 if session.active_agency(request):
120 session.update(request, origin=session.agency(request).index_url)
121 else:
122 session.update(request, origin=_index_url())
123
124 home = viewmodels.Button.home(request)
125 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
126 t = loader.get_template("400.html")
127
128 return HttpResponseNotFound(t.render(page.context_dict()))
129
130
131 @pageview_decorator
132 def page_not_found(request, exception, template_name="404.html"):
133 """View handler for HTTP 404 Not Found responses."""
134 if session.active_agency(request):
135 session.update(request, origin=session.agency(request).index_url)
136 else:
137 session.update(request, origin=_index_url())
138
139 home = viewmodels.Button.home(request)
140 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
141 t = loader.get_template(template_name)
142
143 return HttpResponseNotFound(t.render(page.context_dict()))
144
145
146 @pageview_decorator
147 def server_error(request, template_name="500.html"):
148 """View handler for HTTP 500 Server Error responses."""
149 if session.active_agency(request):
150 session.update(request, origin=session.agency(request).index_url)
151 else:
152 session.update(request, origin=_index_url())
153
154 home = viewmodels.Button.home(request)
155 page = viewmodels.ErrorPage.error(button=home)
156 t = loader.get_template(template_name)
157
158 return HttpResponseServerError(t.render(page.context_dict()))
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -59,6 +59,9 @@
session.reset(request)
session.update(request, agency=agency, origin=agency.index_url)
+ if len(agency.eligibility_verifiers.all()) == 1:
+ return redirect(reverse("eligibility:index"))
+
button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index"))
button.label = _("core.pages.agency_index.button.label")
|
{"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -59,6 +59,9 @@\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n \n+ if len(agency.eligibility_verifiers.all()) == 1:\n+ return redirect(reverse(\"eligibility:index\"))\n+\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n button.label = _(\"core.pages.agency_index.button.label\")\n", "issue": "Engineering: Back-end - Combine logic for Agency Index and Getting Started page\n_As a MST Senior Rider, when I go to benefits.calitp.org, I should see the new Introduction page design and content._\r\n\r\n- Change the app logic to the following -\r\n1. When there is only 1 transit agency, go directly to the agency index page (/mst)\r\n2. From the agency index page, go directly to the verifier selection page (/eligibility/)\r\n3. When there is only 1 eligibility verifier, go directly to the only start page (Login.gov verifier)\r\n\r\n- Write tests for this new logic\r\n- Update sample data for this new logic (??) so we can adequately test this in Dev, Staging\r\n\r\n## How it is now\r\n\r\n```mermaid\r\ngraph LR\r\n A[core:index] -->|redirect| B(core:agency_index)\r\n B -->|user clicks button| C(eligiblity:index)\r\n C -->|redirect| D(eligibility:start)\r\n```\r\n\r\n## After this is implemented\r\n\r\n```mermaid\r\ngraph LR\r\n A[core:index] -->|redirect| B(core:agency_index)\r\n B -->|*redirect*| C(eligiblity:index)\r\n C -->|redirect| D(eligibility:start)\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\n\ndef PageTemplateResponse(request, page_vm):\n \"\"\"Helper returns a TemplateResponse using the common page template.\"\"\"\n return TemplateResponse(request, \"core/page.html\", page_vm.context_dict())\n\n\ndef _index_content_title():\n \"\"\"Helper returns the content title for the common index page.\"\"\"\n return _(\"core.pages.index.content_title\")\n\n\ndef _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_index_content_title(),\n buttons=buttons,\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n button.label = _(\"core.pages.agency_index.button.label\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n help_page = reverse(\"core:help\")\n context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n\n return TemplateResponse(request, \"core/agency_index.html\", context_dict)\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n noimage=True,\n )\n\n return TemplateResponse(request, \"core/help.html\", page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\n\ndef PageTemplateResponse(request, page_vm):\n \"\"\"Helper returns a TemplateResponse using the common page template.\"\"\"\n return TemplateResponse(request, \"core/page.html\", page_vm.context_dict())\n\n\ndef _index_content_title():\n \"\"\"Helper returns the content title for the common index page.\"\"\"\n return _(\"core.pages.index.content_title\")\n\n\ndef _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_index_content_title(),\n buttons=buttons,\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n if len(agency.eligibility_verifiers.all()) == 1:\n return redirect(reverse(\"eligibility:index\"))\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n button.label = _(\"core.pages.agency_index.button.label\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n help_page = reverse(\"core:help\")\n context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n\n return TemplateResponse(request, \"core/agency_index.html\", context_dict)\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n noimage=True,\n )\n\n return TemplateResponse(request, \"core/help.html\", page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}]}
| 2,022 | 133 |
gh_patches_debug_24104
|
rasdani/github-patches
|
git_diff
|
piskvorky__gensim-1845
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
D2VTransformer.fit_transform doesn't work
The **X** parameter of the **fit_transform** method of **D2VTransformer** doesn't accept variables of any type, nor list of token lists (raises _AttributeError: 'list' object has no attribute 'words'_), nor list of TaggedDocument (raises _TypeError: sequence item 0: expected str instance, list found_).
Example:
```python
from gensim.sklearn_api import D2VTransformer
from gensim.models import doc2vec
class_dict = {'mathematics': 1, 'physics': 0}
train_data = [
(['calculus', 'mathematical'], 'mathematics'), (['geometry', 'operations', 'curves'], 'mathematics'),
(['natural', 'nuclear'], 'physics'), (['science', 'electromagnetism', 'natural'], 'physics')
]
d2v_sentences = [doc2vec.TaggedDocument(words[0], [i]) for i, words in enumerate(train_data)]
train_input = list(map(lambda x: x[0], train_data))
train_target = list(map(lambda x: class_dict[x[1]], train_data))
model = D2VTransformer(min_count=1)
model.fit_transform(train_input, train_target)
#model.fit_transform(d2v_sentences, train_target)
```
Versions:
Windows-10-10.0.16299-SP0
Python 3.6.4 | packaged by conda-forge | (default, Dec 24 2017, 10:11:43) [MSC v.1900 64 bit (AMD64)]
NumPy 1.13.3
SciPy 0.19.1
gensim 3.2.0
FAST_VERSION 1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/sklearn_api/d2vmodel.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """
8 Scikit learn interface for gensim for easy use of gensim with scikit-learn
9 Follows scikit-learn API conventions
10 """
11
12 import numpy as np
13 from six import string_types
14 from sklearn.base import TransformerMixin, BaseEstimator
15 from sklearn.exceptions import NotFittedError
16
17 from gensim import models
18
19
20 class D2VTransformer(TransformerMixin, BaseEstimator):
21 """
22 Base Doc2Vec module
23 """
24
25 def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None,
26 docvecs_mapfile=None, comment=None, trim_rule=None, size=100, alpha=0.025, window=5, min_count=5,
27 max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, hs=0, negative=5, cbow_mean=1,
28 hashfxn=hash, iter=5, sorted_vocab=1, batch_words=10000):
29 """
30 Sklearn api for Doc2Vec model. See gensim.models.Doc2Vec and gensim.models.Word2Vec for parameter details.
31 """
32 self.gensim_model = None
33 self.dm_mean = dm_mean
34 self.dm = dm
35 self.dbow_words = dbow_words
36 self.dm_concat = dm_concat
37 self.dm_tag_count = dm_tag_count
38 self.docvecs = docvecs
39 self.docvecs_mapfile = docvecs_mapfile
40 self.comment = comment
41 self.trim_rule = trim_rule
42
43 # attributes associated with gensim.models.Word2Vec
44 self.size = size
45 self.alpha = alpha
46 self.window = window
47 self.min_count = min_count
48 self.max_vocab_size = max_vocab_size
49 self.sample = sample
50 self.seed = seed
51 self.workers = workers
52 self.min_alpha = min_alpha
53 self.hs = hs
54 self.negative = negative
55 self.cbow_mean = int(cbow_mean)
56 self.hashfxn = hashfxn
57 self.iter = iter
58 self.sorted_vocab = sorted_vocab
59 self.batch_words = batch_words
60
61 def fit(self, X, y=None):
62 """
63 Fit the model according to the given training data.
64 Calls gensim.models.Doc2Vec
65 """
66 self.gensim_model = models.Doc2Vec(
67 documents=X, dm_mean=self.dm_mean, dm=self.dm,
68 dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,
69 docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,
70 trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,
71 min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,
72 seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,
73 negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,
74 iter=self.iter, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
75 )
76 return self
77
78 def transform(self, docs):
79 """
80 Return the vector representations for the input documents.
81 The input `docs` should be a list of lists like
82 [['calculus', 'mathematical'],
83 ['geometry', 'operations', 'curves']]
84 or a single document like : ['calculus', 'mathematical']
85 """
86 if self.gensim_model is None:
87 raise NotFittedError(
88 "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
89 )
90
91 # The input as array of array
92 if isinstance(docs[0], string_types):
93 docs = [docs]
94 vectors = [self.gensim_model.infer_vector(doc) for doc in docs]
95 return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gensim/sklearn_api/d2vmodel.py b/gensim/sklearn_api/d2vmodel.py
--- a/gensim/sklearn_api/d2vmodel.py
+++ b/gensim/sklearn_api/d2vmodel.py
@@ -15,6 +15,7 @@
from sklearn.exceptions import NotFittedError
from gensim import models
+from gensim.models import doc2vec
class D2VTransformer(TransformerMixin, BaseEstimator):
@@ -63,8 +64,12 @@
Fit the model according to the given training data.
Calls gensim.models.Doc2Vec
"""
+ if isinstance(X[0], doc2vec.TaggedDocument):
+ d2v_sentences = X
+ else:
+ d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]
self.gensim_model = models.Doc2Vec(
- documents=X, dm_mean=self.dm_mean, dm=self.dm,
+ documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,
dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,
docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,
trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,
|
{"golden_diff": "diff --git a/gensim/sklearn_api/d2vmodel.py b/gensim/sklearn_api/d2vmodel.py\n--- a/gensim/sklearn_api/d2vmodel.py\n+++ b/gensim/sklearn_api/d2vmodel.py\n@@ -15,6 +15,7 @@\n from sklearn.exceptions import NotFittedError\n \n from gensim import models\n+from gensim.models import doc2vec\n \n \n class D2VTransformer(TransformerMixin, BaseEstimator):\n@@ -63,8 +64,12 @@\n Fit the model according to the given training data.\n Calls gensim.models.Doc2Vec\n \"\"\"\n+ if isinstance(X[0], doc2vec.TaggedDocument):\n+ d2v_sentences = X\n+ else:\n+ d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]\n self.gensim_model = models.Doc2Vec(\n- documents=X, dm_mean=self.dm_mean, dm=self.dm,\n+ documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,\n dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,\n docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,\n trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,\n", "issue": "D2VTransformer.fit_transform doesn't work\nThe **X** parameter of the **fit_transform** method of **D2VTransformer** doesn't accept variables of any type, nor list of token lists (raises _AttributeError: 'list' object has no attribute 'words'_), nor list of TaggedDocument (raises _TypeError: sequence item 0: expected str instance, list found_).\r\n\r\nExample:\r\n```python\r\nfrom gensim.sklearn_api import D2VTransformer\r\nfrom gensim.models import doc2vec\r\n\r\nclass_dict = {'mathematics': 1, 'physics': 0}\r\ntrain_data = [\r\n (['calculus', 'mathematical'], 'mathematics'), (['geometry', 'operations', 'curves'], 'mathematics'),\r\n (['natural', 'nuclear'], 'physics'), (['science', 'electromagnetism', 'natural'], 'physics')\r\n]\r\nd2v_sentences = [doc2vec.TaggedDocument(words[0], [i]) for i, words in enumerate(train_data)]\r\ntrain_input = list(map(lambda x: x[0], train_data))\r\ntrain_target = list(map(lambda x: class_dict[x[1]], train_data))\r\n\r\nmodel = D2VTransformer(min_count=1)\r\nmodel.fit_transform(train_input, train_target)\r\n#model.fit_transform(d2v_sentences, train_target)\r\n```\r\nVersions:\r\nWindows-10-10.0.16299-SP0\r\nPython 3.6.4 | packaged by conda-forge | (default, Dec 24 2017, 10:11:43) [MSC v.1900 64 bit (AMD64)]\r\nNumPy 1.13.3\r\nSciPy 0.19.1\r\ngensim 3.2.0\r\nFAST_VERSION 1\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nScikit learn interface for gensim for easy use of gensim with scikit-learn\nFollows scikit-learn API conventions\n\"\"\"\n\nimport numpy as np\nfrom six import string_types\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.exceptions import NotFittedError\n\nfrom gensim import models\n\n\nclass D2VTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Base Doc2Vec module\n \"\"\"\n\n def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None,\n docvecs_mapfile=None, comment=None, trim_rule=None, size=100, alpha=0.025, window=5, min_count=5,\n max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, hs=0, negative=5, cbow_mean=1,\n hashfxn=hash, iter=5, sorted_vocab=1, batch_words=10000):\n \"\"\"\n Sklearn api for Doc2Vec model. See gensim.models.Doc2Vec and gensim.models.Word2Vec for parameter details.\n \"\"\"\n self.gensim_model = None\n self.dm_mean = dm_mean\n self.dm = dm\n self.dbow_words = dbow_words\n self.dm_concat = dm_concat\n self.dm_tag_count = dm_tag_count\n self.docvecs = docvecs\n self.docvecs_mapfile = docvecs_mapfile\n self.comment = comment\n self.trim_rule = trim_rule\n\n # attributes associated with gensim.models.Word2Vec\n self.size = size\n self.alpha = alpha\n self.window = window\n self.min_count = min_count\n self.max_vocab_size = max_vocab_size\n self.sample = sample\n self.seed = seed\n self.workers = workers\n self.min_alpha = min_alpha\n self.hs = hs\n self.negative = negative\n self.cbow_mean = int(cbow_mean)\n self.hashfxn = hashfxn\n self.iter = iter\n self.sorted_vocab = sorted_vocab\n self.batch_words = batch_words\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the model according to the given training data.\n Calls gensim.models.Doc2Vec\n \"\"\"\n self.gensim_model = models.Doc2Vec(\n documents=X, dm_mean=self.dm_mean, dm=self.dm,\n dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,\n docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,\n trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,\n min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,\n seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,\n negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,\n iter=self.iter, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words\n )\n return self\n\n def transform(self, docs):\n \"\"\"\n Return the vector representations for the input documents.\n The input `docs` should be a list of lists like\n [['calculus', 'mathematical'],\n ['geometry', 'operations', 'curves']]\n or a single document like : ['calculus', 'mathematical']\n \"\"\"\n if self.gensim_model is None:\n raise NotFittedError(\n \"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method.\"\n )\n\n # The input as array of array\n if isinstance(docs[0], string_types):\n docs = [docs]\n vectors = [self.gensim_model.infer_vector(doc) for doc in docs]\n return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))\n", "path": "gensim/sklearn_api/d2vmodel.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nScikit learn interface for gensim for easy use of gensim with scikit-learn\nFollows scikit-learn API conventions\n\"\"\"\n\nimport numpy as np\nfrom six import string_types\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.exceptions import NotFittedError\n\nfrom gensim import models\nfrom gensim.models import doc2vec\n\n\nclass D2VTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Base Doc2Vec module\n \"\"\"\n\n def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None,\n docvecs_mapfile=None, comment=None, trim_rule=None, size=100, alpha=0.025, window=5, min_count=5,\n max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, hs=0, negative=5, cbow_mean=1,\n hashfxn=hash, iter=5, sorted_vocab=1, batch_words=10000):\n \"\"\"\n Sklearn api for Doc2Vec model. See gensim.models.Doc2Vec and gensim.models.Word2Vec for parameter details.\n \"\"\"\n self.gensim_model = None\n self.dm_mean = dm_mean\n self.dm = dm\n self.dbow_words = dbow_words\n self.dm_concat = dm_concat\n self.dm_tag_count = dm_tag_count\n self.docvecs = docvecs\n self.docvecs_mapfile = docvecs_mapfile\n self.comment = comment\n self.trim_rule = trim_rule\n\n # attributes associated with gensim.models.Word2Vec\n self.size = size\n self.alpha = alpha\n self.window = window\n self.min_count = min_count\n self.max_vocab_size = max_vocab_size\n self.sample = sample\n self.seed = seed\n self.workers = workers\n self.min_alpha = min_alpha\n self.hs = hs\n self.negative = negative\n self.cbow_mean = int(cbow_mean)\n self.hashfxn = hashfxn\n self.iter = iter\n self.sorted_vocab = sorted_vocab\n self.batch_words = batch_words\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the model according to the given training data.\n Calls gensim.models.Doc2Vec\n \"\"\"\n if isinstance(X[0], doc2vec.TaggedDocument):\n d2v_sentences = X\n else:\n d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]\n self.gensim_model = models.Doc2Vec(\n documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,\n dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,\n docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,\n trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,\n min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,\n seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,\n negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,\n iter=self.iter, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words\n )\n return self\n\n def transform(self, docs):\n \"\"\"\n Return the vector representations for the input documents.\n The input `docs` should be a list of lists like\n [['calculus', 'mathematical'],\n ['geometry', 'operations', 'curves']]\n or a single document like : ['calculus', 'mathematical']\n \"\"\"\n if self.gensim_model is None:\n raise NotFittedError(\n \"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method.\"\n )\n\n # The input as array of array\n if isinstance(docs[0], string_types):\n docs = [docs]\n vectors = [self.gensim_model.infer_vector(doc) for doc in docs]\n return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))\n", "path": "gensim/sklearn_api/d2vmodel.py"}]}
| 1,781 | 309 |
gh_patches_debug_18562
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-1424
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cache does not get invalidated correctly when LanguageTreeNode is deleted
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
If you delete a language the page tree still shows the translation status of the corresponding language. This is probably a caching issue.
Cache does not get invalidated correctly when LanguageTreeNode is deleted
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
If you delete a language the page tree still shows the translation status of the corresponding language. This is probably a caching issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `integreat_cms/cms/views/language_tree/language_tree_actions.py`
Content:
```
1 """
2 This module contains view actions for the language tree.
3 Typically, they do not render a whole page, but only parts of it or they redirect to regular views.
4 """
5 import logging
6
7 from django.contrib import messages
8 from django.shortcuts import redirect, get_object_or_404
9 from django.utils.translation import ugettext as _
10 from django.views.decorators.http import require_POST
11 from django.db import transaction
12
13 from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant
14
15 from ...constants import position
16 from ...decorators import permission_required
17 from ...models import LanguageTreeNode
18
19 logger = logging.getLogger(__name__)
20
21
22 @require_POST
23 @permission_required("cms.change_languagetreenode")
24 @transaction.atomic
25 def move_language_tree_node(
26 request, region_slug, language_tree_node_id, target_id, target_position
27 ):
28 """
29 This action moves the given language tree node to the given position relative to the given target.
30
31 :param request: The current request
32 :type request: ~django.http.HttpResponse
33
34 :param region_slug: The slug of the region which language tree should be modified
35 :type region_slug: str
36
37 :param language_tree_node_id: The id of the language tree node which should be moved
38 :type language_tree_node_id: int
39
40 :param target_id: The id of the target language tree node
41 :type target_id: int
42
43 :param target_position: The desired position (choices: :mod:`~integreat_cms.cms.constants.position`)
44 :type target_position: str
45
46 :return: A redirection to the language tree
47 :rtype: ~django.http.HttpResponseRedirect
48 """
49
50 region = request.region
51 language_tree_node = get_object_or_404(
52 region.language_tree_nodes, id=language_tree_node_id
53 )
54 target = get_object_or_404(region.language_tree_nodes, id=target_id)
55
56 try:
57 if target.depth == 1 and target_position in [position.LEFT, position.RIGHT]:
58 raise InvalidPosition(_("A region can only have one root language."))
59 language_tree_node.move(target, target_position)
60 # Call the save method on the (reloaded) node in order to trigger possible signal handlers etc.
61 # (The move()-method executes raw sql which might cause problems if the instance isn't fetched again)
62 language_tree_node = LanguageTreeNode.objects.get(id=language_tree_node_id)
63 language_tree_node.save()
64 messages.success(
65 request,
66 _('The language tree node "{}" was successfully moved.').format(
67 language_tree_node.translated_name
68 ),
69 )
70 logger.debug(
71 "%r moved to %r of %r by %r",
72 language_tree_node,
73 target_position,
74 target,
75 request.user,
76 )
77 except (ValueError, InvalidPosition, InvalidMoveToDescendant) as e:
78 messages.error(request, e)
79 logger.exception(e)
80
81 return redirect("language_tree", **{"region_slug": region_slug})
82
83
84 @require_POST
85 @permission_required("cms.delete_languagetreenode")
86 @transaction.atomic
87 def delete_language_tree_node(request, region_slug, language_tree_node_id):
88 """
89 Deletes the language node of distinct region
90 and all page translations for this language
91
92 :param request: The current request
93 :type request: ~django.http.HttpResponse
94
95 :param region_slug: The slug of the region which language node should be deleted
96 :type region_slug: str
97
98 :param language_tree_node_id: The id of the language tree node which should be deleted
99 :type language_tree_node_id: int
100
101 :return: A redirection to the language tree
102 :rtype: ~django.http.HttpResponseRedirect
103 """
104 # get current region
105 region = request.region
106 # get current selected language node
107 language_node = get_object_or_404(
108 region.language_tree_nodes, id=language_tree_node_id
109 )
110 # get all page translation assigned to the language node
111 page_translations = language_node.language.page_translations
112 # filter those translation that belong to the region and delete them
113 page_translations.filter(page__region=region).delete()
114 # get all event translation assigned to the language node
115 event_translations = language_node.language.event_translations
116 # filter those translation that belong to the region and delete them
117 event_translations.filter(event__region=region).delete()
118 # get all poi translation assigned to the language node
119 poi_translations = language_node.language.poi_translations
120 # filter those translation that belong to the region and delete them
121 poi_translations.filter(poi__region=region).delete()
122 # get all push notification translation assigned to the language node
123 push_notification_translations = (
124 language_node.language.push_notification_translations
125 )
126 # filter those translation that belong to the region and delete them
127 push_notification_translations.filter(push_notification__region=region).delete()
128
129 logger.debug("%r deleted by %r", language_node, request.user)
130 language_node.delete()
131 messages.success(
132 request,
133 _(
134 'The language tree node "{}" and all corresponding translations were successfully deleted.'
135 ).format(language_node.translated_name),
136 )
137 return redirect("language_tree", **{"region_slug": region_slug})
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/integreat_cms/cms/views/language_tree/language_tree_actions.py b/integreat_cms/cms/views/language_tree/language_tree_actions.py
--- a/integreat_cms/cms/views/language_tree/language_tree_actions.py
+++ b/integreat_cms/cms/views/language_tree/language_tree_actions.py
@@ -11,6 +11,7 @@
from django.db import transaction
from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant
+from cacheops import invalidate_obj
from ...constants import position
from ...decorators import permission_required
@@ -128,6 +129,16 @@
logger.debug("%r deleted by %r", language_node, request.user)
language_node.delete()
+
+ for page in region.pages.all():
+ invalidate_obj(page)
+ for event in region.events.all():
+ invalidate_obj(event)
+ for poi in region.pois.all():
+ invalidate_obj(poi)
+ for push_notification in region.push_notifications.all():
+ invalidate_obj(push_notification)
+
messages.success(
request,
_(
|
{"golden_diff": "diff --git a/integreat_cms/cms/views/language_tree/language_tree_actions.py b/integreat_cms/cms/views/language_tree/language_tree_actions.py\n--- a/integreat_cms/cms/views/language_tree/language_tree_actions.py\n+++ b/integreat_cms/cms/views/language_tree/language_tree_actions.py\n@@ -11,6 +11,7 @@\n from django.db import transaction\n \n from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant\n+from cacheops import invalidate_obj\n \n from ...constants import position\n from ...decorators import permission_required\n@@ -128,6 +129,16 @@\n \n logger.debug(\"%r deleted by %r\", language_node, request.user)\n language_node.delete()\n+\n+ for page in region.pages.all():\n+ invalidate_obj(page)\n+ for event in region.events.all():\n+ invalidate_obj(event)\n+ for poi in region.pois.all():\n+ invalidate_obj(poi)\n+ for push_notification in region.push_notifications.all():\n+ invalidate_obj(push_notification)\n+\n messages.success(\n request,\n _(\n", "issue": "Cache does not get invalidated correctly when LanguageTreeNode is deleted\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nIf you delete a language the page tree still shows the translation status of the corresponding language. This is probably a caching issue.\r\n\nCache does not get invalidated correctly when LanguageTreeNode is deleted\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nIf you delete a language the page tree still shows the translation status of the corresponding language. This is probably a caching issue.\r\n\n", "before_files": [{"content": "\"\"\"\nThis module contains view actions for the language tree.\nTypically, they do not render a whole page, but only parts of it or they redirect to regular views.\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\nfrom django.db import transaction\n\nfrom treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant\n\nfrom ...constants import position\nfrom ...decorators import permission_required\nfrom ...models import LanguageTreeNode\n\nlogger = logging.getLogger(__name__)\n\n\n@require_POST\n@permission_required(\"cms.change_languagetreenode\")\n@transaction.atomic\ndef move_language_tree_node(\n request, region_slug, language_tree_node_id, target_id, target_position\n):\n \"\"\"\n This action moves the given language tree node to the given position relative to the given target.\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param region_slug: The slug of the region which language tree should be modified\n :type region_slug: str\n\n :param language_tree_node_id: The id of the language tree node which should be moved\n :type language_tree_node_id: int\n\n :param target_id: The id of the target language tree node\n :type target_id: int\n\n :param target_position: The desired position (choices: :mod:`~integreat_cms.cms.constants.position`)\n :type target_position: str\n\n :return: A redirection to the language tree\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n\n region = request.region\n language_tree_node = get_object_or_404(\n region.language_tree_nodes, id=language_tree_node_id\n )\n target = get_object_or_404(region.language_tree_nodes, id=target_id)\n\n try:\n if target.depth == 1 and target_position in [position.LEFT, position.RIGHT]:\n raise InvalidPosition(_(\"A region can only have one root language.\"))\n language_tree_node.move(target, target_position)\n # Call the save method on the (reloaded) node in order to trigger possible signal handlers etc.\n # (The move()-method executes raw sql which might cause problems if the instance isn't fetched again)\n language_tree_node = LanguageTreeNode.objects.get(id=language_tree_node_id)\n language_tree_node.save()\n messages.success(\n request,\n _('The language tree node \"{}\" was successfully moved.').format(\n language_tree_node.translated_name\n ),\n )\n logger.debug(\n \"%r moved to %r of %r by %r\",\n language_tree_node,\n target_position,\n target,\n request.user,\n )\n except (ValueError, InvalidPosition, InvalidMoveToDescendant) as e:\n messages.error(request, e)\n logger.exception(e)\n\n return redirect(\"language_tree\", **{\"region_slug\": region_slug})\n\n\n@require_POST\n@permission_required(\"cms.delete_languagetreenode\")\n@transaction.atomic\ndef delete_language_tree_node(request, region_slug, language_tree_node_id):\n \"\"\"\n Deletes the language node of distinct region\n and all page translations for this language\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param region_slug: The slug of the region which language node should be deleted\n :type region_slug: str\n\n :param language_tree_node_id: The id of the language tree node which should be deleted\n :type language_tree_node_id: int\n\n :return: A redirection to the language tree\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n # get current region\n region = request.region\n # get current selected language node\n language_node = get_object_or_404(\n region.language_tree_nodes, id=language_tree_node_id\n )\n # get all page translation assigned to the language node\n page_translations = language_node.language.page_translations\n # filter those translation that belong to the region and delete them\n page_translations.filter(page__region=region).delete()\n # get all event translation assigned to the language node\n event_translations = language_node.language.event_translations\n # filter those translation that belong to the region and delete them\n event_translations.filter(event__region=region).delete()\n # get all poi translation assigned to the language node\n poi_translations = language_node.language.poi_translations\n # filter those translation that belong to the region and delete them\n poi_translations.filter(poi__region=region).delete()\n # get all push notification translation assigned to the language node\n push_notification_translations = (\n language_node.language.push_notification_translations\n )\n # filter those translation that belong to the region and delete them\n push_notification_translations.filter(push_notification__region=region).delete()\n\n logger.debug(\"%r deleted by %r\", language_node, request.user)\n language_node.delete()\n messages.success(\n request,\n _(\n 'The language tree node \"{}\" and all corresponding translations were successfully deleted.'\n ).format(language_node.translated_name),\n )\n return redirect(\"language_tree\", **{\"region_slug\": region_slug})\n", "path": "integreat_cms/cms/views/language_tree/language_tree_actions.py"}], "after_files": [{"content": "\"\"\"\nThis module contains view actions for the language tree.\nTypically, they do not render a whole page, but only parts of it or they redirect to regular views.\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\nfrom django.db import transaction\n\nfrom treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant\nfrom cacheops import invalidate_obj\n\nfrom ...constants import position\nfrom ...decorators import permission_required\nfrom ...models import LanguageTreeNode\n\nlogger = logging.getLogger(__name__)\n\n\n@require_POST\n@permission_required(\"cms.change_languagetreenode\")\n@transaction.atomic\ndef move_language_tree_node(\n request, region_slug, language_tree_node_id, target_id, target_position\n):\n \"\"\"\n This action moves the given language tree node to the given position relative to the given target.\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param region_slug: The slug of the region which language tree should be modified\n :type region_slug: str\n\n :param language_tree_node_id: The id of the language tree node which should be moved\n :type language_tree_node_id: int\n\n :param target_id: The id of the target language tree node\n :type target_id: int\n\n :param target_position: The desired position (choices: :mod:`~integreat_cms.cms.constants.position`)\n :type target_position: str\n\n :return: A redirection to the language tree\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n\n region = request.region\n language_tree_node = get_object_or_404(\n region.language_tree_nodes, id=language_tree_node_id\n )\n target = get_object_or_404(region.language_tree_nodes, id=target_id)\n\n try:\n if target.depth == 1 and target_position in [position.LEFT, position.RIGHT]:\n raise InvalidPosition(_(\"A region can only have one root language.\"))\n language_tree_node.move(target, target_position)\n # Call the save method on the (reloaded) node in order to trigger possible signal handlers etc.\n # (The move()-method executes raw sql which might cause problems if the instance isn't fetched again)\n language_tree_node = LanguageTreeNode.objects.get(id=language_tree_node_id)\n language_tree_node.save()\n messages.success(\n request,\n _('The language tree node \"{}\" was successfully moved.').format(\n language_tree_node.translated_name\n ),\n )\n logger.debug(\n \"%r moved to %r of %r by %r\",\n language_tree_node,\n target_position,\n target,\n request.user,\n )\n except (ValueError, InvalidPosition, InvalidMoveToDescendant) as e:\n messages.error(request, e)\n logger.exception(e)\n\n return redirect(\"language_tree\", **{\"region_slug\": region_slug})\n\n\n@require_POST\n@permission_required(\"cms.delete_languagetreenode\")\n@transaction.atomic\ndef delete_language_tree_node(request, region_slug, language_tree_node_id):\n \"\"\"\n Deletes the language node of distinct region\n and all page translations for this language\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param region_slug: The slug of the region which language node should be deleted\n :type region_slug: str\n\n :param language_tree_node_id: The id of the language tree node which should be deleted\n :type language_tree_node_id: int\n\n :return: A redirection to the language tree\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n # get current region\n region = request.region\n # get current selected language node\n language_node = get_object_or_404(\n region.language_tree_nodes, id=language_tree_node_id\n )\n # get all page translation assigned to the language node\n page_translations = language_node.language.page_translations\n # filter those translation that belong to the region and delete them\n page_translations.filter(page__region=region).delete()\n # get all event translation assigned to the language node\n event_translations = language_node.language.event_translations\n # filter those translation that belong to the region and delete them\n event_translations.filter(event__region=region).delete()\n # get all poi translation assigned to the language node\n poi_translations = language_node.language.poi_translations\n # filter those translation that belong to the region and delete them\n poi_translations.filter(poi__region=region).delete()\n # get all push notification translation assigned to the language node\n push_notification_translations = (\n language_node.language.push_notification_translations\n )\n # filter those translation that belong to the region and delete them\n push_notification_translations.filter(push_notification__region=region).delete()\n\n logger.debug(\"%r deleted by %r\", language_node, request.user)\n language_node.delete()\n\n for page in region.pages.all():\n invalidate_obj(page)\n for event in region.events.all():\n invalidate_obj(event)\n for poi in region.pois.all():\n invalidate_obj(poi)\n for push_notification in region.push_notifications.all():\n invalidate_obj(push_notification)\n\n messages.success(\n request,\n _(\n 'The language tree node \"{}\" and all corresponding translations were successfully deleted.'\n ).format(language_node.translated_name),\n )\n return redirect(\"language_tree\", **{\"region_slug\": region_slug})\n", "path": "integreat_cms/cms/views/language_tree/language_tree_actions.py"}]}
| 1,820 | 232 |
gh_patches_debug_16723
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-2915
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot edit or delete challenge pages with slug `evaluation`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/pages/forms.py`
Content:
```
1 from django import forms
2 from django.db.models import BLANK_CHOICE_DASH
3 from django_summernote.widgets import SummernoteInplaceWidget
4
5 from grandchallenge.core.forms import SaveFormInitMixin
6 from grandchallenge.pages.models import Page
7
8
9 class PageCreateForm(SaveFormInitMixin, forms.ModelForm):
10 def __init__(self, *args, challenge, **kwargs):
11 self.challenge = challenge
12 super().__init__(*args, **kwargs)
13
14 class Meta:
15 model = Page
16 fields = (
17 "display_title",
18 "permission_level",
19 "hidden",
20 "html",
21 )
22 widgets = {"html": SummernoteInplaceWidget()}
23 help_texts = {
24 "html": (
25 "The content of your page. <b>Please note</b>: your html will "
26 "be filtered after it has been saved to remove any non-HTML5 "
27 "compliant markup and scripts. The filtering is not reflected "
28 "in the live view so please <b>check the rendering of your "
29 "page after you click save</b>. If you're going to paste from "
30 "another source such as MS Word, please <b>paste without "
31 "formatting</b> using <b>CTRL+SHIFT+V</b> on Windows or "
32 "<b>⇧+⌥+⌘+V</b> on OS X."
33 )
34 }
35
36
37 class PageUpdateForm(PageCreateForm):
38 """Like the page update form but you can also move the page."""
39
40 move = forms.CharField(widget=forms.Select)
41 move.required = False
42 move.widget.choices = (
43 (BLANK_CHOICE_DASH[0]),
44 (Page.FIRST, "First"),
45 (Page.UP, "Up"),
46 (Page.DOWN, "Down"),
47 (Page.LAST, "Last"),
48 )
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/pages/forms.py b/app/grandchallenge/pages/forms.py
--- a/app/grandchallenge/pages/forms.py
+++ b/app/grandchallenge/pages/forms.py
@@ -1,4 +1,5 @@
from django import forms
+from django.core.exceptions import ValidationError
from django.db.models import BLANK_CHOICE_DASH
from django_summernote.widgets import SummernoteInplaceWidget
@@ -33,6 +34,17 @@
)
}
+ def clean_display_title(self):
+ display_title = self.cleaned_data["display_title"]
+
+ if display_title.lower() in {"evaluation"}:
+ # evaluation results in a URL clash, especially with the update page.
+ raise ValidationError(
+ "Title not allowed, please select an alternative"
+ )
+
+ return display_title
+
class PageUpdateForm(PageCreateForm):
"""Like the page update form but you can also move the page."""
|
{"golden_diff": "diff --git a/app/grandchallenge/pages/forms.py b/app/grandchallenge/pages/forms.py\n--- a/app/grandchallenge/pages/forms.py\n+++ b/app/grandchallenge/pages/forms.py\n@@ -1,4 +1,5 @@\n from django import forms\n+from django.core.exceptions import ValidationError\n from django.db.models import BLANK_CHOICE_DASH\n from django_summernote.widgets import SummernoteInplaceWidget\n \n@@ -33,6 +34,17 @@\n )\n }\n \n+ def clean_display_title(self):\n+ display_title = self.cleaned_data[\"display_title\"]\n+\n+ if display_title.lower() in {\"evaluation\"}:\n+ # evaluation results in a URL clash, especially with the update page.\n+ raise ValidationError(\n+ \"Title not allowed, please select an alternative\"\n+ )\n+\n+ return display_title\n+\n \n class PageUpdateForm(PageCreateForm):\n \"\"\"Like the page update form but you can also move the page.\"\"\"\n", "issue": "Cannot edit or delete challenge pages with slug `evaluation`\n\n", "before_files": [{"content": "from django import forms\nfrom django.db.models import BLANK_CHOICE_DASH\nfrom django_summernote.widgets import SummernoteInplaceWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.pages.models import Page\n\n\nclass PageCreateForm(SaveFormInitMixin, forms.ModelForm):\n def __init__(self, *args, challenge, **kwargs):\n self.challenge = challenge\n super().__init__(*args, **kwargs)\n\n class Meta:\n model = Page\n fields = (\n \"display_title\",\n \"permission_level\",\n \"hidden\",\n \"html\",\n )\n widgets = {\"html\": SummernoteInplaceWidget()}\n help_texts = {\n \"html\": (\n \"The content of your page. <b>Please note</b>: your html will \"\n \"be filtered after it has been saved to remove any non-HTML5 \"\n \"compliant markup and scripts. The filtering is not reflected \"\n \"in the live view so please <b>check the rendering of your \"\n \"page after you click save</b>. If you're going to paste from \"\n \"another source such as MS Word, please <b>paste without \"\n \"formatting</b> using <b>CTRL+SHIFT+V</b> on Windows or \"\n \"<b>\u21e7+\u2325+\u2318+V</b> on OS X.\"\n )\n }\n\n\nclass PageUpdateForm(PageCreateForm):\n \"\"\"Like the page update form but you can also move the page.\"\"\"\n\n move = forms.CharField(widget=forms.Select)\n move.required = False\n move.widget.choices = (\n (BLANK_CHOICE_DASH[0]),\n (Page.FIRST, \"First\"),\n (Page.UP, \"Up\"),\n (Page.DOWN, \"Down\"),\n (Page.LAST, \"Last\"),\n )\n", "path": "app/grandchallenge/pages/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import BLANK_CHOICE_DASH\nfrom django_summernote.widgets import SummernoteInplaceWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.pages.models import Page\n\n\nclass PageCreateForm(SaveFormInitMixin, forms.ModelForm):\n def __init__(self, *args, challenge, **kwargs):\n self.challenge = challenge\n super().__init__(*args, **kwargs)\n\n class Meta:\n model = Page\n fields = (\n \"display_title\",\n \"permission_level\",\n \"hidden\",\n \"html\",\n )\n widgets = {\"html\": SummernoteInplaceWidget()}\n help_texts = {\n \"html\": (\n \"The content of your page. <b>Please note</b>: your html will \"\n \"be filtered after it has been saved to remove any non-HTML5 \"\n \"compliant markup and scripts. The filtering is not reflected \"\n \"in the live view so please <b>check the rendering of your \"\n \"page after you click save</b>. If you're going to paste from \"\n \"another source such as MS Word, please <b>paste without \"\n \"formatting</b> using <b>CTRL+SHIFT+V</b> on Windows or \"\n \"<b>\u21e7+\u2325+\u2318+V</b> on OS X.\"\n )\n }\n\n def clean_display_title(self):\n display_title = self.cleaned_data[\"display_title\"]\n\n if display_title.lower() in {\"evaluation\"}:\n # evaluation results in a URL clash, especially with the update page.\n raise ValidationError(\n \"Title not allowed, please select an alternative\"\n )\n\n return display_title\n\n\nclass PageUpdateForm(PageCreateForm):\n \"\"\"Like the page update form but you can also move the page.\"\"\"\n\n move = forms.CharField(widget=forms.Select)\n move.required = False\n move.widget.choices = (\n (BLANK_CHOICE_DASH[0]),\n (Page.FIRST, \"First\"),\n (Page.UP, \"Up\"),\n (Page.DOWN, \"Down\"),\n (Page.LAST, \"Last\"),\n )\n", "path": "app/grandchallenge/pages/forms.py"}]}
| 757 | 208 |
gh_patches_debug_8713
|
rasdani/github-patches
|
git_diff
|
mozilla__pontoon-3133
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't save term translation after updating the term's text in DB
I've changed the term's text to `Review checker`, since that's the correct capitalization in English (changed from the original)
https://pontoon.mozilla.org/a/terminology/term/666/change/
To my surprise, the term showed up as untranslated for Italian. The other problem is that I cannot save the translation: https://pontoon.mozilla.org/it/terminology/all-resources/?status=missing&string=301053
Error in log is
```
Internal Server Error: /translations/create/
response = get_response(request)
File "/app/.heroku/python/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
return func(request, *args, **kwargs)
File "/app/pontoon/base/utils.py", line 126, in wrap
return f(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.9/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.9/contextlib.py", line 79, in inner
File "/app/pontoon/translations/views.py", line 117, in create_translation
translation.save(failed_checks=failed_checks)
File "/app/pontoon/base/models.py", line 3429, in save
self.entity.reset_term_translation(self.locale)
File "/app/pontoon/base/models.py", line 2865, in reset_term_translation
term = self.term
pontoon.base.models.Entity.term.RelatedObjectDoesNotExist: Entity has no term.
```
Can't save term translation after updating the term's text in DB
I've changed the term's text to `Review checker`, since that's the correct capitalization in English (changed from the original)
https://pontoon.mozilla.org/a/terminology/term/666/change/
To my surprise, the term showed up as untranslated for Italian. The other problem is that I cannot save the translation: https://pontoon.mozilla.org/it/terminology/all-resources/?status=missing&string=301053
Error in log is
```
Internal Server Error: /translations/create/
response = get_response(request)
File "/app/.heroku/python/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
return func(request, *args, **kwargs)
File "/app/pontoon/base/utils.py", line 126, in wrap
return f(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.9/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.9/contextlib.py", line 79, in inner
File "/app/pontoon/translations/views.py", line 117, in create_translation
translation.save(failed_checks=failed_checks)
File "/app/pontoon/base/models.py", line 3429, in save
self.entity.reset_term_translation(self.locale)
File "/app/pontoon/base/models.py", line 2865, in reset_term_translation
term = self.term
pontoon.base.models.Entity.term.RelatedObjectDoesNotExist: Entity has no term.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/terminology/models.py`
Content:
```
1 import re
2
3 from django.db import models
4
5 from pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource
6
7
8 def update_terminology_project_stats():
9 resource = Resource.objects.get(project__slug="terminology")
10 project = resource.project
11 total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()
12 resource.total_strings = total_strings
13 resource.save(update_fields=["total_strings"])
14
15 translated_resources = list(TranslatedResource.objects.filter(resource=resource))
16
17 for translated_resource in translated_resources:
18 translated_resource.calculate_stats(save=False)
19
20 TranslatedResource.objects.bulk_update(
21 translated_resources,
22 [
23 "total_strings",
24 "approved_strings",
25 "pretranslated_strings",
26 "strings_with_errors",
27 "strings_with_warnings",
28 "unreviewed_strings",
29 ],
30 )
31
32 project.aggregate_stats()
33
34 for locale in project.locales.all():
35 locale.aggregate_stats()
36
37 for projectlocale in ProjectLocale.objects.filter(project=project):
38 projectlocale.aggregate_stats()
39
40
41 class TermQuerySet(models.QuerySet):
42 def for_string(self, string):
43 terms = []
44 available_terms = self.exclude(definition="").exclude(forbidden=True)
45
46 for term in available_terms:
47 term_text = r"\b" + re.escape(term.text)
48 flags = 0 if term.case_sensitive else re.IGNORECASE
49
50 if re.search(term_text, string, flags):
51 terms.append(term)
52
53 return terms
54
55 def delete(self, *args, **kwargs):
56 """
57 Before deleting Terms, obsolete their Entities
58 """
59 for term in self:
60 term.obsolete_entity()
61 update_terminology_project_stats()
62
63 super().delete(*args, **kwargs)
64
65
66 class Term(models.Model):
67 text = models.CharField(max_length=255)
68 entity = models.OneToOneField("base.Entity", models.SET_NULL, null=True, blank=True)
69
70 class PartOfSpeech(models.TextChoices):
71 ADJECTIVE = "adjective", "Adjective"
72 ADVERB = "adverb", "Adverb"
73 NOUN = "noun", "Noun"
74 VERB = "verb", "Verb"
75
76 part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)
77
78 definition = models.TextField(blank=True)
79 usage = models.TextField(blank=True)
80 notes = models.TextField(blank=True)
81
82 class Status(models.TextChoices):
83 APPROVED = "approved", "Approved"
84 NEW = "new", "New"
85 OBSOLETE = "obsolete", "Obsolete"
86 REVIEW = "review", "Review"
87
88 status = models.CharField(
89 max_length=20, choices=Status.choices, null=True, blank=True
90 )
91
92 case_sensitive = models.BooleanField(default=False)
93 do_not_translate = models.BooleanField(default=False)
94 forbidden = models.BooleanField(default=False)
95
96 created_at = models.DateTimeField(auto_now_add=True)
97 created_by = models.ForeignKey(
98 "auth.User", models.SET_NULL, related_name="terms", null=True, blank=True
99 )
100
101 objects = TermQuerySet.as_manager()
102
103 def translation(self, locale):
104 """
105 Get locale translation of the term.
106 """
107 if self.do_not_translate:
108 return self.text
109 else:
110 try:
111 return self.translations.get(locale=locale).text
112 except (AttributeError, TermTranslation.DoesNotExist):
113 return None
114
115 @property
116 def localizable(self):
117 """
118 Check if the term is localizable.
119 """
120 if self.do_not_translate:
121 return False
122
123 if self.forbidden:
124 return False
125
126 if self.definition == "":
127 return False
128
129 return True
130
131 def entity_comment(self):
132 """
133 Generate entity comment from the term.
134 """
135 comment = "{}. {}.".format(
136 self.part_of_speech.capitalize(),
137 self.definition.capitalize().rstrip("."),
138 )
139
140 if self.usage:
141 comment += " E.g. {}.".format(self.usage.capitalize().rstrip("."))
142
143 return comment
144
145 def create_entity(self):
146 """
147 An Entity must be created (or deobsoleted) for a Term according to the
148 following rules:
149 - Entity.string contains content of Term.text.
150 - Entity.comment contains joint content of several fields:
151 Term.part_of_speech. Term.definition. E.g.: Term.usage.
152 """
153 resource = Resource.objects.get(project__slug="terminology")
154
155 entity, created = Entity.objects.get_or_create(
156 string=self.text,
157 comment=self.entity_comment(),
158 resource=resource,
159 )
160
161 # Using update() to avoid circular Term.save() call
162 Term.objects.filter(pk=self.pk).update(entity_id=entity.id)
163
164 if not created:
165 entity.obsolete = False
166 entity.save(update_fields=["obsolete"])
167
168 # Make sure Term entities are ordered alphabetically
169 entities = list(
170 Entity.objects.filter(resource=resource, obsolete=False).order_by("string")
171 )
172 for index, e in enumerate(entities):
173 e.order = index
174 Entity.objects.bulk_update(entities, ["order"])
175
176 def obsolete_entity(self):
177 entity = self.entity
178
179 # Ignore if term doesn't have entity assigned
180 if entity is None:
181 return
182
183 entity.obsolete = True
184 entity.save(update_fields=["obsolete"])
185
186 def handle_term_update(self):
187 """
188 Before updating an existing Term, update its Entity if neccessary
189 """
190 term = self
191 old_term = Term.objects.get(pk=term.pk)
192
193 # Ignore changes to non-localizable terms that stay non-localizable
194 if not old_term.localizable and not term.localizable:
195 return
196
197 # If localizable term becomes non-localizable, obsolete its Entity
198 if old_term.localizable and not term.localizable:
199 old_term.obsolete_entity()
200
201 # If non-localizable term becomes localizable, create a corresponding Entity
202 elif not old_term.localizable and term.localizable:
203 term.create_entity()
204
205 # If relevant changes are made to the localizable term that stays localizable
206 else:
207 # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.
208 if old_term.text != term.text:
209 old_term.obsolete_entity()
210 term.create_entity()
211
212 # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated.
213 elif (
214 old_term.part_of_speech != term.part_of_speech
215 or old_term.definition != term.definition
216 or old_term.usage != term.usage
217 ):
218 entity = term.entity
219
220 # Ignore if term doesn't have entity assigned
221 if entity is None:
222 return
223
224 entity.comment = term.entity_comment()
225 entity.save(update_fields=["comment"])
226
227 return
228
229 update_terminology_project_stats()
230
231 def handle_term_create(self):
232 """
233 After creating a new localizable Term, create its Entity
234 """
235 self.create_entity()
236 update_terminology_project_stats()
237
238 def save(self, *args, **kwargs):
239 created = self.pk is None
240
241 if not created:
242 self.handle_term_update()
243
244 super().save(*args, **kwargs)
245
246 if created and self.localizable:
247 self.handle_term_create()
248
249 def delete(self, *args, **kwargs):
250 """
251 Before deleting a Term, obsolete its Entity
252 """
253 self.obsolete_entity()
254 update_terminology_project_stats()
255
256 super().delete(*args, **kwargs)
257
258 def __str__(self):
259 return self.text
260
261
262 class TermTranslation(models.Model):
263 term = models.ForeignKey(Term, models.CASCADE, related_name="translations")
264 locale = models.ForeignKey("base.Locale", models.CASCADE, related_name="terms")
265
266 text = models.CharField(max_length=255)
267
268 def __str__(self):
269 return self.text
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pontoon/terminology/models.py b/pontoon/terminology/models.py
--- a/pontoon/terminology/models.py
+++ b/pontoon/terminology/models.py
@@ -160,10 +160,12 @@
# Using update() to avoid circular Term.save() call
Term.objects.filter(pk=self.pk).update(entity_id=entity.id)
+ entity.term = self
if not created:
entity.obsolete = False
- entity.save(update_fields=["obsolete"])
+
+ entity.save()
# Make sure Term entities are ordered alphabetically
entities = list(
|
{"golden_diff": "diff --git a/pontoon/terminology/models.py b/pontoon/terminology/models.py\n--- a/pontoon/terminology/models.py\n+++ b/pontoon/terminology/models.py\n@@ -160,10 +160,12 @@\n \n # Using update() to avoid circular Term.save() call\n Term.objects.filter(pk=self.pk).update(entity_id=entity.id)\n+ entity.term = self\n \n if not created:\n entity.obsolete = False\n- entity.save(update_fields=[\"obsolete\"])\n+\n+ entity.save()\n \n # Make sure Term entities are ordered alphabetically\n entities = list(\n", "issue": "Can't save term translation after updating the term's text in DB\nI've changed the term's text to `Review checker`, since that's the correct capitalization in English (changed from the original)\r\nhttps://pontoon.mozilla.org/a/terminology/term/666/change/\r\n\r\nTo my surprise, the term showed up as untranslated for Italian. The other problem is that I cannot save the translation: https://pontoon.mozilla.org/it/terminology/all-resources/?status=missing&string=301053\r\n\r\nError in log is \r\n\r\n```\r\nInternal Server Error: /translations/create/\r\n response = get_response(request)\r\n File \"/app/.heroku/python/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n return func(request, *args, **kwargs)\r\n File \"/app/pontoon/base/utils.py\", line 126, in wrap\r\n return f(request, *args, **kwargs)\r\n File \"/app/.heroku/python/lib/python3.9/site-packages/django/contrib/auth/decorators.py\", line 21, in _wrapped_view\r\n return view_func(request, *args, **kwargs)\r\n File \"/app/.heroku/python/lib/python3.9/contextlib.py\", line 79, in inner\r\n File \"/app/pontoon/translations/views.py\", line 117, in create_translation\r\n translation.save(failed_checks=failed_checks)\r\n File \"/app/pontoon/base/models.py\", line 3429, in save\r\n self.entity.reset_term_translation(self.locale)\r\n File \"/app/pontoon/base/models.py\", line 2865, in reset_term_translation\r\n term = self.term\r\npontoon.base.models.Entity.term.RelatedObjectDoesNotExist: Entity has no term.\r\n```\nCan't save term translation after updating the term's text in DB\nI've changed the term's text to `Review checker`, since that's the correct capitalization in English (changed from the original)\r\nhttps://pontoon.mozilla.org/a/terminology/term/666/change/\r\n\r\nTo my surprise, the term showed up as untranslated for Italian. The other problem is that I cannot save the translation: https://pontoon.mozilla.org/it/terminology/all-resources/?status=missing&string=301053\r\n\r\nError in log is \r\n\r\n```\r\nInternal Server Error: /translations/create/\r\n response = get_response(request)\r\n File \"/app/.heroku/python/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n return func(request, *args, **kwargs)\r\n File \"/app/pontoon/base/utils.py\", line 126, in wrap\r\n return f(request, *args, **kwargs)\r\n File \"/app/.heroku/python/lib/python3.9/site-packages/django/contrib/auth/decorators.py\", line 21, in _wrapped_view\r\n return view_func(request, *args, **kwargs)\r\n File \"/app/.heroku/python/lib/python3.9/contextlib.py\", line 79, in inner\r\n File \"/app/pontoon/translations/views.py\", line 117, in create_translation\r\n translation.save(failed_checks=failed_checks)\r\n File \"/app/pontoon/base/models.py\", line 3429, in save\r\n self.entity.reset_term_translation(self.locale)\r\n File \"/app/pontoon/base/models.py\", line 2865, in reset_term_translation\r\n term = self.term\r\npontoon.base.models.Entity.term.RelatedObjectDoesNotExist: Entity has no term.\r\n```\n", "before_files": [{"content": "import re\n\nfrom django.db import models\n\nfrom pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource\n\n\ndef update_terminology_project_stats():\n resource = Resource.objects.get(project__slug=\"terminology\")\n project = resource.project\n total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()\n resource.total_strings = total_strings\n resource.save(update_fields=[\"total_strings\"])\n\n translated_resources = list(TranslatedResource.objects.filter(resource=resource))\n\n for translated_resource in translated_resources:\n translated_resource.calculate_stats(save=False)\n\n TranslatedResource.objects.bulk_update(\n translated_resources,\n [\n \"total_strings\",\n \"approved_strings\",\n \"pretranslated_strings\",\n \"strings_with_errors\",\n \"strings_with_warnings\",\n \"unreviewed_strings\",\n ],\n )\n\n project.aggregate_stats()\n\n for locale in project.locales.all():\n locale.aggregate_stats()\n\n for projectlocale in ProjectLocale.objects.filter(project=project):\n projectlocale.aggregate_stats()\n\n\nclass TermQuerySet(models.QuerySet):\n def for_string(self, string):\n terms = []\n available_terms = self.exclude(definition=\"\").exclude(forbidden=True)\n\n for term in available_terms:\n term_text = r\"\\b\" + re.escape(term.text)\n flags = 0 if term.case_sensitive else re.IGNORECASE\n\n if re.search(term_text, string, flags):\n terms.append(term)\n\n return terms\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting Terms, obsolete their Entities\n \"\"\"\n for term in self:\n term.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n\nclass Term(models.Model):\n text = models.CharField(max_length=255)\n entity = models.OneToOneField(\"base.Entity\", models.SET_NULL, null=True, blank=True)\n\n class PartOfSpeech(models.TextChoices):\n ADJECTIVE = \"adjective\", \"Adjective\"\n ADVERB = \"adverb\", \"Adverb\"\n NOUN = \"noun\", \"Noun\"\n VERB = \"verb\", \"Verb\"\n\n part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)\n\n definition = models.TextField(blank=True)\n usage = models.TextField(blank=True)\n notes = models.TextField(blank=True)\n\n class Status(models.TextChoices):\n APPROVED = \"approved\", \"Approved\"\n NEW = \"new\", \"New\"\n OBSOLETE = \"obsolete\", \"Obsolete\"\n REVIEW = \"review\", \"Review\"\n\n status = models.CharField(\n max_length=20, choices=Status.choices, null=True, blank=True\n )\n\n case_sensitive = models.BooleanField(default=False)\n do_not_translate = models.BooleanField(default=False)\n forbidden = models.BooleanField(default=False)\n\n created_at = models.DateTimeField(auto_now_add=True)\n created_by = models.ForeignKey(\n \"auth.User\", models.SET_NULL, related_name=\"terms\", null=True, blank=True\n )\n\n objects = TermQuerySet.as_manager()\n\n def translation(self, locale):\n \"\"\"\n Get locale translation of the term.\n \"\"\"\n if self.do_not_translate:\n return self.text\n else:\n try:\n return self.translations.get(locale=locale).text\n except (AttributeError, TermTranslation.DoesNotExist):\n return None\n\n @property\n def localizable(self):\n \"\"\"\n Check if the term is localizable.\n \"\"\"\n if self.do_not_translate:\n return False\n\n if self.forbidden:\n return False\n\n if self.definition == \"\":\n return False\n\n return True\n\n def entity_comment(self):\n \"\"\"\n Generate entity comment from the term.\n \"\"\"\n comment = \"{}. {}.\".format(\n self.part_of_speech.capitalize(),\n self.definition.capitalize().rstrip(\".\"),\n )\n\n if self.usage:\n comment += \" E.g. {}.\".format(self.usage.capitalize().rstrip(\".\"))\n\n return comment\n\n def create_entity(self):\n \"\"\"\n An Entity must be created (or deobsoleted) for a Term according to the\n following rules:\n - Entity.string contains content of Term.text.\n - Entity.comment contains joint content of several fields:\n Term.part_of_speech. Term.definition. E.g.: Term.usage.\n \"\"\"\n resource = Resource.objects.get(project__slug=\"terminology\")\n\n entity, created = Entity.objects.get_or_create(\n string=self.text,\n comment=self.entity_comment(),\n resource=resource,\n )\n\n # Using update() to avoid circular Term.save() call\n Term.objects.filter(pk=self.pk).update(entity_id=entity.id)\n\n if not created:\n entity.obsolete = False\n entity.save(update_fields=[\"obsolete\"])\n\n # Make sure Term entities are ordered alphabetically\n entities = list(\n Entity.objects.filter(resource=resource, obsolete=False).order_by(\"string\")\n )\n for index, e in enumerate(entities):\n e.order = index\n Entity.objects.bulk_update(entities, [\"order\"])\n\n def obsolete_entity(self):\n entity = self.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.obsolete = True\n entity.save(update_fields=[\"obsolete\"])\n\n def handle_term_update(self):\n \"\"\"\n Before updating an existing Term, update its Entity if neccessary\n \"\"\"\n term = self\n old_term = Term.objects.get(pk=term.pk)\n\n # Ignore changes to non-localizable terms that stay non-localizable\n if not old_term.localizable and not term.localizable:\n return\n\n # If localizable term becomes non-localizable, obsolete its Entity\n if old_term.localizable and not term.localizable:\n old_term.obsolete_entity()\n\n # If non-localizable term becomes localizable, create a corresponding Entity\n elif not old_term.localizable and term.localizable:\n term.create_entity()\n\n # If relevant changes are made to the localizable term that stays localizable\n else:\n # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.\n if old_term.text != term.text:\n old_term.obsolete_entity()\n term.create_entity()\n\n # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated.\n elif (\n old_term.part_of_speech != term.part_of_speech\n or old_term.definition != term.definition\n or old_term.usage != term.usage\n ):\n entity = term.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.comment = term.entity_comment()\n entity.save(update_fields=[\"comment\"])\n\n return\n\n update_terminology_project_stats()\n\n def handle_term_create(self):\n \"\"\"\n After creating a new localizable Term, create its Entity\n \"\"\"\n self.create_entity()\n update_terminology_project_stats()\n\n def save(self, *args, **kwargs):\n created = self.pk is None\n\n if not created:\n self.handle_term_update()\n\n super().save(*args, **kwargs)\n\n if created and self.localizable:\n self.handle_term_create()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting a Term, obsolete its Entity\n \"\"\"\n self.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n def __str__(self):\n return self.text\n\n\nclass TermTranslation(models.Model):\n term = models.ForeignKey(Term, models.CASCADE, related_name=\"translations\")\n locale = models.ForeignKey(\"base.Locale\", models.CASCADE, related_name=\"terms\")\n\n text = models.CharField(max_length=255)\n\n def __str__(self):\n return self.text\n", "path": "pontoon/terminology/models.py"}], "after_files": [{"content": "import re\n\nfrom django.db import models\n\nfrom pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource\n\n\ndef update_terminology_project_stats():\n resource = Resource.objects.get(project__slug=\"terminology\")\n project = resource.project\n total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()\n resource.total_strings = total_strings\n resource.save(update_fields=[\"total_strings\"])\n\n translated_resources = list(TranslatedResource.objects.filter(resource=resource))\n\n for translated_resource in translated_resources:\n translated_resource.calculate_stats(save=False)\n\n TranslatedResource.objects.bulk_update(\n translated_resources,\n [\n \"total_strings\",\n \"approved_strings\",\n \"pretranslated_strings\",\n \"strings_with_errors\",\n \"strings_with_warnings\",\n \"unreviewed_strings\",\n ],\n )\n\n project.aggregate_stats()\n\n for locale in project.locales.all():\n locale.aggregate_stats()\n\n for projectlocale in ProjectLocale.objects.filter(project=project):\n projectlocale.aggregate_stats()\n\n\nclass TermQuerySet(models.QuerySet):\n def for_string(self, string):\n terms = []\n available_terms = self.exclude(definition=\"\").exclude(forbidden=True)\n\n for term in available_terms:\n term_text = r\"\\b\" + re.escape(term.text)\n flags = 0 if term.case_sensitive else re.IGNORECASE\n\n if re.search(term_text, string, flags):\n terms.append(term)\n\n return terms\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting Terms, obsolete their Entities\n \"\"\"\n for term in self:\n term.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n\nclass Term(models.Model):\n text = models.CharField(max_length=255)\n entity = models.OneToOneField(\"base.Entity\", models.SET_NULL, null=True, blank=True)\n\n class PartOfSpeech(models.TextChoices):\n ADJECTIVE = \"adjective\", \"Adjective\"\n ADVERB = \"adverb\", \"Adverb\"\n NOUN = \"noun\", \"Noun\"\n VERB = \"verb\", \"Verb\"\n\n part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)\n\n definition = models.TextField(blank=True)\n usage = models.TextField(blank=True)\n notes = models.TextField(blank=True)\n\n class Status(models.TextChoices):\n APPROVED = \"approved\", \"Approved\"\n NEW = \"new\", \"New\"\n OBSOLETE = \"obsolete\", \"Obsolete\"\n REVIEW = \"review\", \"Review\"\n\n status = models.CharField(\n max_length=20, choices=Status.choices, null=True, blank=True\n )\n\n case_sensitive = models.BooleanField(default=False)\n do_not_translate = models.BooleanField(default=False)\n forbidden = models.BooleanField(default=False)\n\n created_at = models.DateTimeField(auto_now_add=True)\n created_by = models.ForeignKey(\n \"auth.User\", models.SET_NULL, related_name=\"terms\", null=True, blank=True\n )\n\n objects = TermQuerySet.as_manager()\n\n def translation(self, locale):\n \"\"\"\n Get locale translation of the term.\n \"\"\"\n if self.do_not_translate:\n return self.text\n else:\n try:\n return self.translations.get(locale=locale).text\n except (AttributeError, TermTranslation.DoesNotExist):\n return None\n\n @property\n def localizable(self):\n \"\"\"\n Check if the term is localizable.\n \"\"\"\n if self.do_not_translate:\n return False\n\n if self.forbidden:\n return False\n\n if self.definition == \"\":\n return False\n\n return True\n\n def entity_comment(self):\n \"\"\"\n Generate entity comment from the term.\n \"\"\"\n comment = \"{}. {}.\".format(\n self.part_of_speech.capitalize(),\n self.definition.capitalize().rstrip(\".\"),\n )\n\n if self.usage:\n comment += \" E.g. {}.\".format(self.usage.capitalize().rstrip(\".\"))\n\n return comment\n\n def create_entity(self):\n \"\"\"\n An Entity must be created (or deobsoleted) for a Term according to the\n following rules:\n - Entity.string contains content of Term.text.\n - Entity.comment contains joint content of several fields:\n Term.part_of_speech. Term.definition. E.g.: Term.usage.\n \"\"\"\n resource = Resource.objects.get(project__slug=\"terminology\")\n\n entity, created = Entity.objects.get_or_create(\n string=self.text,\n comment=self.entity_comment(),\n resource=resource,\n )\n\n # Using update() to avoid circular Term.save() call\n Term.objects.filter(pk=self.pk).update(entity_id=entity.id)\n entity.term = self\n\n if not created:\n entity.obsolete = False\n\n entity.save()\n\n # Make sure Term entities are ordered alphabetically\n entities = list(\n Entity.objects.filter(resource=resource, obsolete=False).order_by(\"string\")\n )\n for index, e in enumerate(entities):\n e.order = index\n Entity.objects.bulk_update(entities, [\"order\"])\n\n def obsolete_entity(self):\n entity = self.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.obsolete = True\n entity.save(update_fields=[\"obsolete\"])\n\n def handle_term_update(self):\n \"\"\"\n Before updating an existing Term, update its Entity if neccessary\n \"\"\"\n term = self\n old_term = Term.objects.get(pk=term.pk)\n\n # Ignore changes to non-localizable terms that stay non-localizable\n if not old_term.localizable and not term.localizable:\n return\n\n # If localizable term becomes non-localizable, obsolete its Entity\n if old_term.localizable and not term.localizable:\n old_term.obsolete_entity()\n\n # If non-localizable term becomes localizable, create a corresponding Entity\n elif not old_term.localizable and term.localizable:\n term.create_entity()\n\n # If relevant changes are made to the localizable term that stays localizable\n else:\n # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.\n if old_term.text != term.text:\n old_term.obsolete_entity()\n term.create_entity()\n\n # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated.\n elif (\n old_term.part_of_speech != term.part_of_speech\n or old_term.definition != term.definition\n or old_term.usage != term.usage\n ):\n entity = term.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.comment = term.entity_comment()\n entity.save(update_fields=[\"comment\"])\n\n return\n\n update_terminology_project_stats()\n\n def handle_term_create(self):\n \"\"\"\n After creating a new localizable Term, create its Entity\n \"\"\"\n self.create_entity()\n update_terminology_project_stats()\n\n def save(self, *args, **kwargs):\n created = self.pk is None\n\n if not created:\n self.handle_term_update()\n\n super().save(*args, **kwargs)\n\n if created and self.localizable:\n self.handle_term_create()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting a Term, obsolete its Entity\n \"\"\"\n self.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n def __str__(self):\n return self.text\n\n\nclass TermTranslation(models.Model):\n term = models.ForeignKey(Term, models.CASCADE, related_name=\"translations\")\n locale = models.ForeignKey(\"base.Locale\", models.CASCADE, related_name=\"terms\")\n\n text = models.CharField(max_length=255)\n\n def __str__(self):\n return self.text\n", "path": "pontoon/terminology/models.py"}]}
| 3,441 | 140 |
gh_patches_debug_64705
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-extras-3141
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lvg fails if pvs option omitted when state=absent
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
`lvg` module
##### ANSIBLE VERSION
```
ansible 2.1.2.0
config file = /Users/jsok/workspace/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
CentOS 6.7
##### SUMMARY
The `pvs` option is not necessary when `state=absent`, however failing to supply an empty string will cause the module to fail.
##### STEPS TO REPRODUCE
```
---
- name: Remove a volume group
hosts: localhost
tasks:
- name: Remove vg01
lvg:
vg: vg01
state: absent
```
##### EXPECTED RESULTS
The volume group is removed successfully.
##### ACTUAL RESULTS
```
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "", "module_stdout": "Traceback (most recent call last):\r\n File \"/tmp/ansible_tj_9JM/ansible_module_lvg.py\", line 255, in <module>\r\n main()\r\n File \"/tmp/ansible_tj_9JM/ansible_module_lvg.py\", line 140, in main\r\n for idx, dev in enumerate(dev_list):\r\nUnboundLocalError: local variable 'dev_list' referenced before assignment\r\n", "msg": "MODULE FAILURE"}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `system/lvg.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
5 # based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
6 #
7 # This file is part of Ansible
8 #
9 # Ansible is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # Ansible is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
21
22 DOCUMENTATION = '''
23 ---
24 author: "Alexander Bulimov (@abulimov)"
25 module: lvg
26 short_description: Configure LVM volume groups
27 description:
28 - This module creates, removes or resizes volume groups.
29 version_added: "1.1"
30 options:
31 vg:
32 description:
33 - The name of the volume group.
34 required: true
35 pvs:
36 description:
37 - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group.
38 - The module will take care of running pvcreate if needed.
39 required: false
40 pesize:
41 description:
42 - The size of the physical extent in megabytes. Must be a power of 2.
43 default: 4
44 required: false
45 vg_options:
46 description:
47 - Additional options to pass to C(vgcreate) when creating the volume group.
48 default: null
49 required: false
50 version_added: "1.6"
51 state:
52 choices: [ "present", "absent" ]
53 default: present
54 description:
55 - Control if the volume group exists.
56 required: false
57 force:
58 choices: [ "yes", "no" ]
59 default: "no"
60 description:
61 - If yes, allows to remove volume group with logical volumes.
62 required: false
63 notes:
64 - module does not modify PE size for already present volume group
65 '''
66
67 EXAMPLES = '''
68 # Create a volume group on top of /dev/sda1 with physical extent size = 32MB.
69 - lvg: vg=vg.services pvs=/dev/sda1 pesize=32
70
71 # Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
72 # If, for example, we already have VG vg.services on top of /dev/sdb1,
73 # this VG will be extended by /dev/sdc5. Or if vg.services was created on
74 # top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
75 # and then reduce by /dev/sda5.
76 - lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5
77
78 # Remove a volume group with name vg.services.
79 - lvg: vg=vg.services state=absent
80 '''
81
82 def parse_vgs(data):
83 vgs = []
84 for line in data.splitlines():
85 parts = line.strip().split(';')
86 vgs.append({
87 'name': parts[0],
88 'pv_count': int(parts[1]),
89 'lv_count': int(parts[2]),
90 })
91 return vgs
92
93 def find_mapper_device_name(module, dm_device):
94 dmsetup_cmd = module.get_bin_path('dmsetup', True)
95 mapper_prefix = '/dev/mapper/'
96 rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
97 if rc != 0:
98 module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
99 mapper_device = mapper_prefix + dm_name.rstrip()
100 return mapper_device
101
102 def parse_pvs(module, data):
103 pvs = []
104 dm_prefix = '/dev/dm-'
105 for line in data.splitlines():
106 parts = line.strip().split(';')
107 if parts[0].startswith(dm_prefix):
108 parts[0] = find_mapper_device_name(module, parts[0])
109 pvs.append({
110 'name': parts[0],
111 'vg_name': parts[1],
112 })
113 return pvs
114
115 def main():
116 module = AnsibleModule(
117 argument_spec = dict(
118 vg=dict(required=True),
119 pvs=dict(type='list'),
120 pesize=dict(type='int', default=4),
121 vg_options=dict(default=''),
122 state=dict(choices=["absent", "present"], default='present'),
123 force=dict(type='bool', default='no'),
124 ),
125 supports_check_mode=True,
126 )
127
128 vg = module.params['vg']
129 state = module.params['state']
130 force = module.boolean(module.params['force'])
131 pesize = module.params['pesize']
132 vgoptions = module.params['vg_options'].split()
133
134 if module.params['pvs']:
135 dev_list = module.params['pvs']
136 elif state == 'present':
137 module.fail_json(msg="No physical volumes given.")
138
139 # LVM always uses real paths not symlinks so replace symlinks with actual path
140 for idx, dev in enumerate(dev_list):
141 dev_list[idx] = os.path.realpath(dev)
142
143 if state=='present':
144 ### check given devices
145 for test_dev in dev_list:
146 if not os.path.exists(test_dev):
147 module.fail_json(msg="Device %s not found."%test_dev)
148
149 ### get pv list
150 pvs_cmd = module.get_bin_path('pvs', True)
151 rc,current_pvs,err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';'" % pvs_cmd)
152 if rc != 0:
153 module.fail_json(msg="Failed executing pvs command.",rc=rc, err=err)
154
155 ### check pv for devices
156 pvs = parse_pvs(module, current_pvs)
157 used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ]
158 if used_pvs:
159 module.fail_json(msg="Device %s is already in %s volume group."%(used_pvs[0]['name'],used_pvs[0]['vg_name']))
160
161 vgs_cmd = module.get_bin_path('vgs', True)
162 rc,current_vgs,err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
163
164 if rc != 0:
165 module.fail_json(msg="Failed executing vgs command.",rc=rc, err=err)
166
167 changed = False
168
169 vgs = parse_vgs(current_vgs)
170
171 for test_vg in vgs:
172 if test_vg['name'] == vg:
173 this_vg = test_vg
174 break
175 else:
176 this_vg = None
177
178 if this_vg is None:
179 if state == 'present':
180 ### create VG
181 if module.check_mode:
182 changed = True
183 else:
184 ### create PV
185 pvcreate_cmd = module.get_bin_path('pvcreate', True)
186 for current_dev in dev_list:
187 rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd,current_dev))
188 if rc == 0:
189 changed = True
190 else:
191 module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
192 vgcreate_cmd = module.get_bin_path('vgcreate')
193 rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)
194 if rc == 0:
195 changed = True
196 else:
197 module.fail_json(msg="Creating volume group '%s' failed"%vg, rc=rc, err=err)
198 else:
199 if state == 'absent':
200 if module.check_mode:
201 module.exit_json(changed=True)
202 else:
203 if this_vg['lv_count'] == 0 or force:
204 ### remove VG
205 vgremove_cmd = module.get_bin_path('vgremove', True)
206 rc,_,err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
207 if rc == 0:
208 module.exit_json(changed=True)
209 else:
210 module.fail_json(msg="Failed to remove volume group %s"%(vg),rc=rc, err=err)
211 else:
212 module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg))
213
214 ### resize VG
215 current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ]
216 devs_to_remove = list(set(current_devs) - set(dev_list))
217 devs_to_add = list(set(dev_list) - set(current_devs))
218
219 if devs_to_add or devs_to_remove:
220 if module.check_mode:
221 changed = True
222 else:
223 if devs_to_add:
224 devs_to_add_string = ' '.join(devs_to_add)
225 ### create PV
226 pvcreate_cmd = module.get_bin_path('pvcreate', True)
227 for current_dev in devs_to_add:
228 rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd, current_dev))
229 if rc == 0:
230 changed = True
231 else:
232 module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err)
233 ### add PV to our VG
234 vgextend_cmd = module.get_bin_path('vgextend', True)
235 rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
236 if rc == 0:
237 changed = True
238 else:
239 module.fail_json(msg="Unable to extend %s by %s."%(vg, devs_to_add_string),rc=rc,err=err)
240
241 ### remove some PV from our VG
242 if devs_to_remove:
243 devs_to_remove_string = ' '.join(devs_to_remove)
244 vgreduce_cmd = module.get_bin_path('vgreduce', True)
245 rc,_,err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
246 if rc == 0:
247 changed = True
248 else:
249 module.fail_json(msg="Unable to reduce %s by %s."%(vg, devs_to_remove_string),rc=rc,err=err)
250
251 module.exit_json(changed=changed)
252
253 # import module snippets
254 from ansible.module_utils.basic import *
255 main()
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/system/lvg.py b/system/lvg.py
--- a/system/lvg.py
+++ b/system/lvg.py
@@ -131,6 +131,7 @@
pesize = module.params['pesize']
vgoptions = module.params['vg_options'].split()
+ dev_list = []
if module.params['pvs']:
dev_list = module.params['pvs']
elif state == 'present':
|
{"golden_diff": "diff --git a/system/lvg.py b/system/lvg.py\n--- a/system/lvg.py\n+++ b/system/lvg.py\n@@ -131,6 +131,7 @@\n pesize = module.params['pesize']\n vgoptions = module.params['vg_options'].split()\n \n+ dev_list = []\n if module.params['pvs']:\n dev_list = module.params['pvs']\n elif state == 'present':\n", "issue": "lvg fails if pvs option omitted when state=absent\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\n`lvg` module\n##### ANSIBLE VERSION\n\n```\nansible 2.1.2.0\n config file = /Users/jsok/workspace/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nN/A\n##### OS / ENVIRONMENT\n\nCentOS 6.7\n##### SUMMARY\n\nThe `pvs` option is not necessary when `state=absent`, however failing to supply an empty string will cause the module to fail.\n##### STEPS TO REPRODUCE\n\n```\n\n---\n- name: Remove a volume group\n hosts: localhost\n tasks:\n - name: Remove vg01\n lvg:\n vg: vg01\n state: absent\n```\n##### EXPECTED RESULTS\n\nThe volume group is removed successfully.\n##### ACTUAL RESULTS\n\n```\nfatal: [localhost]: FAILED! => {\"changed\": false, \"failed\": true, \"module_stderr\": \"\", \"module_stdout\": \"Traceback (most recent call last):\\r\\n File \\\"/tmp/ansible_tj_9JM/ansible_module_lvg.py\\\", line 255, in <module>\\r\\n main()\\r\\n File \\\"/tmp/ansible_tj_9JM/ansible_module_lvg.py\\\", line 140, in main\\r\\n for idx, dev in enumerate(dev_list):\\r\\nUnboundLocalError: local variable 'dev_list' referenced before assignment\\r\\n\", \"msg\": \"MODULE FAILURE\"}\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>\n# based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nauthor: \"Alexander Bulimov (@abulimov)\"\nmodule: lvg\nshort_description: Configure LVM volume groups\ndescription:\n - This module creates, removes or resizes volume groups.\nversion_added: \"1.1\"\noptions:\n vg:\n description:\n - The name of the volume group.\n required: true\n pvs:\n description:\n - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group.\n - The module will take care of running pvcreate if needed. \n required: false\n pesize:\n description:\n - The size of the physical extent in megabytes. Must be a power of 2.\n default: 4\n required: false\n vg_options:\n description:\n - Additional options to pass to C(vgcreate) when creating the volume group.\n default: null\n required: false\n version_added: \"1.6\"\n state:\n choices: [ \"present\", \"absent\" ]\n default: present\n description:\n - Control if the volume group exists.\n required: false\n force:\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n description:\n - If yes, allows to remove volume group with logical volumes.\n required: false\nnotes:\n - module does not modify PE size for already present volume group\n'''\n\nEXAMPLES = '''\n# Create a volume group on top of /dev/sda1 with physical extent size = 32MB.\n- lvg: vg=vg.services pvs=/dev/sda1 pesize=32\n\n# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.\n# If, for example, we already have VG vg.services on top of /dev/sdb1,\n# this VG will be extended by /dev/sdc5. Or if vg.services was created on\n# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,\n# and then reduce by /dev/sda5.\n- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5\n\n# Remove a volume group with name vg.services.\n- lvg: vg=vg.services state=absent\n'''\n\ndef parse_vgs(data):\n vgs = []\n for line in data.splitlines():\n parts = line.strip().split(';')\n vgs.append({\n 'name': parts[0],\n 'pv_count': int(parts[1]),\n 'lv_count': int(parts[2]),\n })\n return vgs\n\ndef find_mapper_device_name(module, dm_device):\n dmsetup_cmd = module.get_bin_path('dmsetup', True)\n mapper_prefix = '/dev/mapper/'\n rc, dm_name, err = module.run_command(\"%s info -C --noheadings -o name %s\" % (dmsetup_cmd, dm_device))\n if rc != 0:\n module.fail_json(msg=\"Failed executing dmsetup command.\", rc=rc, err=err)\n mapper_device = mapper_prefix + dm_name.rstrip()\n return mapper_device\n\ndef parse_pvs(module, data):\n pvs = []\n dm_prefix = '/dev/dm-'\n for line in data.splitlines():\n parts = line.strip().split(';')\n if parts[0].startswith(dm_prefix):\n parts[0] = find_mapper_device_name(module, parts[0])\n pvs.append({\n 'name': parts[0],\n 'vg_name': parts[1],\n })\n return pvs\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n vg=dict(required=True),\n pvs=dict(type='list'),\n pesize=dict(type='int', default=4),\n vg_options=dict(default=''),\n state=dict(choices=[\"absent\", \"present\"], default='present'),\n force=dict(type='bool', default='no'),\n ),\n supports_check_mode=True,\n )\n\n vg = module.params['vg']\n state = module.params['state']\n force = module.boolean(module.params['force'])\n pesize = module.params['pesize']\n vgoptions = module.params['vg_options'].split()\n\n if module.params['pvs']:\n dev_list = module.params['pvs']\n elif state == 'present':\n module.fail_json(msg=\"No physical volumes given.\")\n\n # LVM always uses real paths not symlinks so replace symlinks with actual path\n for idx, dev in enumerate(dev_list):\n dev_list[idx] = os.path.realpath(dev)\n\n if state=='present':\n ### check given devices\n for test_dev in dev_list:\n if not os.path.exists(test_dev):\n module.fail_json(msg=\"Device %s not found.\"%test_dev)\n\n ### get pv list\n pvs_cmd = module.get_bin_path('pvs', True)\n rc,current_pvs,err = module.run_command(\"%s --noheadings -o pv_name,vg_name --separator ';'\" % pvs_cmd)\n if rc != 0:\n module.fail_json(msg=\"Failed executing pvs command.\",rc=rc, err=err)\n\n ### check pv for devices\n pvs = parse_pvs(module, current_pvs)\n used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ]\n if used_pvs:\n module.fail_json(msg=\"Device %s is already in %s volume group.\"%(used_pvs[0]['name'],used_pvs[0]['vg_name']))\n\n vgs_cmd = module.get_bin_path('vgs', True)\n rc,current_vgs,err = module.run_command(\"%s --noheadings -o vg_name,pv_count,lv_count --separator ';'\" % vgs_cmd)\n\n if rc != 0:\n module.fail_json(msg=\"Failed executing vgs command.\",rc=rc, err=err)\n\n changed = False\n\n vgs = parse_vgs(current_vgs)\n\n for test_vg in vgs:\n if test_vg['name'] == vg:\n this_vg = test_vg\n break\n else:\n this_vg = None\n\n if this_vg is None:\n if state == 'present':\n ### create VG\n if module.check_mode:\n changed = True\n else:\n ### create PV\n pvcreate_cmd = module.get_bin_path('pvcreate', True)\n for current_dev in dev_list:\n rc,_,err = module.run_command(\"%s -f %s\" % (pvcreate_cmd,current_dev))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating physical volume '%s' failed\" % current_dev, rc=rc, err=err)\n vgcreate_cmd = module.get_bin_path('vgcreate')\n rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating volume group '%s' failed\"%vg, rc=rc, err=err)\n else:\n if state == 'absent':\n if module.check_mode:\n module.exit_json(changed=True)\n else:\n if this_vg['lv_count'] == 0 or force:\n ### remove VG\n vgremove_cmd = module.get_bin_path('vgremove', True)\n rc,_,err = module.run_command(\"%s --force %s\" % (vgremove_cmd, vg))\n if rc == 0:\n module.exit_json(changed=True)\n else:\n module.fail_json(msg=\"Failed to remove volume group %s\"%(vg),rc=rc, err=err)\n else:\n module.fail_json(msg=\"Refuse to remove non-empty volume group %s without force=yes\"%(vg))\n\n ### resize VG\n current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ]\n devs_to_remove = list(set(current_devs) - set(dev_list))\n devs_to_add = list(set(dev_list) - set(current_devs))\n\n if devs_to_add or devs_to_remove:\n if module.check_mode:\n changed = True\n else:\n if devs_to_add:\n devs_to_add_string = ' '.join(devs_to_add)\n ### create PV\n pvcreate_cmd = module.get_bin_path('pvcreate', True)\n for current_dev in devs_to_add:\n rc,_,err = module.run_command(\"%s -f %s\" % (pvcreate_cmd, current_dev))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating physical volume '%s' failed\"%current_dev, rc=rc, err=err)\n ### add PV to our VG\n vgextend_cmd = module.get_bin_path('vgextend', True)\n rc,_,err = module.run_command(\"%s %s %s\" % (vgextend_cmd, vg, devs_to_add_string))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Unable to extend %s by %s.\"%(vg, devs_to_add_string),rc=rc,err=err)\n\n ### remove some PV from our VG\n if devs_to_remove:\n devs_to_remove_string = ' '.join(devs_to_remove)\n vgreduce_cmd = module.get_bin_path('vgreduce', True)\n rc,_,err = module.run_command(\"%s --force %s %s\" % (vgreduce_cmd, vg, devs_to_remove_string))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Unable to reduce %s by %s.\"%(vg, devs_to_remove_string),rc=rc,err=err)\n\n module.exit_json(changed=changed)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "system/lvg.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>\n# based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nauthor: \"Alexander Bulimov (@abulimov)\"\nmodule: lvg\nshort_description: Configure LVM volume groups\ndescription:\n - This module creates, removes or resizes volume groups.\nversion_added: \"1.1\"\noptions:\n vg:\n description:\n - The name of the volume group.\n required: true\n pvs:\n description:\n - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group.\n - The module will take care of running pvcreate if needed. \n required: false\n pesize:\n description:\n - The size of the physical extent in megabytes. Must be a power of 2.\n default: 4\n required: false\n vg_options:\n description:\n - Additional options to pass to C(vgcreate) when creating the volume group.\n default: null\n required: false\n version_added: \"1.6\"\n state:\n choices: [ \"present\", \"absent\" ]\n default: present\n description:\n - Control if the volume group exists.\n required: false\n force:\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n description:\n - If yes, allows to remove volume group with logical volumes.\n required: false\nnotes:\n - module does not modify PE size for already present volume group\n'''\n\nEXAMPLES = '''\n# Create a volume group on top of /dev/sda1 with physical extent size = 32MB.\n- lvg: vg=vg.services pvs=/dev/sda1 pesize=32\n\n# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.\n# If, for example, we already have VG vg.services on top of /dev/sdb1,\n# this VG will be extended by /dev/sdc5. Or if vg.services was created on\n# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,\n# and then reduce by /dev/sda5.\n- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5\n\n# Remove a volume group with name vg.services.\n- lvg: vg=vg.services state=absent\n'''\n\ndef parse_vgs(data):\n vgs = []\n for line in data.splitlines():\n parts = line.strip().split(';')\n vgs.append({\n 'name': parts[0],\n 'pv_count': int(parts[1]),\n 'lv_count': int(parts[2]),\n })\n return vgs\n\ndef find_mapper_device_name(module, dm_device):\n dmsetup_cmd = module.get_bin_path('dmsetup', True)\n mapper_prefix = '/dev/mapper/'\n rc, dm_name, err = module.run_command(\"%s info -C --noheadings -o name %s\" % (dmsetup_cmd, dm_device))\n if rc != 0:\n module.fail_json(msg=\"Failed executing dmsetup command.\", rc=rc, err=err)\n mapper_device = mapper_prefix + dm_name.rstrip()\n return mapper_device\n\ndef parse_pvs(module, data):\n pvs = []\n dm_prefix = '/dev/dm-'\n for line in data.splitlines():\n parts = line.strip().split(';')\n if parts[0].startswith(dm_prefix):\n parts[0] = find_mapper_device_name(module, parts[0])\n pvs.append({\n 'name': parts[0],\n 'vg_name': parts[1],\n })\n return pvs\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n vg=dict(required=True),\n pvs=dict(type='list'),\n pesize=dict(type='int', default=4),\n vg_options=dict(default=''),\n state=dict(choices=[\"absent\", \"present\"], default='present'),\n force=dict(type='bool', default='no'),\n ),\n supports_check_mode=True,\n )\n\n vg = module.params['vg']\n state = module.params['state']\n force = module.boolean(module.params['force'])\n pesize = module.params['pesize']\n vgoptions = module.params['vg_options'].split()\n\n dev_list = []\n if module.params['pvs']:\n dev_list = module.params['pvs']\n elif state == 'present':\n module.fail_json(msg=\"No physical volumes given.\")\n\n # LVM always uses real paths not symlinks so replace symlinks with actual path\n for idx, dev in enumerate(dev_list):\n dev_list[idx] = os.path.realpath(dev)\n\n if state=='present':\n ### check given devices\n for test_dev in dev_list:\n if not os.path.exists(test_dev):\n module.fail_json(msg=\"Device %s not found.\"%test_dev)\n\n ### get pv list\n pvs_cmd = module.get_bin_path('pvs', True)\n rc,current_pvs,err = module.run_command(\"%s --noheadings -o pv_name,vg_name --separator ';'\" % pvs_cmd)\n if rc != 0:\n module.fail_json(msg=\"Failed executing pvs command.\",rc=rc, err=err)\n\n ### check pv for devices\n pvs = parse_pvs(module, current_pvs)\n used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ]\n if used_pvs:\n module.fail_json(msg=\"Device %s is already in %s volume group.\"%(used_pvs[0]['name'],used_pvs[0]['vg_name']))\n\n vgs_cmd = module.get_bin_path('vgs', True)\n rc,current_vgs,err = module.run_command(\"%s --noheadings -o vg_name,pv_count,lv_count --separator ';'\" % vgs_cmd)\n\n if rc != 0:\n module.fail_json(msg=\"Failed executing vgs command.\",rc=rc, err=err)\n\n changed = False\n\n vgs = parse_vgs(current_vgs)\n\n for test_vg in vgs:\n if test_vg['name'] == vg:\n this_vg = test_vg\n break\n else:\n this_vg = None\n\n if this_vg is None:\n if state == 'present':\n ### create VG\n if module.check_mode:\n changed = True\n else:\n ### create PV\n pvcreate_cmd = module.get_bin_path('pvcreate', True)\n for current_dev in dev_list:\n rc,_,err = module.run_command(\"%s -f %s\" % (pvcreate_cmd,current_dev))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating physical volume '%s' failed\" % current_dev, rc=rc, err=err)\n vgcreate_cmd = module.get_bin_path('vgcreate')\n rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating volume group '%s' failed\"%vg, rc=rc, err=err)\n else:\n if state == 'absent':\n if module.check_mode:\n module.exit_json(changed=True)\n else:\n if this_vg['lv_count'] == 0 or force:\n ### remove VG\n vgremove_cmd = module.get_bin_path('vgremove', True)\n rc,_,err = module.run_command(\"%s --force %s\" % (vgremove_cmd, vg))\n if rc == 0:\n module.exit_json(changed=True)\n else:\n module.fail_json(msg=\"Failed to remove volume group %s\"%(vg),rc=rc, err=err)\n else:\n module.fail_json(msg=\"Refuse to remove non-empty volume group %s without force=yes\"%(vg))\n\n ### resize VG\n current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ]\n devs_to_remove = list(set(current_devs) - set(dev_list))\n devs_to_add = list(set(dev_list) - set(current_devs))\n\n if devs_to_add or devs_to_remove:\n if module.check_mode:\n changed = True\n else:\n if devs_to_add:\n devs_to_add_string = ' '.join(devs_to_add)\n ### create PV\n pvcreate_cmd = module.get_bin_path('pvcreate', True)\n for current_dev in devs_to_add:\n rc,_,err = module.run_command(\"%s -f %s\" % (pvcreate_cmd, current_dev))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating physical volume '%s' failed\"%current_dev, rc=rc, err=err)\n ### add PV to our VG\n vgextend_cmd = module.get_bin_path('vgextend', True)\n rc,_,err = module.run_command(\"%s %s %s\" % (vgextend_cmd, vg, devs_to_add_string))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Unable to extend %s by %s.\"%(vg, devs_to_add_string),rc=rc,err=err)\n\n ### remove some PV from our VG\n if devs_to_remove:\n devs_to_remove_string = ' '.join(devs_to_remove)\n vgreduce_cmd = module.get_bin_path('vgreduce', True)\n rc,_,err = module.run_command(\"%s --force %s %s\" % (vgreduce_cmd, vg, devs_to_remove_string))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Unable to reduce %s by %s.\"%(vg, devs_to_remove_string),rc=rc,err=err)\n\n module.exit_json(changed=changed)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "system/lvg.py"}]}
| 3,654 | 96 |
gh_patches_debug_18537
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-1306
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove usage of `distutils` which has been deprecated
It turns out that `distutils` has been deprecated and will be removed in Python 3.12. We should stop using it prior to our 0.7 release. Fortunately I only found one usage in: `plasmapy/plasma/sources/openpmd_hdf5.py`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/plasma/sources/openpmd_hdf5.py`
Content:
```
1 __all__ = ["HDF5Reader"]
2
3 import astropy.units as u
4 import numpy as np
5 import os
6
7 from distutils.version import StrictVersion
8
9 from plasmapy.plasma.exceptions import DataStandardError
10 from plasmapy.plasma.plasma_base import GenericPlasma
11
12 _OUTDATED_VERSION = "1.1.0"
13 _NEWER_VERSION = "2.0.0"
14
15 # This is the order what OpenPMD uses to store unit
16 # dimensions for a record.
17 _UNITS = (u.meter, u.kilogram, u.second, u.ampere, u.Kelvin, u.mol, u.candela)
18
19
20 def _fetch_units(openPMD_dims):
21 """Converts a collection of OpenPMD dimensions to astropy.units."""
22
23 units = u.dimensionless_unscaled
24 for factor, unit in zip(openPMD_dims, _UNITS):
25 units *= unit ** factor
26 units, *_ = units.compose()
27 return units
28
29
30 def _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):
31 """Checks if the passed version is supported or not."""
32
33 parsed_version = StrictVersion(openPMD_version)
34 outdated_version = StrictVersion(outdated)
35 newer_version = StrictVersion(newer)
36 return outdated_version <= parsed_version < newer_version
37
38
39 class HDF5Reader(GenericPlasma):
40 """
41 .. _OpenPMD: https://www.openpmd.org/
42
43 Core class for accessing various attributes on HDF5 files that
44 are based on OpenPMD_ standards.
45
46 Parameters
47 ----------
48 hdf5 : `str`
49 Path to HDF5 file.
50
51 **kwargs
52 Any keyword accepted by `GenericPlasma`.
53
54 """
55
56 def __init__(self, hdf5, **kwargs):
57 super().__init__(**kwargs)
58
59 if not os.path.isfile(hdf5):
60 raise FileNotFoundError(f"Could not find file: '{hdf5}'")
61 try:
62 import h5py
63 except (ImportError, ModuleNotFoundError) as e:
64 from plasmapy.optional_deps import h5py_import_error
65
66 raise ImportError(h5py_import_error) from e
67
68 h5 = h5py.File(hdf5, "r")
69 self.h5 = h5
70
71 self._check_valid_openpmd_version()
72
73 self.subname = tuple(self.h5["data"])[0]
74
75 def __enter__(self):
76 return self.h5
77
78 def close(self):
79 self.h5.close()
80
81 def __exit__(self):
82 self.h5.close()
83
84 def _check_valid_openpmd_version(self):
85 try:
86 openPMD_version = self.h5.attrs["openPMD"].decode("utf-8")
87 if _valid_version(openPMD_version):
88 return True
89 else:
90 raise DataStandardError(
91 f"We currently only support HDF5 versions"
92 f"starting from v{_OUTDATED_VERSION} and "
93 f"lower than v{_NEWER_VERSION}. You can "
94 f"however convert your HDF5 to a supported "
95 f"version. For more information; see "
96 f"https://github.com/openPMD/openPMD-updater"
97 )
98 except KeyError:
99 raise DataStandardError(
100 "Input HDF5 file does not go on with standards defined by OpenPMD"
101 )
102
103 @property
104 def electric_field(self):
105 """
106 An (x, y, z) array containing electric field data. (Returned as an astropy
107 `~astropy.units.Quantity`.)
108 """
109 path = f"data/{self.subname}/fields/E"
110 if path in self.h5:
111 units = _fetch_units(self.h5[path].attrs["unitDimension"])
112 axes = [self.h5[path][axis] for axis in self.h5[path]]
113 return np.array(axes) * units
114 else:
115 raise AttributeError("No electric field data available in HDF5 file")
116
117 @property
118 def charge_density(self):
119 """
120 An array containing charge density data. (Returned as an astropy
121 `~astropy.units.Quantity`.)
122 """
123 path = f"data/{self.subname}/fields/rho"
124 if path in self.h5:
125 units = _fetch_units(self.h5[path].attrs["unitDimension"])
126 return np.array(self.h5[path]) * units
127 else:
128 raise AttributeError("No charge density data available in HDF5 file")
129
130 @property
131 def magnetic_field(self):
132 path = f"data/{self.subname}/fields/B"
133 if path in self.h5:
134 units = _fetch_units(self.h5[path].attrs["unitDimension"])
135 axes = [self.h5[path][axis] for axis in self.h5[path]]
136 return np.array(axes) * units
137 else:
138 raise AttributeError("No magnetic field data available in HDF5 file")
139
140 @property
141 def electric_current(self):
142 path = f"data/{self.subname}/fields/J"
143 if path in self.h5:
144 units = _fetch_units(self.h5[path].attrs["unitDimension"])
145 axes = [self.h5[path][axis] for axis in self.h5[path]]
146 return np.array(axes) * units
147 else:
148 raise AttributeError("No electric current data available in HDF5 file")
149
150 @classmethod
151 def is_datasource_for(cls, **kwargs):
152 if "hdf5" not in kwargs:
153 return False
154
155 hdf5 = kwargs.get("hdf5")
156 openPMD = kwargs.get("openPMD")
157
158 isfile = os.path.isfile(hdf5)
159 if not isfile:
160 raise FileNotFoundError(f"Could not find file: '{hdf5}'")
161
162 if "openPMD" not in kwargs:
163 try:
164 import h5py
165 except (ImportError, ModuleNotFoundError) as e:
166 from plasmapy.optional_deps import h5py_import_error
167
168 raise ImportError(h5py_import_error) from e
169
170 h5 = h5py.File(hdf5, "r")
171 try:
172 openPMD = h5.attrs["openPMD"]
173 except KeyError:
174 openPMD = False
175
176 return openPMD
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plasmapy/plasma/sources/openpmd_hdf5.py b/plasmapy/plasma/sources/openpmd_hdf5.py
--- a/plasmapy/plasma/sources/openpmd_hdf5.py
+++ b/plasmapy/plasma/sources/openpmd_hdf5.py
@@ -4,7 +4,7 @@
import numpy as np
import os
-from distutils.version import StrictVersion
+from packaging.version import Version
from plasmapy.plasma.exceptions import DataStandardError
from plasmapy.plasma.plasma_base import GenericPlasma
@@ -30,9 +30,9 @@
def _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):
"""Checks if the passed version is supported or not."""
- parsed_version = StrictVersion(openPMD_version)
- outdated_version = StrictVersion(outdated)
- newer_version = StrictVersion(newer)
+ parsed_version = Version(openPMD_version)
+ outdated_version = Version(outdated)
+ newer_version = Version(newer)
return outdated_version <= parsed_version < newer_version
|
{"golden_diff": "diff --git a/plasmapy/plasma/sources/openpmd_hdf5.py b/plasmapy/plasma/sources/openpmd_hdf5.py\n--- a/plasmapy/plasma/sources/openpmd_hdf5.py\n+++ b/plasmapy/plasma/sources/openpmd_hdf5.py\n@@ -4,7 +4,7 @@\n import numpy as np\n import os\n \n-from distutils.version import StrictVersion\n+from packaging.version import Version\n \n from plasmapy.plasma.exceptions import DataStandardError\n from plasmapy.plasma.plasma_base import GenericPlasma\n@@ -30,9 +30,9 @@\n def _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):\n \"\"\"Checks if the passed version is supported or not.\"\"\"\n \n- parsed_version = StrictVersion(openPMD_version)\n- outdated_version = StrictVersion(outdated)\n- newer_version = StrictVersion(newer)\n+ parsed_version = Version(openPMD_version)\n+ outdated_version = Version(outdated)\n+ newer_version = Version(newer)\n return outdated_version <= parsed_version < newer_version\n", "issue": "Remove usage of `distutils` which has been deprecated\nIt turns out that `distutils` has been deprecated and will be removed in Python 3.12. We should stop using it prior to our 0.7 release. Fortunately I only found one usage in: `plasmapy/plasma/sources/openpmd_hdf5.py`.\n", "before_files": [{"content": "__all__ = [\"HDF5Reader\"]\n\nimport astropy.units as u\nimport numpy as np\nimport os\n\nfrom distutils.version import StrictVersion\n\nfrom plasmapy.plasma.exceptions import DataStandardError\nfrom plasmapy.plasma.plasma_base import GenericPlasma\n\n_OUTDATED_VERSION = \"1.1.0\"\n_NEWER_VERSION = \"2.0.0\"\n\n# This is the order what OpenPMD uses to store unit\n# dimensions for a record.\n_UNITS = (u.meter, u.kilogram, u.second, u.ampere, u.Kelvin, u.mol, u.candela)\n\n\ndef _fetch_units(openPMD_dims):\n \"\"\"Converts a collection of OpenPMD dimensions to astropy.units.\"\"\"\n\n units = u.dimensionless_unscaled\n for factor, unit in zip(openPMD_dims, _UNITS):\n units *= unit ** factor\n units, *_ = units.compose()\n return units\n\n\ndef _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):\n \"\"\"Checks if the passed version is supported or not.\"\"\"\n\n parsed_version = StrictVersion(openPMD_version)\n outdated_version = StrictVersion(outdated)\n newer_version = StrictVersion(newer)\n return outdated_version <= parsed_version < newer_version\n\n\nclass HDF5Reader(GenericPlasma):\n \"\"\"\n .. _OpenPMD: https://www.openpmd.org/\n\n Core class for accessing various attributes on HDF5 files that\n are based on OpenPMD_ standards.\n\n Parameters\n ----------\n hdf5 : `str`\n Path to HDF5 file.\n\n **kwargs\n Any keyword accepted by `GenericPlasma`.\n\n \"\"\"\n\n def __init__(self, hdf5, **kwargs):\n super().__init__(**kwargs)\n\n if not os.path.isfile(hdf5):\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n try:\n import h5py\n except (ImportError, ModuleNotFoundError) as e:\n from plasmapy.optional_deps import h5py_import_error\n\n raise ImportError(h5py_import_error) from e\n\n h5 = h5py.File(hdf5, \"r\")\n self.h5 = h5\n\n self._check_valid_openpmd_version()\n\n self.subname = tuple(self.h5[\"data\"])[0]\n\n def __enter__(self):\n return self.h5\n\n def close(self):\n self.h5.close()\n\n def __exit__(self):\n self.h5.close()\n\n def _check_valid_openpmd_version(self):\n try:\n openPMD_version = self.h5.attrs[\"openPMD\"].decode(\"utf-8\")\n if _valid_version(openPMD_version):\n return True\n else:\n raise DataStandardError(\n f\"We currently only support HDF5 versions\"\n f\"starting from v{_OUTDATED_VERSION} and \"\n f\"lower than v{_NEWER_VERSION}. You can \"\n f\"however convert your HDF5 to a supported \"\n f\"version. For more information; see \"\n f\"https://github.com/openPMD/openPMD-updater\"\n )\n except KeyError:\n raise DataStandardError(\n \"Input HDF5 file does not go on with standards defined by OpenPMD\"\n )\n\n @property\n def electric_field(self):\n \"\"\"\n An (x, y, z) array containing electric field data. (Returned as an astropy\n `~astropy.units.Quantity`.)\n \"\"\"\n path = f\"data/{self.subname}/fields/E\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric field data available in HDF5 file\")\n\n @property\n def charge_density(self):\n \"\"\"\n An array containing charge density data. (Returned as an astropy\n `~astropy.units.Quantity`.)\n \"\"\"\n path = f\"data/{self.subname}/fields/rho\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n return np.array(self.h5[path]) * units\n else:\n raise AttributeError(\"No charge density data available in HDF5 file\")\n\n @property\n def magnetic_field(self):\n path = f\"data/{self.subname}/fields/B\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No magnetic field data available in HDF5 file\")\n\n @property\n def electric_current(self):\n path = f\"data/{self.subname}/fields/J\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric current data available in HDF5 file\")\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if \"hdf5\" not in kwargs:\n return False\n\n hdf5 = kwargs.get(\"hdf5\")\n openPMD = kwargs.get(\"openPMD\")\n\n isfile = os.path.isfile(hdf5)\n if not isfile:\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n if \"openPMD\" not in kwargs:\n try:\n import h5py\n except (ImportError, ModuleNotFoundError) as e:\n from plasmapy.optional_deps import h5py_import_error\n\n raise ImportError(h5py_import_error) from e\n\n h5 = h5py.File(hdf5, \"r\")\n try:\n openPMD = h5.attrs[\"openPMD\"]\n except KeyError:\n openPMD = False\n\n return openPMD\n", "path": "plasmapy/plasma/sources/openpmd_hdf5.py"}], "after_files": [{"content": "__all__ = [\"HDF5Reader\"]\n\nimport astropy.units as u\nimport numpy as np\nimport os\n\nfrom packaging.version import Version\n\nfrom plasmapy.plasma.exceptions import DataStandardError\nfrom plasmapy.plasma.plasma_base import GenericPlasma\n\n_OUTDATED_VERSION = \"1.1.0\"\n_NEWER_VERSION = \"2.0.0\"\n\n# This is the order what OpenPMD uses to store unit\n# dimensions for a record.\n_UNITS = (u.meter, u.kilogram, u.second, u.ampere, u.Kelvin, u.mol, u.candela)\n\n\ndef _fetch_units(openPMD_dims):\n \"\"\"Converts a collection of OpenPMD dimensions to astropy.units.\"\"\"\n\n units = u.dimensionless_unscaled\n for factor, unit in zip(openPMD_dims, _UNITS):\n units *= unit ** factor\n units, *_ = units.compose()\n return units\n\n\ndef _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):\n \"\"\"Checks if the passed version is supported or not.\"\"\"\n\n parsed_version = Version(openPMD_version)\n outdated_version = Version(outdated)\n newer_version = Version(newer)\n return outdated_version <= parsed_version < newer_version\n\n\nclass HDF5Reader(GenericPlasma):\n \"\"\"\n .. _OpenPMD: https://www.openpmd.org/\n\n Core class for accessing various attributes on HDF5 files that\n are based on OpenPMD_ standards.\n\n Parameters\n ----------\n hdf5 : `str`\n Path to HDF5 file.\n\n **kwargs\n Any keyword accepted by `GenericPlasma`.\n\n \"\"\"\n\n def __init__(self, hdf5, **kwargs):\n super().__init__(**kwargs)\n\n if not os.path.isfile(hdf5):\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n try:\n import h5py\n except (ImportError, ModuleNotFoundError) as e:\n from plasmapy.optional_deps import h5py_import_error\n\n raise ImportError(h5py_import_error) from e\n\n h5 = h5py.File(hdf5, \"r\")\n self.h5 = h5\n\n self._check_valid_openpmd_version()\n\n self.subname = tuple(self.h5[\"data\"])[0]\n\n def __enter__(self):\n return self.h5\n\n def close(self):\n self.h5.close()\n\n def __exit__(self):\n self.h5.close()\n\n def _check_valid_openpmd_version(self):\n try:\n openPMD_version = self.h5.attrs[\"openPMD\"].decode(\"utf-8\")\n if _valid_version(openPMD_version):\n return True\n else:\n raise DataStandardError(\n f\"We currently only support HDF5 versions\"\n f\"starting from v{_OUTDATED_VERSION} and \"\n f\"lower than v{_NEWER_VERSION}. You can \"\n f\"however convert your HDF5 to a supported \"\n f\"version. For more information; see \"\n f\"https://github.com/openPMD/openPMD-updater\"\n )\n except KeyError:\n raise DataStandardError(\n \"Input HDF5 file does not go on with standards defined by OpenPMD\"\n )\n\n @property\n def electric_field(self):\n \"\"\"\n An (x, y, z) array containing electric field data. (Returned as an astropy\n `~astropy.units.Quantity`.)\n \"\"\"\n path = f\"data/{self.subname}/fields/E\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric field data available in HDF5 file\")\n\n @property\n def charge_density(self):\n \"\"\"\n An array containing charge density data. (Returned as an astropy\n `~astropy.units.Quantity`.)\n \"\"\"\n path = f\"data/{self.subname}/fields/rho\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n return np.array(self.h5[path]) * units\n else:\n raise AttributeError(\"No charge density data available in HDF5 file\")\n\n @property\n def magnetic_field(self):\n path = f\"data/{self.subname}/fields/B\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No magnetic field data available in HDF5 file\")\n\n @property\n def electric_current(self):\n path = f\"data/{self.subname}/fields/J\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric current data available in HDF5 file\")\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if \"hdf5\" not in kwargs:\n return False\n\n hdf5 = kwargs.get(\"hdf5\")\n openPMD = kwargs.get(\"openPMD\")\n\n isfile = os.path.isfile(hdf5)\n if not isfile:\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n if \"openPMD\" not in kwargs:\n try:\n import h5py\n except (ImportError, ModuleNotFoundError) as e:\n from plasmapy.optional_deps import h5py_import_error\n\n raise ImportError(h5py_import_error) from e\n\n h5 = h5py.File(hdf5, \"r\")\n try:\n openPMD = h5.attrs[\"openPMD\"]\n except KeyError:\n openPMD = False\n\n return openPMD\n", "path": "plasmapy/plasma/sources/openpmd_hdf5.py"}]}
| 2,130 | 247 |
gh_patches_debug_20301
|
rasdani/github-patches
|
git_diff
|
UTNkar__moore-134
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing search template
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: admin@utn.se -->
### Prerequisites
* [X] Put an X between the brackets on this line if you have done all of the
following:
* Reproduced the problem with clear cache.
* (If running the application locally:) Made sure your running the newest version on the development branch
* Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues
### Description
The current search is now really working. Although you can search, the results are ugly and very spotty.
Goal: Evaluate the searchable fields within Project Moore and make a search template.
### Steps to Reproduce
1. Visit the home page
2. Search something
3. Be disappointed ☹️
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/materialize/templatetags/materialize.py`
Content:
```
1 from django import template
2 from django.template import loader
3
4 register = template.Library()
5
6
7 def get_widget_name(field):
8 return field.field.widget.__class__.__name__
9
10
11 def append_classes(field):
12 field.field.label_suffix = ''
13 classes = field.field.widget.attrs.get('class', '')
14 classes += ' validate'
15 if field.errors:
16 classes += ' invalid'
17 field.field.widget.attrs['class'] = classes
18
19
20 def render_field(template, field, prefix=None):
21 t = loader.get_template(template)
22 c = {
23 'field': field,
24 'prefix': prefix,
25 }
26 html = t.render(c)
27 return html
28
29
30 @register.simple_tag
31 def materialize_field(field, prefix=None):
32 widget = get_widget_name(field)
33 if widget in ['TextInput', 'EmailInput', 'PasswordInput', 'Select',
34 'Textarea']:
35 append_classes(field)
36 return render_field('materialize/form/input.html', field, prefix)
37 else:
38 return field.as_widget()
39
```
Path: `website/search/views.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
4 from django.shortcuts import render
5
6 from wagtail.wagtailcore.models import Page
7 from wagtail.wagtailsearch.models import Query
8
9
10 def search(request):
11 search_query = request.GET.get('query', None)
12 page = request.GET.get('page', 1)
13
14 # Search
15 if search_query:
16 search_results = Page.objects.live().search(search_query)
17 query = Query.get(search_query)
18
19 # Record hit
20 query.add_hit()
21 else:
22 search_results = Page.objects.none()
23
24 # Pagination
25 paginator = Paginator(search_results, 10)
26 try:
27 search_results = paginator.page(page)
28 except PageNotAnInteger:
29 search_results = paginator.page(1)
30 except EmptyPage:
31 search_results = paginator.page(paginator.num_pages)
32
33 return render(request, 'search/search.html', {
34 'search_query': search_query,
35 'search_results': search_results,
36 })
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/materialize/templatetags/materialize.py b/website/materialize/templatetags/materialize.py
--- a/website/materialize/templatetags/materialize.py
+++ b/website/materialize/templatetags/materialize.py
@@ -23,8 +23,7 @@
'field': field,
'prefix': prefix,
}
- html = t.render(c)
- return html
+ return t.render(c)
@register.simple_tag
@@ -36,3 +35,12 @@
return render_field('materialize/form/input.html', field, prefix)
else:
return field.as_widget()
+
+
+@register.inclusion_tag('materialize/pagination.html')
+def materialize_pagination(page, url):
+ return {
+ 'page': page,
+ 'paginator': page.paginator,
+ 'url': url,
+ }
diff --git a/website/search/views.py b/website/search/views.py
--- a/website/search/views.py
+++ b/website/search/views.py
@@ -22,7 +22,7 @@
search_results = Page.objects.none()
# Pagination
- paginator = Paginator(search_results, 10)
+ paginator = Paginator(search_results, 25)
try:
search_results = paginator.page(page)
except PageNotAnInteger:
|
{"golden_diff": "diff --git a/website/materialize/templatetags/materialize.py b/website/materialize/templatetags/materialize.py\n--- a/website/materialize/templatetags/materialize.py\n+++ b/website/materialize/templatetags/materialize.py\n@@ -23,8 +23,7 @@\n 'field': field,\n 'prefix': prefix,\n }\n- html = t.render(c)\n- return html\n+ return t.render(c)\n \n \n @register.simple_tag\n@@ -36,3 +35,12 @@\n return render_field('materialize/form/input.html', field, prefix)\n else:\n return field.as_widget()\n+\n+\n+@register.inclusion_tag('materialize/pagination.html')\n+def materialize_pagination(page, url):\n+ return {\n+ 'page': page,\n+ 'paginator': page.paginator,\n+ 'url': url,\n+ }\ndiff --git a/website/search/views.py b/website/search/views.py\n--- a/website/search/views.py\n+++ b/website/search/views.py\n@@ -22,7 +22,7 @@\n search_results = Page.objects.none()\n \n # Pagination\n- paginator = Paginator(search_results, 10)\n+ paginator = Paginator(search_results, 25)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n", "issue": "Missing search template\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: admin@utn.se -->\r\n\r\n### Prerequisites\r\n\r\n* [X] Put an X between the brackets on this line if you have done all of the\r\nfollowing:\r\n * Reproduced the problem with clear cache.\r\n * (If running the application locally:) Made sure your running the newest version on the development branch\r\n * Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues\r\n\r\n### Description\r\n\r\nThe current search is now really working. Although you can search, the results are ugly and very spotty. \r\n\r\nGoal: Evaluate the searchable fields within Project Moore and make a search template.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Visit the home page\r\n2. Search something\r\n3. Be disappointed \u2639\ufe0f \r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\n", "before_files": [{"content": "from django import template\nfrom django.template import loader\n\nregister = template.Library()\n\n\ndef get_widget_name(field):\n return field.field.widget.__class__.__name__\n\n\ndef append_classes(field):\n field.field.label_suffix = ''\n classes = field.field.widget.attrs.get('class', '')\n classes += ' validate'\n if field.errors:\n classes += ' invalid'\n field.field.widget.attrs['class'] = classes\n\n\ndef render_field(template, field, prefix=None):\n t = loader.get_template(template)\n c = {\n 'field': field,\n 'prefix': prefix,\n }\n html = t.render(c)\n return html\n\n\n@register.simple_tag\ndef materialize_field(field, prefix=None):\n widget = get_widget_name(field)\n if widget in ['TextInput', 'EmailInput', 'PasswordInput', 'Select',\n 'Textarea']:\n append_classes(field)\n return render_field('materialize/form/input.html', field, prefix)\n else:\n return field.as_widget()\n", "path": "website/materialize/templatetags/materialize.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.shortcuts import render\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(request):\n search_query = request.GET.get('query', None)\n page = request.GET.get('page', 1)\n\n # Search\n if search_query:\n search_results = Page.objects.live().search(search_query)\n query = Query.get(search_query)\n\n # Record hit\n query.add_hit()\n else:\n search_results = Page.objects.none()\n\n # Pagination\n paginator = Paginator(search_results, 10)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n })\n", "path": "website/search/views.py"}], "after_files": [{"content": "from django import template\nfrom django.template import loader\n\nregister = template.Library()\n\n\ndef get_widget_name(field):\n return field.field.widget.__class__.__name__\n\n\ndef append_classes(field):\n field.field.label_suffix = ''\n classes = field.field.widget.attrs.get('class', '')\n classes += ' validate'\n if field.errors:\n classes += ' invalid'\n field.field.widget.attrs['class'] = classes\n\n\ndef render_field(template, field, prefix=None):\n t = loader.get_template(template)\n c = {\n 'field': field,\n 'prefix': prefix,\n }\n return t.render(c)\n\n\n@register.simple_tag\ndef materialize_field(field, prefix=None):\n widget = get_widget_name(field)\n if widget in ['TextInput', 'EmailInput', 'PasswordInput', 'Select',\n 'Textarea']:\n append_classes(field)\n return render_field('materialize/form/input.html', field, prefix)\n else:\n return field.as_widget()\n\n\n@register.inclusion_tag('materialize/pagination.html')\ndef materialize_pagination(page, url):\n return {\n 'page': page,\n 'paginator': page.paginator,\n 'url': url,\n }\n", "path": "website/materialize/templatetags/materialize.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.shortcuts import render\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(request):\n search_query = request.GET.get('query', None)\n page = request.GET.get('page', 1)\n\n # Search\n if search_query:\n search_results = Page.objects.live().search(search_query)\n query = Query.get(search_query)\n\n # Record hit\n query.add_hit()\n else:\n search_results = Page.objects.none()\n\n # Pagination\n paginator = Paginator(search_results, 25)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n })\n", "path": "website/search/views.py"}]}
| 1,053 | 301 |
gh_patches_debug_12092
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-86
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnicodeDecodeError in staged_files_only
```
$ pre-commit
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to .../.pre-commit-files/patch1397853050.
Traceback (most recent call last):
File ".../bin/pre-commit", line 9, in <module>
load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()
File ".../lib/python2.6/site-packages/pre_commit/util.py", line 52, in wrapper
return func(argv)
File ".../lib/python2.6/site-packages/pre_commit/run.py", line 59, in run
return commands.run(runner, args)
File ".../lib/python2.6/site-packages/pre_commit/commands.py", line 254, in run
with ctx:
File "/usr/lib64/python2.6/contextlib.py", line 16, in __enter__
return self.gen.next()
File ".../lib/python2.6/site-packages/pre_commit/staged_files_only.py", line 32, in staged_files_only
patch_file.write(diff_stdout)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 3795: ordinal not in range(128)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/staged_files_only.py`
Content:
```
1 import contextlib
2 import logging
3 import time
4
5 from pre_commit.prefixed_command_runner import CalledProcessError
6
7
8 logger = logging.getLogger('pre_commit')
9
10
11 @contextlib.contextmanager
12 def staged_files_only(cmd_runner):
13 """Clear any unstaged changes from the git working directory inside this
14 context.
15
16 Args:
17 cmd_runner - PrefixedCommandRunner
18 """
19 # Determine if there are unstaged files
20 retcode, diff_stdout, _ = cmd_runner.run(
21 ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],
22 retcode=None,
23 )
24 if retcode and diff_stdout.strip():
25 patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
26 logger.warning('Unstaged files detected.')
27 logger.info(
28 'Stashing unstaged files to {0}.'.format(patch_filename),
29 )
30 # Save the current unstaged changes as a patch
31 with open(patch_filename, 'w') as patch_file:
32 patch_file.write(diff_stdout)
33
34 # Clear the working directory of unstaged changes
35 cmd_runner.run(['git', 'checkout', '--', '.'])
36 try:
37 yield
38 finally:
39 # Try to apply the patch we saved
40 try:
41 cmd_runner.run(['git', 'apply', patch_filename])
42 except CalledProcessError:
43 logger.warning(
44 'Stashed changes conflicted with hook auto-fixes... '
45 'Rolling back fixes...'
46 )
47 # We failed to apply the patch, presumably due to fixes made
48 # by hooks.
49 # Roll back the changes made by hooks.
50 cmd_runner.run(['git', 'checkout', '--', '.'])
51 cmd_runner.run(['git', 'apply', patch_filename])
52 logger.info('Restored changes from {0}.'.format(patch_filename))
53 else:
54 # There weren't any staged files so we don't need to do anything
55 # special
56 yield
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -1,4 +1,5 @@
import contextlib
+import io
import logging
import time
@@ -28,7 +29,7 @@
'Stashing unstaged files to {0}.'.format(patch_filename),
)
# Save the current unstaged changes as a patch
- with open(patch_filename, 'w') as patch_file:
+ with io.open(patch_filename, 'w', encoding='utf-8') as patch_file:
patch_file.write(diff_stdout)
# Clear the working directory of unstaged changes
|
{"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -1,4 +1,5 @@\n import contextlib\n+import io\n import logging\n import time\n \n@@ -28,7 +29,7 @@\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n- with open(patch_filename, 'w') as patch_file:\n+ with io.open(patch_filename, 'w', encoding='utf-8') as patch_file:\n patch_file.write(diff_stdout)\n \n # Clear the working directory of unstaged changes\n", "issue": "UnicodeDecodeError in staged_files_only\n```\n$ pre-commit\n[WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to .../.pre-commit-files/patch1397853050.\nTraceback (most recent call last):\n File \".../bin/pre-commit\", line 9, in <module>\n load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()\n File \".../lib/python2.6/site-packages/pre_commit/util.py\", line 52, in wrapper\n return func(argv)\n File \".../lib/python2.6/site-packages/pre_commit/run.py\", line 59, in run\n return commands.run(runner, args)\n File \".../lib/python2.6/site-packages/pre_commit/commands.py\", line 254, in run\n with ctx:\n File \"/usr/lib64/python2.6/contextlib.py\", line 16, in __enter__\n return self.gen.next()\n File \".../lib/python2.6/site-packages/pre_commit/staged_files_only.py\", line 32, in staged_files_only\n patch_file.write(diff_stdout)\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\xfc' in position 3795: ordinal not in range(128)\n```\n\n", "before_files": [{"content": "import contextlib\nimport logging\nimport time\n\nfrom pre_commit.prefixed_command_runner import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\n@contextlib.contextmanager\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout, _ = cmd_runner.run(\n ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],\n retcode=None,\n )\n if retcode and diff_stdout.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with open(patch_filename, 'w') as patch_file:\n patch_file.write(diff_stdout)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}], "after_files": [{"content": "import contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.prefixed_command_runner import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\n@contextlib.contextmanager\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout, _ = cmd_runner.run(\n ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],\n retcode=None,\n )\n if retcode and diff_stdout.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'w', encoding='utf-8') as patch_file:\n patch_file.write(diff_stdout)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]}
| 1,070 | 160 |
gh_patches_debug_23952
|
rasdani/github-patches
|
git_diff
|
Netflix__lemur-458
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Owner change does not modify notifications.
When a certificate changes ownership, we should try to also update the notifications by removing the old defaults owners and applying new defaults.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/certificates/schemas.py`
Content:
```
1 """
2 .. module: lemur.certificates.schemas
3 :platform: unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6 .. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
7 """
8 from flask import current_app
9 from marshmallow import fields, validates_schema, post_load, pre_load
10 from marshmallow.exceptions import ValidationError
11
12 from lemur.schemas import AssociatedAuthoritySchema, AssociatedDestinationSchema, AssociatedCertificateSchema, \
13 AssociatedNotificationSchema, PluginInputSchema, ExtensionSchema, AssociatedRoleSchema, EndpointNestedOutputSchema
14
15 from lemur.authorities.schemas import AuthorityNestedOutputSchema
16 from lemur.destinations.schemas import DestinationNestedOutputSchema
17 from lemur.notifications.schemas import NotificationNestedOutputSchema
18 from lemur.roles.schemas import RoleNestedOutputSchema
19 from lemur.domains.schemas import DomainNestedOutputSchema
20 from lemur.users.schemas import UserNestedOutputSchema
21
22 from lemur.common.schema import LemurInputSchema, LemurOutputSchema
23 from lemur.common import validators, missing
24 from lemur.notifications import service as notification_service
25
26
27 class CertificateSchema(LemurInputSchema):
28 owner = fields.Email(required=True)
29 description = fields.String()
30
31
32 class CertificateCreationSchema(CertificateSchema):
33 @post_load
34 def default_notification(self, data):
35 if not data['notifications']:
36 notification_name = "DEFAULT_{0}".format(data['owner'].split('@')[0].upper())
37 data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])
38
39 notification_name = 'DEFAULT_SECURITY'
40 data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL'))
41 return data
42
43
44 class CertificateInputSchema(CertificateCreationSchema):
45 name = fields.String()
46 common_name = fields.String(required=True, validate=validators.sensitive_domain)
47 authority = fields.Nested(AssociatedAuthoritySchema, required=True)
48
49 validity_start = fields.DateTime()
50 validity_end = fields.DateTime()
51 validity_years = fields.Integer()
52
53 destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
54 notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
55 replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
56 roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
57
58 csr = fields.String(validate=validators.csr)
59
60 # certificate body fields
61 organizational_unit = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT'))
62 organization = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATION'))
63 location = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_LOCATION'))
64 country = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_COUNTRY'))
65 state = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_STATE'))
66
67 extensions = fields.Nested(ExtensionSchema)
68
69 @validates_schema
70 def validate_dates(self, data):
71 validators.dates(data)
72
73 @pre_load
74 def ensure_dates(self, data):
75 return missing.convert_validity_years(data)
76
77
78 class CertificateEditInputSchema(CertificateSchema):
79 notify = fields.Boolean()
80 destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
81 notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
82 replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
83 roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
84
85
86 class CertificateNestedOutputSchema(LemurOutputSchema):
87 __envelope__ = False
88 id = fields.Integer()
89 active = fields.Boolean()
90 bits = fields.Integer()
91 body = fields.String()
92 chain = fields.String()
93 description = fields.String()
94 name = fields.String()
95 cn = fields.String()
96 not_after = fields.DateTime()
97 not_before = fields.DateTime()
98 owner = fields.Email()
99 status = fields.Boolean()
100 creator = fields.Nested(UserNestedOutputSchema)
101 issuer = fields.Nested(AuthorityNestedOutputSchema)
102
103
104 class CertificateOutputSchema(LemurOutputSchema):
105 id = fields.Integer()
106 active = fields.Boolean()
107 notify = fields.Boolean()
108 bits = fields.Integer()
109 body = fields.String()
110 chain = fields.String()
111 deleted = fields.Boolean(default=False)
112 description = fields.String()
113 issuer = fields.String()
114 name = fields.String()
115 cn = fields.String()
116 not_after = fields.DateTime()
117 not_before = fields.DateTime()
118 owner = fields.Email()
119 san = fields.Boolean()
120 serial = fields.String()
121 signing_algorithm = fields.String()
122 status = fields.Boolean()
123 user = fields.Nested(UserNestedOutputSchema)
124 domains = fields.Nested(DomainNestedOutputSchema, many=True)
125 destinations = fields.Nested(DestinationNestedOutputSchema, many=True)
126 notifications = fields.Nested(NotificationNestedOutputSchema, many=True)
127 replaces = fields.Nested(CertificateNestedOutputSchema, many=True)
128 authority = fields.Nested(AuthorityNestedOutputSchema)
129 roles = fields.Nested(RoleNestedOutputSchema, many=True)
130 endpoints = fields.Nested(EndpointNestedOutputSchema, many=True, missing=[])
131
132
133 class CertificateUploadInputSchema(CertificateCreationSchema):
134 name = fields.String()
135 notify = fields.Boolean(missing=True)
136
137 private_key = fields.String(validate=validators.private_key)
138 body = fields.String(required=True, validate=validators.public_certificate)
139 chain = fields.String(validate=validators.public_certificate) # TODO this could be multiple certificates
140
141 destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
142 notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
143 replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
144 roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
145
146 @validates_schema
147 def keys(self, data):
148 if data.get('destinations'):
149 if not data.get('private_key'):
150 raise ValidationError('Destinations require private key.')
151
152
153 class CertificateExportInputSchema(LemurInputSchema):
154 plugin = fields.Nested(PluginInputSchema)
155
156
157 certificate_input_schema = CertificateInputSchema()
158 certificate_output_schema = CertificateOutputSchema()
159 certificates_output_schema = CertificateOutputSchema(many=True)
160 certificate_upload_input_schema = CertificateUploadInputSchema()
161 certificate_export_input_schema = CertificateExportInputSchema()
162 certificate_edit_input_schema = CertificateEditInputSchema()
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lemur/certificates/schemas.py b/lemur/certificates/schemas.py
--- a/lemur/certificates/schemas.py
+++ b/lemur/certificates/schemas.py
@@ -77,11 +77,25 @@
class CertificateEditInputSchema(CertificateSchema):
notify = fields.Boolean()
+ owner = fields.String()
destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
+ @post_load
+ def enforce_notifications(self, data):
+ """
+ Ensures that when an owner changes, default notifications are added for the new owner.
+ Old owner notifications are retained unless explicitly removed.
+ :param data:
+ :return:
+ """
+ if data['owner']:
+ notification_name = "DEFAULT_{0}".format(data['owner'].split('@')[0].upper())
+ data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])
+ return data
+
class CertificateNestedOutputSchema(LemurOutputSchema):
__envelope__ = False
|
{"golden_diff": "diff --git a/lemur/certificates/schemas.py b/lemur/certificates/schemas.py\n--- a/lemur/certificates/schemas.py\n+++ b/lemur/certificates/schemas.py\n@@ -77,11 +77,25 @@\n \n class CertificateEditInputSchema(CertificateSchema):\n notify = fields.Boolean()\n+ owner = fields.String()\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n \n+ @post_load\n+ def enforce_notifications(self, data):\n+ \"\"\"\n+ Ensures that when an owner changes, default notifications are added for the new owner.\n+ Old owner notifications are retained unless explicitly removed.\n+ :param data:\n+ :return:\n+ \"\"\"\n+ if data['owner']:\n+ notification_name = \"DEFAULT_{0}\".format(data['owner'].split('@')[0].upper())\n+ data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])\n+ return data\n+\n \n class CertificateNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n", "issue": "Owner change does not modify notifications.\nWhen a certificate changes ownership, we should try to also update the notifications by removing the old defaults owners and applying new defaults. \n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.certificates.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>\n\"\"\"\nfrom flask import current_app\nfrom marshmallow import fields, validates_schema, post_load, pre_load\nfrom marshmallow.exceptions import ValidationError\n\nfrom lemur.schemas import AssociatedAuthoritySchema, AssociatedDestinationSchema, AssociatedCertificateSchema, \\\n AssociatedNotificationSchema, PluginInputSchema, ExtensionSchema, AssociatedRoleSchema, EndpointNestedOutputSchema\n\nfrom lemur.authorities.schemas import AuthorityNestedOutputSchema\nfrom lemur.destinations.schemas import DestinationNestedOutputSchema\nfrom lemur.notifications.schemas import NotificationNestedOutputSchema\nfrom lemur.roles.schemas import RoleNestedOutputSchema\nfrom lemur.domains.schemas import DomainNestedOutputSchema\nfrom lemur.users.schemas import UserNestedOutputSchema\n\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\nfrom lemur.common import validators, missing\nfrom lemur.notifications import service as notification_service\n\n\nclass CertificateSchema(LemurInputSchema):\n owner = fields.Email(required=True)\n description = fields.String()\n\n\nclass CertificateCreationSchema(CertificateSchema):\n @post_load\n def default_notification(self, data):\n if not data['notifications']:\n notification_name = \"DEFAULT_{0}\".format(data['owner'].split('@')[0].upper())\n data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])\n\n notification_name = 'DEFAULT_SECURITY'\n data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL'))\n return data\n\n\nclass CertificateInputSchema(CertificateCreationSchema):\n name = fields.String()\n common_name = fields.String(required=True, validate=validators.sensitive_domain)\n authority = fields.Nested(AssociatedAuthoritySchema, required=True)\n\n validity_start = fields.DateTime()\n validity_end = fields.DateTime()\n validity_years = fields.Integer()\n\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n csr = fields.String(validate=validators.csr)\n\n # certificate body fields\n organizational_unit = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT'))\n organization = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATION'))\n location = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_LOCATION'))\n country = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_COUNTRY'))\n state = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_STATE'))\n\n extensions = fields.Nested(ExtensionSchema)\n\n @validates_schema\n def validate_dates(self, data):\n validators.dates(data)\n\n @pre_load\n def ensure_dates(self, data):\n return missing.convert_validity_years(data)\n\n\nclass CertificateEditInputSchema(CertificateSchema):\n notify = fields.Boolean()\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n\nclass CertificateNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n active = fields.Boolean()\n bits = fields.Integer()\n body = fields.String()\n chain = fields.String()\n description = fields.String()\n name = fields.String()\n cn = fields.String()\n not_after = fields.DateTime()\n not_before = fields.DateTime()\n owner = fields.Email()\n status = fields.Boolean()\n creator = fields.Nested(UserNestedOutputSchema)\n issuer = fields.Nested(AuthorityNestedOutputSchema)\n\n\nclass CertificateOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n active = fields.Boolean()\n notify = fields.Boolean()\n bits = fields.Integer()\n body = fields.String()\n chain = fields.String()\n deleted = fields.Boolean(default=False)\n description = fields.String()\n issuer = fields.String()\n name = fields.String()\n cn = fields.String()\n not_after = fields.DateTime()\n not_before = fields.DateTime()\n owner = fields.Email()\n san = fields.Boolean()\n serial = fields.String()\n signing_algorithm = fields.String()\n status = fields.Boolean()\n user = fields.Nested(UserNestedOutputSchema)\n domains = fields.Nested(DomainNestedOutputSchema, many=True)\n destinations = fields.Nested(DestinationNestedOutputSchema, many=True)\n notifications = fields.Nested(NotificationNestedOutputSchema, many=True)\n replaces = fields.Nested(CertificateNestedOutputSchema, many=True)\n authority = fields.Nested(AuthorityNestedOutputSchema)\n roles = fields.Nested(RoleNestedOutputSchema, many=True)\n endpoints = fields.Nested(EndpointNestedOutputSchema, many=True, missing=[])\n\n\nclass CertificateUploadInputSchema(CertificateCreationSchema):\n name = fields.String()\n notify = fields.Boolean(missing=True)\n\n private_key = fields.String(validate=validators.private_key)\n body = fields.String(required=True, validate=validators.public_certificate)\n chain = fields.String(validate=validators.public_certificate) # TODO this could be multiple certificates\n\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n @validates_schema\n def keys(self, data):\n if data.get('destinations'):\n if not data.get('private_key'):\n raise ValidationError('Destinations require private key.')\n\n\nclass CertificateExportInputSchema(LemurInputSchema):\n plugin = fields.Nested(PluginInputSchema)\n\n\ncertificate_input_schema = CertificateInputSchema()\ncertificate_output_schema = CertificateOutputSchema()\ncertificates_output_schema = CertificateOutputSchema(many=True)\ncertificate_upload_input_schema = CertificateUploadInputSchema()\ncertificate_export_input_schema = CertificateExportInputSchema()\ncertificate_edit_input_schema = CertificateEditInputSchema()\n", "path": "lemur/certificates/schemas.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.certificates.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>\n\"\"\"\nfrom flask import current_app\nfrom marshmallow import fields, validates_schema, post_load, pre_load\nfrom marshmallow.exceptions import ValidationError\n\nfrom lemur.schemas import AssociatedAuthoritySchema, AssociatedDestinationSchema, AssociatedCertificateSchema, \\\n AssociatedNotificationSchema, PluginInputSchema, ExtensionSchema, AssociatedRoleSchema, EndpointNestedOutputSchema\n\nfrom lemur.authorities.schemas import AuthorityNestedOutputSchema\nfrom lemur.destinations.schemas import DestinationNestedOutputSchema\nfrom lemur.notifications.schemas import NotificationNestedOutputSchema\nfrom lemur.roles.schemas import RoleNestedOutputSchema\nfrom lemur.domains.schemas import DomainNestedOutputSchema\nfrom lemur.users.schemas import UserNestedOutputSchema\n\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\nfrom lemur.common import validators, missing\nfrom lemur.notifications import service as notification_service\n\n\nclass CertificateSchema(LemurInputSchema):\n owner = fields.Email(required=True)\n description = fields.String()\n\n\nclass CertificateCreationSchema(CertificateSchema):\n @post_load\n def default_notification(self, data):\n if not data['notifications']:\n notification_name = \"DEFAULT_{0}\".format(data['owner'].split('@')[0].upper())\n data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])\n\n notification_name = 'DEFAULT_SECURITY'\n data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL'))\n return data\n\n\nclass CertificateInputSchema(CertificateCreationSchema):\n name = fields.String()\n common_name = fields.String(required=True, validate=validators.sensitive_domain)\n authority = fields.Nested(AssociatedAuthoritySchema, required=True)\n\n validity_start = fields.DateTime()\n validity_end = fields.DateTime()\n validity_years = fields.Integer()\n\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n csr = fields.String(validate=validators.csr)\n\n # certificate body fields\n organizational_unit = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT'))\n organization = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATION'))\n location = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_LOCATION'))\n country = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_COUNTRY'))\n state = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_STATE'))\n\n extensions = fields.Nested(ExtensionSchema)\n\n @validates_schema\n def validate_dates(self, data):\n validators.dates(data)\n\n @pre_load\n def ensure_dates(self, data):\n return missing.convert_validity_years(data)\n\n\nclass CertificateEditInputSchema(CertificateSchema):\n notify = fields.Boolean()\n owner = fields.String()\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n @post_load\n def enforce_notifications(self, data):\n \"\"\"\n Ensures that when an owner changes, default notifications are added for the new owner.\n Old owner notifications are retained unless explicitly removed.\n :param data:\n :return:\n \"\"\"\n if data['owner']:\n notification_name = \"DEFAULT_{0}\".format(data['owner'].split('@')[0].upper())\n data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])\n return data\n\n\nclass CertificateNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n active = fields.Boolean()\n bits = fields.Integer()\n body = fields.String()\n chain = fields.String()\n description = fields.String()\n name = fields.String()\n cn = fields.String()\n not_after = fields.DateTime()\n not_before = fields.DateTime()\n owner = fields.Email()\n status = fields.Boolean()\n creator = fields.Nested(UserNestedOutputSchema)\n issuer = fields.Nested(AuthorityNestedOutputSchema)\n\n\nclass CertificateOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n active = fields.Boolean()\n notify = fields.Boolean()\n bits = fields.Integer()\n body = fields.String()\n chain = fields.String()\n deleted = fields.Boolean(default=False)\n description = fields.String()\n issuer = fields.String()\n name = fields.String()\n cn = fields.String()\n not_after = fields.DateTime()\n not_before = fields.DateTime()\n owner = fields.Email()\n san = fields.Boolean()\n serial = fields.String()\n signing_algorithm = fields.String()\n status = fields.Boolean()\n user = fields.Nested(UserNestedOutputSchema)\n domains = fields.Nested(DomainNestedOutputSchema, many=True)\n destinations = fields.Nested(DestinationNestedOutputSchema, many=True)\n notifications = fields.Nested(NotificationNestedOutputSchema, many=True)\n replaces = fields.Nested(CertificateNestedOutputSchema, many=True)\n authority = fields.Nested(AuthorityNestedOutputSchema)\n roles = fields.Nested(RoleNestedOutputSchema, many=True)\n endpoints = fields.Nested(EndpointNestedOutputSchema, many=True, missing=[])\n\n\nclass CertificateUploadInputSchema(CertificateCreationSchema):\n name = fields.String()\n notify = fields.Boolean(missing=True)\n\n private_key = fields.String(validate=validators.private_key)\n body = fields.String(required=True, validate=validators.public_certificate)\n chain = fields.String(validate=validators.public_certificate) # TODO this could be multiple certificates\n\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n @validates_schema\n def keys(self, data):\n if data.get('destinations'):\n if not data.get('private_key'):\n raise ValidationError('Destinations require private key.')\n\n\nclass CertificateExportInputSchema(LemurInputSchema):\n plugin = fields.Nested(PluginInputSchema)\n\n\ncertificate_input_schema = CertificateInputSchema()\ncertificate_output_schema = CertificateOutputSchema()\ncertificates_output_schema = CertificateOutputSchema(many=True)\ncertificate_upload_input_schema = CertificateUploadInputSchema()\ncertificate_export_input_schema = CertificateExportInputSchema()\ncertificate_edit_input_schema = CertificateEditInputSchema()\n", "path": "lemur/certificates/schemas.py"}]}
| 2,100 | 297 |
gh_patches_debug_13481
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-382
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve the logic in the ACSHandleStage
**Version**
main
**Describe the bug**
If there are ACS that point to the same content, pick the first RA instead of last
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/plugin/repo_version_utils.py`
Content:
```
1 from collections import defaultdict
2 from gettext import gettext as _
3 import logging
4
5 from django.db.models import Q
6
7
8 _logger = logging.getLogger(__name__)
9
10
11 def remove_duplicates(repository_version):
12 """
13 Inspect content additions in the `RepositoryVersion` and replace repository duplicates.
14
15 Some content can have two instances A and B which are unique, but cannot both exist together in
16 one repository. For example, pulp_file's content has `relative_path` for that file within the
17 repository.
18
19 Any content newly added to the :class:`~pulpcore.plugin.models.RepositoryVersion` is checked
20 against existing content in the :class:`~pulpcore.plugin.models.RepositoryVersion` with newer
21 "repository duplicates" replace existing "repository duplicates". Each Content model can define
22 a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`
23 contain the same value for two content units, they are considered "repository duplicates".
24
25 After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the
26 :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a
27 parameter to `run()`.
28
29 Args:
30 repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked
31 and possibly modified.
32 """
33 query_for_repo_duplicates_by_type = defaultdict(lambda: Q())
34 for item in repository_version.added():
35 detail_item = item.cast()
36 if detail_item.repo_key_fields == ():
37 continue
38 unit_q_dict = {
39 field: getattr(detail_item, field) for field in detail_item.repo_key_fields
40 }
41 item_query = Q(**unit_q_dict) & ~Q(pk=detail_item.pk)
42 query_for_repo_duplicates_by_type[detail_item._meta.model] |= item_query
43
44 for model in query_for_repo_duplicates_by_type:
45 _logger.debug(_("Removing duplicates for type: {}".format(model)))
46 qs = model.objects.filter(query_for_repo_duplicates_by_type[model])
47 repository_version.remove_content(qs)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/plugin/repo_version_utils.py b/pulpcore/plugin/repo_version_utils.py
--- a/pulpcore/plugin/repo_version_utils.py
+++ b/pulpcore/plugin/repo_version_utils.py
@@ -22,10 +22,6 @@
a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`
contain the same value for two content units, they are considered "repository duplicates".
- After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the
- :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a
- parameter to `run()`.
-
Args:
repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked
and possibly modified.
|
{"golden_diff": "diff --git a/pulpcore/plugin/repo_version_utils.py b/pulpcore/plugin/repo_version_utils.py\n--- a/pulpcore/plugin/repo_version_utils.py\n+++ b/pulpcore/plugin/repo_version_utils.py\n@@ -22,10 +22,6 @@\n a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`\n contain the same value for two content units, they are considered \"repository duplicates\".\n \n- After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the\n- :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a\n- parameter to `run()`.\n-\n Args:\n repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked\n and possibly modified.\n", "issue": "Improve the logic in the ACSHandleStage\n**Version**\r\nmain\r\n\r\n**Describe the bug**\r\nIf there are ACS that point to the same content, pick the first RA instead of last\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom gettext import gettext as _\nimport logging\n\nfrom django.db.models import Q\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef remove_duplicates(repository_version):\n \"\"\"\n Inspect content additions in the `RepositoryVersion` and replace repository duplicates.\n\n Some content can have two instances A and B which are unique, but cannot both exist together in\n one repository. For example, pulp_file's content has `relative_path` for that file within the\n repository.\n\n Any content newly added to the :class:`~pulpcore.plugin.models.RepositoryVersion` is checked\n against existing content in the :class:`~pulpcore.plugin.models.RepositoryVersion` with newer\n \"repository duplicates\" replace existing \"repository duplicates\". Each Content model can define\n a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`\n contain the same value for two content units, they are considered \"repository duplicates\".\n\n After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the\n :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a\n parameter to `run()`.\n\n Args:\n repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked\n and possibly modified.\n \"\"\"\n query_for_repo_duplicates_by_type = defaultdict(lambda: Q())\n for item in repository_version.added():\n detail_item = item.cast()\n if detail_item.repo_key_fields == ():\n continue\n unit_q_dict = {\n field: getattr(detail_item, field) for field in detail_item.repo_key_fields\n }\n item_query = Q(**unit_q_dict) & ~Q(pk=detail_item.pk)\n query_for_repo_duplicates_by_type[detail_item._meta.model] |= item_query\n\n for model in query_for_repo_duplicates_by_type:\n _logger.debug(_(\"Removing duplicates for type: {}\".format(model)))\n qs = model.objects.filter(query_for_repo_duplicates_by_type[model])\n repository_version.remove_content(qs)\n", "path": "pulpcore/plugin/repo_version_utils.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom gettext import gettext as _\nimport logging\n\nfrom django.db.models import Q\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef remove_duplicates(repository_version):\n \"\"\"\n Inspect content additions in the `RepositoryVersion` and replace repository duplicates.\n\n Some content can have two instances A and B which are unique, but cannot both exist together in\n one repository. For example, pulp_file's content has `relative_path` for that file within the\n repository.\n\n Any content newly added to the :class:`~pulpcore.plugin.models.RepositoryVersion` is checked\n against existing content in the :class:`~pulpcore.plugin.models.RepositoryVersion` with newer\n \"repository duplicates\" replace existing \"repository duplicates\". Each Content model can define\n a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`\n contain the same value for two content units, they are considered \"repository duplicates\".\n\n Args:\n repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked\n and possibly modified.\n \"\"\"\n query_for_repo_duplicates_by_type = defaultdict(lambda: Q())\n for item in repository_version.added():\n detail_item = item.cast()\n if detail_item.repo_key_fields == ():\n continue\n unit_q_dict = {\n field: getattr(detail_item, field) for field in detail_item.repo_key_fields\n }\n item_query = Q(**unit_q_dict) & ~Q(pk=detail_item.pk)\n query_for_repo_duplicates_by_type[detail_item._meta.model] |= item_query\n\n for model in query_for_repo_duplicates_by_type:\n _logger.debug(_(\"Removing duplicates for type: {}\".format(model)))\n qs = model.objects.filter(query_for_repo_duplicates_by_type[model])\n repository_version.remove_content(qs)\n", "path": "pulpcore/plugin/repo_version_utils.py"}]}
| 885 | 183 |
gh_patches_debug_34351
|
rasdani/github-patches
|
git_diff
|
scikit-hep__awkward-2065
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`ak.fill_none` doesn't support option-in-record
### Version of Awkward Array
main
### Description and code to reproduce
The following example is left unchanged by `ak.fill_none`:
```python
import awkward as ak
record = ak.zip({
'x': [1, None],
'y': [2, 3]
})
result = ak.fill_none(record, 0, axis=-1)
result.show()
```
I'm fairly sure that this is a bug, because `ak.is_none` walks successfully through records. My understanding of records is that it's only reducers that cannot peer through them.
Of course, this example would imply
```python
record = ak.zip({
"x": [1, 4],
"y": [2, 3]
}).mask[[True, False]]
assert ak.fill_none(record, 0, axis=-1).to_list() == [
{"x": 1, "y": 2},
0
]
```
I think this is reasonable; again, `is_none` behaves identically to this. Maybe we _do_ want a parameter in future, e.g. `visit_records` or some such.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/operations/ak_fill_none.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import numbers
4
5 import awkward as ak
6
7 np = ak._nplikes.NumpyMetadata.instance()
8 cpu = ak._backends.NumpyBackend.instance()
9
10
11 def fill_none(array, value, axis=-1, *, highlevel=True, behavior=None):
12 """
13 Args:
14 array: Array-like data (anything #ak.to_layout recognizes).
15 value: Data with which to replace None.
16 axis (None or int): If None, replace all None values in the array
17 with the given value; if an int, The dimension at which this
18 operation is applied. The outermost dimension is `0`, followed
19 by `1`, etc., and negative values count backward from the
20 innermost: `-1` is the innermost dimension, `-2` is the next
21 level up, etc.
22 highlevel (bool): If True, return an #ak.Array; otherwise, return
23 a low-level #ak.contents.Content subclass.
24 behavior (None or dict): Custom #ak.behavior for the output array, if
25 high-level.
26
27 Replaces missing values (None) with a given `value`.
28
29 For example, in the following
30
31 >>> array = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])
32
33 The None values could be replaced with `0` by
34
35 >>> ak.fill_none(array, 0)
36 <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>
37
38 The replacement value doesn't strictly need the same type as the
39 surrounding data. For example, the None values could also be replaced
40 by a string.
41
42 >>> ak.fill_none(array, "hi")
43 <Array [[1.1, 'hi', 2.2], [], ['hi', ...]] type='3 * var * union[float64, s...'>
44
45 The list content now has a union type:
46
47 >>> ak.fill_none(array, "hi").type.show()
48 3 * var * union[
49 float64,
50 string
51 ]
52
53 The values could be floating-point numbers or strings.
54 """
55 with ak._errors.OperationErrorContext(
56 "ak.fill_none",
57 dict(
58 array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior
59 ),
60 ):
61 return _impl(array, value, axis, highlevel, behavior)
62
63
64 def _impl(array, value, axis, highlevel, behavior):
65 arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
66 behavior = ak._util.behavior_of(array, behavior=behavior)
67 backend = ak._backends.backend_of(arraylayout, default=cpu)
68
69 # Convert value type to appropriate layout
70 if (
71 isinstance(value, np.ndarray)
72 and issubclass(value.dtype.type, (np.bool_, np.number))
73 and len(value.shape) != 0
74 ):
75 valuelayout = ak.operations.to_layout(
76 backend.nplike.asarray(value)[np.newaxis],
77 allow_record=False,
78 allow_other=False,
79 )
80 elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (
81 isinstance(value, np.ndarray)
82 and issubclass(value.dtype.type, (np.bool_, np.number))
83 ):
84 valuelayout = ak.operations.to_layout(
85 backend.nplike.asarray(value), allow_record=False, allow_other=False
86 )
87 elif (
88 ak._util.is_sized_iterable(value)
89 and not (isinstance(value, (str, bytes)))
90 or isinstance(value, (ak.highlevel.Record, ak.record.Record))
91 ):
92 valuelayout = ak.operations.to_layout(
93 value, allow_record=True, allow_other=False
94 )
95 if isinstance(valuelayout, ak.record.Record):
96 valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]
97 elif len(valuelayout) == 0:
98 offsets = ak.index.Index64(
99 backend.index_nplike.array([0, 0], dtype=np.int64)
100 )
101 valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)
102 else:
103 valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)
104 else:
105 valuelayout = ak.operations.to_layout(
106 [value], allow_record=False, allow_other=False
107 )
108
109 def maybe_fillna(layout):
110 if layout.is_option:
111 return ak._do.fill_none(layout, valuelayout)
112 else:
113 return layout
114
115 if axis is None:
116
117 def action(layout, continuation, **kwargs):
118 return maybe_fillna(continuation())
119
120 else:
121
122 def action(layout, depth, **kwargs):
123 posaxis = ak._util.maybe_posaxis(layout, axis, depth)
124 if posaxis is not None and posaxis + 1 < depth:
125 return layout
126 elif posaxis is not None and posaxis + 1 == depth:
127 return maybe_fillna(layout)
128 elif layout.is_leaf:
129 raise ak._errors.wrap_error(
130 np.AxisError(
131 f"axis={axis} exceeds the depth of this array ({depth})"
132 )
133 )
134
135 out = ak._do.recursively_apply(arraylayout, action, behavior)
136
137 return ak._util.wrap(out, behavior, highlevel)
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py
--- a/src/awkward/operations/ak_fill_none.py
+++ b/src/awkward/operations/ak_fill_none.py
@@ -63,7 +63,7 @@
def _impl(array, value, axis, highlevel, behavior):
arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
- behavior = ak._util.behavior_of(array, behavior=behavior)
+ behavior = ak._util.behavior_of(array, value, behavior=behavior)
backend = ak._backends.backend_of(arraylayout, default=cpu)
# Convert value type to appropriate layout
@@ -106,25 +106,24 @@
[value], allow_record=False, allow_other=False
)
- def maybe_fillna(layout):
- if layout.is_option:
- return ak._do.fill_none(layout, valuelayout)
- else:
- return layout
-
if axis is None:
def action(layout, continuation, **kwargs):
- return maybe_fillna(continuation())
+ if layout.is_option:
+ return ak._do.fill_none(continuation(), valuelayout)
else:
def action(layout, depth, **kwargs):
posaxis = ak._util.maybe_posaxis(layout, axis, depth)
- if posaxis is not None and posaxis + 1 < depth:
- return layout
- elif posaxis is not None and posaxis + 1 == depth:
- return maybe_fillna(layout)
+ if posaxis is not None and posaxis + 1 == depth:
+ if layout.is_union or layout.is_record:
+ return None
+ elif layout.is_option:
+ return ak._do.fill_none(layout, valuelayout)
+ else:
+ return layout
+
elif layout.is_leaf:
raise ak._errors.wrap_error(
np.AxisError(
@@ -133,5 +132,4 @@
)
out = ak._do.recursively_apply(arraylayout, action, behavior)
-
return ak._util.wrap(out, behavior, highlevel)
|
{"golden_diff": "diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py\n--- a/src/awkward/operations/ak_fill_none.py\n+++ b/src/awkward/operations/ak_fill_none.py\n@@ -63,7 +63,7 @@\n \n def _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n- behavior = ak._util.behavior_of(array, behavior=behavior)\n+ behavior = ak._util.behavior_of(array, value, behavior=behavior)\n backend = ak._backends.backend_of(arraylayout, default=cpu)\n \n # Convert value type to appropriate layout\n@@ -106,25 +106,24 @@\n [value], allow_record=False, allow_other=False\n )\n \n- def maybe_fillna(layout):\n- if layout.is_option:\n- return ak._do.fill_none(layout, valuelayout)\n- else:\n- return layout\n-\n if axis is None:\n \n def action(layout, continuation, **kwargs):\n- return maybe_fillna(continuation())\n+ if layout.is_option:\n+ return ak._do.fill_none(continuation(), valuelayout)\n \n else:\n \n def action(layout, depth, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n- if posaxis is not None and posaxis + 1 < depth:\n- return layout\n- elif posaxis is not None and posaxis + 1 == depth:\n- return maybe_fillna(layout)\n+ if posaxis is not None and posaxis + 1 == depth:\n+ if layout.is_union or layout.is_record:\n+ return None\n+ elif layout.is_option:\n+ return ak._do.fill_none(layout, valuelayout)\n+ else:\n+ return layout\n+\n elif layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(\n@@ -133,5 +132,4 @@\n )\n \n out = ak._do.recursively_apply(arraylayout, action, behavior)\n-\n return ak._util.wrap(out, behavior, highlevel)\n", "issue": "`ak.fill_none` doesn't support option-in-record\n### Version of Awkward Array\r\n\r\nmain\r\n\r\n### Description and code to reproduce\r\n\r\nThe following example is left unchanged by `ak.fill_none`:\r\n```python\r\nimport awkward as ak\r\n\r\nrecord = ak.zip({\r\n 'x': [1, None],\r\n 'y': [2, 3]\r\n})\r\n\r\nresult = ak.fill_none(record, 0, axis=-1)\r\nresult.show()\r\n```\r\n\r\nI'm fairly sure that this is a bug, because `ak.is_none` walks successfully through records. My understanding of records is that it's only reducers that cannot peer through them. \r\n\r\nOf course, this example would imply\r\n```python\r\nrecord = ak.zip({\r\n \"x\": [1, 4], \r\n\t\"y\": [2, 3]\r\n}).mask[[True, False]]\r\nassert ak.fill_none(record, 0, axis=-1).to_list() == [\r\n {\"x\": 1, \"y\": 2},\r\n 0\r\n]\r\n```\r\n\r\nI think this is reasonable; again, `is_none` behaves identically to this. Maybe we _do_ want a parameter in future, e.g. `visit_records` or some such.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak._nplikes.NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef fill_none(array, value, axis=-1, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following\n\n >>> array = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], [], ['hi', ...]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.fill_none(array, \"hi\").type.show()\n 3 * var * union[\n float64,\n string\n ]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n behavior = ak._util.behavior_of(array, behavior=behavior)\n backend = ak._backends.backend_of(arraylayout, default=cpu)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n backend.nplike.asarray(value)[np.newaxis],\n allow_record=False,\n allow_other=False,\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n backend.nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n backend.index_nplike.array([0, 0], dtype=np.int64)\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n def maybe_fillna(layout):\n if layout.is_option:\n return ak._do.fill_none(layout, valuelayout)\n else:\n return layout\n\n if axis is None:\n\n def action(layout, continuation, **kwargs):\n return maybe_fillna(continuation())\n\n else:\n\n def action(layout, depth, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if posaxis is not None and posaxis + 1 < depth:\n return layout\n elif posaxis is not None and posaxis + 1 == depth:\n return maybe_fillna(layout)\n elif layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(\n f\"axis={axis} exceeds the depth of this array ({depth})\"\n )\n )\n\n out = ak._do.recursively_apply(arraylayout, action, behavior)\n\n return ak._util.wrap(out, behavior, highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak._nplikes.NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef fill_none(array, value, axis=-1, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following\n\n >>> array = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], [], ['hi', ...]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.fill_none(array, \"hi\").type.show()\n 3 * var * union[\n float64,\n string\n ]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n behavior = ak._util.behavior_of(array, value, behavior=behavior)\n backend = ak._backends.backend_of(arraylayout, default=cpu)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n backend.nplike.asarray(value)[np.newaxis],\n allow_record=False,\n allow_other=False,\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n backend.nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n backend.index_nplike.array([0, 0], dtype=np.int64)\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n if axis is None:\n\n def action(layout, continuation, **kwargs):\n if layout.is_option:\n return ak._do.fill_none(continuation(), valuelayout)\n\n else:\n\n def action(layout, depth, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if posaxis is not None and posaxis + 1 == depth:\n if layout.is_union or layout.is_record:\n return None\n elif layout.is_option:\n return ak._do.fill_none(layout, valuelayout)\n else:\n return layout\n\n elif layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(\n f\"axis={axis} exceeds the depth of this array ({depth})\"\n )\n )\n\n out = ak._do.recursively_apply(arraylayout, action, behavior)\n return ak._util.wrap(out, behavior, highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py"}]}
| 2,029 | 490 |
gh_patches_debug_13262
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5477
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plot is empty when log scale is used
Bokeh incorrectly creates an empty plot with the following:
``` python
from bokeh.plotting import figure, output_file, show
ys = [4.471799184102565e-05, 0.0009856299875536934, 0.0011045119899790734]
xs = range(len(ys))
output_file("log.html")
p = figure(y_axis_type="log")
p.line(xs, ys)
show(p)
```
The problem only occurs if `y_axis_type="log"` is used.
It seems when the range of y values is too narrow bokeh fails to compute the y-axis range. If I multiple the y values by 100, it plots properly.
Tested on bokeh version: 0.11.1 py34_0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/ranges.py`
Content:
```
1 """ Models for describing different kinds of ranges of values
2 in different kinds of spaces (e.g., continuous or categorical)
3 and with options for "auto sizing".
4
5 """
6 from __future__ import absolute_import
7
8 from ..model import Model
9 from ..core.enums import StartEnd
10 from ..core.properties import abstract
11 from ..core.properties import (
12 Auto, Bool, Int, Float, String, Datetime, TimeDelta, Instance, List,
13 Either, Enum, MinMaxBounds,
14 )
15 from .callbacks import Callback
16 from .renderers import Renderer
17
18
19 @abstract
20 class Range(Model):
21 """ A base class for all range types. ``Range`` is not generally
22 useful to instantiate on its own.
23
24 """
25
26 callback = Instance(Callback, help="""
27 A callback to run in the browser whenever the range is updated.
28 """)
29
30
31 class Range1d(Range):
32 """ A fixed, closed range [start, end] in a continuous scalar
33 dimension.
34
35 In addition to supplying ``start`` and ``end`` keyword arguments
36 to the ``Range1d`` initializer, you can also instantiate with
37 the convenience syntax::
38
39 Range(0, 10) # equivalent to Range(start=0, end=10)
40
41 """
42
43 start = Either(Float, Datetime, Int, default=0, help="""
44 The start of the range.
45 """)
46
47 end = Either(Float, Datetime, Int, default=1, help="""
48 The end of the range.
49 """)
50
51 bounds = MinMaxBounds(accept_datetime=True, default=None, help="""
52 The bounds that the range is allowed to go to - typically used to prevent
53 the user from panning/zooming/etc away from the data.
54
55 If set to ``'auto'``, the bounds will be computed to the start and end of the Range.
56
57 Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is
58 increasing or decreasing, the first item should be the minimum value of the range and the
59 second item should be the maximum. Setting min > max will result in a ``ValueError``.
60
61 By default, bounds are ``None`` and your plot to pan/zoom as far as you want. If you only
62 want to constrain one end of the plot, you can set min or max to None.
63
64 Examples:
65
66 Range1d(0, 1, bounds='auto') # Auto-bounded to 0 and 1 (Default behavior)
67 Range1d(start=0, end=1, bounds=(0, None)) # Maximum is unbounded, minimum bounded to 0
68 """)
69
70 min_interval = Either(Float, TimeDelta, Int, default=None, help="""
71 The level that the range is allowed to zoom in, expressed as the
72 minimum visible interval. If set to ``None`` (default), the minimum
73 interval is not bound. Can be a timedelta. """)
74
75 max_interval = Either(Float, TimeDelta, Int, default=None, help="""
76 The level that the range is allowed to zoom out, expressed as the
77 maximum visible interval. Can be a timedelta. Note that ``bounds`` can
78 impose an implicit constraint on the maximum interval as well. """)
79
80 def __init__(self, *args, **kwargs):
81 if args and ('start' in kwargs or 'end' in kwargs):
82 raise ValueError("'start' and 'end' keywords cannot be used with positional arguments")
83 if args and len(args) != 2:
84 raise ValueError('Only Range1d(start, end) acceptable when using positional arguments')
85
86 if args:
87 kwargs['start'] = args[0]
88 kwargs['end'] = args[1]
89
90 super(Range1d, self).__init__(**kwargs)
91
92
93 @abstract
94 class DataRange(Range):
95 """ A base class for all data range types. ``DataRange`` is not
96 generally useful to instantiate on its own.
97
98 """
99
100 names = List(String, help="""
101 A list of names to query for. If set, only renderers that
102 have a matching value for their ``name`` attribute will be used
103 for autoranging.
104 """)
105
106 renderers = List(Instance(Renderer), help="""
107 An explicit list of renderers to autorange against. If unset,
108 defaults to all renderers on a plot.
109 """)
110
111
112 class DataRange1d(DataRange):
113 """ An auto-fitting range in a continuous scalar dimension.
114 The upper and lower bounds are set to the min and max of the data.
115 """
116
117 range_padding = Float(default=0.1, help="""
118 A percentage of the total range size to add as padding to
119 the range start and end.
120 """)
121
122 start = Float(help="""
123 An explicitly supplied range start. If provided, will override
124 automatically computed start value.
125 """)
126
127 end = Float(help="""
128 An explicitly supplied range end. If provided, will override
129 automatically computed end value.
130 """)
131
132 bounds = MinMaxBounds(accept_datetime=False, default=None, help="""
133 The bounds that the range is allowed to go to - typically used to prevent
134 the user from panning/zooming/etc away from the data.
135
136 By default, the bounds will be None, allowing your plot to pan/zoom as far as you want.
137 If bounds are 'auto' they will be computed to be the same as the start and end of the DataRange1d.
138
139 Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is
140 increasing or decreasing, the first item should be the minimum value of the range and the
141 second item should be the maximum. Setting min > max will result in a ``ValueError``.
142
143 If you only want to constrain one end of the plot, you can set min or max to
144 ``None`` e.g. ``DataRange1d(bounds=(None, 12))``
145 """)
146
147 min_interval = Float(default=None, help="""
148 The level that the range is allowed to zoom in, expressed as the
149 minimum visible interval. If set to ``None`` (default), the minimum
150 interval is not bound.""")
151
152 max_interval = Float(default=None, help="""
153 The level that the range is allowed to zoom out, expressed as the
154 maximum visible interval. Note that ``bounds`` can impose an
155 implicit constraint on the maximum interval as well.""")
156
157 flipped = Bool(default=False, help="""
158 Whether the range should be "flipped" from its normal direction when
159 auto-ranging.
160 """)
161
162 follow = Enum(StartEnd, default=None, help="""
163 Configure the data to follow one or the other data extreme, with a
164 maximum range size of ``follow_interval``.
165
166 If set to ``"start"`` then the range will adjust so that ``start`` always
167 corresponds to the minimum data value (or maximum, if ``flipped`` is
168 ``True``).
169
170 If set to ``"end"`` then the range will adjust so that ``end`` always
171 corresponds to the maximum data value (or minimum, if ``flipped`` is
172 ``True``).
173
174 If set to ``None`` (default), then auto-ranging does not follow, and
175 the range will encompass both the minimum and maximum data values.
176
177 ``follow`` cannot be used with bounds, and if set, bounds will be set to ``None``.
178 """)
179
180 follow_interval = Float(default=None, help="""
181 If ``follow`` is set to ``"start"`` or ``"end"`` then the range will
182 always be constrained to that::
183
184 abs(r.start - r.end) <= follow_interval
185
186 is maintained.
187
188 """)
189
190 default_span = Float(default=2.0, help="""
191 A default width for the interval, in case ``start`` is equal to ``end``.
192 """)
193
194 def __init__(self, *args, **kwargs):
195 if kwargs.get('follow') is not None:
196 kwargs['bounds'] = None
197 super(DataRange1d, self).__init__(**kwargs)
198
199
200 class FactorRange(Range):
201 """ A range in a categorical dimension.
202
203 In addition to supplying ``factors`` keyword argument to the
204 ``FactorRange`` initializer, you can also instantiate with
205 the convenience syntax::
206
207 FactorRange("foo", "bar") # equivalent to FactorRange(factors=["foo", "bar"])
208
209 .. note::
210 ``FactorRange`` may be renamed to ``CategoricalRange`` in
211 the future.
212
213 """
214
215 offset = Float(0, help="""
216 An offset to the (synthetic) range (default: 0)
217
218 .. note::
219 The primary usage of this is to support compatibility and integration
220 with other plotting systems, and will not generally of interest to
221 most users.
222
223 """)
224
225 factors = Either(List(String), List(Int), help="""
226 A list of string or integer factors (categories) to comprise
227 this categorical range.
228 """)
229
230 bounds = Either(Auto, List(String), List(Int), default=None, help="""
231 The bounds that the range is allowed to go to - typically used to prevent
232 the user from panning/zooming/etc away from the data.
233
234 Unlike Range1d and DataRange1d, factors do not have an order and so a min and max cannot be
235 provied in the same way. bounds accepts a list of factors, that constrain the displayed factors.
236
237 By default, bounds are ``None``, allows unlimited panning or zooming.
238
239 If ``bounds='auto'``, bounds will be the same as factors and the plot will not be able to
240 pan or zoom beyond the first and last items in factors.
241
242 If you provide a list, then only the factors that are in that list will be displayed on the
243 plot and the plot will not pan or zoom outside the first and last items in the shortened
244 factors list. Note the order of factors is the defining order for your plot.
245
246 Values of bounds that are not in factors are acceptable and will simply have no impact
247 on the plot.
248
249 Examples:
250
251 Auto behavior:
252 x_range = FactorRange(factors=["apples", "dogs", "peaches", "bananas", "pigs"], bounds='auto')
253
254 The plot will display all the factors and you will not be able to pan left of apples or right
255 of pigs.
256
257 Constraining behavior:
258 x_range = FactorRange(factors=["apples", "dogs", "peaches", "bananas", "pigs"], bounds=["apples", "bananas", "peaches"])
259
260 The plot will display the chart with only the factors ["apples", "peaches", "bananas"] (in that order)
261 and the plot will not pan left of apples or right of bananas.
262 """)
263
264 min_interval = Int(default=None, help="""
265 The level that the range is allowed to zoom in, expressed as the
266 minimum number of visible categories. If set to ``None`` (default),
267 the minimum interval is not bound.""")
268
269 max_interval = Int(default=None, help="""
270 The level that the range is allowed to zoom out, expressed as the
271 maximum number of visible categories. Note that ``bounds`` can
272 impose an implicit constraint on the maximum interval as well.""")
273
274 def __init__(self, *args, **kwargs):
275 if args and "factors" in kwargs:
276 raise ValueError("'factors' keyword cannot be used with positional arguments")
277 elif args:
278 kwargs['factors'] = list(args)
279 super(FactorRange, self).__init__(**kwargs)
280
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bokeh/models/ranges.py b/bokeh/models/ranges.py
--- a/bokeh/models/ranges.py
+++ b/bokeh/models/ranges.py
@@ -115,7 +115,7 @@
"""
range_padding = Float(default=0.1, help="""
- A percentage of the total range size to add as padding to
+ A fraction of the total range size to add as padding to
the range start and end.
""")
@@ -188,7 +188,8 @@
""")
default_span = Float(default=2.0, help="""
- A default width for the interval, in case ``start`` is equal to ``end``.
+ A default width for the interval, in case ``start`` is equal to ``end``
+ (if used with a log axis, default_span is in powers of 10).
""")
def __init__(self, *args, **kwargs):
|
{"golden_diff": "diff --git a/bokeh/models/ranges.py b/bokeh/models/ranges.py\n--- a/bokeh/models/ranges.py\n+++ b/bokeh/models/ranges.py\n@@ -115,7 +115,7 @@\n \"\"\"\n \n range_padding = Float(default=0.1, help=\"\"\"\n- A percentage of the total range size to add as padding to\n+ A fraction of the total range size to add as padding to\n the range start and end.\n \"\"\")\n \n@@ -188,7 +188,8 @@\n \"\"\")\n \n default_span = Float(default=2.0, help=\"\"\"\n- A default width for the interval, in case ``start`` is equal to ``end``.\n+ A default width for the interval, in case ``start`` is equal to ``end``\n+ (if used with a log axis, default_span is in powers of 10).\n \"\"\")\n \n def __init__(self, *args, **kwargs):\n", "issue": "plot is empty when log scale is used\nBokeh incorrectly creates an empty plot with the following:\n\n``` python\nfrom bokeh.plotting import figure, output_file, show\nys = [4.471799184102565e-05, 0.0009856299875536934, 0.0011045119899790734]\nxs = range(len(ys))\noutput_file(\"log.html\")\np = figure(y_axis_type=\"log\")\np.line(xs, ys)\nshow(p)\n```\n\nThe problem only occurs if `y_axis_type=\"log\"` is used.\n\nIt seems when the range of y values is too narrow bokeh fails to compute the y-axis range. If I multiple the y values by 100, it plots properly.\n\nTested on bokeh version: 0.11.1 py34_0\n\n", "before_files": [{"content": "\"\"\" Models for describing different kinds of ranges of values\nin different kinds of spaces (e.g., continuous or categorical)\nand with options for \"auto sizing\".\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ..model import Model\nfrom ..core.enums import StartEnd\nfrom ..core.properties import abstract\nfrom ..core.properties import (\n Auto, Bool, Int, Float, String, Datetime, TimeDelta, Instance, List,\n Either, Enum, MinMaxBounds,\n)\nfrom .callbacks import Callback\nfrom .renderers import Renderer\n\n\n@abstract\nclass Range(Model):\n \"\"\" A base class for all range types. ``Range`` is not generally\n useful to instantiate on its own.\n\n \"\"\"\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the range is updated.\n \"\"\")\n\n\nclass Range1d(Range):\n \"\"\" A fixed, closed range [start, end] in a continuous scalar\n dimension.\n\n In addition to supplying ``start`` and ``end`` keyword arguments\n to the ``Range1d`` initializer, you can also instantiate with\n the convenience syntax::\n\n Range(0, 10) # equivalent to Range(start=0, end=10)\n\n \"\"\"\n\n start = Either(Float, Datetime, Int, default=0, help=\"\"\"\n The start of the range.\n \"\"\")\n\n end = Either(Float, Datetime, Int, default=1, help=\"\"\"\n The end of the range.\n \"\"\")\n\n bounds = MinMaxBounds(accept_datetime=True, default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n If set to ``'auto'``, the bounds will be computed to the start and end of the Range.\n\n Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is\n increasing or decreasing, the first item should be the minimum value of the range and the\n second item should be the maximum. Setting min > max will result in a ``ValueError``.\n\n By default, bounds are ``None`` and your plot to pan/zoom as far as you want. If you only\n want to constrain one end of the plot, you can set min or max to None.\n\n Examples:\n\n Range1d(0, 1, bounds='auto') # Auto-bounded to 0 and 1 (Default behavior)\n Range1d(start=0, end=1, bounds=(0, None)) # Maximum is unbounded, minimum bounded to 0\n \"\"\")\n\n min_interval = Either(Float, TimeDelta, Int, default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum visible interval. If set to ``None`` (default), the minimum\n interval is not bound. Can be a timedelta. \"\"\")\n\n max_interval = Either(Float, TimeDelta, Int, default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum visible interval. Can be a timedelta. Note that ``bounds`` can\n impose an implicit constraint on the maximum interval as well. \"\"\")\n\n def __init__(self, *args, **kwargs):\n if args and ('start' in kwargs or 'end' in kwargs):\n raise ValueError(\"'start' and 'end' keywords cannot be used with positional arguments\")\n if args and len(args) != 2:\n raise ValueError('Only Range1d(start, end) acceptable when using positional arguments')\n\n if args:\n kwargs['start'] = args[0]\n kwargs['end'] = args[1]\n\n super(Range1d, self).__init__(**kwargs)\n\n\n@abstract\nclass DataRange(Range):\n \"\"\" A base class for all data range types. ``DataRange`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used\n for autoranging.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to autorange against. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n\nclass DataRange1d(DataRange):\n \"\"\" An auto-fitting range in a continuous scalar dimension.\n The upper and lower bounds are set to the min and max of the data.\n \"\"\"\n\n range_padding = Float(default=0.1, help=\"\"\"\n A percentage of the total range size to add as padding to\n the range start and end.\n \"\"\")\n\n start = Float(help=\"\"\"\n An explicitly supplied range start. If provided, will override\n automatically computed start value.\n \"\"\")\n\n end = Float(help=\"\"\"\n An explicitly supplied range end. If provided, will override\n automatically computed end value.\n \"\"\")\n\n bounds = MinMaxBounds(accept_datetime=False, default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n By default, the bounds will be None, allowing your plot to pan/zoom as far as you want.\n If bounds are 'auto' they will be computed to be the same as the start and end of the DataRange1d.\n\n Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is\n increasing or decreasing, the first item should be the minimum value of the range and the\n second item should be the maximum. Setting min > max will result in a ``ValueError``.\n\n If you only want to constrain one end of the plot, you can set min or max to\n ``None`` e.g. ``DataRange1d(bounds=(None, 12))``\n \"\"\")\n\n min_interval = Float(default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum visible interval. If set to ``None`` (default), the minimum\n interval is not bound.\"\"\")\n\n max_interval = Float(default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum visible interval. Note that ``bounds`` can impose an\n implicit constraint on the maximum interval as well.\"\"\")\n\n flipped = Bool(default=False, help=\"\"\"\n Whether the range should be \"flipped\" from its normal direction when\n auto-ranging.\n \"\"\")\n\n follow = Enum(StartEnd, default=None, help=\"\"\"\n Configure the data to follow one or the other data extreme, with a\n maximum range size of ``follow_interval``.\n\n If set to ``\"start\"`` then the range will adjust so that ``start`` always\n corresponds to the minimum data value (or maximum, if ``flipped`` is\n ``True``).\n\n If set to ``\"end\"`` then the range will adjust so that ``end`` always\n corresponds to the maximum data value (or minimum, if ``flipped`` is\n ``True``).\n\n If set to ``None`` (default), then auto-ranging does not follow, and\n the range will encompass both the minimum and maximum data values.\n\n ``follow`` cannot be used with bounds, and if set, bounds will be set to ``None``.\n \"\"\")\n\n follow_interval = Float(default=None, help=\"\"\"\n If ``follow`` is set to ``\"start\"`` or ``\"end\"`` then the range will\n always be constrained to that::\n\n abs(r.start - r.end) <= follow_interval\n\n is maintained.\n\n \"\"\")\n\n default_span = Float(default=2.0, help=\"\"\"\n A default width for the interval, in case ``start`` is equal to ``end``.\n \"\"\")\n\n def __init__(self, *args, **kwargs):\n if kwargs.get('follow') is not None:\n kwargs['bounds'] = None\n super(DataRange1d, self).__init__(**kwargs)\n\n\nclass FactorRange(Range):\n \"\"\" A range in a categorical dimension.\n\n In addition to supplying ``factors`` keyword argument to the\n ``FactorRange`` initializer, you can also instantiate with\n the convenience syntax::\n\n FactorRange(\"foo\", \"bar\") # equivalent to FactorRange(factors=[\"foo\", \"bar\"])\n\n .. note::\n ``FactorRange`` may be renamed to ``CategoricalRange`` in\n the future.\n\n \"\"\"\n\n offset = Float(0, help=\"\"\"\n An offset to the (synthetic) range (default: 0)\n\n .. note::\n The primary usage of this is to support compatibility and integration\n with other plotting systems, and will not generally of interest to\n most users.\n\n \"\"\")\n\n factors = Either(List(String), List(Int), help=\"\"\"\n A list of string or integer factors (categories) to comprise\n this categorical range.\n \"\"\")\n\n bounds = Either(Auto, List(String), List(Int), default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n Unlike Range1d and DataRange1d, factors do not have an order and so a min and max cannot be\n provied in the same way. bounds accepts a list of factors, that constrain the displayed factors.\n\n By default, bounds are ``None``, allows unlimited panning or zooming.\n\n If ``bounds='auto'``, bounds will be the same as factors and the plot will not be able to\n pan or zoom beyond the first and last items in factors.\n\n If you provide a list, then only the factors that are in that list will be displayed on the\n plot and the plot will not pan or zoom outside the first and last items in the shortened\n factors list. Note the order of factors is the defining order for your plot.\n\n Values of bounds that are not in factors are acceptable and will simply have no impact\n on the plot.\n\n Examples:\n\n Auto behavior:\n x_range = FactorRange(factors=[\"apples\", \"dogs\", \"peaches\", \"bananas\", \"pigs\"], bounds='auto')\n\n The plot will display all the factors and you will not be able to pan left of apples or right\n of pigs.\n\n Constraining behavior:\n x_range = FactorRange(factors=[\"apples\", \"dogs\", \"peaches\", \"bananas\", \"pigs\"], bounds=[\"apples\", \"bananas\", \"peaches\"])\n\n The plot will display the chart with only the factors [\"apples\", \"peaches\", \"bananas\"] (in that order)\n and the plot will not pan left of apples or right of bananas.\n \"\"\")\n\n min_interval = Int(default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum number of visible categories. If set to ``None`` (default),\n the minimum interval is not bound.\"\"\")\n\n max_interval = Int(default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum number of visible categories. Note that ``bounds`` can\n impose an implicit constraint on the maximum interval as well.\"\"\")\n\n def __init__(self, *args, **kwargs):\n if args and \"factors\" in kwargs:\n raise ValueError(\"'factors' keyword cannot be used with positional arguments\")\n elif args:\n kwargs['factors'] = list(args)\n super(FactorRange, self).__init__(**kwargs)\n", "path": "bokeh/models/ranges.py"}], "after_files": [{"content": "\"\"\" Models for describing different kinds of ranges of values\nin different kinds of spaces (e.g., continuous or categorical)\nand with options for \"auto sizing\".\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ..model import Model\nfrom ..core.enums import StartEnd\nfrom ..core.properties import abstract\nfrom ..core.properties import (\n Auto, Bool, Int, Float, String, Datetime, TimeDelta, Instance, List,\n Either, Enum, MinMaxBounds,\n)\nfrom .callbacks import Callback\nfrom .renderers import Renderer\n\n\n@abstract\nclass Range(Model):\n \"\"\" A base class for all range types. ``Range`` is not generally\n useful to instantiate on its own.\n\n \"\"\"\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the range is updated.\n \"\"\")\n\n\nclass Range1d(Range):\n \"\"\" A fixed, closed range [start, end] in a continuous scalar\n dimension.\n\n In addition to supplying ``start`` and ``end`` keyword arguments\n to the ``Range1d`` initializer, you can also instantiate with\n the convenience syntax::\n\n Range(0, 10) # equivalent to Range(start=0, end=10)\n\n \"\"\"\n\n start = Either(Float, Datetime, Int, default=0, help=\"\"\"\n The start of the range.\n \"\"\")\n\n end = Either(Float, Datetime, Int, default=1, help=\"\"\"\n The end of the range.\n \"\"\")\n\n bounds = MinMaxBounds(accept_datetime=True, default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n If set to ``'auto'``, the bounds will be computed to the start and end of the Range.\n\n Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is\n increasing or decreasing, the first item should be the minimum value of the range and the\n second item should be the maximum. Setting min > max will result in a ``ValueError``.\n\n By default, bounds are ``None`` and your plot to pan/zoom as far as you want. If you only\n want to constrain one end of the plot, you can set min or max to None.\n\n Examples:\n\n Range1d(0, 1, bounds='auto') # Auto-bounded to 0 and 1 (Default behavior)\n Range1d(start=0, end=1, bounds=(0, None)) # Maximum is unbounded, minimum bounded to 0\n \"\"\")\n\n min_interval = Either(Float, TimeDelta, Int, default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum visible interval. If set to ``None`` (default), the minimum\n interval is not bound. Can be a timedelta. \"\"\")\n\n max_interval = Either(Float, TimeDelta, Int, default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum visible interval. Can be a timedelta. Note that ``bounds`` can\n impose an implicit constraint on the maximum interval as well. \"\"\")\n\n def __init__(self, *args, **kwargs):\n if args and ('start' in kwargs or 'end' in kwargs):\n raise ValueError(\"'start' and 'end' keywords cannot be used with positional arguments\")\n if args and len(args) != 2:\n raise ValueError('Only Range1d(start, end) acceptable when using positional arguments')\n\n if args:\n kwargs['start'] = args[0]\n kwargs['end'] = args[1]\n\n super(Range1d, self).__init__(**kwargs)\n\n\n@abstract\nclass DataRange(Range):\n \"\"\" A base class for all data range types. ``DataRange`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used\n for autoranging.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to autorange against. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n\nclass DataRange1d(DataRange):\n \"\"\" An auto-fitting range in a continuous scalar dimension.\n The upper and lower bounds are set to the min and max of the data.\n \"\"\"\n\n range_padding = Float(default=0.1, help=\"\"\"\n A fraction of the total range size to add as padding to\n the range start and end.\n \"\"\")\n\n start = Float(help=\"\"\"\n An explicitly supplied range start. If provided, will override\n automatically computed start value.\n \"\"\")\n\n end = Float(help=\"\"\"\n An explicitly supplied range end. If provided, will override\n automatically computed end value.\n \"\"\")\n\n bounds = MinMaxBounds(accept_datetime=False, default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n By default, the bounds will be None, allowing your plot to pan/zoom as far as you want.\n If bounds are 'auto' they will be computed to be the same as the start and end of the DataRange1d.\n\n Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is\n increasing or decreasing, the first item should be the minimum value of the range and the\n second item should be the maximum. Setting min > max will result in a ``ValueError``.\n\n If you only want to constrain one end of the plot, you can set min or max to\n ``None`` e.g. ``DataRange1d(bounds=(None, 12))``\n \"\"\")\n\n min_interval = Float(default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum visible interval. If set to ``None`` (default), the minimum\n interval is not bound.\"\"\")\n\n max_interval = Float(default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum visible interval. Note that ``bounds`` can impose an\n implicit constraint on the maximum interval as well.\"\"\")\n\n flipped = Bool(default=False, help=\"\"\"\n Whether the range should be \"flipped\" from its normal direction when\n auto-ranging.\n \"\"\")\n\n follow = Enum(StartEnd, default=None, help=\"\"\"\n Configure the data to follow one or the other data extreme, with a\n maximum range size of ``follow_interval``.\n\n If set to ``\"start\"`` then the range will adjust so that ``start`` always\n corresponds to the minimum data value (or maximum, if ``flipped`` is\n ``True``).\n\n If set to ``\"end\"`` then the range will adjust so that ``end`` always\n corresponds to the maximum data value (or minimum, if ``flipped`` is\n ``True``).\n\n If set to ``None`` (default), then auto-ranging does not follow, and\n the range will encompass both the minimum and maximum data values.\n\n ``follow`` cannot be used with bounds, and if set, bounds will be set to ``None``.\n \"\"\")\n\n follow_interval = Float(default=None, help=\"\"\"\n If ``follow`` is set to ``\"start\"`` or ``\"end\"`` then the range will\n always be constrained to that::\n\n abs(r.start - r.end) <= follow_interval\n\n is maintained.\n\n \"\"\")\n\n default_span = Float(default=2.0, help=\"\"\"\n A default width for the interval, in case ``start`` is equal to ``end``\n (if used with a log axis, default_span is in powers of 10).\n \"\"\")\n\n def __init__(self, *args, **kwargs):\n if kwargs.get('follow') is not None:\n kwargs['bounds'] = None\n super(DataRange1d, self).__init__(**kwargs)\n\n\nclass FactorRange(Range):\n \"\"\" A range in a categorical dimension.\n\n In addition to supplying ``factors`` keyword argument to the\n ``FactorRange`` initializer, you can also instantiate with\n the convenience syntax::\n\n FactorRange(\"foo\", \"bar\") # equivalent to FactorRange(factors=[\"foo\", \"bar\"])\n\n .. note::\n ``FactorRange`` may be renamed to ``CategoricalRange`` in\n the future.\n\n \"\"\"\n\n offset = Float(0, help=\"\"\"\n An offset to the (synthetic) range (default: 0)\n\n .. note::\n The primary usage of this is to support compatibility and integration\n with other plotting systems, and will not generally of interest to\n most users.\n\n \"\"\")\n\n factors = Either(List(String), List(Int), help=\"\"\"\n A list of string or integer factors (categories) to comprise\n this categorical range.\n \"\"\")\n\n bounds = Either(Auto, List(String), List(Int), default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n Unlike Range1d and DataRange1d, factors do not have an order and so a min and max cannot be\n provied in the same way. bounds accepts a list of factors, that constrain the displayed factors.\n\n By default, bounds are ``None``, allows unlimited panning or zooming.\n\n If ``bounds='auto'``, bounds will be the same as factors and the plot will not be able to\n pan or zoom beyond the first and last items in factors.\n\n If you provide a list, then only the factors that are in that list will be displayed on the\n plot and the plot will not pan or zoom outside the first and last items in the shortened\n factors list. Note the order of factors is the defining order for your plot.\n\n Values of bounds that are not in factors are acceptable and will simply have no impact\n on the plot.\n\n Examples:\n\n Auto behavior:\n x_range = FactorRange(factors=[\"apples\", \"dogs\", \"peaches\", \"bananas\", \"pigs\"], bounds='auto')\n\n The plot will display all the factors and you will not be able to pan left of apples or right\n of pigs.\n\n Constraining behavior:\n x_range = FactorRange(factors=[\"apples\", \"dogs\", \"peaches\", \"bananas\", \"pigs\"], bounds=[\"apples\", \"bananas\", \"peaches\"])\n\n The plot will display the chart with only the factors [\"apples\", \"peaches\", \"bananas\"] (in that order)\n and the plot will not pan left of apples or right of bananas.\n \"\"\")\n\n min_interval = Int(default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum number of visible categories. If set to ``None`` (default),\n the minimum interval is not bound.\"\"\")\n\n max_interval = Int(default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum number of visible categories. Note that ``bounds`` can\n impose an implicit constraint on the maximum interval as well.\"\"\")\n\n def __init__(self, *args, **kwargs):\n if args and \"factors\" in kwargs:\n raise ValueError(\"'factors' keyword cannot be used with positional arguments\")\n elif args:\n kwargs['factors'] = list(args)\n super(FactorRange, self).__init__(**kwargs)\n", "path": "bokeh/models/ranges.py"}]}
| 3,777 | 222 |
gh_patches_debug_27593
|
rasdani/github-patches
|
git_diff
|
huggingface__optimum-185
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`*with_loss` wrappers failed for transformers 4.19.0
# Problem
As new the new argument `num_choices` added in `transformers.onnx.config.generate_dummy_inputs`, the wrappers will fail to match correct arguments
### Error Message
------------
```
Traceback (most recent call last):
File "test_onnxruntime_train.py", line 116, in test_ort_trainer
ort_eval_metrics = trainer.evaluate(inference_with_ort=inference_with_ort)
File "/workspace/optimum/onnxruntime/trainer.py", line 631, in evaluate
output = eval_loop(
File "/workspace/optimum/onnxruntime/trainer.py", line 767, in evaluation_loop_ort
self._export(onnx_model_path, with_loss=with_loss)
File "/workspace/optimum/onnxruntime/trainer.py", line 1230, in _export
_ = export(preprocessor=self.tokenizer, model=model, config=onnx_config, opset=opset, output=model_path)
File "/usr/local/lib/python3.8/dist-packages/transformers/onnx/convert.py", line 313, in export
return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer)
File "/usr/local/lib/python3.8/dist-packages/transformers/onnx/convert.py", line 138, in export_pytorch
model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH)
File "/workspace/optimum/onnx/configuration.py", line 170, in generate_dummy_inputs
dummy_inputs = super().generate_dummy_inputs(
File "/usr/local/lib/python3.8/dist-packages/transformers/onnx/config.py", line 308, in generate_dummy_inputs
token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
File "/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_fast.py", line 289, in num_special_tokens_to_add
return self._tokenizer.num_special_tokens_to_add(pair)
TypeError: Can't convert <TensorType.PYTORCH: 'pt'> to PyBool
```
# Solution
Add keys for arguments
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optimum/onnx/configuration.py`
Content:
```
1 # Copyright 2022 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import copy
16 from abc import ABC
17 from collections import OrderedDict
18 from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
19
20 from transformers.file_utils import TensorType, is_tf_available, is_torch_available
21 from transformers.onnx.utils import compute_effective_axis_dimension
22 from transformers.utils import logging
23
24
25 if TYPE_CHECKING:
26 from transformers.configuration_utils import PretrainedConfig
27 from transformers.feature_extraction_utils import FeatureExtractionMixin
28 from transformers.tokenization_utils_base import PreTrainedTokenizerBase
29
30 from transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
31
32
33 logger = logging.get_logger(__name__)
34
35
36 class OnnxConfigWithLoss(OnnxConfig, ABC):
37 """
38 Wrapper for the children classes of `transformers.onnx.OnnxConfig` to export the model through the ONNX format with loss in outputs.
39 """
40
41 _tasks_to_extra_inputs = {
42 "default": OrderedDict({"labels": {0: "batch"}}),
43 "masked-lm": OrderedDict({"labels": {0: "batch", 1: "sequence"}}),
44 "causal-lm": OrderedDict({"labels": {0: "batch", 1: "sequence"}}),
45 "seq2seq-lm": OrderedDict({"labels": {0: "batch", 1: "sequence"}}),
46 "sequence-classification": OrderedDict({"labels": {0: "batch"}}),
47 "token-classification": OrderedDict({"labels": {0: "batch", 1: "sequence"}}),
48 "multiple-choice": OrderedDict({"labels": {0: "batch"}}),
49 "question-answering": OrderedDict(
50 {
51 "start_positions": {0: "batch"},
52 "end_positions": {0: "batch"},
53 }
54 ),
55 "image-classification": OrderedDict({"labels": {0: "batch"}}),
56 }
57 _tasks_to_extra_outputs = {
58 "default": OrderedDict({"loss": {}}),
59 }
60
61 def __init__(self, config: OnnxConfig):
62 self.__dict__ = copy.deepcopy(config.__dict__)
63 self._onnx_config = config
64 if self.task not in self._tasks_to_extra_inputs:
65 raise ValueError(
66 f"{self.task} is not a supported task, supported tasks: {self._tasks_to_extra_inputs.keys()}"
67 )
68
69 @classmethod
70 def from_model_config(cls, config: OnnxConfig) -> "OnnxConfigWithLoss":
71 """
72 Instantiate a OnnxConfigWithLoss for a specific model
73 Args:
74 config: The model's configuration to use when exporting to ONNX
75 Returns:
76 OnnxConfigWithLoss for this model
77 """
78 return cls(config)
79
80 @property
81 def inputs(self) -> Mapping[str, Mapping[int, str]]:
82 """
83 Mapping containing the axis definition of the input tensors(including labels) to provide to the model
84 Returns:
85 For each input: its name associated to the axes symbolic name and the axis position within the tensor
86 """
87 inputs = self._onnx_config.inputs
88 inputs.update(self._tasks_to_extra_inputs[self.task])
89 return inputs
90
91 @property
92 def outputs(self) -> Mapping[str, Mapping[int, str]]:
93 """
94 Mapping containing the axis definition of the output tensors(including loss) to provide to the model
95 Returns:
96 For each output: its name associated to the axes symbolic name and the axis position within the tensor
97 """
98 common_outputs = self._onnx_config.outputs
99 extra_outputs = self._tasks_to_extra_outputs["default"]
100 common_outputs.update(extra_outputs)
101 for key in reversed(extra_outputs.keys()):
102 common_outputs.move_to_end(key, last=False)
103 return copy.deepcopy(common_outputs)
104
105 def _generate_extra_dummy_inputs_pt(
106 self,
107 dummy_inputs,
108 batch_size,
109 seq_length,
110 ) -> Mapping[str, Any]:
111 import torch
112
113 for label, input in self._tasks_to_extra_inputs[self.task].items():
114 if "sequence" in input.values():
115 dummy_inputs[label] = torch.zeros(batch_size, seq_length, dtype=torch.long)
116 else:
117 dummy_inputs[label] = torch.zeros(batch_size, dtype=torch.long)
118 return dummy_inputs
119
120 def _generate_extra_dummy_inputs_tf(
121 self,
122 dummy_inputs,
123 batch_size,
124 seq_length,
125 ) -> Mapping[str, Any]:
126 import tensorflow as tf
127
128 for label, input in self._tasks_to_extra_inputs[self.task].items():
129 if "sequence" in input.values():
130 dummy_inputs[label] = tf.zeros(batch_size, seq_length, dtype=tf.int64)
131 else:
132 dummy_inputs[label] = tf.zeros(batch_size, dtype=tf.int64)
133 return dummy_inputs
134
135 def generate_dummy_inputs(
136 self,
137 preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
138 batch_size: int = -1,
139 seq_length: int = -1,
140 is_pair: bool = False,
141 framework: Optional[TensorType] = None,
142 num_channels: int = 3,
143 image_width: int = 40,
144 image_height: int = 40,
145 tokenizer: "PreTrainedTokenizerBase" = None,
146 ) -> Mapping[str, Any]:
147 """
148 Generate inputs to provide to the ONNX exporter for the specific framework
149 Args:
150 preprocessor: ([`PreTrainedTokenizerBase`] or [`FeatureExtractionMixin`]):
151 The preprocessor associated with this model configuration.
152 batch_size (`int`, *optional*, defaults to -1):
153 The batch size to export the model for (-1 means dynamic axis).
154 seq_length (`int`, *optional*, defaults to -1):
155 The sequence length to export the model for (-1 means dynamic axis).
156 is_pair (`bool`, *optional*, defaults to `False`):
157 Indicate if the input is a pair (sentence 1, sentence 2)
158 framework (`TensorType`, *optional*, defaults to `None`):
159 The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.
160 num_channels (`int`, *optional*, defaults to 3):
161 The number of channels of the generated images.
162 image_width (`int`, *optional*, defaults to 40):
163 The width of the generated images.
164 image_height (`int`, *optional*, defaults to 40):
165 The height of the generated images.
166 Returns:
167 Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
168 """
169 # Generate dummy labels
170 dummy_inputs = super().generate_dummy_inputs(
171 preprocessor,
172 batch_size,
173 seq_length,
174 is_pair,
175 framework,
176 num_channels,
177 image_width,
178 image_height,
179 tokenizer,
180 )
181 label_batch_size = compute_effective_axis_dimension(
182 batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0
183 )
184 label_seq_length = compute_effective_axis_dimension(
185 seq_length, fixed_dimension=self.default_fixed_sequence, num_token_to_add=0
186 )
187
188 if framework == TensorType.PYTORCH:
189 if is_torch_available():
190 return self._generate_extra_dummy_inputs_pt(dummy_inputs, label_batch_size, label_seq_length)
191 else:
192 raise RuntimeError(f"Could not generate dummy inputs because no PyTorch installation was found.")
193 elif framework == TensorType.TENSORFLOW:
194 if is_tf_available():
195 return self._generate_extra_dummy_inputs_tf(dummy_inputs, label_batch_size, label_seq_length)
196 else:
197 raise RuntimeError(f"Could not generate dummy inputs because no TensorFlow installation was found.")
198 else:
199 raise ValueError(
200 f"Only two frameworks are supported for ONNX export: PyTorch or TensorFlow, but {framework} was provided."
201 )
202
203
204 class OnnxConfigWithPastAndLoss(OnnxConfigWithLoss, ABC):
205 def __init__(
206 self,
207 config: OnnxConfigWithPast,
208 use_past: bool = False,
209 ):
210 super().__init__(config)
211 self.use_past = use_past
212
213 @classmethod
214 def with_past(cls, config: OnnxConfigWithPast) -> "OnnxConfigWithPast":
215 """
216 Instantiate a OnnxConfigWithPastAndLoss with `use_past` attribute set to True
217 Args:
218 config: The underlying model's config to use when exporting to ONNX
219 Returns:
220 OnnxConfigWithPastAndLoss with `.use_past = True`
221 """
222 return cls(config, use_past=True)
223
224 @property
225 def outputs(self) -> Mapping[str, Mapping[int, str]]:
226 common_outputs = super().outputs
227 if self.use_past:
228 self._onnx_config.fill_with_past_key_values_(common_outputs, direction="outputs")
229
230 return common_outputs
231
232 def generate_dummy_inputs(
233 self,
234 tokenizer: "PreTrainedTokenizerBase",
235 batch_size: int = -1,
236 seq_length: int = -1,
237 is_pair: bool = False,
238 framework: Optional[TensorType] = None,
239 ) -> Mapping[str, Any]:
240
241 dummy_inputs = self._onnx_config.generate_dummy_inputs(
242 tokenizer,
243 batch_size,
244 seq_length,
245 is_pair,
246 framework,
247 )
248 label_batch_size = compute_effective_axis_dimension(
249 batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0
250 )
251 label_seq_length = compute_effective_axis_dimension(
252 seq_length, fixed_dimension=self.default_fixed_sequence, num_token_to_add=0
253 )
254
255 if framework == TensorType.PYTORCH:
256 if is_torch_available():
257 return self._generate_extra_dummy_inputs_pt(dummy_inputs, label_batch_size, label_seq_length)
258 else:
259 raise RuntimeError(f"Could not generate dummy inputs because no PyTorch installation was found.")
260 elif framework == TensorType.TENSORFLOW:
261 if is_tf_available():
262 return self._generate_extra_dummy_inputs_tf(dummy_inputs, label_batch_size, label_seq_length)
263 else:
264 raise RuntimeError(f"Could not generate dummy inputs because no TensorFlow installation was found.")
265 else:
266 raise ValueError(
267 f"Only two frameworks are supported for ONNX export: PyTorch or TensorFlow, but {framework} was provided."
268 )
269
270
271 class OnnxSeq2SeqConfigWithPastAndLoss(OnnxConfigWithPastAndLoss):
272 @property
273 def outputs(self) -> Mapping[str, Mapping[int, str]]:
274 common_outputs = self._onnx_config.outputs
275 extra_outputs = self._tasks_to_extra_outputs["default"]
276 common_outputs.update(extra_outputs)
277 for key in reversed(extra_outputs.keys()):
278 common_outputs.move_to_end(key, last=False)
279 return copy.deepcopy(common_outputs)
280
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optimum/onnx/configuration.py b/optimum/onnx/configuration.py
--- a/optimum/onnx/configuration.py
+++ b/optimum/onnx/configuration.py
@@ -168,15 +168,15 @@
"""
# Generate dummy labels
dummy_inputs = super().generate_dummy_inputs(
- preprocessor,
- batch_size,
- seq_length,
- is_pair,
- framework,
- num_channels,
- image_width,
- image_height,
- tokenizer,
+ preprocessor=preprocessor,
+ batch_size=batch_size,
+ seq_length=seq_length,
+ is_pair=is_pair,
+ framework=framework,
+ num_channels=num_channels,
+ image_width=image_width,
+ image_height=image_height,
+ tokenizer=tokenizer,
)
label_batch_size = compute_effective_axis_dimension(
batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0
@@ -240,10 +240,10 @@
dummy_inputs = self._onnx_config.generate_dummy_inputs(
tokenizer,
- batch_size,
- seq_length,
- is_pair,
- framework,
+ batch_size=batch_size,
+ seq_length=seq_length,
+ is_pair=is_pair,
+ framework=framework,
)
label_batch_size = compute_effective_axis_dimension(
batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0
|
{"golden_diff": "diff --git a/optimum/onnx/configuration.py b/optimum/onnx/configuration.py\n--- a/optimum/onnx/configuration.py\n+++ b/optimum/onnx/configuration.py\n@@ -168,15 +168,15 @@\n \"\"\"\n # Generate dummy labels\n dummy_inputs = super().generate_dummy_inputs(\n- preprocessor,\n- batch_size,\n- seq_length,\n- is_pair,\n- framework,\n- num_channels,\n- image_width,\n- image_height,\n- tokenizer,\n+ preprocessor=preprocessor,\n+ batch_size=batch_size,\n+ seq_length=seq_length,\n+ is_pair=is_pair,\n+ framework=framework,\n+ num_channels=num_channels,\n+ image_width=image_width,\n+ image_height=image_height,\n+ tokenizer=tokenizer,\n )\n label_batch_size = compute_effective_axis_dimension(\n batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0\n@@ -240,10 +240,10 @@\n \n dummy_inputs = self._onnx_config.generate_dummy_inputs(\n tokenizer,\n- batch_size,\n- seq_length,\n- is_pair,\n- framework,\n+ batch_size=batch_size,\n+ seq_length=seq_length,\n+ is_pair=is_pair,\n+ framework=framework,\n )\n label_batch_size = compute_effective_axis_dimension(\n batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0\n", "issue": "`*with_loss` wrappers failed for transformers 4.19.0\n# Problem\r\nAs new the new argument `num_choices` added in `transformers.onnx.config.generate_dummy_inputs`, the wrappers will fail to match correct arguments\r\n\r\n### Error Message\r\n------------\r\n```\r\nTraceback (most recent call last):\r\n File \"test_onnxruntime_train.py\", line 116, in test_ort_trainer\r\n ort_eval_metrics = trainer.evaluate(inference_with_ort=inference_with_ort)\r\n File \"/workspace/optimum/onnxruntime/trainer.py\", line 631, in evaluate\r\n output = eval_loop(\r\n File \"/workspace/optimum/onnxruntime/trainer.py\", line 767, in evaluation_loop_ort\r\n self._export(onnx_model_path, with_loss=with_loss)\r\n File \"/workspace/optimum/onnxruntime/trainer.py\", line 1230, in _export\r\n _ = export(preprocessor=self.tokenizer, model=model, config=onnx_config, opset=opset, output=model_path)\r\n File \"/usr/local/lib/python3.8/dist-packages/transformers/onnx/convert.py\", line 313, in export\r\n return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer)\r\n File \"/usr/local/lib/python3.8/dist-packages/transformers/onnx/convert.py\", line 138, in export_pytorch\r\n model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH)\r\n File \"/workspace/optimum/onnx/configuration.py\", line 170, in generate_dummy_inputs\r\n dummy_inputs = super().generate_dummy_inputs(\r\n File \"/usr/local/lib/python3.8/dist-packages/transformers/onnx/config.py\", line 308, in generate_dummy_inputs\r\n token_to_add = preprocessor.num_special_tokens_to_add(is_pair)\r\n File \"/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_fast.py\", line 289, in num_special_tokens_to_add\r\n return self._tokenizer.num_special_tokens_to_add(pair)\r\nTypeError: Can't convert <TensorType.PYTORCH: 'pt'> to PyBool\r\n```\r\n\r\n# Solution\r\nAdd keys for arguments\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nfrom abc import ABC\nfrom collections import OrderedDict\nfrom typing import TYPE_CHECKING, Any, Mapping, Optional, Union\n\nfrom transformers.file_utils import TensorType, is_tf_available, is_torch_available\nfrom transformers.onnx.utils import compute_effective_axis_dimension\nfrom transformers.utils import logging\n\n\nif TYPE_CHECKING:\n from transformers.configuration_utils import PretrainedConfig\n from transformers.feature_extraction_utils import FeatureExtractionMixin\n from transformers.tokenization_utils_base import PreTrainedTokenizerBase\n\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass OnnxConfigWithLoss(OnnxConfig, ABC):\n \"\"\"\n Wrapper for the children classes of `transformers.onnx.OnnxConfig` to export the model through the ONNX format with loss in outputs.\n \"\"\"\n\n _tasks_to_extra_inputs = {\n \"default\": OrderedDict({\"labels\": {0: \"batch\"}}),\n \"masked-lm\": OrderedDict({\"labels\": {0: \"batch\", 1: \"sequence\"}}),\n \"causal-lm\": OrderedDict({\"labels\": {0: \"batch\", 1: \"sequence\"}}),\n \"seq2seq-lm\": OrderedDict({\"labels\": {0: \"batch\", 1: \"sequence\"}}),\n \"sequence-classification\": OrderedDict({\"labels\": {0: \"batch\"}}),\n \"token-classification\": OrderedDict({\"labels\": {0: \"batch\", 1: \"sequence\"}}),\n \"multiple-choice\": OrderedDict({\"labels\": {0: \"batch\"}}),\n \"question-answering\": OrderedDict(\n {\n \"start_positions\": {0: \"batch\"},\n \"end_positions\": {0: \"batch\"},\n }\n ),\n \"image-classification\": OrderedDict({\"labels\": {0: \"batch\"}}),\n }\n _tasks_to_extra_outputs = {\n \"default\": OrderedDict({\"loss\": {}}),\n }\n\n def __init__(self, config: OnnxConfig):\n self.__dict__ = copy.deepcopy(config.__dict__)\n self._onnx_config = config\n if self.task not in self._tasks_to_extra_inputs:\n raise ValueError(\n f\"{self.task} is not a supported task, supported tasks: {self._tasks_to_extra_inputs.keys()}\"\n )\n\n @classmethod\n def from_model_config(cls, config: OnnxConfig) -> \"OnnxConfigWithLoss\":\n \"\"\"\n Instantiate a OnnxConfigWithLoss for a specific model\n Args:\n config: The model's configuration to use when exporting to ONNX\n Returns:\n OnnxConfigWithLoss for this model\n \"\"\"\n return cls(config)\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n \"\"\"\n Mapping containing the axis definition of the input tensors(including labels) to provide to the model\n Returns:\n For each input: its name associated to the axes symbolic name and the axis position within the tensor\n \"\"\"\n inputs = self._onnx_config.inputs\n inputs.update(self._tasks_to_extra_inputs[self.task])\n return inputs\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n \"\"\"\n Mapping containing the axis definition of the output tensors(including loss) to provide to the model\n Returns:\n For each output: its name associated to the axes symbolic name and the axis position within the tensor\n \"\"\"\n common_outputs = self._onnx_config.outputs\n extra_outputs = self._tasks_to_extra_outputs[\"default\"]\n common_outputs.update(extra_outputs)\n for key in reversed(extra_outputs.keys()):\n common_outputs.move_to_end(key, last=False)\n return copy.deepcopy(common_outputs)\n\n def _generate_extra_dummy_inputs_pt(\n self,\n dummy_inputs,\n batch_size,\n seq_length,\n ) -> Mapping[str, Any]:\n import torch\n\n for label, input in self._tasks_to_extra_inputs[self.task].items():\n if \"sequence\" in input.values():\n dummy_inputs[label] = torch.zeros(batch_size, seq_length, dtype=torch.long)\n else:\n dummy_inputs[label] = torch.zeros(batch_size, dtype=torch.long)\n return dummy_inputs\n\n def _generate_extra_dummy_inputs_tf(\n self,\n dummy_inputs,\n batch_size,\n seq_length,\n ) -> Mapping[str, Any]:\n import tensorflow as tf\n\n for label, input in self._tasks_to_extra_inputs[self.task].items():\n if \"sequence\" in input.values():\n dummy_inputs[label] = tf.zeros(batch_size, seq_length, dtype=tf.int64)\n else:\n dummy_inputs[label] = tf.zeros(batch_size, dtype=tf.int64)\n return dummy_inputs\n\n def generate_dummy_inputs(\n self,\n preprocessor: Union[\"PreTrainedTokenizerBase\", \"FeatureExtractionMixin\"],\n batch_size: int = -1,\n seq_length: int = -1,\n is_pair: bool = False,\n framework: Optional[TensorType] = None,\n num_channels: int = 3,\n image_width: int = 40,\n image_height: int = 40,\n tokenizer: \"PreTrainedTokenizerBase\" = None,\n ) -> Mapping[str, Any]:\n \"\"\"\n Generate inputs to provide to the ONNX exporter for the specific framework\n Args:\n preprocessor: ([`PreTrainedTokenizerBase`] or [`FeatureExtractionMixin`]):\n The preprocessor associated with this model configuration.\n batch_size (`int`, *optional*, defaults to -1):\n The batch size to export the model for (-1 means dynamic axis).\n seq_length (`int`, *optional*, defaults to -1):\n The sequence length to export the model for (-1 means dynamic axis).\n is_pair (`bool`, *optional*, defaults to `False`):\n Indicate if the input is a pair (sentence 1, sentence 2)\n framework (`TensorType`, *optional*, defaults to `None`):\n The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.\n num_channels (`int`, *optional*, defaults to 3):\n The number of channels of the generated images.\n image_width (`int`, *optional*, defaults to 40):\n The width of the generated images.\n image_height (`int`, *optional*, defaults to 40):\n The height of the generated images.\n Returns:\n Mapping[str, Tensor] holding the kwargs to provide to the model's forward function\n \"\"\"\n # Generate dummy labels\n dummy_inputs = super().generate_dummy_inputs(\n preprocessor,\n batch_size,\n seq_length,\n is_pair,\n framework,\n num_channels,\n image_width,\n image_height,\n tokenizer,\n )\n label_batch_size = compute_effective_axis_dimension(\n batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0\n )\n label_seq_length = compute_effective_axis_dimension(\n seq_length, fixed_dimension=self.default_fixed_sequence, num_token_to_add=0\n )\n\n if framework == TensorType.PYTORCH:\n if is_torch_available():\n return self._generate_extra_dummy_inputs_pt(dummy_inputs, label_batch_size, label_seq_length)\n else:\n raise RuntimeError(f\"Could not generate dummy inputs because no PyTorch installation was found.\")\n elif framework == TensorType.TENSORFLOW:\n if is_tf_available():\n return self._generate_extra_dummy_inputs_tf(dummy_inputs, label_batch_size, label_seq_length)\n else:\n raise RuntimeError(f\"Could not generate dummy inputs because no TensorFlow installation was found.\")\n else:\n raise ValueError(\n f\"Only two frameworks are supported for ONNX export: PyTorch or TensorFlow, but {framework} was provided.\"\n )\n\n\nclass OnnxConfigWithPastAndLoss(OnnxConfigWithLoss, ABC):\n def __init__(\n self,\n config: OnnxConfigWithPast,\n use_past: bool = False,\n ):\n super().__init__(config)\n self.use_past = use_past\n\n @classmethod\n def with_past(cls, config: OnnxConfigWithPast) -> \"OnnxConfigWithPast\":\n \"\"\"\n Instantiate a OnnxConfigWithPastAndLoss with `use_past` attribute set to True\n Args:\n config: The underlying model's config to use when exporting to ONNX\n Returns:\n OnnxConfigWithPastAndLoss with `.use_past = True`\n \"\"\"\n return cls(config, use_past=True)\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n common_outputs = super().outputs\n if self.use_past:\n self._onnx_config.fill_with_past_key_values_(common_outputs, direction=\"outputs\")\n\n return common_outputs\n\n def generate_dummy_inputs(\n self,\n tokenizer: \"PreTrainedTokenizerBase\",\n batch_size: int = -1,\n seq_length: int = -1,\n is_pair: bool = False,\n framework: Optional[TensorType] = None,\n ) -> Mapping[str, Any]:\n\n dummy_inputs = self._onnx_config.generate_dummy_inputs(\n tokenizer,\n batch_size,\n seq_length,\n is_pair,\n framework,\n )\n label_batch_size = compute_effective_axis_dimension(\n batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0\n )\n label_seq_length = compute_effective_axis_dimension(\n seq_length, fixed_dimension=self.default_fixed_sequence, num_token_to_add=0\n )\n\n if framework == TensorType.PYTORCH:\n if is_torch_available():\n return self._generate_extra_dummy_inputs_pt(dummy_inputs, label_batch_size, label_seq_length)\n else:\n raise RuntimeError(f\"Could not generate dummy inputs because no PyTorch installation was found.\")\n elif framework == TensorType.TENSORFLOW:\n if is_tf_available():\n return self._generate_extra_dummy_inputs_tf(dummy_inputs, label_batch_size, label_seq_length)\n else:\n raise RuntimeError(f\"Could not generate dummy inputs because no TensorFlow installation was found.\")\n else:\n raise ValueError(\n f\"Only two frameworks are supported for ONNX export: PyTorch or TensorFlow, but {framework} was provided.\"\n )\n\n\nclass OnnxSeq2SeqConfigWithPastAndLoss(OnnxConfigWithPastAndLoss):\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n common_outputs = self._onnx_config.outputs\n extra_outputs = self._tasks_to_extra_outputs[\"default\"]\n common_outputs.update(extra_outputs)\n for key in reversed(extra_outputs.keys()):\n common_outputs.move_to_end(key, last=False)\n return copy.deepcopy(common_outputs)\n", "path": "optimum/onnx/configuration.py"}], "after_files": [{"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nfrom abc import ABC\nfrom collections import OrderedDict\nfrom typing import TYPE_CHECKING, Any, Mapping, Optional, Union\n\nfrom transformers.file_utils import TensorType, is_tf_available, is_torch_available\nfrom transformers.onnx.utils import compute_effective_axis_dimension\nfrom transformers.utils import logging\n\n\nif TYPE_CHECKING:\n from transformers.configuration_utils import PretrainedConfig\n from transformers.feature_extraction_utils import FeatureExtractionMixin\n from transformers.tokenization_utils_base import PreTrainedTokenizerBase\n\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass OnnxConfigWithLoss(OnnxConfig, ABC):\n \"\"\"\n Wrapper for the children classes of `transformers.onnx.OnnxConfig` to export the model through the ONNX format with loss in outputs.\n \"\"\"\n\n _tasks_to_extra_inputs = {\n \"default\": OrderedDict({\"labels\": {0: \"batch\"}}),\n \"masked-lm\": OrderedDict({\"labels\": {0: \"batch\", 1: \"sequence\"}}),\n \"causal-lm\": OrderedDict({\"labels\": {0: \"batch\", 1: \"sequence\"}}),\n \"seq2seq-lm\": OrderedDict({\"labels\": {0: \"batch\", 1: \"sequence\"}}),\n \"sequence-classification\": OrderedDict({\"labels\": {0: \"batch\"}}),\n \"token-classification\": OrderedDict({\"labels\": {0: \"batch\", 1: \"sequence\"}}),\n \"multiple-choice\": OrderedDict({\"labels\": {0: \"batch\"}}),\n \"question-answering\": OrderedDict(\n {\n \"start_positions\": {0: \"batch\"},\n \"end_positions\": {0: \"batch\"},\n }\n ),\n \"image-classification\": OrderedDict({\"labels\": {0: \"batch\"}}),\n }\n _tasks_to_extra_outputs = {\n \"default\": OrderedDict({\"loss\": {}}),\n }\n\n def __init__(self, config: OnnxConfig):\n self.__dict__ = copy.deepcopy(config.__dict__)\n self._onnx_config = config\n if self.task not in self._tasks_to_extra_inputs:\n raise ValueError(\n f\"{self.task} is not a supported task, supported tasks: {self._tasks_to_extra_inputs.keys()}\"\n )\n\n @classmethod\n def from_model_config(cls, config: OnnxConfig) -> \"OnnxConfigWithLoss\":\n \"\"\"\n Instantiate a OnnxConfigWithLoss for a specific model\n Args:\n config: The model's configuration to use when exporting to ONNX\n Returns:\n OnnxConfigWithLoss for this model\n \"\"\"\n return cls(config)\n\n @property\n def inputs(self) -> Mapping[str, Mapping[int, str]]:\n \"\"\"\n Mapping containing the axis definition of the input tensors(including labels) to provide to the model\n Returns:\n For each input: its name associated to the axes symbolic name and the axis position within the tensor\n \"\"\"\n inputs = self._onnx_config.inputs\n inputs.update(self._tasks_to_extra_inputs[self.task])\n return inputs\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n \"\"\"\n Mapping containing the axis definition of the output tensors(including loss) to provide to the model\n Returns:\n For each output: its name associated to the axes symbolic name and the axis position within the tensor\n \"\"\"\n common_outputs = self._onnx_config.outputs\n extra_outputs = self._tasks_to_extra_outputs[\"default\"]\n common_outputs.update(extra_outputs)\n for key in reversed(extra_outputs.keys()):\n common_outputs.move_to_end(key, last=False)\n return copy.deepcopy(common_outputs)\n\n def _generate_extra_dummy_inputs_pt(\n self,\n dummy_inputs,\n batch_size,\n seq_length,\n ) -> Mapping[str, Any]:\n import torch\n\n for label, input in self._tasks_to_extra_inputs[self.task].items():\n if \"sequence\" in input.values():\n dummy_inputs[label] = torch.zeros(batch_size, seq_length, dtype=torch.long)\n else:\n dummy_inputs[label] = torch.zeros(batch_size, dtype=torch.long)\n return dummy_inputs\n\n def _generate_extra_dummy_inputs_tf(\n self,\n dummy_inputs,\n batch_size,\n seq_length,\n ) -> Mapping[str, Any]:\n import tensorflow as tf\n\n for label, input in self._tasks_to_extra_inputs[self.task].items():\n if \"sequence\" in input.values():\n dummy_inputs[label] = tf.zeros(batch_size, seq_length, dtype=tf.int64)\n else:\n dummy_inputs[label] = tf.zeros(batch_size, dtype=tf.int64)\n return dummy_inputs\n\n def generate_dummy_inputs(\n self,\n preprocessor: Union[\"PreTrainedTokenizerBase\", \"FeatureExtractionMixin\"],\n batch_size: int = -1,\n seq_length: int = -1,\n is_pair: bool = False,\n framework: Optional[TensorType] = None,\n num_channels: int = 3,\n image_width: int = 40,\n image_height: int = 40,\n tokenizer: \"PreTrainedTokenizerBase\" = None,\n ) -> Mapping[str, Any]:\n \"\"\"\n Generate inputs to provide to the ONNX exporter for the specific framework\n Args:\n preprocessor: ([`PreTrainedTokenizerBase`] or [`FeatureExtractionMixin`]):\n The preprocessor associated with this model configuration.\n batch_size (`int`, *optional*, defaults to -1):\n The batch size to export the model for (-1 means dynamic axis).\n seq_length (`int`, *optional*, defaults to -1):\n The sequence length to export the model for (-1 means dynamic axis).\n is_pair (`bool`, *optional*, defaults to `False`):\n Indicate if the input is a pair (sentence 1, sentence 2)\n framework (`TensorType`, *optional*, defaults to `None`):\n The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.\n num_channels (`int`, *optional*, defaults to 3):\n The number of channels of the generated images.\n image_width (`int`, *optional*, defaults to 40):\n The width of the generated images.\n image_height (`int`, *optional*, defaults to 40):\n The height of the generated images.\n Returns:\n Mapping[str, Tensor] holding the kwargs to provide to the model's forward function\n \"\"\"\n # Generate dummy labels\n dummy_inputs = super().generate_dummy_inputs(\n preprocessor=preprocessor,\n batch_size=batch_size,\n seq_length=seq_length,\n is_pair=is_pair,\n framework=framework,\n num_channels=num_channels,\n image_width=image_width,\n image_height=image_height,\n tokenizer=tokenizer,\n )\n label_batch_size = compute_effective_axis_dimension(\n batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0\n )\n label_seq_length = compute_effective_axis_dimension(\n seq_length, fixed_dimension=self.default_fixed_sequence, num_token_to_add=0\n )\n\n if framework == TensorType.PYTORCH:\n if is_torch_available():\n return self._generate_extra_dummy_inputs_pt(dummy_inputs, label_batch_size, label_seq_length)\n else:\n raise RuntimeError(f\"Could not generate dummy inputs because no PyTorch installation was found.\")\n elif framework == TensorType.TENSORFLOW:\n if is_tf_available():\n return self._generate_extra_dummy_inputs_tf(dummy_inputs, label_batch_size, label_seq_length)\n else:\n raise RuntimeError(f\"Could not generate dummy inputs because no TensorFlow installation was found.\")\n else:\n raise ValueError(\n f\"Only two frameworks are supported for ONNX export: PyTorch or TensorFlow, but {framework} was provided.\"\n )\n\n\nclass OnnxConfigWithPastAndLoss(OnnxConfigWithLoss, ABC):\n def __init__(\n self,\n config: OnnxConfigWithPast,\n use_past: bool = False,\n ):\n super().__init__(config)\n self.use_past = use_past\n\n @classmethod\n def with_past(cls, config: OnnxConfigWithPast) -> \"OnnxConfigWithPast\":\n \"\"\"\n Instantiate a OnnxConfigWithPastAndLoss with `use_past` attribute set to True\n Args:\n config: The underlying model's config to use when exporting to ONNX\n Returns:\n OnnxConfigWithPastAndLoss with `.use_past = True`\n \"\"\"\n return cls(config, use_past=True)\n\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n common_outputs = super().outputs\n if self.use_past:\n self._onnx_config.fill_with_past_key_values_(common_outputs, direction=\"outputs\")\n\n return common_outputs\n\n def generate_dummy_inputs(\n self,\n tokenizer: \"PreTrainedTokenizerBase\",\n batch_size: int = -1,\n seq_length: int = -1,\n is_pair: bool = False,\n framework: Optional[TensorType] = None,\n ) -> Mapping[str, Any]:\n\n dummy_inputs = self._onnx_config.generate_dummy_inputs(\n tokenizer,\n batch_size=batch_size,\n seq_length=seq_length,\n is_pair=is_pair,\n framework=framework,\n )\n label_batch_size = compute_effective_axis_dimension(\n batch_size, fixed_dimension=self.default_fixed_batch, num_token_to_add=0\n )\n label_seq_length = compute_effective_axis_dimension(\n seq_length, fixed_dimension=self.default_fixed_sequence, num_token_to_add=0\n )\n\n if framework == TensorType.PYTORCH:\n if is_torch_available():\n return self._generate_extra_dummy_inputs_pt(dummy_inputs, label_batch_size, label_seq_length)\n else:\n raise RuntimeError(f\"Could not generate dummy inputs because no PyTorch installation was found.\")\n elif framework == TensorType.TENSORFLOW:\n if is_tf_available():\n return self._generate_extra_dummy_inputs_tf(dummy_inputs, label_batch_size, label_seq_length)\n else:\n raise RuntimeError(f\"Could not generate dummy inputs because no TensorFlow installation was found.\")\n else:\n raise ValueError(\n f\"Only two frameworks are supported for ONNX export: PyTorch or TensorFlow, but {framework} was provided.\"\n )\n\n\nclass OnnxSeq2SeqConfigWithPastAndLoss(OnnxConfigWithPastAndLoss):\n @property\n def outputs(self) -> Mapping[str, Mapping[int, str]]:\n common_outputs = self._onnx_config.outputs\n extra_outputs = self._tasks_to_extra_outputs[\"default\"]\n common_outputs.update(extra_outputs)\n for key in reversed(extra_outputs.keys()):\n common_outputs.move_to_end(key, last=False)\n return copy.deepcopy(common_outputs)\n", "path": "optimum/onnx/configuration.py"}]}
| 3,912 | 318 |
gh_patches_debug_4095
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-python-6083
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip install mne should install numpy and scipy
Currently, the `mne` PyPI package doesn't have any dependencies, but I think it should really treat `numpy` and `scipy` as hard dependencies (and automatically install them if they are not present).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Copyright (C) 2011-2017 Alexandre Gramfort
4 # <alexandre.gramfort@telecom-paristech.fr>
5
6 import os
7 import os.path as op
8
9 from setuptools import setup
10
11 # get the version (don't import mne here, so dependencies are not needed)
12 version = None
13 with open(op.join('mne', '__init__.py'), 'r') as fid:
14 for line in (line.strip() for line in fid):
15 if line.startswith('__version__'):
16 version = line.split('=')[1].strip().strip('\'')
17 break
18 if version is None:
19 raise RuntimeError('Could not determine version')
20
21
22 descr = """MNE python project for MEG and EEG data analysis."""
23
24 DISTNAME = 'mne'
25 DESCRIPTION = descr
26 MAINTAINER = 'Alexandre Gramfort'
27 MAINTAINER_EMAIL = 'alexandre.gramfort@telecom-paristech.fr'
28 URL = 'http://martinos.org/mne'
29 LICENSE = 'BSD (3-clause)'
30 DOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'
31 VERSION = version
32
33
34 def package_tree(pkgroot):
35 """Get the submodule list."""
36 # Adapted from VisPy
37 path = op.dirname(__file__)
38 subdirs = [op.relpath(i[0], path).replace(op.sep, '.')
39 for i in os.walk(op.join(path, pkgroot))
40 if '__init__.py' in i[2]]
41 return sorted(subdirs)
42
43
44 if __name__ == "__main__":
45 if op.exists('MANIFEST'):
46 os.remove('MANIFEST')
47
48 setup(name=DISTNAME,
49 maintainer=MAINTAINER,
50 include_package_data=True,
51 maintainer_email=MAINTAINER_EMAIL,
52 description=DESCRIPTION,
53 license=LICENSE,
54 url=URL,
55 version=VERSION,
56 download_url=DOWNLOAD_URL,
57 long_description=open('README.rst').read(),
58 zip_safe=False, # the package can run out of an .egg file
59 classifiers=['Intended Audience :: Science/Research',
60 'Intended Audience :: Developers',
61 'License :: OSI Approved',
62 'Programming Language :: Python',
63 'Topic :: Software Development',
64 'Topic :: Scientific/Engineering',
65 'Operating System :: Microsoft :: Windows',
66 'Operating System :: POSIX',
67 'Operating System :: Unix',
68 'Operating System :: MacOS',
69 'Programming Language :: Python :: 3',
70 ],
71 platforms='any',
72 packages=package_tree('mne'),
73 package_data={'mne': [
74 op.join('data', '*.sel'),
75 op.join('data', 'icos.fif.gz'),
76 op.join('data', 'coil_def*.dat'),
77 op.join('data', 'helmets', '*.fif.gz'),
78 op.join('data', 'FreeSurferColorLUT.txt'),
79 op.join('data', 'image', '*gif'),
80 op.join('data', 'image', '*lout'),
81 op.join('data', 'fsaverage', '*.fif'),
82 op.join('channels', 'data', 'layouts', '*.lout'),
83 op.join('channels', 'data', 'layouts', '*.lay'),
84 op.join('channels', 'data', 'montages', '*.sfp'),
85 op.join('channels', 'data', 'montages', '*.txt'),
86 op.join('channels', 'data', 'montages', '*.elc'),
87 op.join('channels', 'data', 'neighbors', '*.mat'),
88 op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),
89 op.join('gui', 'help', '*.json'),
90 op.join('html', '*.js'),
91 op.join('html', '*.css'),
92 op.join('io', 'artemis123', 'resources', '*.csv'),
93 op.join('io', 'edf', 'gdf_encodes.txt')
94 ]},
95 scripts=['bin/mne'])
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,6 +69,7 @@
'Programming Language :: Python :: 3',
],
platforms='any',
+ install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'],
packages=package_tree('mne'),
package_data={'mne': [
op.join('data', '*.sel'),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,6 +69,7 @@\n 'Programming Language :: Python :: 3',\n ],\n platforms='any',\n+ install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'],\n packages=package_tree('mne'),\n package_data={'mne': [\n op.join('data', '*.sel'),\n", "issue": "pip install mne should install numpy and scipy\nCurrently, the `mne` PyPI package doesn't have any dependencies, but I think it should really treat `numpy` and `scipy` as hard dependencies (and automatically install them if they are not present).\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (C) 2011-2017 Alexandre Gramfort\n# <alexandre.gramfort@telecom-paristech.fr>\n\nimport os\nimport os.path as op\n\nfrom setuptools import setup\n\n# get the version (don't import mne here, so dependencies are not needed)\nversion = None\nwith open(op.join('mne', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = \"\"\"MNE python project for MEG and EEG data analysis.\"\"\"\n\nDISTNAME = 'mne'\nDESCRIPTION = descr\nMAINTAINER = 'Alexandre Gramfort'\nMAINTAINER_EMAIL = 'alexandre.gramfort@telecom-paristech.fr'\nURL = 'http://martinos.org/mne'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'\nVERSION = version\n\n\ndef package_tree(pkgroot):\n \"\"\"Get the submodule list.\"\"\"\n # Adapted from VisPy\n path = op.dirname(__file__)\n subdirs = [op.relpath(i[0], path).replace(op.sep, '.')\n for i in os.walk(op.join(path, pkgroot))\n if '__init__.py' in i[2]]\n return sorted(subdirs)\n\n\nif __name__ == \"__main__\":\n if op.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n zip_safe=False, # the package can run out of an .egg file\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3',\n ],\n platforms='any',\n packages=package_tree('mne'),\n package_data={'mne': [\n op.join('data', '*.sel'),\n op.join('data', 'icos.fif.gz'),\n op.join('data', 'coil_def*.dat'),\n op.join('data', 'helmets', '*.fif.gz'),\n op.join('data', 'FreeSurferColorLUT.txt'),\n op.join('data', 'image', '*gif'),\n op.join('data', 'image', '*lout'),\n op.join('data', 'fsaverage', '*.fif'),\n op.join('channels', 'data', 'layouts', '*.lout'),\n op.join('channels', 'data', 'layouts', '*.lay'),\n op.join('channels', 'data', 'montages', '*.sfp'),\n op.join('channels', 'data', 'montages', '*.txt'),\n op.join('channels', 'data', 'montages', '*.elc'),\n op.join('channels', 'data', 'neighbors', '*.mat'),\n op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),\n op.join('gui', 'help', '*.json'),\n op.join('html', '*.js'),\n op.join('html', '*.css'),\n op.join('io', 'artemis123', 'resources', '*.csv'),\n op.join('io', 'edf', 'gdf_encodes.txt')\n ]},\n scripts=['bin/mne'])\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (C) 2011-2017 Alexandre Gramfort\n# <alexandre.gramfort@telecom-paristech.fr>\n\nimport os\nimport os.path as op\n\nfrom setuptools import setup\n\n# get the version (don't import mne here, so dependencies are not needed)\nversion = None\nwith open(op.join('mne', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = \"\"\"MNE python project for MEG and EEG data analysis.\"\"\"\n\nDISTNAME = 'mne'\nDESCRIPTION = descr\nMAINTAINER = 'Alexandre Gramfort'\nMAINTAINER_EMAIL = 'alexandre.gramfort@telecom-paristech.fr'\nURL = 'http://martinos.org/mne'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'\nVERSION = version\n\n\ndef package_tree(pkgroot):\n \"\"\"Get the submodule list.\"\"\"\n # Adapted from VisPy\n path = op.dirname(__file__)\n subdirs = [op.relpath(i[0], path).replace(op.sep, '.')\n for i in os.walk(op.join(path, pkgroot))\n if '__init__.py' in i[2]]\n return sorted(subdirs)\n\n\nif __name__ == \"__main__\":\n if op.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n zip_safe=False, # the package can run out of an .egg file\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3',\n ],\n platforms='any',\n install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'],\n packages=package_tree('mne'),\n package_data={'mne': [\n op.join('data', '*.sel'),\n op.join('data', 'icos.fif.gz'),\n op.join('data', 'coil_def*.dat'),\n op.join('data', 'helmets', '*.fif.gz'),\n op.join('data', 'FreeSurferColorLUT.txt'),\n op.join('data', 'image', '*gif'),\n op.join('data', 'image', '*lout'),\n op.join('data', 'fsaverage', '*.fif'),\n op.join('channels', 'data', 'layouts', '*.lout'),\n op.join('channels', 'data', 'layouts', '*.lay'),\n op.join('channels', 'data', 'montages', '*.sfp'),\n op.join('channels', 'data', 'montages', '*.txt'),\n op.join('channels', 'data', 'montages', '*.elc'),\n op.join('channels', 'data', 'neighbors', '*.mat'),\n op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),\n op.join('gui', 'help', '*.json'),\n op.join('html', '*.js'),\n op.join('html', '*.css'),\n op.join('io', 'artemis123', 'resources', '*.csv'),\n op.join('io', 'edf', 'gdf_encodes.txt')\n ]},\n scripts=['bin/mne'])\n", "path": "setup.py"}]}
| 1,344 | 100 |
gh_patches_debug_7655
|
rasdani/github-patches
|
git_diff
|
openfun__marsha-99
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Staging, PreProduction & Production settings
## Purpose
At the time of writing, available configurations for Marsha are: `Development` and `Test`. We need more settings for missing environments.
## Proposal
- [ ] Add `Production` configuration
- [ ] Derive `Staging` and `Preproduction` configurations from `Production`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `marsha/settings.py`
Content:
```
1 """Django settings for marsha project.
2
3 Uses django-configurations to manage environments inheritance and the loading of some
4 config from the environment
5
6 """
7
8 from datetime import timedelta
9 import os
10
11 from django.utils.translation import gettext_lazy as _
12
13 from configurations import Configuration, values
14 import sentry_sdk
15 from sentry_sdk.integrations.django import DjangoIntegration
16
17
18 class Base(Configuration):
19 """Base configuration every configuration (aka environment) should inherit from.
20
21 It depends on an environment variable that SHOULD be defined:
22 - DJANGO_SECRET_KEY
23
24 You may also want to override default configuration by setting the following
25 environment variables:
26 - DJANGO_DEBUG
27 - DATABASE_URL
28 """
29
30 BASE_DIR = os.path.dirname(__file__)
31 DATA_DIR = values.Value(os.path.join("/", "data"))
32
33 # Static files (CSS, JavaScript, Images)
34 STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
35 STATIC_URL = "/static/"
36 MEDIA_URL = "/media/"
37 # Allow to configure location of static/media files for non-Docker installation
38 MEDIA_ROOT = values.Value(os.path.join(str(DATA_DIR), "media"))
39 STATIC_ROOT = values.Value(os.path.join(str(DATA_DIR), "static"))
40
41 SECRET_KEY = values.SecretValue()
42
43 DEBUG = values.BooleanValue(False)
44
45 DATABASES = {
46 "default": {
47 "ENGINE": values.Value(
48 "django.db.backends.postgresql_psycopg2",
49 environ_name="DATABASE_ENGINE",
50 environ_prefix=None,
51 ),
52 "NAME": values.Value(
53 "marsha", environ_name="POSTGRES_DB", environ_prefix=None
54 ),
55 "USER": values.Value(
56 "marsha_user", environ_name="POSTGRES_USER", environ_prefix=None
57 ),
58 "PASSWORD": values.Value(
59 "pass", environ_name="POSTGRES_PASSWORD", environ_prefix=None
60 ),
61 "HOST": values.Value(
62 "localhost", environ_name="POSTGRES_HOST", environ_prefix=None
63 ),
64 "PORT": values.Value(
65 5432, environ_name="POSTGRES_PORT", environ_prefix=None
66 ),
67 }
68 }
69
70 ALLOWED_HOSTS = []
71
72 # Application definition
73
74 INSTALLED_APPS = [
75 "django.contrib.admin.apps.SimpleAdminConfig",
76 "django.contrib.auth",
77 "django.contrib.contenttypes",
78 "django.contrib.sessions",
79 "django.contrib.messages",
80 "django.contrib.staticfiles",
81 "django_extensions",
82 "rest_framework",
83 "marsha.core.apps.CoreConfig",
84 ]
85
86 MIDDLEWARE = [
87 "django.middleware.security.SecurityMiddleware",
88 "django.contrib.sessions.middleware.SessionMiddleware",
89 "django.middleware.common.CommonMiddleware",
90 "django.middleware.csrf.CsrfViewMiddleware",
91 "django.contrib.auth.middleware.AuthenticationMiddleware",
92 "django.contrib.messages.middleware.MessageMiddleware",
93 "django.middleware.clickjacking.XFrameOptionsMiddleware",
94 ]
95
96 ROOT_URLCONF = "marsha.urls"
97
98 TEMPLATES = [
99 {
100 "BACKEND": "django.template.backends.django.DjangoTemplates",
101 "DIRS": [],
102 "APP_DIRS": True,
103 "OPTIONS": {
104 "context_processors": [
105 "django.template.context_processors.debug",
106 "django.template.context_processors.request",
107 "django.contrib.auth.context_processors.auth",
108 "django.contrib.messages.context_processors.messages",
109 ]
110 },
111 }
112 ]
113
114 AUTH_USER_MODEL = "core.User"
115
116 WSGI_APPLICATION = "marsha.wsgi.application"
117
118 REST_FRAMEWORK = {
119 "DEFAULT_AUTHENTICATION_CLASSES": (
120 "rest_framework_simplejwt.authentication.JWTTokenUserAuthentication",
121 )
122 }
123
124 # Password validation
125 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
126 AUTH_PASSWORD_VALIDATORS = [
127 {
128 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
129 },
130 {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
131 {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
132 {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
133 ]
134
135 JWT_SIGNING_KEY = values.SecretValue()
136
137 # Internationalization
138 # https://docs.djangoproject.com/en/2.0/topics/i18n/
139
140 LANGUAGE_CODE = "en-us"
141
142 LANGUAGES = [("en", _("english")), ("fr", _("french"))]
143
144 # Internationalization
145 TIME_ZONE = "UTC"
146 USE_I18N = True
147 USE_L10N = True
148 USE_TZ = True
149
150 VIDEO_RESOLUTIONS = [144, 240, 480, 720, 1080]
151
152 # AWS
153 AWS_ACCESS_KEY_ID = values.SecretValue()
154 AWS_SECRET_ACCESS_KEY = values.SecretValue()
155 AWS_DEFAULT_REGION = values.Value("eu-west-1")
156 UPDATE_STATE_SHARED_SECRETS = values.ListValue()
157
158 # Cloud Front key pair for signed urls
159 CLOUDFRONT_URL = values.SecretValue()
160 CLOUDFRONT_ACCESS_KEY_ID = values.Value(None)
161 CLOUDFRONT_PRIVATE_KEY_PATH = values.Value(
162 os.path.join(BASE_DIR, "..", ".ssh", "cloudfront_private_key")
163 )
164 CLOUDFRONT_SIGNED_URLS_ACTIVE = True
165 CLOUDFRONT_SIGNED_URLS_VALIDITY = 2 * 60 * 60 # 2 hours
166
167 # pylint: disable=invalid-name
168 @property
169 def SIMPLE_JWT(self):
170 """Define settings for `djangorestframework_simplejwt`.
171
172 The JWT_SIGNING_KEY must be evaluated late as the jwt library check for string type.
173 """
174 return {
175 "ACCESS_TOKEN_LIFETIME": timedelta(days=1),
176 "ALGORITHM": "HS256",
177 "SIGNING_KEY": str(self.JWT_SIGNING_KEY),
178 "USER_ID_CLAIM": "video_id",
179 "AUTH_TOKEN_CLASSES": ("rest_framework_simplejwt.tokens.AccessToken",),
180 }
181
182 @classmethod
183 def post_setup(cls):
184 """Post setup configuration.
185
186 This is the place where you can configure settings that require other
187 settings to be loaded.
188 """
189 super().post_setup()
190
191 # The DJANGO_SENTRY_DSN environment variable should be set to activate
192 # sentry for an environment
193 sentry_dsn = values.Value(None, environ_name="SENTRY_DSN")
194 if sentry_dsn is not None:
195 sentry_sdk.init(
196 dsn=sentry_dsn,
197 environment=cls.__name__.lower(),
198 integrations=[DjangoIntegration()],
199 )
200
201
202 class Development(Base):
203 """Development environment settings.
204
205 We set ``DEBUG`` to ``True`` by default, configure the server to respond to all hosts,
206 and use a local sqlite database by default.
207 """
208
209 DEBUG = values.BooleanValue(True)
210 ALLOWED_HOSTS = ["*"]
211
212 AWS_SOURCE_BUCKET_NAME = "development-marsha-source"
213
214
215 class Test(Base):
216 """Test environment settings."""
217
218 AWS_SOURCE_BUCKET_NAME = "test-marsha-source"
219
220 CLOUDFRONT_SIGNED_URLS_ACTIVE = False
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/marsha/settings.py b/marsha/settings.py
--- a/marsha/settings.py
+++ b/marsha/settings.py
@@ -218,3 +218,27 @@
AWS_SOURCE_BUCKET_NAME = "test-marsha-source"
CLOUDFRONT_SIGNED_URLS_ACTIVE = False
+
+
+class Production(Base):
+ """Production environment settings.
+
+ You must define the DJANGO_ALLOWED_HOSTS environment variable in Production
+ configuration (and derived configurations):
+
+ DJANGO_ALLOWED_HOSTS="foo.com,foo.fr"
+ """
+
+ ALLOWED_HOSTS = values.ListValue(None)
+
+
+class Staging(Production):
+ """Staging environment settings."""
+
+ pass
+
+
+class PreProduction(Production):
+ """Pre-production environment settings."""
+
+ pass
|
{"golden_diff": "diff --git a/marsha/settings.py b/marsha/settings.py\n--- a/marsha/settings.py\n+++ b/marsha/settings.py\n@@ -218,3 +218,27 @@\n AWS_SOURCE_BUCKET_NAME = \"test-marsha-source\"\n \n CLOUDFRONT_SIGNED_URLS_ACTIVE = False\n+\n+\n+class Production(Base):\n+ \"\"\"Production environment settings.\n+\n+ You must define the DJANGO_ALLOWED_HOSTS environment variable in Production\n+ configuration (and derived configurations):\n+\n+ DJANGO_ALLOWED_HOSTS=\"foo.com,foo.fr\"\n+ \"\"\"\n+\n+ ALLOWED_HOSTS = values.ListValue(None)\n+\n+\n+class Staging(Production):\n+ \"\"\"Staging environment settings.\"\"\"\n+\n+ pass\n+\n+\n+class PreProduction(Production):\n+ \"\"\"Pre-production environment settings.\"\"\"\n+\n+ pass\n", "issue": "Add Staging, PreProduction & Production settings\n## Purpose\r\n\r\nAt the time of writing, available configurations for Marsha are: `Development` and `Test`. We need more settings for missing environments. \r\n\r\n## Proposal\r\n\r\n- [ ] Add `Production` configuration\r\n- [ ] Derive `Staging` and `Preproduction` configurations from `Production`\n", "before_files": [{"content": "\"\"\"Django settings for marsha project.\n\nUses django-configurations to manage environments inheritance and the loading of some\nconfig from the environment\n\n\"\"\"\n\nfrom datetime import timedelta\nimport os\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom configurations import Configuration, values\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\n\nclass Base(Configuration):\n \"\"\"Base configuration every configuration (aka environment) should inherit from.\n\n It depends on an environment variable that SHOULD be defined:\n - DJANGO_SECRET_KEY\n\n You may also want to override default configuration by setting the following\n environment variables:\n - DJANGO_DEBUG\n - DATABASE_URL\n \"\"\"\n\n BASE_DIR = os.path.dirname(__file__)\n DATA_DIR = values.Value(os.path.join(\"/\", \"data\"))\n\n # Static files (CSS, JavaScript, Images)\n STATICFILES_DIRS = (os.path.join(BASE_DIR, \"static\"),)\n STATIC_URL = \"/static/\"\n MEDIA_URL = \"/media/\"\n # Allow to configure location of static/media files for non-Docker installation\n MEDIA_ROOT = values.Value(os.path.join(str(DATA_DIR), \"media\"))\n STATIC_ROOT = values.Value(os.path.join(str(DATA_DIR), \"static\"))\n\n SECRET_KEY = values.SecretValue()\n\n DEBUG = values.BooleanValue(False)\n\n DATABASES = {\n \"default\": {\n \"ENGINE\": values.Value(\n \"django.db.backends.postgresql_psycopg2\",\n environ_name=\"DATABASE_ENGINE\",\n environ_prefix=None,\n ),\n \"NAME\": values.Value(\n \"marsha\", environ_name=\"POSTGRES_DB\", environ_prefix=None\n ),\n \"USER\": values.Value(\n \"marsha_user\", environ_name=\"POSTGRES_USER\", environ_prefix=None\n ),\n \"PASSWORD\": values.Value(\n \"pass\", environ_name=\"POSTGRES_PASSWORD\", environ_prefix=None\n ),\n \"HOST\": values.Value(\n \"localhost\", environ_name=\"POSTGRES_HOST\", environ_prefix=None\n ),\n \"PORT\": values.Value(\n 5432, environ_name=\"POSTGRES_PORT\", environ_prefix=None\n ),\n }\n }\n\n ALLOWED_HOSTS = []\n\n # Application definition\n\n INSTALLED_APPS = [\n \"django.contrib.admin.apps.SimpleAdminConfig\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"rest_framework\",\n \"marsha.core.apps.CoreConfig\",\n ]\n\n MIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n ]\n\n ROOT_URLCONF = \"marsha.urls\"\n\n TEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n },\n }\n ]\n\n AUTH_USER_MODEL = \"core.User\"\n\n WSGI_APPLICATION = \"marsha.wsgi.application\"\n\n REST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework_simplejwt.authentication.JWTTokenUserAuthentication\",\n )\n }\n\n # Password validation\n # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n AUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n ]\n\n JWT_SIGNING_KEY = values.SecretValue()\n\n # Internationalization\n # https://docs.djangoproject.com/en/2.0/topics/i18n/\n\n LANGUAGE_CODE = \"en-us\"\n\n LANGUAGES = [(\"en\", _(\"english\")), (\"fr\", _(\"french\"))]\n\n # Internationalization\n TIME_ZONE = \"UTC\"\n USE_I18N = True\n USE_L10N = True\n USE_TZ = True\n\n VIDEO_RESOLUTIONS = [144, 240, 480, 720, 1080]\n\n # AWS\n AWS_ACCESS_KEY_ID = values.SecretValue()\n AWS_SECRET_ACCESS_KEY = values.SecretValue()\n AWS_DEFAULT_REGION = values.Value(\"eu-west-1\")\n UPDATE_STATE_SHARED_SECRETS = values.ListValue()\n\n # Cloud Front key pair for signed urls\n CLOUDFRONT_URL = values.SecretValue()\n CLOUDFRONT_ACCESS_KEY_ID = values.Value(None)\n CLOUDFRONT_PRIVATE_KEY_PATH = values.Value(\n os.path.join(BASE_DIR, \"..\", \".ssh\", \"cloudfront_private_key\")\n )\n CLOUDFRONT_SIGNED_URLS_ACTIVE = True\n CLOUDFRONT_SIGNED_URLS_VALIDITY = 2 * 60 * 60 # 2 hours\n\n # pylint: disable=invalid-name\n @property\n def SIMPLE_JWT(self):\n \"\"\"Define settings for `djangorestframework_simplejwt`.\n\n The JWT_SIGNING_KEY must be evaluated late as the jwt library check for string type.\n \"\"\"\n return {\n \"ACCESS_TOKEN_LIFETIME\": timedelta(days=1),\n \"ALGORITHM\": \"HS256\",\n \"SIGNING_KEY\": str(self.JWT_SIGNING_KEY),\n \"USER_ID_CLAIM\": \"video_id\",\n \"AUTH_TOKEN_CLASSES\": (\"rest_framework_simplejwt.tokens.AccessToken\",),\n }\n\n @classmethod\n def post_setup(cls):\n \"\"\"Post setup configuration.\n\n This is the place where you can configure settings that require other\n settings to be loaded.\n \"\"\"\n super().post_setup()\n\n # The DJANGO_SENTRY_DSN environment variable should be set to activate\n # sentry for an environment\n sentry_dsn = values.Value(None, environ_name=\"SENTRY_DSN\")\n if sentry_dsn is not None:\n sentry_sdk.init(\n dsn=sentry_dsn,\n environment=cls.__name__.lower(),\n integrations=[DjangoIntegration()],\n )\n\n\nclass Development(Base):\n \"\"\"Development environment settings.\n\n We set ``DEBUG`` to ``True`` by default, configure the server to respond to all hosts,\n and use a local sqlite database by default.\n \"\"\"\n\n DEBUG = values.BooleanValue(True)\n ALLOWED_HOSTS = [\"*\"]\n\n AWS_SOURCE_BUCKET_NAME = \"development-marsha-source\"\n\n\nclass Test(Base):\n \"\"\"Test environment settings.\"\"\"\n\n AWS_SOURCE_BUCKET_NAME = \"test-marsha-source\"\n\n CLOUDFRONT_SIGNED_URLS_ACTIVE = False\n", "path": "marsha/settings.py"}], "after_files": [{"content": "\"\"\"Django settings for marsha project.\n\nUses django-configurations to manage environments inheritance and the loading of some\nconfig from the environment\n\n\"\"\"\n\nfrom datetime import timedelta\nimport os\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom configurations import Configuration, values\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\n\nclass Base(Configuration):\n \"\"\"Base configuration every configuration (aka environment) should inherit from.\n\n It depends on an environment variable that SHOULD be defined:\n - DJANGO_SECRET_KEY\n\n You may also want to override default configuration by setting the following\n environment variables:\n - DJANGO_DEBUG\n - DATABASE_URL\n \"\"\"\n\n BASE_DIR = os.path.dirname(__file__)\n DATA_DIR = values.Value(os.path.join(\"/\", \"data\"))\n\n # Static files (CSS, JavaScript, Images)\n STATICFILES_DIRS = (os.path.join(BASE_DIR, \"static\"),)\n STATIC_URL = \"/static/\"\n MEDIA_URL = \"/media/\"\n # Allow to configure location of static/media files for non-Docker installation\n MEDIA_ROOT = values.Value(os.path.join(str(DATA_DIR), \"media\"))\n STATIC_ROOT = values.Value(os.path.join(str(DATA_DIR), \"static\"))\n\n SECRET_KEY = values.SecretValue()\n\n DEBUG = values.BooleanValue(False)\n\n DATABASES = {\n \"default\": {\n \"ENGINE\": values.Value(\n \"django.db.backends.postgresql_psycopg2\",\n environ_name=\"DATABASE_ENGINE\",\n environ_prefix=None,\n ),\n \"NAME\": values.Value(\n \"marsha\", environ_name=\"POSTGRES_DB\", environ_prefix=None\n ),\n \"USER\": values.Value(\n \"marsha_user\", environ_name=\"POSTGRES_USER\", environ_prefix=None\n ),\n \"PASSWORD\": values.Value(\n \"pass\", environ_name=\"POSTGRES_PASSWORD\", environ_prefix=None\n ),\n \"HOST\": values.Value(\n \"localhost\", environ_name=\"POSTGRES_HOST\", environ_prefix=None\n ),\n \"PORT\": values.Value(\n 5432, environ_name=\"POSTGRES_PORT\", environ_prefix=None\n ),\n }\n }\n\n ALLOWED_HOSTS = []\n\n # Application definition\n\n INSTALLED_APPS = [\n \"django.contrib.admin.apps.SimpleAdminConfig\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"rest_framework\",\n \"marsha.core.apps.CoreConfig\",\n ]\n\n MIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n ]\n\n ROOT_URLCONF = \"marsha.urls\"\n\n TEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n },\n }\n ]\n\n AUTH_USER_MODEL = \"core.User\"\n\n WSGI_APPLICATION = \"marsha.wsgi.application\"\n\n REST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework_simplejwt.authentication.JWTTokenUserAuthentication\",\n )\n }\n\n # Password validation\n # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n AUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n ]\n\n JWT_SIGNING_KEY = values.SecretValue()\n\n # Internationalization\n # https://docs.djangoproject.com/en/2.0/topics/i18n/\n\n LANGUAGE_CODE = \"en-us\"\n\n LANGUAGES = [(\"en\", _(\"english\")), (\"fr\", _(\"french\"))]\n\n # Internationalization\n TIME_ZONE = \"UTC\"\n USE_I18N = True\n USE_L10N = True\n USE_TZ = True\n\n VIDEO_RESOLUTIONS = [144, 240, 480, 720, 1080]\n\n # AWS\n AWS_ACCESS_KEY_ID = values.SecretValue()\n AWS_SECRET_ACCESS_KEY = values.SecretValue()\n AWS_DEFAULT_REGION = values.Value(\"eu-west-1\")\n UPDATE_STATE_SHARED_SECRETS = values.ListValue()\n\n # Cloud Front key pair for signed urls\n CLOUDFRONT_URL = values.SecretValue()\n CLOUDFRONT_ACCESS_KEY_ID = values.Value(None)\n CLOUDFRONT_PRIVATE_KEY_PATH = values.Value(\n os.path.join(BASE_DIR, \"..\", \".ssh\", \"cloudfront_private_key\")\n )\n CLOUDFRONT_SIGNED_URLS_ACTIVE = True\n CLOUDFRONT_SIGNED_URLS_VALIDITY = 2 * 60 * 60 # 2 hours\n\n # pylint: disable=invalid-name\n @property\n def SIMPLE_JWT(self):\n \"\"\"Define settings for `djangorestframework_simplejwt`.\n\n The JWT_SIGNING_KEY must be evaluated late as the jwt library check for string type.\n \"\"\"\n return {\n \"ACCESS_TOKEN_LIFETIME\": timedelta(days=1),\n \"ALGORITHM\": \"HS256\",\n \"SIGNING_KEY\": str(self.JWT_SIGNING_KEY),\n \"USER_ID_CLAIM\": \"video_id\",\n \"AUTH_TOKEN_CLASSES\": (\"rest_framework_simplejwt.tokens.AccessToken\",),\n }\n\n @classmethod\n def post_setup(cls):\n \"\"\"Post setup configuration.\n\n This is the place where you can configure settings that require other\n settings to be loaded.\n \"\"\"\n super().post_setup()\n\n # The DJANGO_SENTRY_DSN environment variable should be set to activate\n # sentry for an environment\n sentry_dsn = values.Value(None, environ_name=\"SENTRY_DSN\")\n if sentry_dsn is not None:\n sentry_sdk.init(\n dsn=sentry_dsn,\n environment=cls.__name__.lower(),\n integrations=[DjangoIntegration()],\n )\n\n\nclass Development(Base):\n \"\"\"Development environment settings.\n\n We set ``DEBUG`` to ``True`` by default, configure the server to respond to all hosts,\n and use a local sqlite database by default.\n \"\"\"\n\n DEBUG = values.BooleanValue(True)\n ALLOWED_HOSTS = [\"*\"]\n\n AWS_SOURCE_BUCKET_NAME = \"development-marsha-source\"\n\n\nclass Test(Base):\n \"\"\"Test environment settings.\"\"\"\n\n AWS_SOURCE_BUCKET_NAME = \"test-marsha-source\"\n\n CLOUDFRONT_SIGNED_URLS_ACTIVE = False\n\n\nclass Production(Base):\n \"\"\"Production environment settings.\n\n You must define the DJANGO_ALLOWED_HOSTS environment variable in Production\n configuration (and derived configurations):\n\n DJANGO_ALLOWED_HOSTS=\"foo.com,foo.fr\"\n \"\"\"\n\n ALLOWED_HOSTS = values.ListValue(None)\n\n\nclass Staging(Production):\n \"\"\"Staging environment settings.\"\"\"\n\n pass\n\n\nclass PreProduction(Production):\n \"\"\"Pre-production environment settings.\"\"\"\n\n pass\n", "path": "marsha/settings.py"}]}
| 2,420 | 187 |
gh_patches_debug_4828
|
rasdani/github-patches
|
git_diff
|
hylang__hy-2078
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Run REPL using `python -m hy`
Figuring out how to get a REPL is not trivial. What do you think about making `python -m hy` do it?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hy/__main__.py`
Content:
```
1 import hy # NOQA
2 import sys
3
4 # This just mocks the normalish behavior of the Python interp. Helpful to aid
5 # with shimming existing apps that don't really "work" with Hy.
6 #
7 # You could say this script helps Hyjack a file.
8 #
9
10
11 if len(sys.argv) > 1:
12 sys.argv.pop(0)
13 hy.importer._import_from_path('__main__', sys.argv[0])
14 sys.exit(0) # right?
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hy/__main__.py b/hy/__main__.py
--- a/hy/__main__.py
+++ b/hy/__main__.py
@@ -1,14 +1,8 @@
-import hy # NOQA
import sys
-# This just mocks the normalish behavior of the Python interp. Helpful to aid
-# with shimming existing apps that don't really "work" with Hy.
-#
-# You could say this script helps Hyjack a file.
-#
+from hy.cmdline import hy_main
+# Running hy as a module (e.g. `python -m hy`)
+# is equivalent to running the main `hy` command.
-if len(sys.argv) > 1:
- sys.argv.pop(0)
- hy.importer._import_from_path('__main__', sys.argv[0])
- sys.exit(0) # right?
+sys.exit(hy_main())
|
{"golden_diff": "diff --git a/hy/__main__.py b/hy/__main__.py\n--- a/hy/__main__.py\n+++ b/hy/__main__.py\n@@ -1,14 +1,8 @@\n-import hy # NOQA\n import sys\n \n-# This just mocks the normalish behavior of the Python interp. Helpful to aid\n-# with shimming existing apps that don't really \"work\" with Hy.\n-#\n-# You could say this script helps Hyjack a file.\n-#\n+from hy.cmdline import hy_main\n \n+# Running hy as a module (e.g. `python -m hy`)\n+# is equivalent to running the main `hy` command.\n \n-if len(sys.argv) > 1:\n- sys.argv.pop(0)\n- hy.importer._import_from_path('__main__', sys.argv[0])\n- sys.exit(0) # right?\n+sys.exit(hy_main())\n", "issue": "Run REPL using `python -m hy`\nFiguring out how to get a REPL is not trivial. What do you think about making `python -m hy` do it?\n", "before_files": [{"content": "import hy # NOQA\nimport sys\n\n# This just mocks the normalish behavior of the Python interp. Helpful to aid\n# with shimming existing apps that don't really \"work\" with Hy.\n#\n# You could say this script helps Hyjack a file.\n#\n\n\nif len(sys.argv) > 1:\n sys.argv.pop(0)\n hy.importer._import_from_path('__main__', sys.argv[0])\n sys.exit(0) # right?\n", "path": "hy/__main__.py"}], "after_files": [{"content": "import sys\n\nfrom hy.cmdline import hy_main\n\n# Running hy as a module (e.g. `python -m hy`)\n# is equivalent to running the main `hy` command.\n\nsys.exit(hy_main())\n", "path": "hy/__main__.py"}]}
| 418 | 199 |
gh_patches_debug_21487
|
rasdani/github-patches
|
git_diff
|
apache__tvm-14908
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ci] `last-successful` job is disabled
The job from #10056: https://github.com/apache/tvm/actions/workflows/update_last_successful_branch.yml was disabled a few months ago in concert with the change to split up the Jenkins CI job from one job into many jobs (see #13337). To fix this we'd need to update the CI checking script to know about the new job names and find the last commit in which they all passed.
cc @Mousius @areusch @gigiblender @leandron
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ci/scripts/github/update_branch.py`
Content:
```
1 #!/usr/bin/env python3
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 import os
20 import json
21 import argparse
22 import sys
23 from pathlib import Path
24 from typing import Any, Dict
25
26 # Hackery to enable importing of utils from ci/scripts/jenkins
27 REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
28 sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
29
30 from git_utils import git, GitHubRepo, parse_remote
31
32
33 _commit_query_fields = """
34 messageHeadline
35 oid
36 statusCheckRollup {
37 contexts(last:100) {
38 nodes {
39 ... on CheckRun {
40 conclusion
41 status
42 name
43 checkSuite {
44 workflowRun {
45 workflow {
46 name
47 }
48 }
49 }
50 }
51 ... on StatusContext {
52 context
53 state
54 }
55 }
56 }
57 }
58 """
59
60
61 def commits_query(user: str, repo: str, cursor: str = None):
62 """
63 Create a GraphQL query to find the last N commits along with their statuses
64 and some metadata (paginated after 'cursor')
65 """
66 after = ""
67 if cursor is not None:
68 after = f', after:"{cursor}"'
69
70 return f"""
71 {{
72 repository(name: "{repo}", owner: "{user}") {{
73 defaultBranchRef {{
74 target {{
75 ... on Commit {{
76 history(first: 15{after}) {{
77 edges {{ cursor }}
78 nodes {{
79 {_commit_query_fields}
80 }}
81 }}
82 }}
83 }}
84 }}
85 }}
86 }}
87 """
88
89
90 def commit_passed_ci(commit: Dict[str, Any]) -> bool:
91 """
92 Returns true if all of a commit's statuses are SUCCESS
93 """
94 statuses = commit["statusCheckRollup"]["contexts"]["nodes"]
95
96 # GitHub Actions statuses are different from external GitHub statuses, so
97 # unify them into 1 representation
98 # https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
99 unified_statuses = []
100 for status in statuses:
101 if "context" in status:
102 # Parse non-GHA status
103 unified_statuses.append((status["context"], status["state"] == "SUCCESS"))
104 else:
105 # Parse GitHub Actions item
106 workflow = status["checkSuite"]["workflowRun"]["workflow"]["name"]
107 name = f"{workflow} / {status['name']}"
108 unified_statuses.append((name, status["conclusion"] == "SUCCESS"))
109
110 print(f"Statuses on {commit['oid']}:", json.dumps(unified_statuses, indent=2))
111
112 # Assert that specific jobs are present in the commit statuses (i.e. don't
113 # approve if CI was broken and didn't schedule a job)
114 expected_jobs = {"tvm-ci/branch"}
115 job_names = {name for name, status in unified_statuses}
116 for job in expected_jobs:
117 if job not in job_names:
118 # Did not find expected job name
119 return False
120
121 passed_ci = all(status for name, status in unified_statuses)
122 return passed_ci
123
124
125 def update_branch(user: str, repo: str, sha: str, branch_name: str) -> None:
126 git(["fetch", "origin", sha])
127 git(["reset", "--hard", "FETCH_HEAD"])
128 try:
129 git(["branch", "-D", branch_name])
130 except RuntimeError:
131 # Ignore failures (i.e. the branch did not exist in the first place)
132 pass
133 git(["checkout", "-b", branch_name])
134
135 # Create and push the branch
136 git(["push", "origin", "--force", branch_name])
137 print(f"Pushed branch {branch_name} with commit {sha}")
138
139
140 if __name__ == "__main__":
141 help = "Push the a branch to the last commit that passed all CI runs"
142 parser = argparse.ArgumentParser(description=help)
143 parser.add_argument("--remote", default="origin", help="ssh remote to parse")
144 parser.add_argument("--dry-run", action="store_true", help="don't submit to GitHub")
145 parser.add_argument("--branch", default="last-successful", help="branch name")
146 parser.add_argument(
147 "--testonly-json", help="(testing) data to use instead of fetching from GitHub"
148 )
149 args = parser.parse_args()
150
151 remote = git(["config", "--get", f"remote.{args.remote}.url"])
152 user, repo = parse_remote(remote)
153 # TODO: Remove this before landing
154 user, repo = ("apache", "tvm")
155
156 if args.testonly_json:
157 r = json.loads(args.testonly_json)
158 else:
159 github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
160 q = commits_query(user, repo)
161 r = github.graphql(q)
162
163 commits = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["nodes"]
164
165 # Limit GraphQL pagination
166 MAX_COMMITS_TO_CHECK = 50
167 i = 0
168
169 while i < MAX_COMMITS_TO_CHECK:
170 # Check each commit
171 for commit in commits:
172 if commit_passed_ci(commit):
173 print(f"Found last good commit: {commit['oid']}: {commit['messageHeadline']}")
174 if not args.dry_run:
175 update_branch(
176 user=user,
177 repo=repo,
178 sha=commit["oid"],
179 branch_name=args.branch,
180 )
181 # Nothing to do after updating the branch, exit early
182 exit(0)
183
184 # No good commit found, proceed to next page of results
185 edges = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["edges"]
186 if len(edges) == 0:
187 break
188 else:
189 q = commits_query(user, repo, cursor=edges[-1]["cursor"])
190 r = github.graphql(q)
191 commits = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["nodes"]
192
193 # Backstop to prevent looking through all the past commits
194 i += len(commits)
195
196 print(f"No good commits found in the last {len(commits)} commits")
197 exit(1)
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ci/scripts/github/update_branch.py b/ci/scripts/github/update_branch.py
--- a/ci/scripts/github/update_branch.py
+++ b/ci/scripts/github/update_branch.py
@@ -87,6 +87,22 @@
"""
+EXPECTED_CI_JOBS = [
+ "cross-isa-minimal/branch",
+ "gpu/branch",
+ "hexagon/branch",
+ "arm/branch",
+ "cortexm/branch",
+ "cpu/branch",
+ "docker/branch",
+ "i386/branch",
+ "lint/branch",
+ "minimal/branch",
+ "riscv/branch",
+ "wasm/branch",
+]
+
+
def commit_passed_ci(commit: Dict[str, Any]) -> bool:
"""
Returns true if all of a commit's statuses are SUCCESS
@@ -111,9 +127,8 @@
# Assert that specific jobs are present in the commit statuses (i.e. don't
# approve if CI was broken and didn't schedule a job)
- expected_jobs = {"tvm-ci/branch"}
job_names = {name for name, status in unified_statuses}
- for job in expected_jobs:
+ for job in EXPECTED_CI_JOBS:
if job not in job_names:
# Did not find expected job name
return False
|
{"golden_diff": "diff --git a/ci/scripts/github/update_branch.py b/ci/scripts/github/update_branch.py\n--- a/ci/scripts/github/update_branch.py\n+++ b/ci/scripts/github/update_branch.py\n@@ -87,6 +87,22 @@\n \"\"\"\n \n \n+EXPECTED_CI_JOBS = [\n+ \"cross-isa-minimal/branch\",\n+ \"gpu/branch\",\n+ \"hexagon/branch\",\n+ \"arm/branch\",\n+ \"cortexm/branch\",\n+ \"cpu/branch\",\n+ \"docker/branch\",\n+ \"i386/branch\",\n+ \"lint/branch\",\n+ \"minimal/branch\",\n+ \"riscv/branch\",\n+ \"wasm/branch\",\n+]\n+\n+\n def commit_passed_ci(commit: Dict[str, Any]) -> bool:\n \"\"\"\n Returns true if all of a commit's statuses are SUCCESS\n@@ -111,9 +127,8 @@\n \n # Assert that specific jobs are present in the commit statuses (i.e. don't\n # approve if CI was broken and didn't schedule a job)\n- expected_jobs = {\"tvm-ci/branch\"}\n job_names = {name for name, status in unified_statuses}\n- for job in expected_jobs:\n+ for job in EXPECTED_CI_JOBS:\n if job not in job_names:\n # Did not find expected job name\n return False\n", "issue": "[ci] `last-successful` job is disabled\nThe job from #10056: https://github.com/apache/tvm/actions/workflows/update_last_successful_branch.yml was disabled a few months ago in concert with the change to split up the Jenkins CI job from one job into many jobs (see #13337). To fix this we'd need to update the CI checking script to know about the new job names and find the last commit in which they all passed.\n\ncc @Mousius @areusch @gigiblender @leandron\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport json\nimport argparse\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict\n\n# Hackery to enable importing of utils from ci/scripts/jenkins\nREPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent\nsys.path.append(str(REPO_ROOT / \"ci\" / \"scripts\" / \"jenkins\"))\n\nfrom git_utils import git, GitHubRepo, parse_remote\n\n\n_commit_query_fields = \"\"\"\n messageHeadline\n oid\n statusCheckRollup {\n contexts(last:100) {\n nodes {\n ... on CheckRun {\n conclusion\n status\n name\n checkSuite {\n workflowRun {\n workflow {\n name\n }\n }\n }\n }\n ... on StatusContext {\n context\n state\n }\n }\n }\n }\n\"\"\"\n\n\ndef commits_query(user: str, repo: str, cursor: str = None):\n \"\"\"\n Create a GraphQL query to find the last N commits along with their statuses\n and some metadata (paginated after 'cursor')\n \"\"\"\n after = \"\"\n if cursor is not None:\n after = f', after:\"{cursor}\"'\n\n return f\"\"\"\n {{\n repository(name: \"{repo}\", owner: \"{user}\") {{\n defaultBranchRef {{\n target {{\n ... on Commit {{\n history(first: 15{after}) {{\n edges {{ cursor }}\n nodes {{\n {_commit_query_fields}\n }}\n }}\n }}\n }}\n }}\n }}\n }}\n \"\"\"\n\n\ndef commit_passed_ci(commit: Dict[str, Any]) -> bool:\n \"\"\"\n Returns true if all of a commit's statuses are SUCCESS\n \"\"\"\n statuses = commit[\"statusCheckRollup\"][\"contexts\"][\"nodes\"]\n\n # GitHub Actions statuses are different from external GitHub statuses, so\n # unify them into 1 representation\n # https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads\n unified_statuses = []\n for status in statuses:\n if \"context\" in status:\n # Parse non-GHA status\n unified_statuses.append((status[\"context\"], status[\"state\"] == \"SUCCESS\"))\n else:\n # Parse GitHub Actions item\n workflow = status[\"checkSuite\"][\"workflowRun\"][\"workflow\"][\"name\"]\n name = f\"{workflow} / {status['name']}\"\n unified_statuses.append((name, status[\"conclusion\"] == \"SUCCESS\"))\n\n print(f\"Statuses on {commit['oid']}:\", json.dumps(unified_statuses, indent=2))\n\n # Assert that specific jobs are present in the commit statuses (i.e. don't\n # approve if CI was broken and didn't schedule a job)\n expected_jobs = {\"tvm-ci/branch\"}\n job_names = {name for name, status in unified_statuses}\n for job in expected_jobs:\n if job not in job_names:\n # Did not find expected job name\n return False\n\n passed_ci = all(status for name, status in unified_statuses)\n return passed_ci\n\n\ndef update_branch(user: str, repo: str, sha: str, branch_name: str) -> None:\n git([\"fetch\", \"origin\", sha])\n git([\"reset\", \"--hard\", \"FETCH_HEAD\"])\n try:\n git([\"branch\", \"-D\", branch_name])\n except RuntimeError:\n # Ignore failures (i.e. the branch did not exist in the first place)\n pass\n git([\"checkout\", \"-b\", branch_name])\n\n # Create and push the branch\n git([\"push\", \"origin\", \"--force\", branch_name])\n print(f\"Pushed branch {branch_name} with commit {sha}\")\n\n\nif __name__ == \"__main__\":\n help = \"Push the a branch to the last commit that passed all CI runs\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--remote\", default=\"origin\", help=\"ssh remote to parse\")\n parser.add_argument(\"--dry-run\", action=\"store_true\", help=\"don't submit to GitHub\")\n parser.add_argument(\"--branch\", default=\"last-successful\", help=\"branch name\")\n parser.add_argument(\n \"--testonly-json\", help=\"(testing) data to use instead of fetching from GitHub\"\n )\n args = parser.parse_args()\n\n remote = git([\"config\", \"--get\", f\"remote.{args.remote}.url\"])\n user, repo = parse_remote(remote)\n # TODO: Remove this before landing\n user, repo = (\"apache\", \"tvm\")\n\n if args.testonly_json:\n r = json.loads(args.testonly_json)\n else:\n github = GitHubRepo(token=os.environ[\"GITHUB_TOKEN\"], user=user, repo=repo)\n q = commits_query(user, repo)\n r = github.graphql(q)\n\n commits = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"nodes\"]\n\n # Limit GraphQL pagination\n MAX_COMMITS_TO_CHECK = 50\n i = 0\n\n while i < MAX_COMMITS_TO_CHECK:\n # Check each commit\n for commit in commits:\n if commit_passed_ci(commit):\n print(f\"Found last good commit: {commit['oid']}: {commit['messageHeadline']}\")\n if not args.dry_run:\n update_branch(\n user=user,\n repo=repo,\n sha=commit[\"oid\"],\n branch_name=args.branch,\n )\n # Nothing to do after updating the branch, exit early\n exit(0)\n\n # No good commit found, proceed to next page of results\n edges = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"edges\"]\n if len(edges) == 0:\n break\n else:\n q = commits_query(user, repo, cursor=edges[-1][\"cursor\"])\n r = github.graphql(q)\n commits = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"nodes\"]\n\n # Backstop to prevent looking through all the past commits\n i += len(commits)\n\n print(f\"No good commits found in the last {len(commits)} commits\")\n exit(1)\n", "path": "ci/scripts/github/update_branch.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport json\nimport argparse\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict\n\n# Hackery to enable importing of utils from ci/scripts/jenkins\nREPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent\nsys.path.append(str(REPO_ROOT / \"ci\" / \"scripts\" / \"jenkins\"))\n\nfrom git_utils import git, GitHubRepo, parse_remote\n\n\n_commit_query_fields = \"\"\"\n messageHeadline\n oid\n statusCheckRollup {\n contexts(last:100) {\n nodes {\n ... on CheckRun {\n conclusion\n status\n name\n checkSuite {\n workflowRun {\n workflow {\n name\n }\n }\n }\n }\n ... on StatusContext {\n context\n state\n }\n }\n }\n }\n\"\"\"\n\n\ndef commits_query(user: str, repo: str, cursor: str = None):\n \"\"\"\n Create a GraphQL query to find the last N commits along with their statuses\n and some metadata (paginated after 'cursor')\n \"\"\"\n after = \"\"\n if cursor is not None:\n after = f', after:\"{cursor}\"'\n\n return f\"\"\"\n {{\n repository(name: \"{repo}\", owner: \"{user}\") {{\n defaultBranchRef {{\n target {{\n ... on Commit {{\n history(first: 15{after}) {{\n edges {{ cursor }}\n nodes {{\n {_commit_query_fields}\n }}\n }}\n }}\n }}\n }}\n }}\n }}\n \"\"\"\n\n\nEXPECTED_CI_JOBS = [\n \"cross-isa-minimal/branch\",\n \"gpu/branch\",\n \"hexagon/branch\",\n \"arm/branch\",\n \"cortexm/branch\",\n \"cpu/branch\",\n \"docker/branch\",\n \"i386/branch\",\n \"lint/branch\",\n \"minimal/branch\",\n \"riscv/branch\",\n \"wasm/branch\",\n]\n\n\ndef commit_passed_ci(commit: Dict[str, Any]) -> bool:\n \"\"\"\n Returns true if all of a commit's statuses are SUCCESS\n \"\"\"\n statuses = commit[\"statusCheckRollup\"][\"contexts\"][\"nodes\"]\n\n # GitHub Actions statuses are different from external GitHub statuses, so\n # unify them into 1 representation\n # https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads\n unified_statuses = []\n for status in statuses:\n if \"context\" in status:\n # Parse non-GHA status\n unified_statuses.append((status[\"context\"], status[\"state\"] == \"SUCCESS\"))\n else:\n # Parse GitHub Actions item\n workflow = status[\"checkSuite\"][\"workflowRun\"][\"workflow\"][\"name\"]\n name = f\"{workflow} / {status['name']}\"\n unified_statuses.append((name, status[\"conclusion\"] == \"SUCCESS\"))\n\n print(f\"Statuses on {commit['oid']}:\", json.dumps(unified_statuses, indent=2))\n\n # Assert that specific jobs are present in the commit statuses (i.e. don't\n # approve if CI was broken and didn't schedule a job)\n job_names = {name for name, status in unified_statuses}\n for job in EXPECTED_CI_JOBS:\n if job not in job_names:\n # Did not find expected job name\n return False\n\n passed_ci = all(status for name, status in unified_statuses)\n return passed_ci\n\n\ndef update_branch(user: str, repo: str, sha: str, branch_name: str) -> None:\n git([\"fetch\", \"origin\", sha])\n git([\"reset\", \"--hard\", \"FETCH_HEAD\"])\n try:\n git([\"branch\", \"-D\", branch_name])\n except RuntimeError:\n # Ignore failures (i.e. the branch did not exist in the first place)\n pass\n git([\"checkout\", \"-b\", branch_name])\n\n # Create and push the branch\n git([\"push\", \"origin\", \"--force\", branch_name])\n print(f\"Pushed branch {branch_name} with commit {sha}\")\n\n\nif __name__ == \"__main__\":\n help = \"Push the a branch to the last commit that passed all CI runs\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--remote\", default=\"origin\", help=\"ssh remote to parse\")\n parser.add_argument(\"--dry-run\", action=\"store_true\", help=\"don't submit to GitHub\")\n parser.add_argument(\"--branch\", default=\"last-successful\", help=\"branch name\")\n parser.add_argument(\n \"--testonly-json\", help=\"(testing) data to use instead of fetching from GitHub\"\n )\n args = parser.parse_args()\n\n remote = git([\"config\", \"--get\", f\"remote.{args.remote}.url\"])\n user, repo = parse_remote(remote)\n # TODO: Remove this before landing\n user, repo = (\"apache\", \"tvm\")\n\n if args.testonly_json:\n r = json.loads(args.testonly_json)\n else:\n github = GitHubRepo(token=os.environ[\"GITHUB_TOKEN\"], user=user, repo=repo)\n q = commits_query(user, repo)\n r = github.graphql(q)\n\n commits = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"nodes\"]\n\n # Limit GraphQL pagination\n MAX_COMMITS_TO_CHECK = 50\n i = 0\n\n while i < MAX_COMMITS_TO_CHECK:\n # Check each commit\n for commit in commits:\n if commit_passed_ci(commit):\n print(f\"Found last good commit: {commit['oid']}: {commit['messageHeadline']}\")\n if not args.dry_run:\n update_branch(\n user=user,\n repo=repo,\n sha=commit[\"oid\"],\n branch_name=args.branch,\n )\n # Nothing to do after updating the branch, exit early\n exit(0)\n\n # No good commit found, proceed to next page of results\n edges = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"edges\"]\n if len(edges) == 0:\n break\n else:\n q = commits_query(user, repo, cursor=edges[-1][\"cursor\"])\n r = github.graphql(q)\n commits = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"nodes\"]\n\n # Backstop to prevent looking through all the past commits\n i += len(commits)\n\n print(f\"No good commits found in the last {len(commits)} commits\")\n exit(1)\n", "path": "ci/scripts/github/update_branch.py"}]}
| 2,357 | 307 |
gh_patches_debug_20714
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-5255
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SpatialFullConvolution loaded from Lua doesn't have finput attribute
I am new to python,when i solve the promblem with the help below:
https://github.com/pytorch/pytorch/issues/897
I find some confusion in the code:
https://github.com/pytorch/pytorch/blob/master/torch/legacy/nn/JoinTable.py#L7
I set ‘dimension=1;self.dimension = dimension’,it seem ok for me,but i don’t kown how the value of ’dimension‘ was initialled.
Thank you !
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/legacy/nn/SpatialFullConvolution.py`
Content:
```
1 import math
2 import torch
3 from .Module import Module
4 from .utils import clear
5
6
7 class SpatialFullConvolution(Module):
8
9 def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None, adjW=0, adjH=0):
10 super(SpatialFullConvolution, self).__init__()
11
12 self.nInputPlane = nInputPlane
13 self.nOutputPlane = nOutputPlane
14 self.kW = kW
15 self.kH = kH
16 self.dW = dW
17 self.dH = dH
18 self.padW = padW
19 self.padH = padH if padH is not None else padW
20 self.adjW = adjW
21 self.adjH = adjH
22
23 if self.adjW > self.dW - 1 or self.adjH > self.dH - 1:
24 raise ValueError('adjW and adjH must be smaller than self.dW - 1 and self.dH - 1 respectively')
25
26 self.weight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)
27 self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)
28 self.bias = torch.Tensor(self.nOutputPlane)
29 self.gradBias = torch.Tensor(self.nOutputPlane)
30
31 self.ones = torch.Tensor()
32 self.finput = None
33 self.fgradInput = None
34 self.zeroScalar = None
35 self._gradOutput = None
36
37 self.reset()
38
39 def noBias(self):
40 self.bias = None
41 self.gradBias = None
42 return self
43
44 def reset(self, stdv=None):
45 if stdv is not None:
46 stdv = stdv * math.sqrt(3)
47 else:
48 nInputPlane = self.nInputPlane
49 kH = self.kH
50 kW = self.kW
51 stdv = 1 / math.sqrt(kW * kH * nInputPlane)
52
53 self.weight.uniform_(-stdv, stdv)
54 if self.bias is not None:
55 self.bias.uniform_(-stdv, stdv)
56
57 def _makeContiguous(self, input, gradOutput=None):
58 if not input.is_contiguous():
59 if self._input is None:
60 self._input = input.new()
61 self._input.resize_as_(input).copy_(input)
62 input = self._input
63
64 if gradOutput is not None:
65 if not gradOutput.is_contiguous():
66 if self._gradOutput is None:
67 self._gradOutput = gradOutput.new()
68 self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
69 gradOutput = self._gradOutput
70 return input, gradOutput
71
72 return input
73
74 def _calculateAdj(self, targetSize, ker, pad, stride):
75 return (targetSize + 2 * pad - ker) % stride
76
77 def updateOutput(self, input):
78 inputTensor = input
79 adjW, adjH = self.adjW, self.adjH
80
81 # The input can be a table where the second element indicates the target
82 # output size, in which case the adj factors are computed automatically
83 if isinstance(input, list):
84 inputTensor = input[0]
85 targetTensor = input[1]
86 tDims = targetTensor.dim()
87 tH = targetTensor.size(tDims - 2)
88 tW = targetTensor.size(tDims - 1)
89 adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
90 adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
91 if self.finput is None:
92 self.finput = input[0].new()
93 if self.fgradInput is None:
94 self.fgradInput = input[0].new()
95 else:
96 if self.finput is None:
97 self.finput = input.new()
98 if self.fgradInput is None:
99 self.fgradInput = input.new()
100
101 inputTensor = self._makeContiguous(inputTensor)
102 self._backend.SpatialFullConvolution_updateOutput(
103 self._backend.library_state,
104 inputTensor,
105 self.output,
106 self.weight,
107 self.bias,
108 self.finput,
109 self.fgradInput,
110 self.kW, self.kH,
111 self.dW, self.dH,
112 self.padW, self.padH,
113 adjW, adjH
114 )
115 return self.output
116
117 def updateGradInput(self, input, gradOutput):
118 if self.gradInput is None:
119 return
120 inputTensor = input
121 adjW, adjH = self.adjW, self.adjH
122
123 # The input can be a table where the second element indicates the target
124 # output size, in which case the adj factors are computed automatically
125 if isinstance(input, list):
126 inputTensor = input[0]
127 targetTensor = input[1]
128 tDims = targetTensor.dim()
129 tH = targetTensor.size(tDims - 2)
130 tW = targetTensor.size(tDims - 1)
131 adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
132 adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
133 # Momentarily extract the gradInput tensor
134 if isinstance(self.gradInput, list):
135 self.gradInput = self.gradInput[0]
136
137 inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)
138 self._backend.SpatialFullConvolution_updateGradInput(
139 self._backend.library_state,
140 inputTensor,
141 gradOutput,
142 self.gradInput,
143 self.weight,
144 self.finput,
145 self.kW, self.kH,
146 self.dW, self.dH,
147 self.padW, self.padH,
148 adjW, adjH
149 )
150
151 if isinstance(input, list):
152 # Create a zero tensor to be expanded and used as gradInput[1].
153 if self.zeroScalar is None:
154 self.zeroScalar = input[1].new(1).zero_()
155 self.ones.resize_(input[1].dim()).fill_(1)
156 zeroTensor = self.zeroScalar.view_as(self.ones).expand_as(input[1])
157 self.gradInput = [self.gradInput, zeroTensor]
158
159 return self.gradInput
160
161 def accGradParameters(self, input, gradOutput, scale=1):
162 inputTensor = input
163 adjW, adjH = self.adjW, self.adjH
164
165 # The input can be a table where the second element indicates the target
166 # output size, in which case the adj factors are computed automatically
167 if isinstance(inputTensor, list):
168 inputTensor = input[0]
169 targetTensor = input[1]
170 tDims = targetTensor.dim()
171 tH = targetTensor.size(tDims - 2)
172 tW = targetTensor.size(tDims - 1)
173 adjW = calculateAdj(tW, self.kW, self.padW, self.dW)
174 adjH = calculateAdj(tH, self.kH, self.padH, self.dH)
175
176 inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)
177 self._backend.SpatialFullConvolution_accGradParameters(
178 self._backend.library_state,
179 inputTensor,
180 gradOutput,
181 self.gradWeight,
182 self.gradBias,
183 self.finput,
184 self.fgradInput,
185 self.kW, self.kH,
186 self.dW, self.dH,
187 self.padW, self.padH,
188 adjW, adjH,
189 scale
190 )
191
192 def type(self, type=None, tensorCache=None):
193 if self.finput is not None:
194 self.finput = torch.Tensor()
195 if self.fgradInput is not None:
196 self.fgradInput = torch.Tensor()
197 return super(SpatialFullConvolution, self).type(type, tensorCache)
198
199 def __repr__(self):
200 s = super(SpatialFullConvolution, self).__repr__()
201 s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH)
202 if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:
203 s += ', {}, {}'.format(self.dW, self.dH)
204
205 if (self.padW or self.padH) and (self.padW != 0 or self.padH != 0):
206 s += ', {}, {}'.format(self.padW, self.padH)
207
208 if (self.adjW or self.adjH) and (self.adjW != 0 or self.adjH != 0):
209 s += ', {}, {}'.format(self.adjW, self.adjH)
210
211 s += ')'
212 if self.bias is None:
213 s += ' without bias'
214 return s
215
216 def clearState(self):
217 clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')
218 return super(SpatialFullConvolution, self).clearState()
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/legacy/nn/SpatialFullConvolution.py b/torch/legacy/nn/SpatialFullConvolution.py
--- a/torch/legacy/nn/SpatialFullConvolution.py
+++ b/torch/legacy/nn/SpatialFullConvolution.py
@@ -88,14 +88,14 @@
tW = targetTensor.size(tDims - 1)
adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
- if self.finput is None:
+ if not hasattr(self, 'finput') or self.finput is None:
self.finput = input[0].new()
- if self.fgradInput is None:
+ if not hasattr(self, 'fgradInput') or self.fgradInput is None:
self.fgradInput = input[0].new()
else:
- if self.finput is None:
+ if not hasattr(self, 'finput') or self.finput is None:
self.finput = input.new()
- if self.fgradInput is None:
+ if not hasattr(self, 'fgradInput') or self.fgradInput is None:
self.fgradInput = input.new()
inputTensor = self._makeContiguous(inputTensor)
|
{"golden_diff": "diff --git a/torch/legacy/nn/SpatialFullConvolution.py b/torch/legacy/nn/SpatialFullConvolution.py\n--- a/torch/legacy/nn/SpatialFullConvolution.py\n+++ b/torch/legacy/nn/SpatialFullConvolution.py\n@@ -88,14 +88,14 @@\n tW = targetTensor.size(tDims - 1)\n adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)\n- if self.finput is None:\n+ if not hasattr(self, 'finput') or self.finput is None:\n self.finput = input[0].new()\n- if self.fgradInput is None:\n+ if not hasattr(self, 'fgradInput') or self.fgradInput is None:\n self.fgradInput = input[0].new()\n else:\n- if self.finput is None:\n+ if not hasattr(self, 'finput') or self.finput is None:\n self.finput = input.new()\n- if self.fgradInput is None:\n+ if not hasattr(self, 'fgradInput') or self.fgradInput is None:\n self.fgradInput = input.new()\n \n inputTensor = self._makeContiguous(inputTensor)\n", "issue": "SpatialFullConvolution loaded from Lua doesn't have finput attribute\nI am new to python\uff0cwhen i solve the promblem with the help below\uff1a\r\nhttps://github.com/pytorch/pytorch/issues/897\r\n\r\nI find some confusion in the code\uff1a\r\nhttps://github.com/pytorch/pytorch/blob/master/torch/legacy/nn/JoinTable.py#L7\r\n\r\nI set \u2018dimension=1\uff1bself.dimension = dimension\u2019\uff0cit seem ok for me\uff0cbut i don\u2019t kown how the value of \u2019dimension\u2018 was initialled. \r\n\r\nThank you !\n", "before_files": [{"content": "import math\nimport torch\nfrom .Module import Module\nfrom .utils import clear\n\n\nclass SpatialFullConvolution(Module):\n\n def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None, adjW=0, adjH=0):\n super(SpatialFullConvolution, self).__init__()\n\n self.nInputPlane = nInputPlane\n self.nOutputPlane = nOutputPlane\n self.kW = kW\n self.kH = kH\n self.dW = dW\n self.dH = dH\n self.padW = padW\n self.padH = padH if padH is not None else padW\n self.adjW = adjW\n self.adjH = adjH\n\n if self.adjW > self.dW - 1 or self.adjH > self.dH - 1:\n raise ValueError('adjW and adjH must be smaller than self.dW - 1 and self.dH - 1 respectively')\n\n self.weight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)\n self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)\n self.bias = torch.Tensor(self.nOutputPlane)\n self.gradBias = torch.Tensor(self.nOutputPlane)\n\n self.ones = torch.Tensor()\n self.finput = None\n self.fgradInput = None\n self.zeroScalar = None\n self._gradOutput = None\n\n self.reset()\n\n def noBias(self):\n self.bias = None\n self.gradBias = None\n return self\n\n def reset(self, stdv=None):\n if stdv is not None:\n stdv = stdv * math.sqrt(3)\n else:\n nInputPlane = self.nInputPlane\n kH = self.kH\n kW = self.kW\n stdv = 1 / math.sqrt(kW * kH * nInputPlane)\n\n self.weight.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.uniform_(-stdv, stdv)\n\n def _makeContiguous(self, input, gradOutput=None):\n if not input.is_contiguous():\n if self._input is None:\n self._input = input.new()\n self._input.resize_as_(input).copy_(input)\n input = self._input\n\n if gradOutput is not None:\n if not gradOutput.is_contiguous():\n if self._gradOutput is None:\n self._gradOutput = gradOutput.new()\n self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)\n gradOutput = self._gradOutput\n return input, gradOutput\n\n return input\n\n def _calculateAdj(self, targetSize, ker, pad, stride):\n return (targetSize + 2 * pad - ker) % stride\n\n def updateOutput(self, input):\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(input, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)\n if self.finput is None:\n self.finput = input[0].new()\n if self.fgradInput is None:\n self.fgradInput = input[0].new()\n else:\n if self.finput is None:\n self.finput = input.new()\n if self.fgradInput is None:\n self.fgradInput = input.new()\n\n inputTensor = self._makeContiguous(inputTensor)\n self._backend.SpatialFullConvolution_updateOutput(\n self._backend.library_state,\n inputTensor,\n self.output,\n self.weight,\n self.bias,\n self.finput,\n self.fgradInput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH\n )\n return self.output\n\n def updateGradInput(self, input, gradOutput):\n if self.gradInput is None:\n return\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(input, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)\n # Momentarily extract the gradInput tensor\n if isinstance(self.gradInput, list):\n self.gradInput = self.gradInput[0]\n\n inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)\n self._backend.SpatialFullConvolution_updateGradInput(\n self._backend.library_state,\n inputTensor,\n gradOutput,\n self.gradInput,\n self.weight,\n self.finput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH\n )\n\n if isinstance(input, list):\n # Create a zero tensor to be expanded and used as gradInput[1].\n if self.zeroScalar is None:\n self.zeroScalar = input[1].new(1).zero_()\n self.ones.resize_(input[1].dim()).fill_(1)\n zeroTensor = self.zeroScalar.view_as(self.ones).expand_as(input[1])\n self.gradInput = [self.gradInput, zeroTensor]\n\n return self.gradInput\n\n def accGradParameters(self, input, gradOutput, scale=1):\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(inputTensor, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = calculateAdj(tH, self.kH, self.padH, self.dH)\n\n inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)\n self._backend.SpatialFullConvolution_accGradParameters(\n self._backend.library_state,\n inputTensor,\n gradOutput,\n self.gradWeight,\n self.gradBias,\n self.finput,\n self.fgradInput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH,\n scale\n )\n\n def type(self, type=None, tensorCache=None):\n if self.finput is not None:\n self.finput = torch.Tensor()\n if self.fgradInput is not None:\n self.fgradInput = torch.Tensor()\n return super(SpatialFullConvolution, self).type(type, tensorCache)\n\n def __repr__(self):\n s = super(SpatialFullConvolution, self).__repr__()\n s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH)\n if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:\n s += ', {}, {}'.format(self.dW, self.dH)\n\n if (self.padW or self.padH) and (self.padW != 0 or self.padH != 0):\n s += ', {}, {}'.format(self.padW, self.padH)\n\n if (self.adjW or self.adjH) and (self.adjW != 0 or self.adjH != 0):\n s += ', {}, {}'.format(self.adjW, self.adjH)\n\n s += ')'\n if self.bias is None:\n s += ' without bias'\n return s\n\n def clearState(self):\n clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')\n return super(SpatialFullConvolution, self).clearState()\n", "path": "torch/legacy/nn/SpatialFullConvolution.py"}], "after_files": [{"content": "import math\nimport torch\nfrom .Module import Module\nfrom .utils import clear\n\n\nclass SpatialFullConvolution(Module):\n\n def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None, adjW=0, adjH=0):\n super(SpatialFullConvolution, self).__init__()\n\n self.nInputPlane = nInputPlane\n self.nOutputPlane = nOutputPlane\n self.kW = kW\n self.kH = kH\n self.dW = dW\n self.dH = dH\n self.padW = padW\n self.padH = padH if padH is not None else padW\n self.adjW = adjW\n self.adjH = adjH\n\n if self.adjW > self.dW - 1 or self.adjH > self.dH - 1:\n raise ValueError('adjW and adjH must be smaller than self.dW - 1 and self.dH - 1 respectively')\n\n self.weight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)\n self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)\n self.bias = torch.Tensor(self.nOutputPlane)\n self.gradBias = torch.Tensor(self.nOutputPlane)\n\n self.ones = torch.Tensor()\n self.finput = None\n self.fgradInput = None\n self.zeroScalar = None\n self._gradOutput = None\n\n self.reset()\n\n def noBias(self):\n self.bias = None\n self.gradBias = None\n return self\n\n def reset(self, stdv=None):\n if stdv is not None:\n stdv = stdv * math.sqrt(3)\n else:\n nInputPlane = self.nInputPlane\n kH = self.kH\n kW = self.kW\n stdv = 1 / math.sqrt(kW * kH * nInputPlane)\n\n self.weight.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.uniform_(-stdv, stdv)\n\n def _makeContiguous(self, input, gradOutput=None):\n if not input.is_contiguous():\n if self._input is None:\n self._input = input.new()\n self._input.resize_as_(input).copy_(input)\n input = self._input\n\n if gradOutput is not None:\n if not gradOutput.is_contiguous():\n if self._gradOutput is None:\n self._gradOutput = gradOutput.new()\n self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)\n gradOutput = self._gradOutput\n return input, gradOutput\n\n return input\n\n def _calculateAdj(self, targetSize, ker, pad, stride):\n return (targetSize + 2 * pad - ker) % stride\n\n def updateOutput(self, input):\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(input, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)\n if not hasattr(self, 'finput') or self.finput is None:\n self.finput = input[0].new()\n if not hasattr(self, 'fgradInput') or self.fgradInput is None:\n self.fgradInput = input[0].new()\n else:\n if not hasattr(self, 'finput') or self.finput is None:\n self.finput = input.new()\n if not hasattr(self, 'fgradInput') or self.fgradInput is None:\n self.fgradInput = input.new()\n\n inputTensor = self._makeContiguous(inputTensor)\n self._backend.SpatialFullConvolution_updateOutput(\n self._backend.library_state,\n inputTensor,\n self.output,\n self.weight,\n self.bias,\n self.finput,\n self.fgradInput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH\n )\n return self.output\n\n def updateGradInput(self, input, gradOutput):\n if self.gradInput is None:\n return\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(input, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)\n # Momentarily extract the gradInput tensor\n if isinstance(self.gradInput, list):\n self.gradInput = self.gradInput[0]\n\n inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)\n self._backend.SpatialFullConvolution_updateGradInput(\n self._backend.library_state,\n inputTensor,\n gradOutput,\n self.gradInput,\n self.weight,\n self.finput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH\n )\n\n if isinstance(input, list):\n # Create a zero tensor to be expanded and used as gradInput[1].\n if self.zeroScalar is None:\n self.zeroScalar = input[1].new(1).zero_()\n self.ones.resize_(input[1].dim()).fill_(1)\n zeroTensor = self.zeroScalar.view_as(self.ones).expand_as(input[1])\n self.gradInput = [self.gradInput, zeroTensor]\n\n return self.gradInput\n\n def accGradParameters(self, input, gradOutput, scale=1):\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(inputTensor, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = calculateAdj(tH, self.kH, self.padH, self.dH)\n\n inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)\n self._backend.SpatialFullConvolution_accGradParameters(\n self._backend.library_state,\n inputTensor,\n gradOutput,\n self.gradWeight,\n self.gradBias,\n self.finput,\n self.fgradInput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH,\n scale\n )\n\n def type(self, type=None, tensorCache=None):\n if self.finput is not None:\n self.finput = torch.Tensor()\n if self.fgradInput is not None:\n self.fgradInput = torch.Tensor()\n return super(SpatialFullConvolution, self).type(type, tensorCache)\n\n def __repr__(self):\n s = super(SpatialFullConvolution, self).__repr__()\n s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH)\n if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:\n s += ', {}, {}'.format(self.dW, self.dH)\n\n if (self.padW or self.padH) and (self.padW != 0 or self.padH != 0):\n s += ', {}, {}'.format(self.padW, self.padH)\n\n if (self.adjW or self.adjH) and (self.adjW != 0 or self.adjH != 0):\n s += ', {}, {}'.format(self.adjW, self.adjH)\n\n s += ')'\n if self.bias is None:\n s += ' without bias'\n return s\n\n def clearState(self):\n clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')\n return super(SpatialFullConvolution, self).clearState()\n", "path": "torch/legacy/nn/SpatialFullConvolution.py"}]}
| 2,915 | 303 |
gh_patches_debug_10600
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1162
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception BO
feeder_electricity_1 | Exception while fetching production of BO
feeder_electricity_1 | Traceback (most recent call last):
feeder_electricity_1 | File "feeder_electricity.py", line 111, in fetch_production
feeder_electricity_1 | objs = parser(country_code, session)
feeder_electricity_1 | File "/home/feeder/lib_electricitymap/parsers/BO.py", line 149, in fetch_production
feeder_electricity_1 | sum(datapoint['production'].values()) != 0.0]):
feeder_electricity_1 | AttributeError: 'NoneType' object has no attribute 'values'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/BO.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # The arrow library is used to handle datetimes
4 import arrow
5 # The request library is used to fetch content through HTTP
6 import requests
7 # The numpy and pandas libraries are used to manipulate real time data
8 import pandas as pd
9 # The BeautifulSoup library is used parse web html
10 from bs4 import BeautifulSoup
11
12 tz_bo = 'America/La_Paz'
13
14 MAP_GENERATION = {
15 'hydro': 'Hidro',
16 'unknown': 'Termo',
17 'wind': 'Intermitentes'
18 }
19
20
21 def webparser(resp):
22 """Takes content from the corresponding webpage and returns the necessary outputs in a dataframe"""
23 # get the response as an html
24 soup = BeautifulSoup(resp.text, 'html.parser')
25 # Each variable correspond to a row
26 rows = soup.find_all("row")
27 # Extract the name of variables and position
28 variables = []
29 corresponding_row = []
30 hours = []
31 for i_row in range(len(rows)):
32 for tag in rows[i_row].find_all("string"):
33 if not tag.get_text().isdigit():
34 variables.append(tag.get_text().strip())
35 corresponding_row.append(i_row)
36 else:
37 hours.append(int(tag.get_text()))
38 # Define output frame
39 obj = pd.DataFrame(0, index=range(24), columns=['hour'] + variables)
40 # Fill it with hours and variables' value
41 obj.hour = hours
42 for i_row, row in enumerate(corresponding_row):
43 numbers = [float(numb.text) for numb in rows[row].find_all("number")]
44 for i_num, num in enumerate(numbers):
45 obj.loc[i_num, (variables[i_row])] = num
46 # Define negative values to NaN
47 obj[obj < 0] = 0
48
49 return obj
50
51
52 def fetch_hourly_production(country_code, obj, date):
53 """Returns a list of dictionaries."""
54
55 production_by_hour = []
56 for index, row in obj.iterrows():
57
58 data = {
59 'countryCode': country_code,
60 'production': {},
61 'storage': {},
62 'source': 'cndc.bo',
63 }
64 # Fill datetime variable
65 # Datetime are recorded from hour 1 to 24 in the web service
66 if row['hour'] == 24:
67 row['hour'] = 0
68 date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')
69 # date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')
70 data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,
71 hour=int(row['hour'])).datetime
72
73 # Fill production types
74 for i_type in MAP_GENERATION.keys():
75 try:
76 data['production'][i_type] = row[MAP_GENERATION[i_type]]
77 except KeyError as e:
78 data['production'] = None
79 break
80
81 production_by_hour.append(data)
82
83 return production_by_hour
84
85
86 def fetch_production(country_code='BO', session=None):
87 """
88 Requests the last known production mix (in MW) of a given country
89 Arguments:
90 country_code (optional) -- used in case a parser is able to fetch multiple countries
91 Return:
92 A dictionary in the form:
93 {
94 'countryCode': 'FR',
95 'datetime': '2017-01-01T00:00:00Z',
96 'production': {
97 'biomass': 0.0,
98 'coal': 0.0,
99 'gas': 0.0,
100 'hydro': 0.0,
101 'nuclear': null,
102 'oil': 0.0,
103 'solar': 0.0,
104 'wind': 0.0,
105 'geothermal': 0.0,
106 'unknown': 0.0
107 },
108 'storage': {
109 'hydro': -10.0,
110 },
111 'source': 'mysource.com'
112 }
113 """
114
115 # Define actual and previous day (for midnight data).
116 now = arrow.now(tz=tz_bo)
117 formatted_date = now.format('YYYY-MM-DD')
118 past_formatted_date = arrow.get(formatted_date, 'YYYY-MM-DD').shift(days=-1).format(
119 'YYYY-MM-DD')
120
121 # initial path for url to request
122 url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='
123
124 # Start with data for previous day in order to get midnight data.
125 url = url_init + past_formatted_date
126 r = session or requests.session()
127 response = r.get(url)
128 obj = webparser(response)
129 data_yesterday = fetch_hourly_production(country_code, obj, past_formatted_date)
130
131 # Now get data for rest of today.
132 url = url_init + formatted_date
133 r = session or requests.session()
134 response = r.get(url)
135 obj = webparser(response)
136 data_today = fetch_hourly_production(country_code, obj, formatted_date)
137
138 data = data_yesterday + data_today
139
140 # Drop any datapoints where;
141 # 1) A type of generation is totally missing resulting in None.
142 # 2) Datapoint is in the future.
143 # 3) All production values are zero, this can happen because the data source
144 # updates ~5mins after the hour so condition 2 will pass.
145 valid_data = []
146 for datapoint in data:
147 if all([datapoint['production'] is not None,
148 now.datetime > datapoint['datetime'],
149 sum(datapoint['production'].values()) != 0.0]):
150 valid_data.append(datapoint)
151
152 return valid_data
153
154
155 def fetch_hourly_generation_forecast(country_code, obj, date):
156 """Returns a list of dictionaries."""
157
158 hourly_forecast = []
159 for index, row in obj.iterrows():
160 data = {
161 'countryCode': country_code,
162 'value': {},
163 'source': 'cndc.bo',
164 }
165
166 # Fill forecasted value
167 data['value'] = row['Gen.Prevista']
168
169 # Fill datetime variable - changing format if midnight (datetime are recorded from hour 1 to 24 in the webservice)
170 if row['hour'] == 24:
171 row['hour'] = 0
172 date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')
173 data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,
174 hour=int(row['hour'])).datetime
175
176 hourly_forecast.append(data)
177
178 return hourly_forecast
179
180
181 def fetch_generation_forecast(country_code='BO', session=None):
182 # Define actual and last day (for midnight data)
183 formatted_date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')
184
185 # initial path for url to request
186 url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='
187 url = url_init + formatted_date
188
189 r = session or requests.session()
190 response = r.get(url)
191 obj = webparser(response)
192 forecast = fetch_hourly_generation_forecast('BO', obj, formatted_date)
193
194 return forecast
195
196
197 if __name__ == '__main__':
198 """Main method, never used by the Electricity Map backend, but handy for testing."""
199 print('fetch_production() ->')
200 print(fetch_production())
201 print('fetch_generation_forecast() ->')
202 print(fetch_generation_forecast())
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/BO.py b/parsers/BO.py
--- a/parsers/BO.py
+++ b/parsers/BO.py
@@ -144,9 +144,13 @@
# updates ~5mins after the hour so condition 2 will pass.
valid_data = []
for datapoint in data:
- if all([datapoint['production'] is not None,
- now.datetime > datapoint['datetime'],
- sum(datapoint['production'].values()) != 0.0]):
+ if datapoint['production'] is None:
+ continue
+ elif now.datetime < datapoint['datetime']:
+ continue
+ elif sum(datapoint['production'].values()) == 0.0:
+ continue
+ else:
valid_data.append(datapoint)
return valid_data
|
{"golden_diff": "diff --git a/parsers/BO.py b/parsers/BO.py\n--- a/parsers/BO.py\n+++ b/parsers/BO.py\n@@ -144,9 +144,13 @@\n # updates ~5mins after the hour so condition 2 will pass.\n valid_data = []\n for datapoint in data:\n- if all([datapoint['production'] is not None,\n- now.datetime > datapoint['datetime'],\n- sum(datapoint['production'].values()) != 0.0]):\n+ if datapoint['production'] is None:\n+ continue\n+ elif now.datetime < datapoint['datetime']:\n+ continue\n+ elif sum(datapoint['production'].values()) == 0.0:\n+ continue\n+ else:\n valid_data.append(datapoint)\n \n return valid_data\n", "issue": "Exception BO\nfeeder_electricity_1 | Exception while fetching production of BO\r\nfeeder_electricity_1 | Traceback (most recent call last):\r\nfeeder_electricity_1 | File \"feeder_electricity.py\", line 111, in fetch_production\r\nfeeder_electricity_1 | objs = parser(country_code, session)\r\nfeeder_electricity_1 | File \"/home/feeder/lib_electricitymap/parsers/BO.py\", line 149, in fetch_production\r\nfeeder_electricity_1 | sum(datapoint['production'].values()) != 0.0]):\r\nfeeder_electricity_1 | AttributeError: 'NoneType' object has no attribute 'values'\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# The arrow library is used to handle datetimes\nimport arrow\n# The request library is used to fetch content through HTTP\nimport requests\n# The numpy and pandas libraries are used to manipulate real time data\nimport pandas as pd\n# The BeautifulSoup library is used parse web html\nfrom bs4 import BeautifulSoup\n\ntz_bo = 'America/La_Paz'\n\nMAP_GENERATION = {\n 'hydro': 'Hidro',\n 'unknown': 'Termo',\n 'wind': 'Intermitentes'\n}\n\n\ndef webparser(resp):\n \"\"\"Takes content from the corresponding webpage and returns the necessary outputs in a dataframe\"\"\"\n # get the response as an html\n soup = BeautifulSoup(resp.text, 'html.parser')\n # Each variable correspond to a row\n rows = soup.find_all(\"row\")\n # Extract the name of variables and position\n variables = []\n corresponding_row = []\n hours = []\n for i_row in range(len(rows)):\n for tag in rows[i_row].find_all(\"string\"):\n if not tag.get_text().isdigit():\n variables.append(tag.get_text().strip())\n corresponding_row.append(i_row)\n else:\n hours.append(int(tag.get_text()))\n # Define output frame\n obj = pd.DataFrame(0, index=range(24), columns=['hour'] + variables)\n # Fill it with hours and variables' value\n obj.hour = hours\n for i_row, row in enumerate(corresponding_row):\n numbers = [float(numb.text) for numb in rows[row].find_all(\"number\")]\n for i_num, num in enumerate(numbers):\n obj.loc[i_num, (variables[i_row])] = num\n # Define negative values to NaN\n obj[obj < 0] = 0\n\n return obj\n\n\ndef fetch_hourly_production(country_code, obj, date):\n \"\"\"Returns a list of dictionaries.\"\"\"\n\n production_by_hour = []\n for index, row in obj.iterrows():\n\n data = {\n 'countryCode': country_code,\n 'production': {},\n 'storage': {},\n 'source': 'cndc.bo',\n }\n # Fill datetime variable\n # Datetime are recorded from hour 1 to 24 in the web service\n if row['hour'] == 24:\n row['hour'] = 0\n date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')\n # date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')\n data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,\n hour=int(row['hour'])).datetime\n\n # Fill production types\n for i_type in MAP_GENERATION.keys():\n try:\n data['production'][i_type] = row[MAP_GENERATION[i_type]]\n except KeyError as e:\n data['production'] = None\n break\n\n production_by_hour.append(data)\n\n return production_by_hour\n\n\ndef fetch_production(country_code='BO', session=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n country_code (optional) -- used in case a parser is able to fetch multiple countries\n Return:\n A dictionary in the form:\n {\n 'countryCode': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n # Define actual and previous day (for midnight data).\n now = arrow.now(tz=tz_bo)\n formatted_date = now.format('YYYY-MM-DD')\n past_formatted_date = arrow.get(formatted_date, 'YYYY-MM-DD').shift(days=-1).format(\n 'YYYY-MM-DD')\n\n # initial path for url to request\n url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='\n\n # Start with data for previous day in order to get midnight data.\n url = url_init + past_formatted_date\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n data_yesterday = fetch_hourly_production(country_code, obj, past_formatted_date)\n\n # Now get data for rest of today.\n url = url_init + formatted_date\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n data_today = fetch_hourly_production(country_code, obj, formatted_date)\n\n data = data_yesterday + data_today\n\n # Drop any datapoints where;\n # 1) A type of generation is totally missing resulting in None.\n # 2) Datapoint is in the future.\n # 3) All production values are zero, this can happen because the data source\n # updates ~5mins after the hour so condition 2 will pass.\n valid_data = []\n for datapoint in data:\n if all([datapoint['production'] is not None,\n now.datetime > datapoint['datetime'],\n sum(datapoint['production'].values()) != 0.0]):\n valid_data.append(datapoint)\n\n return valid_data\n\n\ndef fetch_hourly_generation_forecast(country_code, obj, date):\n \"\"\"Returns a list of dictionaries.\"\"\"\n\n hourly_forecast = []\n for index, row in obj.iterrows():\n data = {\n 'countryCode': country_code,\n 'value': {},\n 'source': 'cndc.bo',\n }\n\n # Fill forecasted value\n data['value'] = row['Gen.Prevista']\n\n # Fill datetime variable - changing format if midnight (datetime are recorded from hour 1 to 24 in the webservice)\n if row['hour'] == 24:\n row['hour'] = 0\n date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')\n data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,\n hour=int(row['hour'])).datetime\n\n hourly_forecast.append(data)\n\n return hourly_forecast\n\n\ndef fetch_generation_forecast(country_code='BO', session=None):\n # Define actual and last day (for midnight data)\n formatted_date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')\n\n # initial path for url to request\n url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='\n url = url_init + formatted_date\n\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n forecast = fetch_hourly_generation_forecast('BO', obj, formatted_date)\n\n return forecast\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_generation_forecast() ->')\n print(fetch_generation_forecast())\n", "path": "parsers/BO.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# The arrow library is used to handle datetimes\nimport arrow\n# The request library is used to fetch content through HTTP\nimport requests\n# The numpy and pandas libraries are used to manipulate real time data\nimport pandas as pd\n# The BeautifulSoup library is used parse web html\nfrom bs4 import BeautifulSoup\n\ntz_bo = 'America/La_Paz'\n\nMAP_GENERATION = {\n 'hydro': 'Hidro',\n 'unknown': 'Termo',\n 'wind': 'Intermitentes'\n}\n\n\ndef webparser(resp):\n \"\"\"Takes content from the corresponding webpage and returns the necessary outputs in a dataframe\"\"\"\n # get the response as an html\n soup = BeautifulSoup(resp.text, 'html.parser')\n # Each variable correspond to a row\n rows = soup.find_all(\"row\")\n # Extract the name of variables and position\n variables = []\n corresponding_row = []\n hours = []\n for i_row in range(len(rows)):\n for tag in rows[i_row].find_all(\"string\"):\n if not tag.get_text().isdigit():\n variables.append(tag.get_text().strip())\n corresponding_row.append(i_row)\n else:\n hours.append(int(tag.get_text()))\n # Define output frame\n obj = pd.DataFrame(0, index=range(24), columns=['hour'] + variables)\n # Fill it with hours and variables' value\n obj.hour = hours\n for i_row, row in enumerate(corresponding_row):\n numbers = [float(numb.text) for numb in rows[row].find_all(\"number\")]\n for i_num, num in enumerate(numbers):\n obj.loc[i_num, (variables[i_row])] = num\n # Define negative values to NaN\n obj[obj < 0] = 0\n\n return obj\n\n\ndef fetch_hourly_production(country_code, obj, date):\n \"\"\"Returns a list of dictionaries.\"\"\"\n\n production_by_hour = []\n for index, row in obj.iterrows():\n\n data = {\n 'countryCode': country_code,\n 'production': {},\n 'storage': {},\n 'source': 'cndc.bo',\n }\n # Fill datetime variable\n # Datetime are recorded from hour 1 to 24 in the web service\n if row['hour'] == 24:\n row['hour'] = 0\n date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')\n # date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')\n data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,\n hour=int(row['hour'])).datetime\n\n # Fill production types\n for i_type in MAP_GENERATION.keys():\n try:\n data['production'][i_type] = row[MAP_GENERATION[i_type]]\n except KeyError as e:\n data['production'] = None\n break\n\n production_by_hour.append(data)\n\n return production_by_hour\n\n\ndef fetch_production(country_code='BO', session=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n country_code (optional) -- used in case a parser is able to fetch multiple countries\n Return:\n A dictionary in the form:\n {\n 'countryCode': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n # Define actual and previous day (for midnight data).\n now = arrow.now(tz=tz_bo)\n formatted_date = now.format('YYYY-MM-DD')\n past_formatted_date = arrow.get(formatted_date, 'YYYY-MM-DD').shift(days=-1).format(\n 'YYYY-MM-DD')\n\n # initial path for url to request\n url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='\n\n # Start with data for previous day in order to get midnight data.\n url = url_init + past_formatted_date\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n data_yesterday = fetch_hourly_production(country_code, obj, past_formatted_date)\n\n # Now get data for rest of today.\n url = url_init + formatted_date\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n data_today = fetch_hourly_production(country_code, obj, formatted_date)\n\n data = data_yesterday + data_today\n\n # Drop any datapoints where;\n # 1) A type of generation is totally missing resulting in None.\n # 2) Datapoint is in the future.\n # 3) All production values are zero, this can happen because the data source\n # updates ~5mins after the hour so condition 2 will pass.\n valid_data = []\n for datapoint in data:\n if datapoint['production'] is None:\n continue\n elif now.datetime < datapoint['datetime']:\n continue\n elif sum(datapoint['production'].values()) == 0.0:\n continue\n else:\n valid_data.append(datapoint)\n\n return valid_data\n\n\ndef fetch_hourly_generation_forecast(country_code, obj, date):\n \"\"\"Returns a list of dictionaries.\"\"\"\n\n hourly_forecast = []\n for index, row in obj.iterrows():\n data = {\n 'countryCode': country_code,\n 'value': {},\n 'source': 'cndc.bo',\n }\n\n # Fill forecasted value\n data['value'] = row['Gen.Prevista']\n\n # Fill datetime variable - changing format if midnight (datetime are recorded from hour 1 to 24 in the webservice)\n if row['hour'] == 24:\n row['hour'] = 0\n date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')\n data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,\n hour=int(row['hour'])).datetime\n\n hourly_forecast.append(data)\n\n return hourly_forecast\n\n\ndef fetch_generation_forecast(country_code='BO', session=None):\n # Define actual and last day (for midnight data)\n formatted_date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')\n\n # initial path for url to request\n url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='\n url = url_init + formatted_date\n\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n forecast = fetch_hourly_generation_forecast('BO', obj, formatted_date)\n\n return forecast\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_generation_forecast() ->')\n print(fetch_generation_forecast())\n", "path": "parsers/BO.py"}]}
| 2,595 | 187 |
gh_patches_debug_1702
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-331
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Figure out what to do with http_service on CI
We have two options:
- build the http_service with fake models and don't push it on CI. Build it with real models and push it after training;
- build the http_service without models and let it download models at runtime.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `http_service/check_models.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import logging
7 import os
8 import sys
9
10 from bugbug.models.component import ComponentModel
11 from bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel
12 from bugbug.models.regression import RegressionModel
13
14 logging.basicConfig(level=logging.INFO)
15 LOGGER = logging.getLogger()
16
17 MODELS = {
18 "defectenhancementtask": DefectEnhancementTaskModel,
19 "component": ComponentModel,
20 "regression": RegressionModel,
21 }
22 MODELS_DIR = os.path.join(os.path.dirname(__file__), "models")
23
24
25 def load_model(model):
26 model_file_path = os.path.join(MODELS_DIR, f"{model}model")
27 LOGGER.info(f"Lookup model in {model_file_path}")
28 model = MODELS[model].load(model_file_path)
29 return model
30
31
32 def check_models():
33 for model_name in MODELS.keys():
34 # Try loading the model
35 load_model(model_name)
36
37
38 if __name__ == "__main__":
39 try:
40 check_models()
41 except Exception:
42 LOGGER.warning(
43 "Failed to validate the models, please run `python models.py download`",
44 exc_info=True,
45 )
46 sys.exit(1)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/http_service/check_models.py b/http_service/check_models.py
--- a/http_service/check_models.py
+++ b/http_service/check_models.py
@@ -36,6 +36,13 @@
if __name__ == "__main__":
+
+ should_check_models = os.environ.get("CHECK_MODELS", "1")
+
+ if should_check_models == "0":
+ print("Skipping checking models as instructed by env var $CHECK_MODELS")
+ sys.exit(0)
+
try:
check_models()
except Exception:
|
{"golden_diff": "diff --git a/http_service/check_models.py b/http_service/check_models.py\n--- a/http_service/check_models.py\n+++ b/http_service/check_models.py\n@@ -36,6 +36,13 @@\n \n \n if __name__ == \"__main__\":\n+\n+ should_check_models = os.environ.get(\"CHECK_MODELS\", \"1\")\n+\n+ if should_check_models == \"0\":\n+ print(\"Skipping checking models as instructed by env var $CHECK_MODELS\")\n+ sys.exit(0)\n+\n try:\n check_models()\n except Exception:\n", "issue": "Figure out what to do with http_service on CI\nWe have two options:\r\n- build the http_service with fake models and don't push it on CI. Build it with real models and push it after training;\r\n- build the http_service without models and let it download models at runtime.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nimport sys\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS = {\n \"defectenhancementtask\": DefectEnhancementTaskModel,\n \"component\": ComponentModel,\n \"regression\": RegressionModel,\n}\nMODELS_DIR = os.path.join(os.path.dirname(__file__), \"models\")\n\n\ndef load_model(model):\n model_file_path = os.path.join(MODELS_DIR, f\"{model}model\")\n LOGGER.info(f\"Lookup model in {model_file_path}\")\n model = MODELS[model].load(model_file_path)\n return model\n\n\ndef check_models():\n for model_name in MODELS.keys():\n # Try loading the model\n load_model(model_name)\n\n\nif __name__ == \"__main__\":\n try:\n check_models()\n except Exception:\n LOGGER.warning(\n \"Failed to validate the models, please run `python models.py download`\",\n exc_info=True,\n )\n sys.exit(1)\n", "path": "http_service/check_models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nimport sys\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS = {\n \"defectenhancementtask\": DefectEnhancementTaskModel,\n \"component\": ComponentModel,\n \"regression\": RegressionModel,\n}\nMODELS_DIR = os.path.join(os.path.dirname(__file__), \"models\")\n\n\ndef load_model(model):\n model_file_path = os.path.join(MODELS_DIR, f\"{model}model\")\n LOGGER.info(f\"Lookup model in {model_file_path}\")\n model = MODELS[model].load(model_file_path)\n return model\n\n\ndef check_models():\n for model_name in MODELS.keys():\n # Try loading the model\n load_model(model_name)\n\n\nif __name__ == \"__main__\":\n\n should_check_models = os.environ.get(\"CHECK_MODELS\", \"1\")\n\n if should_check_models == \"0\":\n print(\"Skipping checking models as instructed by env var $CHECK_MODELS\")\n sys.exit(0)\n\n try:\n check_models()\n except Exception:\n LOGGER.warning(\n \"Failed to validate the models, please run `python models.py download`\",\n exc_info=True,\n )\n sys.exit(1)\n", "path": "http_service/check_models.py"}]}
| 714 | 119 |
gh_patches_debug_26712
|
rasdani/github-patches
|
git_diff
|
dask__distributed-8447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`distributed.protocol.dumps` does not handle objects not serializable via `msgpack.dumps` as suggested by docstring
When using `distributed.protocol.dumps` to serialize arbitrary data, it fails and raises a `TypeError`. The docstring suggests it should be able to handle this.
**Minimal Complete Verifiable Example**:
```python
import numpy as np
from distributed.protocol import dumps
dumps(np.int64(123))
```
raises
```python
2024-01-09 19:55:04,572 - distributed.protocol.core - CRITICAL - Failed to Serialize
Traceback (most recent call last):
File "/Users/hendrikmakait/projects/dask/distributed/distributed/protocol/core.py", line 108, in dumps
frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.12/site-packages/msgpack/__init__.py", line 36, in packb
return Packer(**kwargs).pack(o)
^^^^^^^^^^^^^^^^^^^^^^^^
File "msgpack/_packer.pyx", line 294, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 300, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 297, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 291, in msgpack._cmsgpack.Packer._pack
TypeError: can not serialize 'numpy.int64' object
Traceback (most recent call last):
File "/Users/hendrikmakait/projects/dask/distributed/sandbox.py", line 5, in <module>
dumps(np.int64(123))
File "/Users/hendrikmakait/projects/dask/distributed/distributed/protocol/core.py", line 108, in dumps
frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.12/site-packages/msgpack/__init__.py", line 36, in packb
return Packer(**kwargs).pack(o)
^^^^^^^^^^^^^^^^^^^^^^^^
File "msgpack/_packer.pyx", line 294, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 300, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 297, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 291, in msgpack._cmsgpack.Packer._pack
TypeError: can not serialize 'numpy.int64' object
```
Relevant section of `dumps`'s docstring: https://github.com/dask/distributed/blob/7562f9c566978de4f3f5b73920a24ea1813d6e28/distributed/protocol/core.py#L30-L35
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/protocol/core.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4
5 import msgpack
6
7 from distributed.protocol import pickle
8 from distributed.protocol.compression import decompress, maybe_compress
9 from distributed.protocol.serialize import (
10 Pickled,
11 Serialize,
12 Serialized,
13 ToPickle,
14 merge_and_deserialize,
15 msgpack_decode_default,
16 msgpack_encode_default,
17 serialize_and_split,
18 )
19 from distributed.protocol.utils import msgpack_opts
20 from distributed.utils import ensure_memoryview
21
22 logger = logging.getLogger(__name__)
23
24
25 def dumps( # type: ignore[no-untyped-def]
26 msg, serializers=None, on_error="message", context=None, frame_split_size=None
27 ) -> list:
28 """Transform Python message to bytestream suitable for communication
29
30 Developer Notes
31 ---------------
32 The approach here is to use `msgpack.dumps()` to serialize `msg` and
33 write the result to the first output frame. If `msgpack.dumps()`
34 encounters an object it cannot serialize like a NumPy array, it is handled
35 out-of-band by `_encode_default()` and appended to the output frame list.
36 """
37 try:
38 if context and "compression" in context:
39 compress_opts = {"compression": context["compression"]}
40 else:
41 compress_opts = {}
42
43 def _inplace_compress_frames(header, frames):
44 compression = list(header.get("compression", [None] * len(frames)))
45
46 for i in range(len(frames)):
47 if compression[i] is None:
48 compression[i], frames[i] = maybe_compress(
49 frames[i], **compress_opts
50 )
51
52 header["compression"] = tuple(compression)
53
54 def create_serialized_sub_frames(obj: Serialized | Serialize) -> list:
55 if isinstance(obj, Serialized):
56 sub_header, sub_frames = obj.header, obj.frames
57 else:
58 sub_header, sub_frames = serialize_and_split(
59 obj,
60 serializers=serializers,
61 on_error=on_error,
62 context=context,
63 size=frame_split_size,
64 )
65 _inplace_compress_frames(sub_header, sub_frames)
66 sub_header["num-sub-frames"] = len(sub_frames)
67 sub_header = msgpack.dumps(
68 sub_header, default=msgpack_encode_default, use_bin_type=True
69 )
70 return [sub_header] + sub_frames
71
72 def create_pickled_sub_frames(obj: Pickled | ToPickle) -> list:
73 if isinstance(obj, Pickled):
74 sub_header, sub_frames = obj.header, obj.frames
75 else:
76 sub_frames = []
77 sub_header = {
78 "pickled-obj": pickle.dumps(
79 obj.data,
80 # In to support len() and slicing, we convert `PickleBuffer`
81 # objects to memoryviews of bytes.
82 buffer_callback=lambda x: sub_frames.append(
83 ensure_memoryview(x)
84 ),
85 )
86 }
87 _inplace_compress_frames(sub_header, sub_frames)
88
89 sub_header["num-sub-frames"] = len(sub_frames)
90 sub_header = msgpack.dumps(sub_header)
91 return [sub_header] + sub_frames
92
93 frames = [None]
94
95 def _encode_default(obj):
96 if isinstance(obj, (Serialize, Serialized)):
97 offset = len(frames)
98 frames.extend(create_serialized_sub_frames(obj))
99 return {"__Serialized__": offset}
100 elif isinstance(obj, (ToPickle, Pickled)):
101 offset = len(frames)
102 frames.extend(create_pickled_sub_frames(obj))
103 return {"__Pickled__": offset}
104 else:
105 return msgpack_encode_default(obj)
106
107 frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
108 return frames
109
110 except Exception:
111 logger.critical("Failed to Serialize", exc_info=True)
112 raise
113
114
115 def loads(frames, deserialize=True, deserializers=None):
116 """Transform bytestream back into Python value"""
117
118 try:
119
120 def _decode_default(obj):
121 offset = obj.get("__Serialized__", 0)
122 if offset > 0:
123 sub_header = msgpack.loads(
124 frames[offset],
125 object_hook=msgpack_decode_default,
126 use_list=False,
127 **msgpack_opts,
128 )
129 offset += 1
130 sub_frames = frames[offset : offset + sub_header["num-sub-frames"]]
131 if deserialize:
132 if "compression" in sub_header:
133 sub_frames = decompress(sub_header, sub_frames)
134 return merge_and_deserialize(
135 sub_header, sub_frames, deserializers=deserializers
136 )
137 else:
138 return Serialized(sub_header, sub_frames)
139
140 offset = obj.get("__Pickled__", 0)
141 if offset > 0:
142 sub_header = msgpack.loads(frames[offset])
143 offset += 1
144 sub_frames = frames[offset : offset + sub_header["num-sub-frames"]]
145 if "compression" in sub_header:
146 sub_frames = decompress(sub_header, sub_frames)
147 return pickle.loads(sub_header["pickled-obj"], buffers=sub_frames)
148 return msgpack_decode_default(obj)
149
150 return msgpack.loads(
151 frames[0], object_hook=_decode_default, use_list=False, **msgpack_opts
152 )
153
154 except Exception:
155 logger.critical("Failed to deserialize", exc_info=True)
156 raise
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/distributed/protocol/core.py b/distributed/protocol/core.py
--- a/distributed/protocol/core.py
+++ b/distributed/protocol/core.py
@@ -11,6 +11,7 @@
Serialize,
Serialized,
ToPickle,
+ _is_msgpack_serializable,
merge_and_deserialize,
msgpack_decode_default,
msgpack_encode_default,
@@ -104,7 +105,31 @@
else:
return msgpack_encode_default(obj)
- frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
+ try:
+ frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
+ except TypeError as e:
+ logger.info(
+ f"Failed to serialize ({e}); falling back to pickle. "
+ "Be aware that this may degrade performance."
+ )
+
+ def _encode_default_safe(obj):
+ encoded = _encode_default(obj)
+ if encoded is not obj or _is_msgpack_serializable(obj):
+ return encoded
+
+ obj = ToPickle(obj)
+ offset = len(frames)
+ frames.extend(create_pickled_sub_frames(obj))
+ return {"__Pickled__": offset}
+
+ # If possible, we want to avoid the performance penalty from the checks
+ # implemented in _encode_default_safe to fall back to pickle, so we
+ # try to serialize the data without the fallback first assuming that
+ # this succeeds in the overwhelming majority of cases.
+ frames[0] = msgpack.dumps(
+ msg, default=_encode_default_safe, use_bin_type=True
+ )
return frames
except Exception:
|
{"golden_diff": "diff --git a/distributed/protocol/core.py b/distributed/protocol/core.py\n--- a/distributed/protocol/core.py\n+++ b/distributed/protocol/core.py\n@@ -11,6 +11,7 @@\n Serialize,\n Serialized,\n ToPickle,\n+ _is_msgpack_serializable,\n merge_and_deserialize,\n msgpack_decode_default,\n msgpack_encode_default,\n@@ -104,7 +105,31 @@\n else:\n return msgpack_encode_default(obj)\n \n- frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\n+ try:\n+ frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\n+ except TypeError as e:\n+ logger.info(\n+ f\"Failed to serialize ({e}); falling back to pickle. \"\n+ \"Be aware that this may degrade performance.\"\n+ )\n+\n+ def _encode_default_safe(obj):\n+ encoded = _encode_default(obj)\n+ if encoded is not obj or _is_msgpack_serializable(obj):\n+ return encoded\n+\n+ obj = ToPickle(obj)\n+ offset = len(frames)\n+ frames.extend(create_pickled_sub_frames(obj))\n+ return {\"__Pickled__\": offset}\n+\n+ # If possible, we want to avoid the performance penalty from the checks\n+ # implemented in _encode_default_safe to fall back to pickle, so we\n+ # try to serialize the data without the fallback first assuming that\n+ # this succeeds in the overwhelming majority of cases.\n+ frames[0] = msgpack.dumps(\n+ msg, default=_encode_default_safe, use_bin_type=True\n+ )\n return frames\n \n except Exception:\n", "issue": "`distributed.protocol.dumps` does not handle objects not serializable via `msgpack.dumps` as suggested by docstring\nWhen using `distributed.protocol.dumps` to serialize arbitrary data, it fails and raises a `TypeError`. The docstring suggests it should be able to handle this.\r\n\r\n**Minimal Complete Verifiable Example**:\r\n\r\n```python\r\nimport numpy as np\r\nfrom distributed.protocol import dumps\r\n\r\ndumps(np.int64(123))\r\n```\r\n\r\nraises\r\n\r\n```python\r\n2024-01-09 19:55:04,572 - distributed.protocol.core - CRITICAL - Failed to Serialize\r\nTraceback (most recent call last):\r\n File \"/Users/hendrikmakait/projects/dask/distributed/distributed/protocol/core.py\", line 108, in dumps\r\n frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.12/site-packages/msgpack/__init__.py\", line 36, in packb\r\n return Packer(**kwargs).pack(o)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"msgpack/_packer.pyx\", line 294, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 300, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 297, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 291, in msgpack._cmsgpack.Packer._pack\r\nTypeError: can not serialize 'numpy.int64' object\r\nTraceback (most recent call last):\r\n File \"/Users/hendrikmakait/projects/dask/distributed/sandbox.py\", line 5, in <module>\r\n dumps(np.int64(123))\r\n File \"/Users/hendrikmakait/projects/dask/distributed/distributed/protocol/core.py\", line 108, in dumps\r\n frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.12/site-packages/msgpack/__init__.py\", line 36, in packb\r\n return Packer(**kwargs).pack(o)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"msgpack/_packer.pyx\", line 294, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 300, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 297, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 291, in msgpack._cmsgpack.Packer._pack\r\nTypeError: can not serialize 'numpy.int64' object\r\n```\r\n\r\nRelevant section of `dumps`'s docstring: https://github.com/dask/distributed/blob/7562f9c566978de4f3f5b73920a24ea1813d6e28/distributed/protocol/core.py#L30-L35\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\n\nimport msgpack\n\nfrom distributed.protocol import pickle\nfrom distributed.protocol.compression import decompress, maybe_compress\nfrom distributed.protocol.serialize import (\n Pickled,\n Serialize,\n Serialized,\n ToPickle,\n merge_and_deserialize,\n msgpack_decode_default,\n msgpack_encode_default,\n serialize_and_split,\n)\nfrom distributed.protocol.utils import msgpack_opts\nfrom distributed.utils import ensure_memoryview\n\nlogger = logging.getLogger(__name__)\n\n\ndef dumps( # type: ignore[no-untyped-def]\n msg, serializers=None, on_error=\"message\", context=None, frame_split_size=None\n) -> list:\n \"\"\"Transform Python message to bytestream suitable for communication\n\n Developer Notes\n ---------------\n The approach here is to use `msgpack.dumps()` to serialize `msg` and\n write the result to the first output frame. If `msgpack.dumps()`\n encounters an object it cannot serialize like a NumPy array, it is handled\n out-of-band by `_encode_default()` and appended to the output frame list.\n \"\"\"\n try:\n if context and \"compression\" in context:\n compress_opts = {\"compression\": context[\"compression\"]}\n else:\n compress_opts = {}\n\n def _inplace_compress_frames(header, frames):\n compression = list(header.get(\"compression\", [None] * len(frames)))\n\n for i in range(len(frames)):\n if compression[i] is None:\n compression[i], frames[i] = maybe_compress(\n frames[i], **compress_opts\n )\n\n header[\"compression\"] = tuple(compression)\n\n def create_serialized_sub_frames(obj: Serialized | Serialize) -> list:\n if isinstance(obj, Serialized):\n sub_header, sub_frames = obj.header, obj.frames\n else:\n sub_header, sub_frames = serialize_and_split(\n obj,\n serializers=serializers,\n on_error=on_error,\n context=context,\n size=frame_split_size,\n )\n _inplace_compress_frames(sub_header, sub_frames)\n sub_header[\"num-sub-frames\"] = len(sub_frames)\n sub_header = msgpack.dumps(\n sub_header, default=msgpack_encode_default, use_bin_type=True\n )\n return [sub_header] + sub_frames\n\n def create_pickled_sub_frames(obj: Pickled | ToPickle) -> list:\n if isinstance(obj, Pickled):\n sub_header, sub_frames = obj.header, obj.frames\n else:\n sub_frames = []\n sub_header = {\n \"pickled-obj\": pickle.dumps(\n obj.data,\n # In to support len() and slicing, we convert `PickleBuffer`\n # objects to memoryviews of bytes.\n buffer_callback=lambda x: sub_frames.append(\n ensure_memoryview(x)\n ),\n )\n }\n _inplace_compress_frames(sub_header, sub_frames)\n\n sub_header[\"num-sub-frames\"] = len(sub_frames)\n sub_header = msgpack.dumps(sub_header)\n return [sub_header] + sub_frames\n\n frames = [None]\n\n def _encode_default(obj):\n if isinstance(obj, (Serialize, Serialized)):\n offset = len(frames)\n frames.extend(create_serialized_sub_frames(obj))\n return {\"__Serialized__\": offset}\n elif isinstance(obj, (ToPickle, Pickled)):\n offset = len(frames)\n frames.extend(create_pickled_sub_frames(obj))\n return {\"__Pickled__\": offset}\n else:\n return msgpack_encode_default(obj)\n\n frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\n return frames\n\n except Exception:\n logger.critical(\"Failed to Serialize\", exc_info=True)\n raise\n\n\ndef loads(frames, deserialize=True, deserializers=None):\n \"\"\"Transform bytestream back into Python value\"\"\"\n\n try:\n\n def _decode_default(obj):\n offset = obj.get(\"__Serialized__\", 0)\n if offset > 0:\n sub_header = msgpack.loads(\n frames[offset],\n object_hook=msgpack_decode_default,\n use_list=False,\n **msgpack_opts,\n )\n offset += 1\n sub_frames = frames[offset : offset + sub_header[\"num-sub-frames\"]]\n if deserialize:\n if \"compression\" in sub_header:\n sub_frames = decompress(sub_header, sub_frames)\n return merge_and_deserialize(\n sub_header, sub_frames, deserializers=deserializers\n )\n else:\n return Serialized(sub_header, sub_frames)\n\n offset = obj.get(\"__Pickled__\", 0)\n if offset > 0:\n sub_header = msgpack.loads(frames[offset])\n offset += 1\n sub_frames = frames[offset : offset + sub_header[\"num-sub-frames\"]]\n if \"compression\" in sub_header:\n sub_frames = decompress(sub_header, sub_frames)\n return pickle.loads(sub_header[\"pickled-obj\"], buffers=sub_frames)\n return msgpack_decode_default(obj)\n\n return msgpack.loads(\n frames[0], object_hook=_decode_default, use_list=False, **msgpack_opts\n )\n\n except Exception:\n logger.critical(\"Failed to deserialize\", exc_info=True)\n raise\n", "path": "distributed/protocol/core.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\n\nimport msgpack\n\nfrom distributed.protocol import pickle\nfrom distributed.protocol.compression import decompress, maybe_compress\nfrom distributed.protocol.serialize import (\n Pickled,\n Serialize,\n Serialized,\n ToPickle,\n _is_msgpack_serializable,\n merge_and_deserialize,\n msgpack_decode_default,\n msgpack_encode_default,\n serialize_and_split,\n)\nfrom distributed.protocol.utils import msgpack_opts\nfrom distributed.utils import ensure_memoryview\n\nlogger = logging.getLogger(__name__)\n\n\ndef dumps( # type: ignore[no-untyped-def]\n msg, serializers=None, on_error=\"message\", context=None, frame_split_size=None\n) -> list:\n \"\"\"Transform Python message to bytestream suitable for communication\n\n Developer Notes\n ---------------\n The approach here is to use `msgpack.dumps()` to serialize `msg` and\n write the result to the first output frame. If `msgpack.dumps()`\n encounters an object it cannot serialize like a NumPy array, it is handled\n out-of-band by `_encode_default()` and appended to the output frame list.\n \"\"\"\n try:\n if context and \"compression\" in context:\n compress_opts = {\"compression\": context[\"compression\"]}\n else:\n compress_opts = {}\n\n def _inplace_compress_frames(header, frames):\n compression = list(header.get(\"compression\", [None] * len(frames)))\n\n for i in range(len(frames)):\n if compression[i] is None:\n compression[i], frames[i] = maybe_compress(\n frames[i], **compress_opts\n )\n\n header[\"compression\"] = tuple(compression)\n\n def create_serialized_sub_frames(obj: Serialized | Serialize) -> list:\n if isinstance(obj, Serialized):\n sub_header, sub_frames = obj.header, obj.frames\n else:\n sub_header, sub_frames = serialize_and_split(\n obj,\n serializers=serializers,\n on_error=on_error,\n context=context,\n size=frame_split_size,\n )\n _inplace_compress_frames(sub_header, sub_frames)\n sub_header[\"num-sub-frames\"] = len(sub_frames)\n sub_header = msgpack.dumps(\n sub_header, default=msgpack_encode_default, use_bin_type=True\n )\n return [sub_header] + sub_frames\n\n def create_pickled_sub_frames(obj: Pickled | ToPickle) -> list:\n if isinstance(obj, Pickled):\n sub_header, sub_frames = obj.header, obj.frames\n else:\n sub_frames = []\n sub_header = {\n \"pickled-obj\": pickle.dumps(\n obj.data,\n # In to support len() and slicing, we convert `PickleBuffer`\n # objects to memoryviews of bytes.\n buffer_callback=lambda x: sub_frames.append(\n ensure_memoryview(x)\n ),\n )\n }\n _inplace_compress_frames(sub_header, sub_frames)\n\n sub_header[\"num-sub-frames\"] = len(sub_frames)\n sub_header = msgpack.dumps(sub_header)\n return [sub_header] + sub_frames\n\n frames = [None]\n\n def _encode_default(obj):\n if isinstance(obj, (Serialize, Serialized)):\n offset = len(frames)\n frames.extend(create_serialized_sub_frames(obj))\n return {\"__Serialized__\": offset}\n elif isinstance(obj, (ToPickle, Pickled)):\n offset = len(frames)\n frames.extend(create_pickled_sub_frames(obj))\n return {\"__Pickled__\": offset}\n else:\n return msgpack_encode_default(obj)\n\n try:\n frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\n except TypeError as e:\n logger.info(\n f\"Failed to serialize ({e}); falling back to pickle. \"\n \"Be aware that this may degrade performance.\"\n )\n\n def _encode_default_safe(obj):\n encoded = _encode_default(obj)\n if encoded is not obj or _is_msgpack_serializable(obj):\n return encoded\n\n obj = ToPickle(obj)\n offset = len(frames)\n frames.extend(create_pickled_sub_frames(obj))\n return {\"__Pickled__\": offset}\n\n # If possible, we want to avoid the performance penalty from the checks\n # implemented in _encode_default_safe to fall back to pickle, so we\n # try to serialize the data without the fallback first assuming that\n # this succeeds in the overwhelming majority of cases.\n frames[0] = msgpack.dumps(\n msg, default=_encode_default_safe, use_bin_type=True\n )\n return frames\n\n except Exception:\n logger.critical(\"Failed to Serialize\", exc_info=True)\n raise\n\n\ndef loads(frames, deserialize=True, deserializers=None):\n \"\"\"Transform bytestream back into Python value\"\"\"\n\n try:\n\n def _decode_default(obj):\n offset = obj.get(\"__Serialized__\", 0)\n if offset > 0:\n sub_header = msgpack.loads(\n frames[offset],\n object_hook=msgpack_decode_default,\n use_list=False,\n **msgpack_opts,\n )\n offset += 1\n sub_frames = frames[offset : offset + sub_header[\"num-sub-frames\"]]\n if deserialize:\n if \"compression\" in sub_header:\n sub_frames = decompress(sub_header, sub_frames)\n return merge_and_deserialize(\n sub_header, sub_frames, deserializers=deserializers\n )\n else:\n return Serialized(sub_header, sub_frames)\n\n offset = obj.get(\"__Pickled__\", 0)\n if offset > 0:\n sub_header = msgpack.loads(frames[offset])\n offset += 1\n sub_frames = frames[offset : offset + sub_header[\"num-sub-frames\"]]\n if \"compression\" in sub_header:\n sub_frames = decompress(sub_header, sub_frames)\n return pickle.loads(sub_header[\"pickled-obj\"], buffers=sub_frames)\n return msgpack_decode_default(obj)\n\n return msgpack.loads(\n frames[0], object_hook=_decode_default, use_list=False, **msgpack_opts\n )\n\n except Exception:\n logger.critical(\"Failed to deserialize\", exc_info=True)\n raise\n", "path": "distributed/protocol/core.py"}]}
| 2,523 | 382 |
gh_patches_debug_5353
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-1949
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"$" character in user's response to a CLI prompt is mistakenly interpreted as a variable to substitute
**Describe the bug**
"$" character in user's response to a CLI prompt is mistakenly interpreted as a variable to substitute

**To Reproduce**
Steps to reproduce the behavior:
1. Add a Datasource in the CLI
2. Choose a database, answer one of the prompts (e.g., password) with a string that contains the "$" character
3. See the error
**Expected behavior**
The "$" character should not be treated specially in user's answer to prompts
**Environment (please complete the following information):**
- OS: N/A
- GE Version: 0.12.1
**Additional context**
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/data_context/util.py`
Content:
```
1 import copy
2 import inspect
3 import logging
4 import os
5 import re
6 from collections import OrderedDict
7
8 from great_expectations.data_context.types.base import (
9 DataContextConfig,
10 DataContextConfigSchema,
11 )
12 from great_expectations.exceptions import MissingConfigVariableError
13 from great_expectations.util import load_class, verify_dynamic_loading_support
14
15 logger = logging.getLogger(__name__)
16
17
18 # TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default
19 # TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.
20 def instantiate_class_from_config(config, runtime_environment, config_defaults=None):
21 """Build a GE class from configuration dictionaries."""
22
23 if config_defaults is None:
24 config_defaults = {}
25
26 config = copy.deepcopy(config)
27
28 module_name = config.pop("module_name", None)
29 if module_name is None:
30 try:
31 module_name = config_defaults.pop("module_name")
32 except KeyError:
33 raise KeyError(
34 "Neither config : {} nor config_defaults : {} contains a module_name key.".format(
35 config, config_defaults,
36 )
37 )
38 else:
39 # Pop the value without using it, to avoid sending an unwanted value to the config_class
40 config_defaults.pop("module_name", None)
41
42 verify_dynamic_loading_support(module_name=module_name)
43
44 class_name = config.pop("class_name", None)
45 if class_name is None:
46 logger.warning(
47 "Instantiating class from config without an explicit class_name is dangerous. Consider adding "
48 "an explicit class_name for %s" % config.get("name")
49 )
50 try:
51 class_name = config_defaults.pop("class_name")
52 except KeyError:
53 raise KeyError(
54 "Neither config : {} nor config_defaults : {} contains a class_name key.".format(
55 config, config_defaults,
56 )
57 )
58 else:
59 # Pop the value without using it, to avoid sending an unwanted value to the config_class
60 config_defaults.pop("class_name", None)
61
62 class_ = load_class(class_name=class_name, module_name=module_name)
63
64 config_with_defaults = copy.deepcopy(config_defaults)
65 config_with_defaults.update(config)
66 if runtime_environment is not None:
67 # If there are additional kwargs available in the runtime_environment requested by a
68 # class to be instantiated, provide them
69 argspec = inspect.getfullargspec(class_.__init__)[0][1:]
70
71 missing_args = set(argspec) - set(config_with_defaults.keys())
72 config_with_defaults.update(
73 {
74 missing_arg: runtime_environment[missing_arg]
75 for missing_arg in missing_args
76 if missing_arg in runtime_environment
77 }
78 )
79 # Add the entire runtime_environment as well if it's requested
80 if "runtime_environment" in missing_args:
81 config_with_defaults.update({"runtime_environment": runtime_environment})
82
83 try:
84 class_instance = class_(**config_with_defaults)
85 except TypeError as e:
86 raise TypeError(
87 "Couldn't instantiate class : {} with config : \n\t{}\n \n".format(
88 class_name, format_dict_for_error_message(config_with_defaults)
89 )
90 + str(e)
91 )
92
93 return class_instance
94
95
96 def format_dict_for_error_message(dict_):
97 # TODO : Tidy this up a bit. Indentation isn't fully consistent.
98
99 return "\n\t".join("\t\t".join((str(key), str(dict_[key]))) for key in dict_)
100
101
102 def substitute_config_variable(template_str, config_variables_dict):
103 """
104 This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,
105 returns a string where the pattern is replaced with the value of SOME_VARIABLE,
106 otherwise returns the string unchanged.
107
108 If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.
109 If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).
110 If it is not found there, the input string is returned as is.
111
112 :param template_str: a string that might or might not be of the form ${SOME_VARIABLE}
113 or $SOME_VARIABLE
114 :param config_variables_dict: a dictionary of config variables. It is loaded from the
115 config variables store (by default, "uncommitted/config_variables.yml file)
116 :return:
117 """
118 if template_str is None:
119 return template_str
120
121 try:
122 match = re.search(r"\$\{(.*?)\}", template_str) or re.search(
123 r"\$([_a-z][_a-z0-9]*)", template_str
124 )
125 except TypeError:
126 # If the value is not a string (e.g., a boolean), we should return it as is
127 return template_str
128
129 if match:
130 config_variable_value = config_variables_dict.get(match.group(1))
131 if config_variable_value is not None:
132 if match.start() == 0 and match.end() == len(template_str):
133 return config_variable_value
134 else:
135 return (
136 template_str[: match.start()]
137 + config_variable_value
138 + template_str[match.end() :]
139 )
140
141 raise MissingConfigVariableError(
142 f"""\n\nUnable to find a match for config substitution variable: `{match.group(1)}`.
143 Please add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.
144 See https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets""",
145 missing_config_variable=match.group(1),
146 )
147
148 return template_str
149
150
151 def substitute_all_config_variables(data, replace_variables_dict):
152 """
153 Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like
154 config object for their values.
155
156 The method traverses the dictionary recursively.
157
158 :param data:
159 :param replace_variables_dict:
160 :return: a dictionary with all the variables replaced with their values
161 """
162 if isinstance(data, DataContextConfig):
163 data = DataContextConfigSchema().dump(data)
164
165 if isinstance(data, dict) or isinstance(data, OrderedDict):
166 return {
167 k: substitute_all_config_variables(v, replace_variables_dict)
168 for k, v in data.items()
169 }
170 elif isinstance(data, list):
171 return [
172 substitute_all_config_variables(v, replace_variables_dict) for v in data
173 ]
174 return substitute_config_variable(data, replace_variables_dict)
175
176
177 def file_relative_path(dunderfile, relative_path):
178 """
179 This function is useful when one needs to load a file that is
180 relative to the position of the current file. (Such as when
181 you encode a configuration file path in source file and want
182 in runnable in any current working directory)
183
184 It is meant to be used like the following:
185 file_relative_path(__file__, 'path/relative/to/file')
186
187 H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34
188 """
189 return os.path.join(os.path.dirname(dunderfile), relative_path)
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/data_context/util.py b/great_expectations/data_context/util.py
--- a/great_expectations/data_context/util.py
+++ b/great_expectations/data_context/util.py
@@ -165,6 +165,8 @@
if isinstance(data, dict) or isinstance(data, OrderedDict):
return {
k: substitute_all_config_variables(v, replace_variables_dict)
+ if k.lower() not in {"password"}
+ else v
for k, v in data.items()
}
elif isinstance(data, list):
|
{"golden_diff": "diff --git a/great_expectations/data_context/util.py b/great_expectations/data_context/util.py\n--- a/great_expectations/data_context/util.py\n+++ b/great_expectations/data_context/util.py\n@@ -165,6 +165,8 @@\n if isinstance(data, dict) or isinstance(data, OrderedDict):\n return {\n k: substitute_all_config_variables(v, replace_variables_dict)\n+ if k.lower() not in {\"password\"}\n+ else v\n for k, v in data.items()\n }\n elif isinstance(data, list):\n", "issue": "\"$\" character in user's response to a CLI prompt is mistakenly interpreted as a variable to substitute\n**Describe the bug**\r\n\"$\" character in user's response to a CLI prompt is mistakenly interpreted as a variable to substitute\r\n\r\n\r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Add a Datasource in the CLI\r\n2. Choose a database, answer one of the prompts (e.g., password) with a string that contains the \"$\" character\r\n3. See the error\r\n\r\n**Expected behavior**\r\nThe \"$\" character should not be treated specially in user's answer to prompts\r\n\r\n**Environment (please complete the following information):**\r\n - OS: N/A\r\n - GE Version: 0.12.1\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "import copy\nimport inspect\nimport logging\nimport os\nimport re\nfrom collections import OrderedDict\n\nfrom great_expectations.data_context.types.base import (\n DataContextConfig,\n DataContextConfigSchema,\n)\nfrom great_expectations.exceptions import MissingConfigVariableError\nfrom great_expectations.util import load_class, verify_dynamic_loading_support\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default\n# TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.\ndef instantiate_class_from_config(config, runtime_environment, config_defaults=None):\n \"\"\"Build a GE class from configuration dictionaries.\"\"\"\n\n if config_defaults is None:\n config_defaults = {}\n\n config = copy.deepcopy(config)\n\n module_name = config.pop(\"module_name\", None)\n if module_name is None:\n try:\n module_name = config_defaults.pop(\"module_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a module_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"module_name\", None)\n\n verify_dynamic_loading_support(module_name=module_name)\n\n class_name = config.pop(\"class_name\", None)\n if class_name is None:\n logger.warning(\n \"Instantiating class from config without an explicit class_name is dangerous. Consider adding \"\n \"an explicit class_name for %s\" % config.get(\"name\")\n )\n try:\n class_name = config_defaults.pop(\"class_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a class_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"class_name\", None)\n\n class_ = load_class(class_name=class_name, module_name=module_name)\n\n config_with_defaults = copy.deepcopy(config_defaults)\n config_with_defaults.update(config)\n if runtime_environment is not None:\n # If there are additional kwargs available in the runtime_environment requested by a\n # class to be instantiated, provide them\n argspec = inspect.getfullargspec(class_.__init__)[0][1:]\n\n missing_args = set(argspec) - set(config_with_defaults.keys())\n config_with_defaults.update(\n {\n missing_arg: runtime_environment[missing_arg]\n for missing_arg in missing_args\n if missing_arg in runtime_environment\n }\n )\n # Add the entire runtime_environment as well if it's requested\n if \"runtime_environment\" in missing_args:\n config_with_defaults.update({\"runtime_environment\": runtime_environment})\n\n try:\n class_instance = class_(**config_with_defaults)\n except TypeError as e:\n raise TypeError(\n \"Couldn't instantiate class : {} with config : \\n\\t{}\\n \\n\".format(\n class_name, format_dict_for_error_message(config_with_defaults)\n )\n + str(e)\n )\n\n return class_instance\n\n\ndef format_dict_for_error_message(dict_):\n # TODO : Tidy this up a bit. Indentation isn't fully consistent.\n\n return \"\\n\\t\".join(\"\\t\\t\".join((str(key), str(dict_[key]))) for key in dict_)\n\n\ndef substitute_config_variable(template_str, config_variables_dict):\n \"\"\"\n This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,\n returns a string where the pattern is replaced with the value of SOME_VARIABLE,\n otherwise returns the string unchanged.\n\n If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.\n If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).\n If it is not found there, the input string is returned as is.\n\n :param template_str: a string that might or might not be of the form ${SOME_VARIABLE}\n or $SOME_VARIABLE\n :param config_variables_dict: a dictionary of config variables. It is loaded from the\n config variables store (by default, \"uncommitted/config_variables.yml file)\n :return:\n \"\"\"\n if template_str is None:\n return template_str\n\n try:\n match = re.search(r\"\\$\\{(.*?)\\}\", template_str) or re.search(\n r\"\\$([_a-z][_a-z0-9]*)\", template_str\n )\n except TypeError:\n # If the value is not a string (e.g., a boolean), we should return it as is\n return template_str\n\n if match:\n config_variable_value = config_variables_dict.get(match.group(1))\n if config_variable_value is not None:\n if match.start() == 0 and match.end() == len(template_str):\n return config_variable_value\n else:\n return (\n template_str[: match.start()]\n + config_variable_value\n + template_str[match.end() :]\n )\n\n raise MissingConfigVariableError(\n f\"\"\"\\n\\nUnable to find a match for config substitution variable: `{match.group(1)}`.\nPlease add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.\nSee https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"\"\",\n missing_config_variable=match.group(1),\n )\n\n return template_str\n\n\ndef substitute_all_config_variables(data, replace_variables_dict):\n \"\"\"\n Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like\n config object for their values.\n\n The method traverses the dictionary recursively.\n\n :param data:\n :param replace_variables_dict:\n :return: a dictionary with all the variables replaced with their values\n \"\"\"\n if isinstance(data, DataContextConfig):\n data = DataContextConfigSchema().dump(data)\n\n if isinstance(data, dict) or isinstance(data, OrderedDict):\n return {\n k: substitute_all_config_variables(v, replace_variables_dict)\n for k, v in data.items()\n }\n elif isinstance(data, list):\n return [\n substitute_all_config_variables(v, replace_variables_dict) for v in data\n ]\n return substitute_config_variable(data, replace_variables_dict)\n\n\ndef file_relative_path(dunderfile, relative_path):\n \"\"\"\n This function is useful when one needs to load a file that is\n relative to the position of the current file. (Such as when\n you encode a configuration file path in source file and want\n in runnable in any current working directory)\n\n It is meant to be used like the following:\n file_relative_path(__file__, 'path/relative/to/file')\n\n H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34\n \"\"\"\n return os.path.join(os.path.dirname(dunderfile), relative_path)\n", "path": "great_expectations/data_context/util.py"}], "after_files": [{"content": "import copy\nimport inspect\nimport logging\nimport os\nimport re\nfrom collections import OrderedDict\n\nfrom great_expectations.data_context.types.base import (\n DataContextConfig,\n DataContextConfigSchema,\n)\nfrom great_expectations.exceptions import MissingConfigVariableError\nfrom great_expectations.util import load_class, verify_dynamic_loading_support\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default\n# TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.\ndef instantiate_class_from_config(config, runtime_environment, config_defaults=None):\n \"\"\"Build a GE class from configuration dictionaries.\"\"\"\n\n if config_defaults is None:\n config_defaults = {}\n\n config = copy.deepcopy(config)\n\n module_name = config.pop(\"module_name\", None)\n if module_name is None:\n try:\n module_name = config_defaults.pop(\"module_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a module_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"module_name\", None)\n\n verify_dynamic_loading_support(module_name=module_name)\n\n class_name = config.pop(\"class_name\", None)\n if class_name is None:\n logger.warning(\n \"Instantiating class from config without an explicit class_name is dangerous. Consider adding \"\n \"an explicit class_name for %s\" % config.get(\"name\")\n )\n try:\n class_name = config_defaults.pop(\"class_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a class_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"class_name\", None)\n\n class_ = load_class(class_name=class_name, module_name=module_name)\n\n config_with_defaults = copy.deepcopy(config_defaults)\n config_with_defaults.update(config)\n if runtime_environment is not None:\n # If there are additional kwargs available in the runtime_environment requested by a\n # class to be instantiated, provide them\n argspec = inspect.getfullargspec(class_.__init__)[0][1:]\n\n missing_args = set(argspec) - set(config_with_defaults.keys())\n config_with_defaults.update(\n {\n missing_arg: runtime_environment[missing_arg]\n for missing_arg in missing_args\n if missing_arg in runtime_environment\n }\n )\n # Add the entire runtime_environment as well if it's requested\n if \"runtime_environment\" in missing_args:\n config_with_defaults.update({\"runtime_environment\": runtime_environment})\n\n try:\n class_instance = class_(**config_with_defaults)\n except TypeError as e:\n raise TypeError(\n \"Couldn't instantiate class : {} with config : \\n\\t{}\\n \\n\".format(\n class_name, format_dict_for_error_message(config_with_defaults)\n )\n + str(e)\n )\n\n return class_instance\n\n\ndef format_dict_for_error_message(dict_):\n # TODO : Tidy this up a bit. Indentation isn't fully consistent.\n\n return \"\\n\\t\".join(\"\\t\\t\".join((str(key), str(dict_[key]))) for key in dict_)\n\n\ndef substitute_config_variable(template_str, config_variables_dict):\n \"\"\"\n This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,\n returns a string where the pattern is replaced with the value of SOME_VARIABLE,\n otherwise returns the string unchanged.\n\n If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.\n If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).\n If it is not found there, the input string is returned as is.\n\n :param template_str: a string that might or might not be of the form ${SOME_VARIABLE}\n or $SOME_VARIABLE\n :param config_variables_dict: a dictionary of config variables. It is loaded from the\n config variables store (by default, \"uncommitted/config_variables.yml file)\n :return:\n \"\"\"\n if template_str is None:\n return template_str\n\n try:\n match = re.search(r\"\\$\\{(.*?)\\}\", template_str) or re.search(\n r\"\\$([_a-z][_a-z0-9]*)\", template_str\n )\n except TypeError:\n # If the value is not a string (e.g., a boolean), we should return it as is\n return template_str\n\n if match:\n config_variable_value = config_variables_dict.get(match.group(1))\n if config_variable_value is not None:\n if match.start() == 0 and match.end() == len(template_str):\n return config_variable_value\n else:\n return (\n template_str[: match.start()]\n + config_variable_value\n + template_str[match.end() :]\n )\n\n raise MissingConfigVariableError(\n f\"\"\"\\n\\nUnable to find a match for config substitution variable: `{match.group(1)}`.\nPlease add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.\nSee https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"\"\",\n missing_config_variable=match.group(1),\n )\n\n return template_str\n\n\ndef substitute_all_config_variables(data, replace_variables_dict):\n \"\"\"\n Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like\n config object for their values.\n\n The method traverses the dictionary recursively.\n\n :param data:\n :param replace_variables_dict:\n :return: a dictionary with all the variables replaced with their values\n \"\"\"\n if isinstance(data, DataContextConfig):\n data = DataContextConfigSchema().dump(data)\n\n if isinstance(data, dict) or isinstance(data, OrderedDict):\n return {\n k: substitute_all_config_variables(v, replace_variables_dict)\n if k.lower() not in {\"password\"}\n else v\n for k, v in data.items()\n }\n elif isinstance(data, list):\n return [\n substitute_all_config_variables(v, replace_variables_dict) for v in data\n ]\n return substitute_config_variable(data, replace_variables_dict)\n\n\ndef file_relative_path(dunderfile, relative_path):\n \"\"\"\n This function is useful when one needs to load a file that is\n relative to the position of the current file. (Such as when\n you encode a configuration file path in source file and want\n in runnable in any current working directory)\n\n It is meant to be used like the following:\n file_relative_path(__file__, 'path/relative/to/file')\n\n H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34\n \"\"\"\n return os.path.join(os.path.dirname(dunderfile), relative_path)\n", "path": "great_expectations/data_context/util.py"}]}
| 2,493 | 121 |
gh_patches_debug_25429
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-6524
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix test_sdists test farm test
`tests/letstest/scripts/test_sdists.sh` which we currently run as part of the release process is now failing. I think this should be fixed quickly so it doesn't cause us problems whenever we go to do our next release. The cause of the failure is that newer versions of `pytest` cannot be installed using old versions of `setuptools`.
To resolve this, we either need to use a newer version of `setuptools` or an older version of `pytest`. My thought is to upgrade `setuptools` because it should prevent problems with other packages in the future and our tests should arguably work with the latest version of every package installed, however, either approach should work.
If we upgrade `setuptools`, we need to decide where to do it. My thought would be to do it somewhere in `tools/_venv_common.py` or the scripts it calls so other code that uses `tools/_venv_common.py` would benefit. (It also looks like `tests/letstest/scripts/test_sdists.sh` still uses the old name `tools/_venv_common.sh` and needs to be updated.) `setuptools>=30.3` seems to be [required](https://github.com/pytest-dev/pytest/commit/d4351ac5a287eb96521f09618bbbe1c73f4e6884#diff-2eeaed663bd0d25b7e608891384b7298).
If we decided to pin back `pytest` (and maybe other packages) instead, I don't have an opinion on the approach.
@ohemorange or @sydneyli, are you interested in taking this? I initially planned on doing it myself but haven't managed to get to it yet and think it's important. It shouldn't be much work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/_venv_common.py`
Content:
```
1 #!/usr/bin/env python
2 """Aids in creating a developer virtual environment for Certbot.
3
4 When this module is run as a script, it takes the arguments that should
5 be passed to pip to install the Certbot packages as command line
6 arguments. The virtual environment will be created with the name "venv"
7 in the current working directory and will use the default version of
8 Python for the virtualenv executable in your PATH. You can change the
9 name of the virtual environment by setting the environment variable
10 VENV_NAME.
11 """
12
13 from __future__ import print_function
14
15 import os
16 import shutil
17 import glob
18 import time
19 import subprocess
20 import sys
21 import re
22 import shlex
23
24 VERSION_PATTERN = re.compile(r'^(\d+)\.(\d+).*$')
25
26
27 class PythonExecutableNotFoundError(Exception):
28 pass
29
30
31 def find_python_executable(python_major):
32 # type: (int) -> str
33 """
34 Find the relevant python executable that is of the given python major version.
35 Will test, in decreasing priority order:
36 * the current Python interpreter
37 * 'pythonX' executable in PATH (with X the given major version) if available
38 * 'python' executable in PATH if available
39 * Windows Python launcher 'py' executable in PATH if available
40 Incompatible python versions for Certbot will be evicted (eg. Python < 3.5 on Windows)
41 :param int python_major: the Python major version to target (2 or 3)
42 :rtype: str
43 :return: the relevant python executable path
44 :raise RuntimeError: if no relevant python executable path could be found
45 """
46 python_executable_path = None
47
48 # First try, current python executable
49 if _check_version('{0}.{1}.{2}'.format(
50 sys.version_info[0], sys.version_info[1], sys.version_info[2]), python_major):
51 return sys.executable
52
53 # Second try, with python executables in path
54 versions_to_test = ['2.7', '2', ''] if python_major == 2 else ['3', '']
55 for one_version in versions_to_test:
56 try:
57 one_python = 'python{0}'.format(one_version)
58 output = subprocess.check_output([one_python, '--version'],
59 universal_newlines=True, stderr=subprocess.STDOUT)
60 if _check_version(output.strip().split()[1], python_major):
61 return subprocess.check_output([one_python, '-c',
62 'import sys; sys.stdout.write(sys.executable);'],
63 universal_newlines=True)
64 except (subprocess.CalledProcessError, OSError):
65 pass
66
67 # Last try, with Windows Python launcher
68 try:
69 env_arg = '-{0}'.format(python_major)
70 output_version = subprocess.check_output(['py', env_arg, '--version'],
71 universal_newlines=True, stderr=subprocess.STDOUT)
72 if _check_version(output_version.strip().split()[1], python_major):
73 return subprocess.check_output(['py', env_arg, '-c',
74 'import sys; sys.stdout.write(sys.executable);'],
75 universal_newlines=True)
76 except (subprocess.CalledProcessError, OSError):
77 pass
78
79 if not python_executable_path:
80 raise RuntimeError('Error, no compatible Python {0} executable for Certbot could be found.'
81 .format(python_major))
82
83
84 def _check_version(version_str, major_version):
85 search = VERSION_PATTERN.search(version_str)
86
87 if not search:
88 return False
89
90 version = (int(search.group(1)), int(search.group(2)))
91
92 minimal_version_supported = (2, 7)
93 if major_version == 3 and os.name == 'nt':
94 minimal_version_supported = (3, 5)
95 elif major_version == 3:
96 minimal_version_supported = (3, 4)
97
98 if version >= minimal_version_supported:
99 return True
100
101 print('Incompatible python version for Certbot found: {0}'.format(version_str))
102 return False
103
104
105 def subprocess_with_print(cmd, env=os.environ, shell=False):
106 print('+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd)
107 subprocess.check_call(cmd, env=env, shell=shell)
108
109
110 def get_venv_bin_path(venv_path):
111 python_linux = os.path.join(venv_path, 'bin/python')
112 if os.path.isfile(python_linux):
113 return os.path.abspath(os.path.dirname(python_linux))
114 python_windows = os.path.join(venv_path, 'Scripts\\python.exe')
115 if os.path.isfile(python_windows):
116 return os.path.abspath(os.path.dirname(python_windows))
117
118 raise ValueError((
119 'Error, could not find python executable in venv path {0}: is it a valid venv ?'
120 .format(venv_path)))
121
122
123 def main(venv_name, venv_args, args):
124 """Creates a virtual environment and installs packages.
125
126 :param str venv_name: The name or path at where the virtual
127 environment should be created.
128 :param str venv_args: Command line arguments for virtualenv
129 :param str args: Command line arguments that should be given to pip
130 to install packages
131 """
132
133 for path in glob.glob('*.egg-info'):
134 if os.path.isdir(path):
135 shutil.rmtree(path)
136 else:
137 os.remove(path)
138
139 env_venv_name = os.environ.get('VENV_NAME')
140 if env_venv_name:
141 print('Creating venv at {0}'
142 ' as specified in VENV_NAME'.format(env_venv_name))
143 venv_name = env_venv_name
144
145 if os.path.isdir(venv_name):
146 os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))
147
148 command = [sys.executable, '-m', 'virtualenv', '--no-site-packages', '--setuptools', venv_name]
149 command.extend(shlex.split(venv_args))
150 subprocess_with_print(command)
151
152 # We execute the two following commands in the context of the virtual environment, to install
153 # the packages in it. To do so, we append the venv binary to the PATH that will be used for
154 # these commands. With this trick, correct python executable will be selected.
155 new_environ = os.environ.copy()
156 new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])
157 subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),
158 env=new_environ, shell=True)
159 subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),
160 env=new_environ, shell=True)
161
162 if os.path.isdir(os.path.join(venv_name, 'bin')):
163 # Linux/OSX specific
164 print('-------------------------------------------------------------------')
165 print('Please run the following command to activate developer environment:')
166 print('source {0}/bin/activate'.format(venv_name))
167 print('-------------------------------------------------------------------')
168 elif os.path.isdir(os.path.join(venv_name, 'Scripts')):
169 # Windows specific
170 print('---------------------------------------------------------------------------')
171 print('Please run one of the following commands to activate developer environment:')
172 print('{0}\\Scripts\\activate.bat (for Batch)'.format(venv_name))
173 print('.\\{0}\\Scripts\\Activate.ps1 (for Powershell)'.format(venv_name))
174 print('---------------------------------------------------------------------------')
175 else:
176 raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))
177
178
179 if __name__ == '__main__':
180 main('venv',
181 '',
182 sys.argv[1:])
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/_venv_common.py b/tools/_venv_common.py
--- a/tools/_venv_common.py
+++ b/tools/_venv_common.py
@@ -149,13 +149,15 @@
command.extend(shlex.split(venv_args))
subprocess_with_print(command)
- # We execute the two following commands in the context of the virtual environment, to install
+ # We execute the following commands in the context of the virtual environment, to install
# the packages in it. To do so, we append the venv binary to the PATH that will be used for
# these commands. With this trick, correct python executable will be selected.
new_environ = os.environ.copy()
new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])
subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),
env=new_environ, shell=True)
+ subprocess_with_print("python -m pip install --upgrade 'setuptools>=30.3'",
+ env=new_environ, shell=True)
subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),
env=new_environ, shell=True)
|
{"golden_diff": "diff --git a/tools/_venv_common.py b/tools/_venv_common.py\n--- a/tools/_venv_common.py\n+++ b/tools/_venv_common.py\n@@ -149,13 +149,15 @@\n command.extend(shlex.split(venv_args))\n subprocess_with_print(command)\n \n- # We execute the two following commands in the context of the virtual environment, to install\n+ # We execute the following commands in the context of the virtual environment, to install\n # the packages in it. To do so, we append the venv binary to the PATH that will be used for\n # these commands. With this trick, correct python executable will be selected.\n new_environ = os.environ.copy()\n new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])\n subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),\n env=new_environ, shell=True)\n+ subprocess_with_print(\"python -m pip install --upgrade 'setuptools>=30.3'\",\n+ env=new_environ, shell=True)\n subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),\n env=new_environ, shell=True)\n", "issue": "Fix test_sdists test farm test\n`tests/letstest/scripts/test_sdists.sh` which we currently run as part of the release process is now failing. I think this should be fixed quickly so it doesn't cause us problems whenever we go to do our next release. The cause of the failure is that newer versions of `pytest` cannot be installed using old versions of `setuptools`.\r\n\r\nTo resolve this, we either need to use a newer version of `setuptools` or an older version of `pytest`. My thought is to upgrade `setuptools` because it should prevent problems with other packages in the future and our tests should arguably work with the latest version of every package installed, however, either approach should work.\r\n\r\nIf we upgrade `setuptools`, we need to decide where to do it. My thought would be to do it somewhere in `tools/_venv_common.py` or the scripts it calls so other code that uses `tools/_venv_common.py` would benefit. (It also looks like `tests/letstest/scripts/test_sdists.sh` still uses the old name `tools/_venv_common.sh` and needs to be updated.) `setuptools>=30.3` seems to be [required](https://github.com/pytest-dev/pytest/commit/d4351ac5a287eb96521f09618bbbe1c73f4e6884#diff-2eeaed663bd0d25b7e608891384b7298).\r\n\r\nIf we decided to pin back `pytest` (and maybe other packages) instead, I don't have an opinion on the approach.\r\n\r\n@ohemorange or @sydneyli, are you interested in taking this? I initially planned on doing it myself but haven't managed to get to it yet and think it's important. It shouldn't be much work.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Aids in creating a developer virtual environment for Certbot.\n\nWhen this module is run as a script, it takes the arguments that should\nbe passed to pip to install the Certbot packages as command line\narguments. The virtual environment will be created with the name \"venv\"\nin the current working directory and will use the default version of\nPython for the virtualenv executable in your PATH. You can change the\nname of the virtual environment by setting the environment variable\nVENV_NAME.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport glob\nimport time\nimport subprocess\nimport sys\nimport re\nimport shlex\n\nVERSION_PATTERN = re.compile(r'^(\\d+)\\.(\\d+).*$')\n\n\nclass PythonExecutableNotFoundError(Exception):\n pass\n\n\ndef find_python_executable(python_major):\n # type: (int) -> str\n \"\"\"\n Find the relevant python executable that is of the given python major version.\n Will test, in decreasing priority order:\n * the current Python interpreter\n * 'pythonX' executable in PATH (with X the given major version) if available\n * 'python' executable in PATH if available\n * Windows Python launcher 'py' executable in PATH if available\n Incompatible python versions for Certbot will be evicted (eg. Python < 3.5 on Windows)\n :param int python_major: the Python major version to target (2 or 3)\n :rtype: str\n :return: the relevant python executable path\n :raise RuntimeError: if no relevant python executable path could be found\n \"\"\"\n python_executable_path = None\n\n # First try, current python executable\n if _check_version('{0}.{1}.{2}'.format(\n sys.version_info[0], sys.version_info[1], sys.version_info[2]), python_major):\n return sys.executable\n\n # Second try, with python executables in path\n versions_to_test = ['2.7', '2', ''] if python_major == 2 else ['3', '']\n for one_version in versions_to_test:\n try:\n one_python = 'python{0}'.format(one_version)\n output = subprocess.check_output([one_python, '--version'],\n universal_newlines=True, stderr=subprocess.STDOUT)\n if _check_version(output.strip().split()[1], python_major):\n return subprocess.check_output([one_python, '-c',\n 'import sys; sys.stdout.write(sys.executable);'],\n universal_newlines=True)\n except (subprocess.CalledProcessError, OSError):\n pass\n\n # Last try, with Windows Python launcher\n try:\n env_arg = '-{0}'.format(python_major)\n output_version = subprocess.check_output(['py', env_arg, '--version'],\n universal_newlines=True, stderr=subprocess.STDOUT)\n if _check_version(output_version.strip().split()[1], python_major):\n return subprocess.check_output(['py', env_arg, '-c',\n 'import sys; sys.stdout.write(sys.executable);'],\n universal_newlines=True)\n except (subprocess.CalledProcessError, OSError):\n pass\n\n if not python_executable_path:\n raise RuntimeError('Error, no compatible Python {0} executable for Certbot could be found.'\n .format(python_major))\n\n\ndef _check_version(version_str, major_version):\n search = VERSION_PATTERN.search(version_str)\n\n if not search:\n return False\n\n version = (int(search.group(1)), int(search.group(2)))\n\n minimal_version_supported = (2, 7)\n if major_version == 3 and os.name == 'nt':\n minimal_version_supported = (3, 5)\n elif major_version == 3:\n minimal_version_supported = (3, 4)\n\n if version >= minimal_version_supported:\n return True\n\n print('Incompatible python version for Certbot found: {0}'.format(version_str))\n return False\n\n\ndef subprocess_with_print(cmd, env=os.environ, shell=False):\n print('+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd)\n subprocess.check_call(cmd, env=env, shell=shell)\n\n\ndef get_venv_bin_path(venv_path):\n python_linux = os.path.join(venv_path, 'bin/python')\n if os.path.isfile(python_linux):\n return os.path.abspath(os.path.dirname(python_linux))\n python_windows = os.path.join(venv_path, 'Scripts\\\\python.exe')\n if os.path.isfile(python_windows):\n return os.path.abspath(os.path.dirname(python_windows))\n\n raise ValueError((\n 'Error, could not find python executable in venv path {0}: is it a valid venv ?'\n .format(venv_path)))\n\n\ndef main(venv_name, venv_args, args):\n \"\"\"Creates a virtual environment and installs packages.\n\n :param str venv_name: The name or path at where the virtual\n environment should be created.\n :param str venv_args: Command line arguments for virtualenv\n :param str args: Command line arguments that should be given to pip\n to install packages\n \"\"\"\n\n for path in glob.glob('*.egg-info'):\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n env_venv_name = os.environ.get('VENV_NAME')\n if env_venv_name:\n print('Creating venv at {0}'\n ' as specified in VENV_NAME'.format(env_venv_name))\n venv_name = env_venv_name\n\n if os.path.isdir(venv_name):\n os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))\n\n command = [sys.executable, '-m', 'virtualenv', '--no-site-packages', '--setuptools', venv_name]\n command.extend(shlex.split(venv_args))\n subprocess_with_print(command)\n\n # We execute the two following commands in the context of the virtual environment, to install\n # the packages in it. To do so, we append the venv binary to the PATH that will be used for\n # these commands. With this trick, correct python executable will be selected.\n new_environ = os.environ.copy()\n new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])\n subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),\n env=new_environ, shell=True)\n subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),\n env=new_environ, shell=True)\n\n if os.path.isdir(os.path.join(venv_name, 'bin')):\n # Linux/OSX specific\n print('-------------------------------------------------------------------')\n print('Please run the following command to activate developer environment:')\n print('source {0}/bin/activate'.format(venv_name))\n print('-------------------------------------------------------------------')\n elif os.path.isdir(os.path.join(venv_name, 'Scripts')):\n # Windows specific\n print('---------------------------------------------------------------------------')\n print('Please run one of the following commands to activate developer environment:')\n print('{0}\\\\Scripts\\\\activate.bat (for Batch)'.format(venv_name))\n print('.\\\\{0}\\\\Scripts\\\\Activate.ps1 (for Powershell)'.format(venv_name))\n print('---------------------------------------------------------------------------')\n else:\n raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))\n\n\nif __name__ == '__main__':\n main('venv',\n '',\n sys.argv[1:])\n", "path": "tools/_venv_common.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Aids in creating a developer virtual environment for Certbot.\n\nWhen this module is run as a script, it takes the arguments that should\nbe passed to pip to install the Certbot packages as command line\narguments. The virtual environment will be created with the name \"venv\"\nin the current working directory and will use the default version of\nPython for the virtualenv executable in your PATH. You can change the\nname of the virtual environment by setting the environment variable\nVENV_NAME.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport glob\nimport time\nimport subprocess\nimport sys\nimport re\nimport shlex\n\nVERSION_PATTERN = re.compile(r'^(\\d+)\\.(\\d+).*$')\n\n\nclass PythonExecutableNotFoundError(Exception):\n pass\n\n\ndef find_python_executable(python_major):\n # type: (int) -> str\n \"\"\"\n Find the relevant python executable that is of the given python major version.\n Will test, in decreasing priority order:\n * the current Python interpreter\n * 'pythonX' executable in PATH (with X the given major version) if available\n * 'python' executable in PATH if available\n * Windows Python launcher 'py' executable in PATH if available\n Incompatible python versions for Certbot will be evicted (eg. Python < 3.5 on Windows)\n :param int python_major: the Python major version to target (2 or 3)\n :rtype: str\n :return: the relevant python executable path\n :raise RuntimeError: if no relevant python executable path could be found\n \"\"\"\n python_executable_path = None\n\n # First try, current python executable\n if _check_version('{0}.{1}.{2}'.format(\n sys.version_info[0], sys.version_info[1], sys.version_info[2]), python_major):\n return sys.executable\n\n # Second try, with python executables in path\n versions_to_test = ['2.7', '2', ''] if python_major == 2 else ['3', '']\n for one_version in versions_to_test:\n try:\n one_python = 'python{0}'.format(one_version)\n output = subprocess.check_output([one_python, '--version'],\n universal_newlines=True, stderr=subprocess.STDOUT)\n if _check_version(output.strip().split()[1], python_major):\n return subprocess.check_output([one_python, '-c',\n 'import sys; sys.stdout.write(sys.executable);'],\n universal_newlines=True)\n except (subprocess.CalledProcessError, OSError):\n pass\n\n # Last try, with Windows Python launcher\n try:\n env_arg = '-{0}'.format(python_major)\n output_version = subprocess.check_output(['py', env_arg, '--version'],\n universal_newlines=True, stderr=subprocess.STDOUT)\n if _check_version(output_version.strip().split()[1], python_major):\n return subprocess.check_output(['py', env_arg, '-c',\n 'import sys; sys.stdout.write(sys.executable);'],\n universal_newlines=True)\n except (subprocess.CalledProcessError, OSError):\n pass\n\n if not python_executable_path:\n raise RuntimeError('Error, no compatible Python {0} executable for Certbot could be found.'\n .format(python_major))\n\n\ndef _check_version(version_str, major_version):\n search = VERSION_PATTERN.search(version_str)\n\n if not search:\n return False\n\n version = (int(search.group(1)), int(search.group(2)))\n\n minimal_version_supported = (2, 7)\n if major_version == 3 and os.name == 'nt':\n minimal_version_supported = (3, 5)\n elif major_version == 3:\n minimal_version_supported = (3, 4)\n\n if version >= minimal_version_supported:\n return True\n\n print('Incompatible python version for Certbot found: {0}'.format(version_str))\n return False\n\n\ndef subprocess_with_print(cmd, env=os.environ, shell=False):\n print('+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd)\n subprocess.check_call(cmd, env=env, shell=shell)\n\n\ndef get_venv_bin_path(venv_path):\n python_linux = os.path.join(venv_path, 'bin/python')\n if os.path.isfile(python_linux):\n return os.path.abspath(os.path.dirname(python_linux))\n python_windows = os.path.join(venv_path, 'Scripts\\\\python.exe')\n if os.path.isfile(python_windows):\n return os.path.abspath(os.path.dirname(python_windows))\n\n raise ValueError((\n 'Error, could not find python executable in venv path {0}: is it a valid venv ?'\n .format(venv_path)))\n\n\ndef main(venv_name, venv_args, args):\n \"\"\"Creates a virtual environment and installs packages.\n\n :param str venv_name: The name or path at where the virtual\n environment should be created.\n :param str venv_args: Command line arguments for virtualenv\n :param str args: Command line arguments that should be given to pip\n to install packages\n \"\"\"\n\n for path in glob.glob('*.egg-info'):\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n env_venv_name = os.environ.get('VENV_NAME')\n if env_venv_name:\n print('Creating venv at {0}'\n ' as specified in VENV_NAME'.format(env_venv_name))\n venv_name = env_venv_name\n\n if os.path.isdir(venv_name):\n os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))\n\n command = [sys.executable, '-m', 'virtualenv', '--no-site-packages', '--setuptools', venv_name]\n command.extend(shlex.split(venv_args))\n subprocess_with_print(command)\n\n # We execute the following commands in the context of the virtual environment, to install\n # the packages in it. To do so, we append the venv binary to the PATH that will be used for\n # these commands. With this trick, correct python executable will be selected.\n new_environ = os.environ.copy()\n new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])\n subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),\n env=new_environ, shell=True)\n subprocess_with_print(\"python -m pip install --upgrade 'setuptools>=30.3'\",\n env=new_environ, shell=True)\n subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),\n env=new_environ, shell=True)\n\n if os.path.isdir(os.path.join(venv_name, 'bin')):\n # Linux/OSX specific\n print('-------------------------------------------------------------------')\n print('Please run the following command to activate developer environment:')\n print('source {0}/bin/activate'.format(venv_name))\n print('-------------------------------------------------------------------')\n elif os.path.isdir(os.path.join(venv_name, 'Scripts')):\n # Windows specific\n print('---------------------------------------------------------------------------')\n print('Please run one of the following commands to activate developer environment:')\n print('{0}\\\\Scripts\\\\activate.bat (for Batch)'.format(venv_name))\n print('.\\\\{0}\\\\Scripts\\\\Activate.ps1 (for Powershell)'.format(venv_name))\n print('---------------------------------------------------------------------------')\n else:\n raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))\n\n\nif __name__ == '__main__':\n main('venv',\n '',\n sys.argv[1:])\n", "path": "tools/_venv_common.py"}]}
| 2,781 | 287 |
gh_patches_debug_33406
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-2315
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Start shipping our own type stubs
Typeshed houses very rudimentary type stubs for urllib3 but they're hidden within `requests.packages.urllib3` so Mypy can't take advantage of them when using urllib3 on its own. We could also probably improve on what is already in typeshed by adding better types than `Any` everywhere.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 import os
2 import shutil
3 import subprocess
4
5 import nox
6
7 # Whenever type-hints are completed on a file it should be added here so that
8 # this file will continue to be checked by mypy. Errors from other files are
9 # ignored.
10 TYPED_FILES = {
11 "src/urllib3/contrib/__init__.py",
12 "src/urllib3/contrib/_securetransport/bindings.py",
13 "src/urllib3/contrib/_securetransport/low_level.py",
14 "src/urllib3/contrib/ntlmpool.py",
15 "src/urllib3/contrib/pyopenssl.py",
16 "src/urllib3/contrib/securetransport.py",
17 "src/urllib3/contrib/socks.py",
18 "src/urllib3/__init__.py",
19 "src/urllib3/connection.py",
20 "src/urllib3/connectionpool.py",
21 "src/urllib3/exceptions.py",
22 "src/urllib3/_collections.py",
23 "src/urllib3/fields.py",
24 "src/urllib3/filepost.py",
25 "src/urllib3/poolmanager.py",
26 "src/urllib3/request.py",
27 "src/urllib3/response.py",
28 "src/urllib3/util/connection.py",
29 "src/urllib3/util/proxy.py",
30 "src/urllib3/util/queue.py",
31 "src/urllib3/util/response.py",
32 "src/urllib3/util/ssl_.py",
33 "src/urllib3/util/ssl_match_hostname.py",
34 "src/urllib3/util/ssltransport.py",
35 "src/urllib3/util/url.py",
36 "src/urllib3/util/request.py",
37 "src/urllib3/util/retry.py",
38 "src/urllib3/util/timeout.py",
39 "src/urllib3/util/util.py",
40 "src/urllib3/util/wait.py",
41 }
42 SOURCE_FILES = [
43 "docs/",
44 "dummyserver/",
45 "src/",
46 "test/",
47 "noxfile.py",
48 "setup.py",
49 ]
50
51
52 def tests_impl(session, extras="socks,secure,brotli"):
53 # Install deps and the package itself.
54 session.install("-r", "dev-requirements.txt")
55 session.install(f".[{extras}]")
56
57 # Show the pip version.
58 session.run("pip", "--version")
59 # Print the Python version and bytesize.
60 session.run("python", "--version")
61 session.run("python", "-c", "import struct; print(struct.calcsize('P') * 8)")
62 # Print OpenSSL information.
63 session.run("python", "-m", "OpenSSL.debug")
64
65 # Inspired from https://github.com/pyca/cryptography
66 # We use parallel mode and then combine here so that coverage.py will take
67 # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py
68 # and collapse them into src/urllib3/__init__.py.
69
70 session.run(
71 "coverage",
72 "run",
73 "--parallel-mode",
74 "-m",
75 "pytest",
76 "-r",
77 "a",
78 "--tb=native",
79 "--no-success-flaky-report",
80 *(session.posargs or ("test/",)),
81 env={"PYTHONWARNINGS": "always::DeprecationWarning"},
82 )
83 session.run("coverage", "combine")
84 session.run("coverage", "report", "-m")
85 session.run("coverage", "xml")
86
87
88 @nox.session(python=["3.6", "3.7", "3.8", "3.9", "3.10", "pypy"])
89 def test(session):
90 tests_impl(session)
91
92
93 @nox.session(python=["2.7"])
94 def unsupported_python2(session):
95 # Can't check both returncode and output with session.run
96 process = subprocess.run(
97 ["python", "setup.py", "install"],
98 env={**session.env},
99 text=True,
100 capture_output=True,
101 )
102 assert process.returncode == 1
103 print(process.stderr)
104 assert "Unsupported Python version" in process.stderr
105
106
107 @nox.session(python=["3"])
108 def test_brotlipy(session):
109 """Check that if 'brotlipy' is installed instead of 'brotli' or
110 'brotlicffi' that we still don't blow up.
111 """
112 session.install("brotlipy")
113 tests_impl(session, extras="socks,secure")
114
115
116 @nox.session()
117 def format(session):
118 """Run code formatters."""
119 session.install("pre-commit")
120 session.run("pre-commit", "--version")
121
122 process = subprocess.run(
123 ["pre-commit", "run", "--all-files"],
124 env=session.env,
125 text=True,
126 stdout=subprocess.PIPE,
127 stderr=subprocess.STDOUT,
128 )
129 # Ensure that pre-commit itself ran successfully
130 assert process.returncode in (0, 1)
131
132 lint(session)
133
134
135 @nox.session
136 def lint(session):
137 session.install("pre-commit")
138 session.run("pre-commit", "run", "--all-files")
139
140 mypy(session)
141
142
143 @nox.session()
144 def mypy(session):
145 """Run mypy."""
146 session.install("mypy==0.812")
147 session.install("idna>=2.0.0")
148 session.run("mypy", "--version")
149
150 session.log("mypy --strict src/urllib3")
151 all_errors, errors = [], []
152 process = subprocess.run(
153 ["mypy", "--strict", "src/urllib3"],
154 env=session.env,
155 text=True,
156 stdout=subprocess.PIPE,
157 stderr=subprocess.STDOUT,
158 )
159 # Ensure that mypy itself ran successfully
160 assert process.returncode in (0, 1)
161
162 for line in process.stdout.split("\n"):
163 all_errors.append(line)
164 filepath = line.partition(":")[0]
165 if filepath.replace(".pyi", ".py") in TYPED_FILES:
166 errors.append(line)
167 session.log(f"all errors count: {len(all_errors)}")
168 if errors:
169 session.error("\n" + "\n".join(sorted(set(errors))))
170
171
172 @nox.session
173 def docs(session):
174 session.install("-r", "docs/requirements.txt")
175 session.install(".[socks,secure,brotli]")
176
177 session.chdir("docs")
178 if os.path.exists("_build"):
179 shutil.rmtree("_build")
180 session.run("sphinx-build", "-b", "html", "-W", ".", "_build/html")
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -4,41 +4,6 @@
import nox
-# Whenever type-hints are completed on a file it should be added here so that
-# this file will continue to be checked by mypy. Errors from other files are
-# ignored.
-TYPED_FILES = {
- "src/urllib3/contrib/__init__.py",
- "src/urllib3/contrib/_securetransport/bindings.py",
- "src/urllib3/contrib/_securetransport/low_level.py",
- "src/urllib3/contrib/ntlmpool.py",
- "src/urllib3/contrib/pyopenssl.py",
- "src/urllib3/contrib/securetransport.py",
- "src/urllib3/contrib/socks.py",
- "src/urllib3/__init__.py",
- "src/urllib3/connection.py",
- "src/urllib3/connectionpool.py",
- "src/urllib3/exceptions.py",
- "src/urllib3/_collections.py",
- "src/urllib3/fields.py",
- "src/urllib3/filepost.py",
- "src/urllib3/poolmanager.py",
- "src/urllib3/request.py",
- "src/urllib3/response.py",
- "src/urllib3/util/connection.py",
- "src/urllib3/util/proxy.py",
- "src/urllib3/util/queue.py",
- "src/urllib3/util/response.py",
- "src/urllib3/util/ssl_.py",
- "src/urllib3/util/ssl_match_hostname.py",
- "src/urllib3/util/ssltransport.py",
- "src/urllib3/util/url.py",
- "src/urllib3/util/request.py",
- "src/urllib3/util/retry.py",
- "src/urllib3/util/timeout.py",
- "src/urllib3/util/util.py",
- "src/urllib3/util/wait.py",
-}
SOURCE_FILES = [
"docs/",
"dummyserver/",
@@ -146,27 +111,7 @@
session.install("mypy==0.812")
session.install("idna>=2.0.0")
session.run("mypy", "--version")
-
- session.log("mypy --strict src/urllib3")
- all_errors, errors = [], []
- process = subprocess.run(
- ["mypy", "--strict", "src/urllib3"],
- env=session.env,
- text=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- )
- # Ensure that mypy itself ran successfully
- assert process.returncode in (0, 1)
-
- for line in process.stdout.split("\n"):
- all_errors.append(line)
- filepath = line.partition(":")[0]
- if filepath.replace(".pyi", ".py") in TYPED_FILES:
- errors.append(line)
- session.log(f"all errors count: {len(all_errors)}")
- if errors:
- session.error("\n" + "\n".join(sorted(set(errors))))
+ session.run("mypy", "--strict", "src/urllib3")
@nox.session
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -4,41 +4,6 @@\n \n import nox\n \n-# Whenever type-hints are completed on a file it should be added here so that\n-# this file will continue to be checked by mypy. Errors from other files are\n-# ignored.\n-TYPED_FILES = {\n- \"src/urllib3/contrib/__init__.py\",\n- \"src/urllib3/contrib/_securetransport/bindings.py\",\n- \"src/urllib3/contrib/_securetransport/low_level.py\",\n- \"src/urllib3/contrib/ntlmpool.py\",\n- \"src/urllib3/contrib/pyopenssl.py\",\n- \"src/urllib3/contrib/securetransport.py\",\n- \"src/urllib3/contrib/socks.py\",\n- \"src/urllib3/__init__.py\",\n- \"src/urllib3/connection.py\",\n- \"src/urllib3/connectionpool.py\",\n- \"src/urllib3/exceptions.py\",\n- \"src/urllib3/_collections.py\",\n- \"src/urllib3/fields.py\",\n- \"src/urllib3/filepost.py\",\n- \"src/urllib3/poolmanager.py\",\n- \"src/urllib3/request.py\",\n- \"src/urllib3/response.py\",\n- \"src/urllib3/util/connection.py\",\n- \"src/urllib3/util/proxy.py\",\n- \"src/urllib3/util/queue.py\",\n- \"src/urllib3/util/response.py\",\n- \"src/urllib3/util/ssl_.py\",\n- \"src/urllib3/util/ssl_match_hostname.py\",\n- \"src/urllib3/util/ssltransport.py\",\n- \"src/urllib3/util/url.py\",\n- \"src/urllib3/util/request.py\",\n- \"src/urllib3/util/retry.py\",\n- \"src/urllib3/util/timeout.py\",\n- \"src/urllib3/util/util.py\",\n- \"src/urllib3/util/wait.py\",\n-}\n SOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n@@ -146,27 +111,7 @@\n session.install(\"mypy==0.812\")\n session.install(\"idna>=2.0.0\")\n session.run(\"mypy\", \"--version\")\n-\n- session.log(\"mypy --strict src/urllib3\")\n- all_errors, errors = [], []\n- process = subprocess.run(\n- [\"mypy\", \"--strict\", \"src/urllib3\"],\n- env=session.env,\n- text=True,\n- stdout=subprocess.PIPE,\n- stderr=subprocess.STDOUT,\n- )\n- # Ensure that mypy itself ran successfully\n- assert process.returncode in (0, 1)\n-\n- for line in process.stdout.split(\"\\n\"):\n- all_errors.append(line)\n- filepath = line.partition(\":\")[0]\n- if filepath.replace(\".pyi\", \".py\") in TYPED_FILES:\n- errors.append(line)\n- session.log(f\"all errors count: {len(all_errors)}\")\n- if errors:\n- session.error(\"\\n\" + \"\\n\".join(sorted(set(errors))))\n+ session.run(\"mypy\", \"--strict\", \"src/urllib3\")\n \n \n @nox.session\n", "issue": "Start shipping our own type stubs\nTypeshed houses very rudimentary type stubs for urllib3 but they're hidden within `requests.packages.urllib3` so Mypy can't take advantage of them when using urllib3 on its own. We could also probably improve on what is already in typeshed by adding better types than `Any` everywhere.\n", "before_files": [{"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\n# Whenever type-hints are completed on a file it should be added here so that\n# this file will continue to be checked by mypy. Errors from other files are\n# ignored.\nTYPED_FILES = {\n \"src/urllib3/contrib/__init__.py\",\n \"src/urllib3/contrib/_securetransport/bindings.py\",\n \"src/urllib3/contrib/_securetransport/low_level.py\",\n \"src/urllib3/contrib/ntlmpool.py\",\n \"src/urllib3/contrib/pyopenssl.py\",\n \"src/urllib3/contrib/securetransport.py\",\n \"src/urllib3/contrib/socks.py\",\n \"src/urllib3/__init__.py\",\n \"src/urllib3/connection.py\",\n \"src/urllib3/connectionpool.py\",\n \"src/urllib3/exceptions.py\",\n \"src/urllib3/_collections.py\",\n \"src/urllib3/fields.py\",\n \"src/urllib3/filepost.py\",\n \"src/urllib3/poolmanager.py\",\n \"src/urllib3/request.py\",\n \"src/urllib3/response.py\",\n \"src/urllib3/util/connection.py\",\n \"src/urllib3/util/proxy.py\",\n \"src/urllib3/util/queue.py\",\n \"src/urllib3/util/response.py\",\n \"src/urllib3/util/ssl_.py\",\n \"src/urllib3/util/ssl_match_hostname.py\",\n \"src/urllib3/util/ssltransport.py\",\n \"src/urllib3/util/url.py\",\n \"src/urllib3/util/request.py\",\n \"src/urllib3/util/retry.py\",\n \"src/urllib3/util/timeout.py\",\n \"src/urllib3/util/util.py\",\n \"src/urllib3/util/wait.py\",\n}\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(session, extras=\"socks,secure,brotli\"):\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://github.com/pyca/cryptography\n # We use parallel mode and then combine here so that coverage.py will take\n # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py\n # and collapse them into src/urllib3/__init__.py.\n\n session.run(\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\n@nox.session(python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy\"])\ndef test(session):\n tests_impl(session)\n\n\n@nox.session(python=[\"2.7\"])\ndef unsupported_python2(session):\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Unsupported Python version\" in process.stderr\n\n\n@nox.session(python=[\"3\"])\ndef test_brotlipy(session):\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\")\n\n\n@nox.session()\ndef format(session):\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\n@nox.session\ndef lint(session):\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\n@nox.session()\ndef mypy(session):\n \"\"\"Run mypy.\"\"\"\n session.install(\"mypy==0.812\")\n session.install(\"idna>=2.0.0\")\n session.run(\"mypy\", \"--version\")\n\n session.log(\"mypy --strict src/urllib3\")\n all_errors, errors = [], []\n process = subprocess.run(\n [\"mypy\", \"--strict\", \"src/urllib3\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that mypy itself ran successfully\n assert process.returncode in (0, 1)\n\n for line in process.stdout.split(\"\\n\"):\n all_errors.append(line)\n filepath = line.partition(\":\")[0]\n if filepath.replace(\".pyi\", \".py\") in TYPED_FILES:\n errors.append(line)\n session.log(f\"all errors count: {len(all_errors)}\")\n if errors:\n session.error(\"\\n\" + \"\\n\".join(sorted(set(errors))))\n\n\n@nox.session\ndef docs(session):\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}], "after_files": [{"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(session, extras=\"socks,secure,brotli\"):\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://github.com/pyca/cryptography\n # We use parallel mode and then combine here so that coverage.py will take\n # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py\n # and collapse them into src/urllib3/__init__.py.\n\n session.run(\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\n@nox.session(python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy\"])\ndef test(session):\n tests_impl(session)\n\n\n@nox.session(python=[\"2.7\"])\ndef unsupported_python2(session):\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Unsupported Python version\" in process.stderr\n\n\n@nox.session(python=[\"3\"])\ndef test_brotlipy(session):\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\")\n\n\n@nox.session()\ndef format(session):\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\n@nox.session\ndef lint(session):\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\n@nox.session()\ndef mypy(session):\n \"\"\"Run mypy.\"\"\"\n session.install(\"mypy==0.812\")\n session.install(\"idna>=2.0.0\")\n session.run(\"mypy\", \"--version\")\n session.run(\"mypy\", \"--strict\", \"src/urllib3\")\n\n\n@nox.session\ndef docs(session):\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}]}
| 2,186 | 767 |
gh_patches_debug_43397
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1931
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add token to websockets connector
Currently, anyone that knows opdroid url and endpoint will be able to request a socket to initialize a websocket connection.
Ideally, we should allow users to select a token in the configuration settings. When opsdroid gets a request, if the token doesn't exist then we just reject the request.
This will also a nice feature to implement along side opsdroid-web v2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/connector/websocket/__init__.py`
Content:
```
1 """A connector which allows websocket connections."""
2 import json
3 import logging
4 import uuid
5 from datetime import datetime
6
7 import aiohttp
8 import aiohttp.web
9 from aiohttp import WSCloseCode
10 from opsdroid.connector import Connector, register_event
11 from opsdroid.events import Message
12
13 _LOGGER = logging.getLogger(__name__)
14 HEADERS = {"Access-Control-Allow-Origin": "*"}
15 CONFIG_SCHEMA = {"bot-name": str, "max-connections": int, "connection-timeout": int}
16
17
18 class ConnectorWebsocket(Connector):
19 """A connector which allows websocket connections."""
20
21 def __init__(self, config, opsdroid=None):
22 """Create the connector."""
23 super().__init__(config, opsdroid=opsdroid)
24 _LOGGER.debug(_("Starting Websocket connector."))
25 self.name = config.get("name", "websocket")
26 self.max_connections = self.config.get("max-connections", 10)
27 self.connection_timeout = self.config.get("connection-timeout", 60)
28 self.accepting_connections = True
29 self.active_connections = {}
30 self.available_connections = []
31 self.bot_name = self.config.get("bot-name", "opsdroid")
32
33 async def connect(self):
34 """Connect to the chat service."""
35 self.accepting_connections = True
36
37 self.opsdroid.web_server.web_app.router.add_get(
38 "/connector/websocket/{socket}", self.websocket_handler
39 )
40
41 self.opsdroid.web_server.web_app.router.add_post(
42 "/connector/websocket", self.new_websocket_handler
43 )
44
45 async def disconnect(self):
46 """Disconnect from current sessions."""
47 self.accepting_connections = False
48 connections_to_close = self.active_connections.copy()
49 for connection in connections_to_close:
50 await connections_to_close[connection].close(
51 code=WSCloseCode.GOING_AWAY, message="Server shutdown"
52 )
53
54 async def new_websocket_handler(self, request):
55 """Handle for aiohttp creating websocket connections."""
56 if (
57 len(self.active_connections) + len(self.available_connections)
58 < self.max_connections
59 and self.accepting_connections
60 ):
61 socket = {"id": str(uuid.uuid1()), "date": datetime.now()}
62 self.available_connections.append(socket)
63 return aiohttp.web.Response(
64 text=json.dumps({"socket": socket["id"]}), headers=HEADERS, status=200
65 )
66 return aiohttp.web.Response(
67 text=json.dumps("No connections available"), headers=HEADERS, status=429
68 )
69
70 async def websocket_handler(self, request):
71 """Handle for aiohttp handling websocket connections."""
72 socket = request.match_info.get("socket")
73 available = [
74 item for item in self.available_connections if item["id"] == socket
75 ]
76 if len(available) != 1:
77 return aiohttp.web.Response(
78 text=json.dumps("Please request a socket first"),
79 headers=HEADERS,
80 status=400,
81 )
82 if (
83 datetime.now() - available[0]["date"]
84 ).total_seconds() > self.connection_timeout:
85 self.available_connections.remove(available[0])
86 return aiohttp.web.Response(
87 text=json.dumps("Socket request timed out"), headers=HEADERS, status=408
88 )
89 self.available_connections.remove(available[0])
90 _LOGGER.debug(_("User connected to %s."), socket)
91
92 websocket = aiohttp.web.WebSocketResponse()
93 await websocket.prepare(request)
94
95 self.active_connections[socket] = websocket
96 async for msg in websocket:
97 if msg.type == aiohttp.WSMsgType.TEXT:
98 message = Message(text=msg.data, user=None, target=None, connector=self)
99 await self.opsdroid.parse(message)
100 elif msg.type == aiohttp.WSMsgType.ERROR:
101 _LOGGER.error(
102 _("Websocket connection closed with exception %s."),
103 websocket.exception(),
104 )
105
106 _LOGGER.info(_("websocket connection closed"))
107 self.active_connections.pop(socket, None)
108
109 return websocket
110
111 async def listen(self):
112 """Listen for and parse new messages.
113
114 Listening is handled by the aiohttp web server so
115 we don't need to do anything here.
116
117 """
118
119 @register_event(Message)
120 async def send_message(self, message):
121 """Respond with a message."""
122 try:
123 if message.target is None:
124 message.target = next(iter(self.active_connections))
125 _LOGGER.debug(
126 _("Responding with: '%s' in target %s"), message.text, message.target
127 )
128 await self.active_connections[message.target].send_str(message.text)
129 except KeyError:
130 _LOGGER.error(_("No active socket for target %s"), message.target)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/connector/websocket/__init__.py b/opsdroid/connector/websocket/__init__.py
--- a/opsdroid/connector/websocket/__init__.py
+++ b/opsdroid/connector/websocket/__init__.py
@@ -9,12 +9,49 @@
from aiohttp import WSCloseCode
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
+import dataclasses
+from typing import Optional
_LOGGER = logging.getLogger(__name__)
HEADERS = {"Access-Control-Allow-Origin": "*"}
CONFIG_SCHEMA = {"bot-name": str, "max-connections": int, "connection-timeout": int}
+@dataclasses.dataclass
+class WebsocketMessage:
+ """A message received from a websocket connection."""
+
+ message: str
+ user: Optional[str]
+ socket: Optional[str]
+
+ @classmethod
+ def parse_payload(cls, payload: str):
+ """Parse the payload of a websocket message.
+
+ We will try to parse the payload as a json string.
+ If that fails, we will use the default values which are:
+
+ message: str
+ user: None
+ socket: None
+
+ """
+ try:
+ data = json.loads(payload)
+ return cls(
+ message=data.get("message"),
+ user=data.get("user"),
+ socket=data.get("socket"),
+ )
+ except json.JSONDecodeError:
+ return cls(
+ message=payload,
+ user=None,
+ socket=None,
+ )
+
+
class ConnectorWebsocket(Connector):
"""A connector which allows websocket connections."""
@@ -29,6 +66,7 @@
self.active_connections = {}
self.available_connections = []
self.bot_name = self.config.get("bot-name", "opsdroid")
+ self.authorization_token = self.config.get("token")
async def connect(self):
"""Connect to the chat service."""
@@ -53,6 +91,7 @@
async def new_websocket_handler(self, request):
"""Handle for aiohttp creating websocket connections."""
+ await self.validate_request(request)
if (
len(self.active_connections) + len(self.available_connections)
< self.max_connections
@@ -95,7 +134,13 @@
self.active_connections[socket] = websocket
async for msg in websocket:
if msg.type == aiohttp.WSMsgType.TEXT:
- message = Message(text=msg.data, user=None, target=None, connector=self)
+ payload = WebsocketMessage.parse_payload(msg.data)
+ message = Message(
+ text=payload.message,
+ user=payload.user,
+ target=payload.socket,
+ connector=self,
+ )
await self.opsdroid.parse(message)
elif msg.type == aiohttp.WSMsgType.ERROR:
_LOGGER.error(
@@ -108,6 +153,20 @@
return websocket
+ async def validate_request(self, request):
+ """Validate the request by looking at headers and the connector token.
+
+ If the token does not exist in the header, but exists in the configuration,
+ then we will simply return a Forbidden error.
+
+ """
+ client_token = request.headers.get("Authorization")
+ if self.authorization_token and (
+ client_token is None or client_token != self.authorization_token
+ ):
+ raise aiohttp.web.HTTPUnauthorized()
+ return True
+
async def listen(self):
"""Listen for and parse new messages.
@@ -117,7 +176,7 @@
"""
@register_event(Message)
- async def send_message(self, message):
+ async def send_message(self, message: Message):
"""Respond with a message."""
try:
if message.target is None:
|
{"golden_diff": "diff --git a/opsdroid/connector/websocket/__init__.py b/opsdroid/connector/websocket/__init__.py\n--- a/opsdroid/connector/websocket/__init__.py\n+++ b/opsdroid/connector/websocket/__init__.py\n@@ -9,12 +9,49 @@\n from aiohttp import WSCloseCode\n from opsdroid.connector import Connector, register_event\n from opsdroid.events import Message\n+import dataclasses\n+from typing import Optional\n \n _LOGGER = logging.getLogger(__name__)\n HEADERS = {\"Access-Control-Allow-Origin\": \"*\"}\n CONFIG_SCHEMA = {\"bot-name\": str, \"max-connections\": int, \"connection-timeout\": int}\n \n \n+@dataclasses.dataclass\n+class WebsocketMessage:\n+ \"\"\"A message received from a websocket connection.\"\"\"\n+\n+ message: str\n+ user: Optional[str]\n+ socket: Optional[str]\n+\n+ @classmethod\n+ def parse_payload(cls, payload: str):\n+ \"\"\"Parse the payload of a websocket message.\n+\n+ We will try to parse the payload as a json string.\n+ If that fails, we will use the default values which are:\n+\n+ message: str\n+ user: None\n+ socket: None\n+\n+ \"\"\"\n+ try:\n+ data = json.loads(payload)\n+ return cls(\n+ message=data.get(\"message\"),\n+ user=data.get(\"user\"),\n+ socket=data.get(\"socket\"),\n+ )\n+ except json.JSONDecodeError:\n+ return cls(\n+ message=payload,\n+ user=None,\n+ socket=None,\n+ )\n+\n+\n class ConnectorWebsocket(Connector):\n \"\"\"A connector which allows websocket connections.\"\"\"\n \n@@ -29,6 +66,7 @@\n self.active_connections = {}\n self.available_connections = []\n self.bot_name = self.config.get(\"bot-name\", \"opsdroid\")\n+ self.authorization_token = self.config.get(\"token\")\n \n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n@@ -53,6 +91,7 @@\n \n async def new_websocket_handler(self, request):\n \"\"\"Handle for aiohttp creating websocket connections.\"\"\"\n+ await self.validate_request(request)\n if (\n len(self.active_connections) + len(self.available_connections)\n < self.max_connections\n@@ -95,7 +134,13 @@\n self.active_connections[socket] = websocket\n async for msg in websocket:\n if msg.type == aiohttp.WSMsgType.TEXT:\n- message = Message(text=msg.data, user=None, target=None, connector=self)\n+ payload = WebsocketMessage.parse_payload(msg.data)\n+ message = Message(\n+ text=payload.message,\n+ user=payload.user,\n+ target=payload.socket,\n+ connector=self,\n+ )\n await self.opsdroid.parse(message)\n elif msg.type == aiohttp.WSMsgType.ERROR:\n _LOGGER.error(\n@@ -108,6 +153,20 @@\n \n return websocket\n \n+ async def validate_request(self, request):\n+ \"\"\"Validate the request by looking at headers and the connector token.\n+\n+ If the token does not exist in the header, but exists in the configuration,\n+ then we will simply return a Forbidden error.\n+\n+ \"\"\"\n+ client_token = request.headers.get(\"Authorization\")\n+ if self.authorization_token and (\n+ client_token is None or client_token != self.authorization_token\n+ ):\n+ raise aiohttp.web.HTTPUnauthorized()\n+ return True\n+\n async def listen(self):\n \"\"\"Listen for and parse new messages.\n \n@@ -117,7 +176,7 @@\n \"\"\"\n \n @register_event(Message)\n- async def send_message(self, message):\n+ async def send_message(self, message: Message):\n \"\"\"Respond with a message.\"\"\"\n try:\n if message.target is None:\n", "issue": "Add token to websockets connector\nCurrently, anyone that knows opdroid url and endpoint will be able to request a socket to initialize a websocket connection.\r\n\r\nIdeally, we should allow users to select a token in the configuration settings. When opsdroid gets a request, if the token doesn't exist then we just reject the request.\r\n\r\nThis will also a nice feature to implement along side opsdroid-web v2\n", "before_files": [{"content": "\"\"\"A connector which allows websocket connections.\"\"\"\nimport json\nimport logging\nimport uuid\nfrom datetime import datetime\n\nimport aiohttp\nimport aiohttp.web\nfrom aiohttp import WSCloseCode\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nHEADERS = {\"Access-Control-Allow-Origin\": \"*\"}\nCONFIG_SCHEMA = {\"bot-name\": str, \"max-connections\": int, \"connection-timeout\": int}\n\n\nclass ConnectorWebsocket(Connector):\n \"\"\"A connector which allows websocket connections.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Websocket connector.\"))\n self.name = config.get(\"name\", \"websocket\")\n self.max_connections = self.config.get(\"max-connections\", 10)\n self.connection_timeout = self.config.get(\"connection-timeout\", 60)\n self.accepting_connections = True\n self.active_connections = {}\n self.available_connections = []\n self.bot_name = self.config.get(\"bot-name\", \"opsdroid\")\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n self.accepting_connections = True\n\n self.opsdroid.web_server.web_app.router.add_get(\n \"/connector/websocket/{socket}\", self.websocket_handler\n )\n\n self.opsdroid.web_server.web_app.router.add_post(\n \"/connector/websocket\", self.new_websocket_handler\n )\n\n async def disconnect(self):\n \"\"\"Disconnect from current sessions.\"\"\"\n self.accepting_connections = False\n connections_to_close = self.active_connections.copy()\n for connection in connections_to_close:\n await connections_to_close[connection].close(\n code=WSCloseCode.GOING_AWAY, message=\"Server shutdown\"\n )\n\n async def new_websocket_handler(self, request):\n \"\"\"Handle for aiohttp creating websocket connections.\"\"\"\n if (\n len(self.active_connections) + len(self.available_connections)\n < self.max_connections\n and self.accepting_connections\n ):\n socket = {\"id\": str(uuid.uuid1()), \"date\": datetime.now()}\n self.available_connections.append(socket)\n return aiohttp.web.Response(\n text=json.dumps({\"socket\": socket[\"id\"]}), headers=HEADERS, status=200\n )\n return aiohttp.web.Response(\n text=json.dumps(\"No connections available\"), headers=HEADERS, status=429\n )\n\n async def websocket_handler(self, request):\n \"\"\"Handle for aiohttp handling websocket connections.\"\"\"\n socket = request.match_info.get(\"socket\")\n available = [\n item for item in self.available_connections if item[\"id\"] == socket\n ]\n if len(available) != 1:\n return aiohttp.web.Response(\n text=json.dumps(\"Please request a socket first\"),\n headers=HEADERS,\n status=400,\n )\n if (\n datetime.now() - available[0][\"date\"]\n ).total_seconds() > self.connection_timeout:\n self.available_connections.remove(available[0])\n return aiohttp.web.Response(\n text=json.dumps(\"Socket request timed out\"), headers=HEADERS, status=408\n )\n self.available_connections.remove(available[0])\n _LOGGER.debug(_(\"User connected to %s.\"), socket)\n\n websocket = aiohttp.web.WebSocketResponse()\n await websocket.prepare(request)\n\n self.active_connections[socket] = websocket\n async for msg in websocket:\n if msg.type == aiohttp.WSMsgType.TEXT:\n message = Message(text=msg.data, user=None, target=None, connector=self)\n await self.opsdroid.parse(message)\n elif msg.type == aiohttp.WSMsgType.ERROR:\n _LOGGER.error(\n _(\"Websocket connection closed with exception %s.\"),\n websocket.exception(),\n )\n\n _LOGGER.info(_(\"websocket connection closed\"))\n self.active_connections.pop(socket, None)\n\n return websocket\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\n\n Listening is handled by the aiohttp web server so\n we don't need to do anything here.\n\n \"\"\"\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n try:\n if message.target is None:\n message.target = next(iter(self.active_connections))\n _LOGGER.debug(\n _(\"Responding with: '%s' in target %s\"), message.text, message.target\n )\n await self.active_connections[message.target].send_str(message.text)\n except KeyError:\n _LOGGER.error(_(\"No active socket for target %s\"), message.target)\n", "path": "opsdroid/connector/websocket/__init__.py"}], "after_files": [{"content": "\"\"\"A connector which allows websocket connections.\"\"\"\nimport json\nimport logging\nimport uuid\nfrom datetime import datetime\n\nimport aiohttp\nimport aiohttp.web\nfrom aiohttp import WSCloseCode\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\nimport dataclasses\nfrom typing import Optional\n\n_LOGGER = logging.getLogger(__name__)\nHEADERS = {\"Access-Control-Allow-Origin\": \"*\"}\nCONFIG_SCHEMA = {\"bot-name\": str, \"max-connections\": int, \"connection-timeout\": int}\n\n\n@dataclasses.dataclass\nclass WebsocketMessage:\n \"\"\"A message received from a websocket connection.\"\"\"\n\n message: str\n user: Optional[str]\n socket: Optional[str]\n\n @classmethod\n def parse_payload(cls, payload: str):\n \"\"\"Parse the payload of a websocket message.\n\n We will try to parse the payload as a json string.\n If that fails, we will use the default values which are:\n\n message: str\n user: None\n socket: None\n\n \"\"\"\n try:\n data = json.loads(payload)\n return cls(\n message=data.get(\"message\"),\n user=data.get(\"user\"),\n socket=data.get(\"socket\"),\n )\n except json.JSONDecodeError:\n return cls(\n message=payload,\n user=None,\n socket=None,\n )\n\n\nclass ConnectorWebsocket(Connector):\n \"\"\"A connector which allows websocket connections.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Websocket connector.\"))\n self.name = config.get(\"name\", \"websocket\")\n self.max_connections = self.config.get(\"max-connections\", 10)\n self.connection_timeout = self.config.get(\"connection-timeout\", 60)\n self.accepting_connections = True\n self.active_connections = {}\n self.available_connections = []\n self.bot_name = self.config.get(\"bot-name\", \"opsdroid\")\n self.authorization_token = self.config.get(\"token\")\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n self.accepting_connections = True\n\n self.opsdroid.web_server.web_app.router.add_get(\n \"/connector/websocket/{socket}\", self.websocket_handler\n )\n\n self.opsdroid.web_server.web_app.router.add_post(\n \"/connector/websocket\", self.new_websocket_handler\n )\n\n async def disconnect(self):\n \"\"\"Disconnect from current sessions.\"\"\"\n self.accepting_connections = False\n connections_to_close = self.active_connections.copy()\n for connection in connections_to_close:\n await connections_to_close[connection].close(\n code=WSCloseCode.GOING_AWAY, message=\"Server shutdown\"\n )\n\n async def new_websocket_handler(self, request):\n \"\"\"Handle for aiohttp creating websocket connections.\"\"\"\n await self.validate_request(request)\n if (\n len(self.active_connections) + len(self.available_connections)\n < self.max_connections\n and self.accepting_connections\n ):\n socket = {\"id\": str(uuid.uuid1()), \"date\": datetime.now()}\n self.available_connections.append(socket)\n return aiohttp.web.Response(\n text=json.dumps({\"socket\": socket[\"id\"]}), headers=HEADERS, status=200\n )\n return aiohttp.web.Response(\n text=json.dumps(\"No connections available\"), headers=HEADERS, status=429\n )\n\n async def websocket_handler(self, request):\n \"\"\"Handle for aiohttp handling websocket connections.\"\"\"\n socket = request.match_info.get(\"socket\")\n available = [\n item for item in self.available_connections if item[\"id\"] == socket\n ]\n if len(available) != 1:\n return aiohttp.web.Response(\n text=json.dumps(\"Please request a socket first\"),\n headers=HEADERS,\n status=400,\n )\n if (\n datetime.now() - available[0][\"date\"]\n ).total_seconds() > self.connection_timeout:\n self.available_connections.remove(available[0])\n return aiohttp.web.Response(\n text=json.dumps(\"Socket request timed out\"), headers=HEADERS, status=408\n )\n self.available_connections.remove(available[0])\n _LOGGER.debug(_(\"User connected to %s.\"), socket)\n\n websocket = aiohttp.web.WebSocketResponse()\n await websocket.prepare(request)\n\n self.active_connections[socket] = websocket\n async for msg in websocket:\n if msg.type == aiohttp.WSMsgType.TEXT:\n payload = WebsocketMessage.parse_payload(msg.data)\n message = Message(\n text=payload.message,\n user=payload.user,\n target=payload.socket,\n connector=self,\n )\n await self.opsdroid.parse(message)\n elif msg.type == aiohttp.WSMsgType.ERROR:\n _LOGGER.error(\n _(\"Websocket connection closed with exception %s.\"),\n websocket.exception(),\n )\n\n _LOGGER.info(_(\"websocket connection closed\"))\n self.active_connections.pop(socket, None)\n\n return websocket\n\n async def validate_request(self, request):\n \"\"\"Validate the request by looking at headers and the connector token.\n\n If the token does not exist in the header, but exists in the configuration,\n then we will simply return a Forbidden error.\n\n \"\"\"\n client_token = request.headers.get(\"Authorization\")\n if self.authorization_token and (\n client_token is None or client_token != self.authorization_token\n ):\n raise aiohttp.web.HTTPUnauthorized()\n return True\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\n\n Listening is handled by the aiohttp web server so\n we don't need to do anything here.\n\n \"\"\"\n\n @register_event(Message)\n async def send_message(self, message: Message):\n \"\"\"Respond with a message.\"\"\"\n try:\n if message.target is None:\n message.target = next(iter(self.active_connections))\n _LOGGER.debug(\n _(\"Responding with: '%s' in target %s\"), message.text, message.target\n )\n await self.active_connections[message.target].send_str(message.text)\n except KeyError:\n _LOGGER.error(_(\"No active socket for target %s\"), message.target)\n", "path": "opsdroid/connector/websocket/__init__.py"}]}
| 1,631 | 850 |
gh_patches_debug_33659
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-1511
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Standardize theme interface
Themes should have a static set of requirements that all themes should have. This really just means we need to provide more guidance on designing themes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/constants/teams.py`
Content:
```
1 from collections import namedtuple
2
3 TeamAttrs = namedtuple(
4 "TeamAttrs",
5 [
6 "id",
7 "oauth_id",
8 "name",
9 "email",
10 "secret",
11 "website",
12 "affiliation",
13 "country",
14 "bracket",
15 "hidden",
16 "banned",
17 "captain_id",
18 "created",
19 ],
20 )
21
```
Path: `CTFd/utils/initialization/__init__.py`
Content:
```
1 import datetime
2 import logging
3 import os
4 import sys
5
6 from flask import abort, redirect, render_template, request, session, url_for
7 from sqlalchemy.exc import IntegrityError, InvalidRequestError
8 from werkzeug.middleware.dispatcher import DispatcherMiddleware
9
10 from CTFd.cache import clear_user_recent_ips
11 from CTFd.exceptions import UserNotFoundException, UserTokenExpiredException
12 from CTFd.models import Tracking, db
13 from CTFd.utils import config, get_config, markdown
14 from CTFd.utils.config import (
15 can_send_mail,
16 ctf_logo,
17 ctf_name,
18 ctf_theme,
19 integrations,
20 is_setup,
21 )
22 from CTFd.utils.config.pages import get_pages
23 from CTFd.utils.countries import get_countries, lookup_country_code
24 from CTFd.utils.dates import isoformat, unix_time, unix_time_millis
25 from CTFd.utils.events import EventManager, RedisEventManager
26 from CTFd.utils.humanize.words import pluralize
27 from CTFd.utils.modes import generate_account_url, get_mode_as_word
28 from CTFd.utils.plugins import (
29 get_configurable_plugins,
30 get_registered_admin_scripts,
31 get_registered_admin_stylesheets,
32 get_registered_scripts,
33 get_registered_stylesheets,
34 )
35 from CTFd.utils.security.auth import login_user, logout_user, lookup_user_token
36 from CTFd.utils.security.csrf import generate_nonce
37 from CTFd.utils.user import (
38 authed,
39 get_current_team_attrs,
40 get_current_user_attrs,
41 get_current_user_recent_ips,
42 get_ip,
43 is_admin,
44 )
45
46
47 def init_template_filters(app):
48 app.jinja_env.filters["markdown"] = markdown
49 app.jinja_env.filters["unix_time"] = unix_time
50 app.jinja_env.filters["unix_time_millis"] = unix_time_millis
51 app.jinja_env.filters["isoformat"] = isoformat
52 app.jinja_env.filters["pluralize"] = pluralize
53
54
55 def init_template_globals(app):
56 from CTFd.constants.config import Configs
57 from CTFd.constants.plugins import Plugins
58 from CTFd.constants.sessions import Session
59 from CTFd.forms import Forms
60 from CTFd.utils.config.visibility import (
61 accounts_visible,
62 challenges_visible,
63 registration_visible,
64 scores_visible,
65 )
66
67 app.jinja_env.globals.update(config=config)
68 app.jinja_env.globals.update(get_pages=get_pages)
69 app.jinja_env.globals.update(can_send_mail=can_send_mail)
70 app.jinja_env.globals.update(get_ctf_name=ctf_name)
71 app.jinja_env.globals.update(get_ctf_logo=ctf_logo)
72 app.jinja_env.globals.update(get_ctf_theme=ctf_theme)
73 app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)
74 app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)
75 app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)
76 app.jinja_env.globals.update(
77 get_registered_admin_scripts=get_registered_admin_scripts
78 )
79 app.jinja_env.globals.update(
80 get_registered_admin_stylesheets=get_registered_admin_stylesheets
81 )
82 app.jinja_env.globals.update(get_config=get_config)
83 app.jinja_env.globals.update(generate_account_url=generate_account_url)
84 app.jinja_env.globals.update(get_countries=get_countries)
85 app.jinja_env.globals.update(lookup_country_code=lookup_country_code)
86 app.jinja_env.globals.update(accounts_visible=accounts_visible)
87 app.jinja_env.globals.update(challenges_visible=challenges_visible)
88 app.jinja_env.globals.update(registration_visible=registration_visible)
89 app.jinja_env.globals.update(scores_visible=scores_visible)
90 app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)
91 app.jinja_env.globals.update(integrations=integrations)
92 app.jinja_env.globals.update(authed=authed)
93 app.jinja_env.globals.update(is_admin=is_admin)
94 app.jinja_env.globals.update(get_current_user_attrs=get_current_user_attrs)
95 app.jinja_env.globals.update(get_current_team_attrs=get_current_team_attrs)
96 app.jinja_env.globals.update(get_ip=get_ip)
97 app.jinja_env.globals.update(Configs=Configs)
98 app.jinja_env.globals.update(Plugins=Plugins)
99 app.jinja_env.globals.update(Session=Session)
100 app.jinja_env.globals.update(Forms=Forms)
101
102
103 def init_logs(app):
104 logger_submissions = logging.getLogger("submissions")
105 logger_logins = logging.getLogger("logins")
106 logger_registrations = logging.getLogger("registrations")
107
108 logger_submissions.setLevel(logging.INFO)
109 logger_logins.setLevel(logging.INFO)
110 logger_registrations.setLevel(logging.INFO)
111
112 log_dir = app.config["LOG_FOLDER"]
113 if not os.path.exists(log_dir):
114 os.makedirs(log_dir)
115
116 logs = {
117 "submissions": os.path.join(log_dir, "submissions.log"),
118 "logins": os.path.join(log_dir, "logins.log"),
119 "registrations": os.path.join(log_dir, "registrations.log"),
120 }
121
122 try:
123 for log in logs.values():
124 if not os.path.exists(log):
125 open(log, "a").close()
126
127 submission_log = logging.handlers.RotatingFileHandler(
128 logs["submissions"], maxBytes=10485760, backupCount=5
129 )
130 login_log = logging.handlers.RotatingFileHandler(
131 logs["logins"], maxBytes=10485760, backupCount=5
132 )
133 registration_log = logging.handlers.RotatingFileHandler(
134 logs["registrations"], maxBytes=10485760, backupCount=5
135 )
136
137 logger_submissions.addHandler(submission_log)
138 logger_logins.addHandler(login_log)
139 logger_registrations.addHandler(registration_log)
140 except IOError:
141 pass
142
143 stdout = logging.StreamHandler(stream=sys.stdout)
144
145 logger_submissions.addHandler(stdout)
146 logger_logins.addHandler(stdout)
147 logger_registrations.addHandler(stdout)
148
149 logger_submissions.propagate = 0
150 logger_logins.propagate = 0
151 logger_registrations.propagate = 0
152
153
154 def init_events(app):
155 if app.config.get("CACHE_TYPE") == "redis":
156 app.events_manager = RedisEventManager()
157 elif app.config.get("CACHE_TYPE") == "filesystem":
158 app.events_manager = EventManager()
159 else:
160 app.events_manager = EventManager()
161
162
163 def init_request_processors(app):
164 @app.url_defaults
165 def inject_theme(endpoint, values):
166 if "theme" not in values and app.url_map.is_endpoint_expecting(
167 endpoint, "theme"
168 ):
169 values["theme"] = ctf_theme()
170
171 @app.before_request
172 def needs_setup():
173 if is_setup() is False:
174 if request.endpoint in (
175 "views.setup",
176 "views.integrations",
177 "views.themes",
178 ):
179 return
180 else:
181 return redirect(url_for("views.setup"))
182
183 @app.before_request
184 def tracker():
185 if request.endpoint == "views.themes":
186 return
187
188 if authed():
189 user_ips = get_current_user_recent_ips()
190 ip = get_ip()
191
192 track = None
193 if (ip not in user_ips) or (request.method != "GET"):
194 track = Tracking.query.filter_by(
195 ip=get_ip(), user_id=session["id"]
196 ).first()
197
198 if track:
199 track.date = datetime.datetime.utcnow()
200 else:
201 track = Tracking(ip=get_ip(), user_id=session["id"])
202 db.session.add(track)
203
204 if track:
205 try:
206 db.session.commit()
207 except (InvalidRequestError, IntegrityError):
208 db.session.rollback()
209 db.session.close()
210 logout_user()
211 else:
212 clear_user_recent_ips(user_id=session["id"])
213
214 @app.before_request
215 def banned():
216 if request.endpoint == "views.themes":
217 return
218
219 if authed():
220 user = get_current_user_attrs()
221 team = get_current_team_attrs()
222
223 if user and user.banned:
224 return (
225 render_template(
226 "errors/403.html", error="You have been banned from this CTF"
227 ),
228 403,
229 )
230
231 if team and team.banned:
232 return (
233 render_template(
234 "errors/403.html",
235 error="Your team has been banned from this CTF",
236 ),
237 403,
238 )
239
240 @app.before_request
241 def tokens():
242 token = request.headers.get("Authorization")
243 if token and request.content_type == "application/json":
244 try:
245 token_type, token = token.split(" ", 1)
246 user = lookup_user_token(token)
247 except UserNotFoundException:
248 abort(401)
249 except UserTokenExpiredException:
250 abort(401)
251 except Exception:
252 abort(401)
253 else:
254 login_user(user)
255
256 @app.before_request
257 def csrf():
258 try:
259 func = app.view_functions[request.endpoint]
260 except KeyError:
261 abort(404)
262 if hasattr(func, "_bypass_csrf"):
263 return
264 if request.headers.get("Authorization"):
265 return
266 if not session.get("nonce"):
267 session["nonce"] = generate_nonce()
268 if request.method not in ("GET", "HEAD", "OPTIONS", "TRACE"):
269 if request.content_type == "application/json":
270 if session["nonce"] != request.headers.get("CSRF-Token"):
271 abort(403)
272 if request.content_type != "application/json":
273 if session["nonce"] != request.form.get("nonce"):
274 abort(403)
275
276 application_root = app.config.get("APPLICATION_ROOT")
277 if application_root != "/":
278
279 @app.before_request
280 def force_subdirectory_redirect():
281 if request.path.startswith(application_root) is False:
282 return redirect(
283 application_root + request.script_root + request.full_path
284 )
285
286 app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})
287
```
Path: `CTFd/constants/users.py`
Content:
```
1 from collections import namedtuple
2
3 UserAttrs = namedtuple(
4 "UserAttrs",
5 [
6 "id",
7 "oauth_id",
8 "name",
9 "email",
10 "type",
11 "secret",
12 "website",
13 "affiliation",
14 "country",
15 "bracket",
16 "hidden",
17 "banned",
18 "verified",
19 "team_id",
20 "created",
21 ],
22 )
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/CTFd/constants/teams.py b/CTFd/constants/teams.py
--- a/CTFd/constants/teams.py
+++ b/CTFd/constants/teams.py
@@ -18,3 +18,32 @@
"created",
],
)
+
+
+class _TeamAttrsWrapper:
+ def __getattr__(self, attr):
+ from CTFd.utils.user import get_current_team_attrs
+
+ attrs = get_current_team_attrs()
+ return getattr(attrs, attr, None)
+
+ @property
+ def place(self):
+ from CTFd.utils.user import get_current_team
+
+ team = get_current_team()
+ if team:
+ return team.place
+ return None
+
+ @property
+ def score(self):
+ from CTFd.utils.user import get_current_team
+
+ team = get_current_team()
+ if team:
+ return team.score
+ return None
+
+
+Team = _TeamAttrsWrapper()
diff --git a/CTFd/constants/users.py b/CTFd/constants/users.py
--- a/CTFd/constants/users.py
+++ b/CTFd/constants/users.py
@@ -20,3 +20,32 @@
"created",
],
)
+
+
+class _UserAttrsWrapper:
+ def __getattr__(self, attr):
+ from CTFd.utils.user import get_current_user_attrs
+
+ attrs = get_current_user_attrs()
+ return getattr(attrs, attr, None)
+
+ @property
+ def place(self):
+ from CTFd.utils.user import get_current_user
+
+ user = get_current_user()
+ if user:
+ return user.place
+ return None
+
+ @property
+ def score(self):
+ from CTFd.utils.user import get_current_user
+
+ user = get_current_user()
+ if user:
+ return user.score
+ return None
+
+
+User = _UserAttrsWrapper()
diff --git a/CTFd/utils/initialization/__init__.py b/CTFd/utils/initialization/__init__.py
--- a/CTFd/utils/initialization/__init__.py
+++ b/CTFd/utils/initialization/__init__.py
@@ -56,6 +56,8 @@
from CTFd.constants.config import Configs
from CTFd.constants.plugins import Plugins
from CTFd.constants.sessions import Session
+ from CTFd.constants.users import User
+ from CTFd.constants.teams import Team
from CTFd.forms import Forms
from CTFd.utils.config.visibility import (
accounts_visible,
@@ -98,6 +100,8 @@
app.jinja_env.globals.update(Plugins=Plugins)
app.jinja_env.globals.update(Session=Session)
app.jinja_env.globals.update(Forms=Forms)
+ app.jinja_env.globals.update(User=User)
+ app.jinja_env.globals.update(Team=Team)
def init_logs(app):
|
{"golden_diff": "diff --git a/CTFd/constants/teams.py b/CTFd/constants/teams.py\n--- a/CTFd/constants/teams.py\n+++ b/CTFd/constants/teams.py\n@@ -18,3 +18,32 @@\n \"created\",\n ],\n )\n+\n+\n+class _TeamAttrsWrapper:\n+ def __getattr__(self, attr):\n+ from CTFd.utils.user import get_current_team_attrs\n+\n+ attrs = get_current_team_attrs()\n+ return getattr(attrs, attr, None)\n+\n+ @property\n+ def place(self):\n+ from CTFd.utils.user import get_current_team\n+\n+ team = get_current_team()\n+ if team:\n+ return team.place\n+ return None\n+\n+ @property\n+ def score(self):\n+ from CTFd.utils.user import get_current_team\n+\n+ team = get_current_team()\n+ if team:\n+ return team.score\n+ return None\n+\n+\n+Team = _TeamAttrsWrapper()\ndiff --git a/CTFd/constants/users.py b/CTFd/constants/users.py\n--- a/CTFd/constants/users.py\n+++ b/CTFd/constants/users.py\n@@ -20,3 +20,32 @@\n \"created\",\n ],\n )\n+\n+\n+class _UserAttrsWrapper:\n+ def __getattr__(self, attr):\n+ from CTFd.utils.user import get_current_user_attrs\n+\n+ attrs = get_current_user_attrs()\n+ return getattr(attrs, attr, None)\n+\n+ @property\n+ def place(self):\n+ from CTFd.utils.user import get_current_user\n+\n+ user = get_current_user()\n+ if user:\n+ return user.place\n+ return None\n+\n+ @property\n+ def score(self):\n+ from CTFd.utils.user import get_current_user\n+\n+ user = get_current_user()\n+ if user:\n+ return user.score\n+ return None\n+\n+\n+User = _UserAttrsWrapper()\ndiff --git a/CTFd/utils/initialization/__init__.py b/CTFd/utils/initialization/__init__.py\n--- a/CTFd/utils/initialization/__init__.py\n+++ b/CTFd/utils/initialization/__init__.py\n@@ -56,6 +56,8 @@\n from CTFd.constants.config import Configs\n from CTFd.constants.plugins import Plugins\n from CTFd.constants.sessions import Session\n+ from CTFd.constants.users import User\n+ from CTFd.constants.teams import Team\n from CTFd.forms import Forms\n from CTFd.utils.config.visibility import (\n accounts_visible,\n@@ -98,6 +100,8 @@\n app.jinja_env.globals.update(Plugins=Plugins)\n app.jinja_env.globals.update(Session=Session)\n app.jinja_env.globals.update(Forms=Forms)\n+ app.jinja_env.globals.update(User=User)\n+ app.jinja_env.globals.update(Team=Team)\n \n \n def init_logs(app):\n", "issue": "Standardize theme interface\nThemes should have a static set of requirements that all themes should have. This really just means we need to provide more guidance on designing themes. \n", "before_files": [{"content": "from collections import namedtuple\n\nTeamAttrs = namedtuple(\n \"TeamAttrs\",\n [\n \"id\",\n \"oauth_id\",\n \"name\",\n \"email\",\n \"secret\",\n \"website\",\n \"affiliation\",\n \"country\",\n \"bracket\",\n \"hidden\",\n \"banned\",\n \"captain_id\",\n \"created\",\n ],\n)\n", "path": "CTFd/constants/teams.py"}, {"content": "import datetime\nimport logging\nimport os\nimport sys\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\n\nfrom CTFd.cache import clear_user_recent_ips\nfrom CTFd.exceptions import UserNotFoundException, UserTokenExpiredException\nfrom CTFd.models import Tracking, db\nfrom CTFd.utils import config, get_config, markdown\nfrom CTFd.utils.config import (\n can_send_mail,\n ctf_logo,\n ctf_name,\n ctf_theme,\n integrations,\n is_setup,\n)\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.countries import get_countries, lookup_country_code\nfrom CTFd.utils.dates import isoformat, unix_time, unix_time_millis\nfrom CTFd.utils.events import EventManager, RedisEventManager\nfrom CTFd.utils.humanize.words import pluralize\nfrom CTFd.utils.modes import generate_account_url, get_mode_as_word\nfrom CTFd.utils.plugins import (\n get_configurable_plugins,\n get_registered_admin_scripts,\n get_registered_admin_stylesheets,\n get_registered_scripts,\n get_registered_stylesheets,\n)\nfrom CTFd.utils.security.auth import login_user, logout_user, lookup_user_token\nfrom CTFd.utils.security.csrf import generate_nonce\nfrom CTFd.utils.user import (\n authed,\n get_current_team_attrs,\n get_current_user_attrs,\n get_current_user_recent_ips,\n get_ip,\n is_admin,\n)\n\n\ndef init_template_filters(app):\n app.jinja_env.filters[\"markdown\"] = markdown\n app.jinja_env.filters[\"unix_time\"] = unix_time\n app.jinja_env.filters[\"unix_time_millis\"] = unix_time_millis\n app.jinja_env.filters[\"isoformat\"] = isoformat\n app.jinja_env.filters[\"pluralize\"] = pluralize\n\n\ndef init_template_globals(app):\n from CTFd.constants.config import Configs\n from CTFd.constants.plugins import Plugins\n from CTFd.constants.sessions import Session\n from CTFd.forms import Forms\n from CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n registration_visible,\n scores_visible,\n )\n\n app.jinja_env.globals.update(config=config)\n app.jinja_env.globals.update(get_pages=get_pages)\n app.jinja_env.globals.update(can_send_mail=can_send_mail)\n app.jinja_env.globals.update(get_ctf_name=ctf_name)\n app.jinja_env.globals.update(get_ctf_logo=ctf_logo)\n app.jinja_env.globals.update(get_ctf_theme=ctf_theme)\n app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)\n app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)\n app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)\n app.jinja_env.globals.update(\n get_registered_admin_scripts=get_registered_admin_scripts\n )\n app.jinja_env.globals.update(\n get_registered_admin_stylesheets=get_registered_admin_stylesheets\n )\n app.jinja_env.globals.update(get_config=get_config)\n app.jinja_env.globals.update(generate_account_url=generate_account_url)\n app.jinja_env.globals.update(get_countries=get_countries)\n app.jinja_env.globals.update(lookup_country_code=lookup_country_code)\n app.jinja_env.globals.update(accounts_visible=accounts_visible)\n app.jinja_env.globals.update(challenges_visible=challenges_visible)\n app.jinja_env.globals.update(registration_visible=registration_visible)\n app.jinja_env.globals.update(scores_visible=scores_visible)\n app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)\n app.jinja_env.globals.update(integrations=integrations)\n app.jinja_env.globals.update(authed=authed)\n app.jinja_env.globals.update(is_admin=is_admin)\n app.jinja_env.globals.update(get_current_user_attrs=get_current_user_attrs)\n app.jinja_env.globals.update(get_current_team_attrs=get_current_team_attrs)\n app.jinja_env.globals.update(get_ip=get_ip)\n app.jinja_env.globals.update(Configs=Configs)\n app.jinja_env.globals.update(Plugins=Plugins)\n app.jinja_env.globals.update(Session=Session)\n app.jinja_env.globals.update(Forms=Forms)\n\n\ndef init_logs(app):\n logger_submissions = logging.getLogger(\"submissions\")\n logger_logins = logging.getLogger(\"logins\")\n logger_registrations = logging.getLogger(\"registrations\")\n\n logger_submissions.setLevel(logging.INFO)\n logger_logins.setLevel(logging.INFO)\n logger_registrations.setLevel(logging.INFO)\n\n log_dir = app.config[\"LOG_FOLDER\"]\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logs = {\n \"submissions\": os.path.join(log_dir, \"submissions.log\"),\n \"logins\": os.path.join(log_dir, \"logins.log\"),\n \"registrations\": os.path.join(log_dir, \"registrations.log\"),\n }\n\n try:\n for log in logs.values():\n if not os.path.exists(log):\n open(log, \"a\").close()\n\n submission_log = logging.handlers.RotatingFileHandler(\n logs[\"submissions\"], maxBytes=10485760, backupCount=5\n )\n login_log = logging.handlers.RotatingFileHandler(\n logs[\"logins\"], maxBytes=10485760, backupCount=5\n )\n registration_log = logging.handlers.RotatingFileHandler(\n logs[\"registrations\"], maxBytes=10485760, backupCount=5\n )\n\n logger_submissions.addHandler(submission_log)\n logger_logins.addHandler(login_log)\n logger_registrations.addHandler(registration_log)\n except IOError:\n pass\n\n stdout = logging.StreamHandler(stream=sys.stdout)\n\n logger_submissions.addHandler(stdout)\n logger_logins.addHandler(stdout)\n logger_registrations.addHandler(stdout)\n\n logger_submissions.propagate = 0\n logger_logins.propagate = 0\n logger_registrations.propagate = 0\n\n\ndef init_events(app):\n if app.config.get(\"CACHE_TYPE\") == \"redis\":\n app.events_manager = RedisEventManager()\n elif app.config.get(\"CACHE_TYPE\") == \"filesystem\":\n app.events_manager = EventManager()\n else:\n app.events_manager = EventManager()\n\n\ndef init_request_processors(app):\n @app.url_defaults\n def inject_theme(endpoint, values):\n if \"theme\" not in values and app.url_map.is_endpoint_expecting(\n endpoint, \"theme\"\n ):\n values[\"theme\"] = ctf_theme()\n\n @app.before_request\n def needs_setup():\n if is_setup() is False:\n if request.endpoint in (\n \"views.setup\",\n \"views.integrations\",\n \"views.themes\",\n ):\n return\n else:\n return redirect(url_for(\"views.setup\"))\n\n @app.before_request\n def tracker():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user_ips = get_current_user_recent_ips()\n ip = get_ip()\n\n track = None\n if (ip not in user_ips) or (request.method != \"GET\"):\n track = Tracking.query.filter_by(\n ip=get_ip(), user_id=session[\"id\"]\n ).first()\n\n if track:\n track.date = datetime.datetime.utcnow()\n else:\n track = Tracking(ip=get_ip(), user_id=session[\"id\"])\n db.session.add(track)\n\n if track:\n try:\n db.session.commit()\n except (InvalidRequestError, IntegrityError):\n db.session.rollback()\n db.session.close()\n logout_user()\n else:\n clear_user_recent_ips(user_id=session[\"id\"])\n\n @app.before_request\n def banned():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user = get_current_user_attrs()\n team = get_current_team_attrs()\n\n if user and user.banned:\n return (\n render_template(\n \"errors/403.html\", error=\"You have been banned from this CTF\"\n ),\n 403,\n )\n\n if team and team.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"Your team has been banned from this CTF\",\n ),\n 403,\n )\n\n @app.before_request\n def tokens():\n token = request.headers.get(\"Authorization\")\n if token and request.content_type == \"application/json\":\n try:\n token_type, token = token.split(\" \", 1)\n user = lookup_user_token(token)\n except UserNotFoundException:\n abort(401)\n except UserTokenExpiredException:\n abort(401)\n except Exception:\n abort(401)\n else:\n login_user(user)\n\n @app.before_request\n def csrf():\n try:\n func = app.view_functions[request.endpoint]\n except KeyError:\n abort(404)\n if hasattr(func, \"_bypass_csrf\"):\n return\n if request.headers.get(\"Authorization\"):\n return\n if not session.get(\"nonce\"):\n session[\"nonce\"] = generate_nonce()\n if request.method not in (\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"):\n if request.content_type == \"application/json\":\n if session[\"nonce\"] != request.headers.get(\"CSRF-Token\"):\n abort(403)\n if request.content_type != \"application/json\":\n if session[\"nonce\"] != request.form.get(\"nonce\"):\n abort(403)\n\n application_root = app.config.get(\"APPLICATION_ROOT\")\n if application_root != \"/\":\n\n @app.before_request\n def force_subdirectory_redirect():\n if request.path.startswith(application_root) is False:\n return redirect(\n application_root + request.script_root + request.full_path\n )\n\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})\n", "path": "CTFd/utils/initialization/__init__.py"}, {"content": "from collections import namedtuple\n\nUserAttrs = namedtuple(\n \"UserAttrs\",\n [\n \"id\",\n \"oauth_id\",\n \"name\",\n \"email\",\n \"type\",\n \"secret\",\n \"website\",\n \"affiliation\",\n \"country\",\n \"bracket\",\n \"hidden\",\n \"banned\",\n \"verified\",\n \"team_id\",\n \"created\",\n ],\n)\n", "path": "CTFd/constants/users.py"}], "after_files": [{"content": "from collections import namedtuple\n\nTeamAttrs = namedtuple(\n \"TeamAttrs\",\n [\n \"id\",\n \"oauth_id\",\n \"name\",\n \"email\",\n \"secret\",\n \"website\",\n \"affiliation\",\n \"country\",\n \"bracket\",\n \"hidden\",\n \"banned\",\n \"captain_id\",\n \"created\",\n ],\n)\n\n\nclass _TeamAttrsWrapper:\n def __getattr__(self, attr):\n from CTFd.utils.user import get_current_team_attrs\n\n attrs = get_current_team_attrs()\n return getattr(attrs, attr, None)\n\n @property\n def place(self):\n from CTFd.utils.user import get_current_team\n\n team = get_current_team()\n if team:\n return team.place\n return None\n\n @property\n def score(self):\n from CTFd.utils.user import get_current_team\n\n team = get_current_team()\n if team:\n return team.score\n return None\n\n\nTeam = _TeamAttrsWrapper()\n", "path": "CTFd/constants/teams.py"}, {"content": "import datetime\nimport logging\nimport os\nimport sys\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\n\nfrom CTFd.cache import clear_user_recent_ips\nfrom CTFd.exceptions import UserNotFoundException, UserTokenExpiredException\nfrom CTFd.models import Tracking, db\nfrom CTFd.utils import config, get_config, markdown\nfrom CTFd.utils.config import (\n can_send_mail,\n ctf_logo,\n ctf_name,\n ctf_theme,\n integrations,\n is_setup,\n)\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.countries import get_countries, lookup_country_code\nfrom CTFd.utils.dates import isoformat, unix_time, unix_time_millis\nfrom CTFd.utils.events import EventManager, RedisEventManager\nfrom CTFd.utils.humanize.words import pluralize\nfrom CTFd.utils.modes import generate_account_url, get_mode_as_word\nfrom CTFd.utils.plugins import (\n get_configurable_plugins,\n get_registered_admin_scripts,\n get_registered_admin_stylesheets,\n get_registered_scripts,\n get_registered_stylesheets,\n)\nfrom CTFd.utils.security.auth import login_user, logout_user, lookup_user_token\nfrom CTFd.utils.security.csrf import generate_nonce\nfrom CTFd.utils.user import (\n authed,\n get_current_team_attrs,\n get_current_user_attrs,\n get_current_user_recent_ips,\n get_ip,\n is_admin,\n)\n\n\ndef init_template_filters(app):\n app.jinja_env.filters[\"markdown\"] = markdown\n app.jinja_env.filters[\"unix_time\"] = unix_time\n app.jinja_env.filters[\"unix_time_millis\"] = unix_time_millis\n app.jinja_env.filters[\"isoformat\"] = isoformat\n app.jinja_env.filters[\"pluralize\"] = pluralize\n\n\ndef init_template_globals(app):\n from CTFd.constants.config import Configs\n from CTFd.constants.plugins import Plugins\n from CTFd.constants.sessions import Session\n from CTFd.constants.users import User\n from CTFd.constants.teams import Team\n from CTFd.forms import Forms\n from CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n registration_visible,\n scores_visible,\n )\n\n app.jinja_env.globals.update(config=config)\n app.jinja_env.globals.update(get_pages=get_pages)\n app.jinja_env.globals.update(can_send_mail=can_send_mail)\n app.jinja_env.globals.update(get_ctf_name=ctf_name)\n app.jinja_env.globals.update(get_ctf_logo=ctf_logo)\n app.jinja_env.globals.update(get_ctf_theme=ctf_theme)\n app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)\n app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)\n app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)\n app.jinja_env.globals.update(\n get_registered_admin_scripts=get_registered_admin_scripts\n )\n app.jinja_env.globals.update(\n get_registered_admin_stylesheets=get_registered_admin_stylesheets\n )\n app.jinja_env.globals.update(get_config=get_config)\n app.jinja_env.globals.update(generate_account_url=generate_account_url)\n app.jinja_env.globals.update(get_countries=get_countries)\n app.jinja_env.globals.update(lookup_country_code=lookup_country_code)\n app.jinja_env.globals.update(accounts_visible=accounts_visible)\n app.jinja_env.globals.update(challenges_visible=challenges_visible)\n app.jinja_env.globals.update(registration_visible=registration_visible)\n app.jinja_env.globals.update(scores_visible=scores_visible)\n app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)\n app.jinja_env.globals.update(integrations=integrations)\n app.jinja_env.globals.update(authed=authed)\n app.jinja_env.globals.update(is_admin=is_admin)\n app.jinja_env.globals.update(get_current_user_attrs=get_current_user_attrs)\n app.jinja_env.globals.update(get_current_team_attrs=get_current_team_attrs)\n app.jinja_env.globals.update(get_ip=get_ip)\n app.jinja_env.globals.update(Configs=Configs)\n app.jinja_env.globals.update(Plugins=Plugins)\n app.jinja_env.globals.update(Session=Session)\n app.jinja_env.globals.update(Forms=Forms)\n app.jinja_env.globals.update(User=User)\n app.jinja_env.globals.update(Team=Team)\n\n\ndef init_logs(app):\n logger_submissions = logging.getLogger(\"submissions\")\n logger_logins = logging.getLogger(\"logins\")\n logger_registrations = logging.getLogger(\"registrations\")\n\n logger_submissions.setLevel(logging.INFO)\n logger_logins.setLevel(logging.INFO)\n logger_registrations.setLevel(logging.INFO)\n\n log_dir = app.config[\"LOG_FOLDER\"]\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logs = {\n \"submissions\": os.path.join(log_dir, \"submissions.log\"),\n \"logins\": os.path.join(log_dir, \"logins.log\"),\n \"registrations\": os.path.join(log_dir, \"registrations.log\"),\n }\n\n try:\n for log in logs.values():\n if not os.path.exists(log):\n open(log, \"a\").close()\n\n submission_log = logging.handlers.RotatingFileHandler(\n logs[\"submissions\"], maxBytes=10485760, backupCount=5\n )\n login_log = logging.handlers.RotatingFileHandler(\n logs[\"logins\"], maxBytes=10485760, backupCount=5\n )\n registration_log = logging.handlers.RotatingFileHandler(\n logs[\"registrations\"], maxBytes=10485760, backupCount=5\n )\n\n logger_submissions.addHandler(submission_log)\n logger_logins.addHandler(login_log)\n logger_registrations.addHandler(registration_log)\n except IOError:\n pass\n\n stdout = logging.StreamHandler(stream=sys.stdout)\n\n logger_submissions.addHandler(stdout)\n logger_logins.addHandler(stdout)\n logger_registrations.addHandler(stdout)\n\n logger_submissions.propagate = 0\n logger_logins.propagate = 0\n logger_registrations.propagate = 0\n\n\ndef init_events(app):\n if app.config.get(\"CACHE_TYPE\") == \"redis\":\n app.events_manager = RedisEventManager()\n elif app.config.get(\"CACHE_TYPE\") == \"filesystem\":\n app.events_manager = EventManager()\n else:\n app.events_manager = EventManager()\n\n\ndef init_request_processors(app):\n @app.url_defaults\n def inject_theme(endpoint, values):\n if \"theme\" not in values and app.url_map.is_endpoint_expecting(\n endpoint, \"theme\"\n ):\n values[\"theme\"] = ctf_theme()\n\n @app.before_request\n def needs_setup():\n if is_setup() is False:\n if request.endpoint in (\n \"views.setup\",\n \"views.integrations\",\n \"views.themes\",\n ):\n return\n else:\n return redirect(url_for(\"views.setup\"))\n\n @app.before_request\n def tracker():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user_ips = get_current_user_recent_ips()\n ip = get_ip()\n\n track = None\n if (ip not in user_ips) or (request.method != \"GET\"):\n track = Tracking.query.filter_by(\n ip=get_ip(), user_id=session[\"id\"]\n ).first()\n\n if track:\n track.date = datetime.datetime.utcnow()\n else:\n track = Tracking(ip=get_ip(), user_id=session[\"id\"])\n db.session.add(track)\n\n if track:\n try:\n db.session.commit()\n except (InvalidRequestError, IntegrityError):\n db.session.rollback()\n db.session.close()\n logout_user()\n else:\n clear_user_recent_ips(user_id=session[\"id\"])\n\n @app.before_request\n def banned():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user = get_current_user_attrs()\n team = get_current_team_attrs()\n\n if user and user.banned:\n return (\n render_template(\n \"errors/403.html\", error=\"You have been banned from this CTF\"\n ),\n 403,\n )\n\n if team and team.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"Your team has been banned from this CTF\",\n ),\n 403,\n )\n\n @app.before_request\n def tokens():\n token = request.headers.get(\"Authorization\")\n if token and request.content_type == \"application/json\":\n try:\n token_type, token = token.split(\" \", 1)\n user = lookup_user_token(token)\n except UserNotFoundException:\n abort(401)\n except UserTokenExpiredException:\n abort(401)\n except Exception:\n abort(401)\n else:\n login_user(user)\n\n @app.before_request\n def csrf():\n try:\n func = app.view_functions[request.endpoint]\n except KeyError:\n abort(404)\n if hasattr(func, \"_bypass_csrf\"):\n return\n if request.headers.get(\"Authorization\"):\n return\n if not session.get(\"nonce\"):\n session[\"nonce\"] = generate_nonce()\n if request.method not in (\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"):\n if request.content_type == \"application/json\":\n if session[\"nonce\"] != request.headers.get(\"CSRF-Token\"):\n abort(403)\n if request.content_type != \"application/json\":\n if session[\"nonce\"] != request.form.get(\"nonce\"):\n abort(403)\n\n application_root = app.config.get(\"APPLICATION_ROOT\")\n if application_root != \"/\":\n\n @app.before_request\n def force_subdirectory_redirect():\n if request.path.startswith(application_root) is False:\n return redirect(\n application_root + request.script_root + request.full_path\n )\n\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})\n", "path": "CTFd/utils/initialization/__init__.py"}, {"content": "from collections import namedtuple\n\nUserAttrs = namedtuple(\n \"UserAttrs\",\n [\n \"id\",\n \"oauth_id\",\n \"name\",\n \"email\",\n \"type\",\n \"secret\",\n \"website\",\n \"affiliation\",\n \"country\",\n \"bracket\",\n \"hidden\",\n \"banned\",\n \"verified\",\n \"team_id\",\n \"created\",\n ],\n)\n\n\nclass _UserAttrsWrapper:\n def __getattr__(self, attr):\n from CTFd.utils.user import get_current_user_attrs\n\n attrs = get_current_user_attrs()\n return getattr(attrs, attr, None)\n\n @property\n def place(self):\n from CTFd.utils.user import get_current_user\n\n user = get_current_user()\n if user:\n return user.place\n return None\n\n @property\n def score(self):\n from CTFd.utils.user import get_current_user\n\n user = get_current_user()\n if user:\n return user.score\n return None\n\n\nUser = _UserAttrsWrapper()\n", "path": "CTFd/constants/users.py"}]}
| 3,494 | 670 |
gh_patches_debug_30899
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1797
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs: DTO Factory exclude fields in nested complex types
### Summary
Need to document procedure for excluding fields from nested complex types, e.g.,
```py
class Model1:
a: int
b: str
class Model2:
c: int
d: str
class Model3:
e: Union[Model1, Model2]
config = DTOConfig(exclude={"e.0.a", "e.1.c"})
...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/examples/data_transfer_objects/factory/excluding_fields.py`
Content:
```
1 from datetime import datetime
2 from uuid import UUID
3
4 from sqlalchemy import ForeignKey
5 from sqlalchemy.orm import Mapped, mapped_column, relationship
6 from typing_extensions import Annotated
7
8 from litestar import Litestar, post
9 from litestar.contrib.sqlalchemy.dto import SQLAlchemyDTO
10 from litestar.dto.factory import DTOConfig, dto_field
11
12 from .my_lib import Base
13
14
15 class Address(Base):
16 street: Mapped[str]
17 city: Mapped[str]
18 state: Mapped[str]
19 zip: Mapped[str]
20
21
22 class User(Base):
23 name: Mapped[str]
24 password: Mapped[str] = mapped_column(info=dto_field("private"))
25 created_at: Mapped[datetime] = mapped_column(info=dto_field("read-only"))
26 address_id: Mapped[UUID] = mapped_column(ForeignKey("address.id"), info=dto_field("private"))
27 address: Mapped[Address] = relationship(info=dto_field("read-only"))
28
29
30 UserDTO = SQLAlchemyDTO[User]
31 config = DTOConfig(exclude={"id", "address.id", "address.street"})
32 ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]
33
34
35 @post("/users", dto=UserDTO, return_dto=ReadUserDTO, sync_to_thread=False)
36 def create_user(data: User) -> User:
37 data.created_at = datetime.min
38 data.address = Address(street="123 Main St", city="Anytown", state="NY", zip="12345")
39 return data
40
41
42 app = Litestar(route_handlers=[create_user])
43
44 # run: /users -H "Content-Type: application/json" -d '{"name":"Litestar User","password":"xyz","created_at":"2023-04-24T00:00:00Z"}'
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/examples/data_transfer_objects/factory/excluding_fields.py b/docs/examples/data_transfer_objects/factory/excluding_fields.py
--- a/docs/examples/data_transfer_objects/factory/excluding_fields.py
+++ b/docs/examples/data_transfer_objects/factory/excluding_fields.py
@@ -1,4 +1,5 @@
from datetime import datetime
+from typing import List
from uuid import UUID
from sqlalchemy import ForeignKey
@@ -19,16 +20,30 @@
zip: Mapped[str]
+class Pets(Base):
+ name: Mapped[str]
+ user_id: Mapped[UUID] = mapped_column(ForeignKey("user.id"))
+
+
class User(Base):
name: Mapped[str]
password: Mapped[str] = mapped_column(info=dto_field("private"))
created_at: Mapped[datetime] = mapped_column(info=dto_field("read-only"))
address_id: Mapped[UUID] = mapped_column(ForeignKey("address.id"), info=dto_field("private"))
address: Mapped[Address] = relationship(info=dto_field("read-only"))
+ pets: Mapped[List[Pets]] = relationship(info=dto_field("read-only"))
UserDTO = SQLAlchemyDTO[User]
-config = DTOConfig(exclude={"id", "address.id", "address.street"})
+config = DTOConfig(
+ exclude={
+ "id",
+ "address.id",
+ "address.street",
+ "pets.0.id",
+ "pets.0.user_id",
+ }
+)
ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]
@@ -36,6 +51,7 @@
def create_user(data: User) -> User:
data.created_at = datetime.min
data.address = Address(street="123 Main St", city="Anytown", state="NY", zip="12345")
+ data.pets = [Pets(id=1, name="Fido"), Pets(id=2, name="Spot")]
return data
|
{"golden_diff": "diff --git a/docs/examples/data_transfer_objects/factory/excluding_fields.py b/docs/examples/data_transfer_objects/factory/excluding_fields.py\n--- a/docs/examples/data_transfer_objects/factory/excluding_fields.py\n+++ b/docs/examples/data_transfer_objects/factory/excluding_fields.py\n@@ -1,4 +1,5 @@\n from datetime import datetime\n+from typing import List\n from uuid import UUID\n \n from sqlalchemy import ForeignKey\n@@ -19,16 +20,30 @@\n zip: Mapped[str]\n \n \n+class Pets(Base):\n+ name: Mapped[str]\n+ user_id: Mapped[UUID] = mapped_column(ForeignKey(\"user.id\"))\n+\n+\n class User(Base):\n name: Mapped[str]\n password: Mapped[str] = mapped_column(info=dto_field(\"private\"))\n created_at: Mapped[datetime] = mapped_column(info=dto_field(\"read-only\"))\n address_id: Mapped[UUID] = mapped_column(ForeignKey(\"address.id\"), info=dto_field(\"private\"))\n address: Mapped[Address] = relationship(info=dto_field(\"read-only\"))\n+ pets: Mapped[List[Pets]] = relationship(info=dto_field(\"read-only\"))\n \n \n UserDTO = SQLAlchemyDTO[User]\n-config = DTOConfig(exclude={\"id\", \"address.id\", \"address.street\"})\n+config = DTOConfig(\n+ exclude={\n+ \"id\",\n+ \"address.id\",\n+ \"address.street\",\n+ \"pets.0.id\",\n+ \"pets.0.user_id\",\n+ }\n+)\n ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]\n \n \n@@ -36,6 +51,7 @@\n def create_user(data: User) -> User:\n data.created_at = datetime.min\n data.address = Address(street=\"123 Main St\", city=\"Anytown\", state=\"NY\", zip=\"12345\")\n+ data.pets = [Pets(id=1, name=\"Fido\"), Pets(id=2, name=\"Spot\")]\n return data\n", "issue": "Docs: DTO Factory exclude fields in nested complex types\n### Summary\n\nNeed to document procedure for excluding fields from nested complex types, e.g.,\r\n\r\n```py\r\nclass Model1:\r\n a: int\r\n b: str\r\n\r\nclass Model2:\r\n c: int\r\n d: str\r\n\r\nclass Model3:\r\n e: Union[Model1, Model2]\r\n\r\nconfig = DTOConfig(exclude={\"e.0.a\", \"e.1.c\"})\r\n...\r\n```\n", "before_files": [{"content": "from datetime import datetime\nfrom uuid import UUID\n\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.orm import Mapped, mapped_column, relationship\nfrom typing_extensions import Annotated\n\nfrom litestar import Litestar, post\nfrom litestar.contrib.sqlalchemy.dto import SQLAlchemyDTO\nfrom litestar.dto.factory import DTOConfig, dto_field\n\nfrom .my_lib import Base\n\n\nclass Address(Base):\n street: Mapped[str]\n city: Mapped[str]\n state: Mapped[str]\n zip: Mapped[str]\n\n\nclass User(Base):\n name: Mapped[str]\n password: Mapped[str] = mapped_column(info=dto_field(\"private\"))\n created_at: Mapped[datetime] = mapped_column(info=dto_field(\"read-only\"))\n address_id: Mapped[UUID] = mapped_column(ForeignKey(\"address.id\"), info=dto_field(\"private\"))\n address: Mapped[Address] = relationship(info=dto_field(\"read-only\"))\n\n\nUserDTO = SQLAlchemyDTO[User]\nconfig = DTOConfig(exclude={\"id\", \"address.id\", \"address.street\"})\nReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]\n\n\n@post(\"/users\", dto=UserDTO, return_dto=ReadUserDTO, sync_to_thread=False)\ndef create_user(data: User) -> User:\n data.created_at = datetime.min\n data.address = Address(street=\"123 Main St\", city=\"Anytown\", state=\"NY\", zip=\"12345\")\n return data\n\n\napp = Litestar(route_handlers=[create_user])\n\n# run: /users -H \"Content-Type: application/json\" -d '{\"name\":\"Litestar User\",\"password\":\"xyz\",\"created_at\":\"2023-04-24T00:00:00Z\"}'\n", "path": "docs/examples/data_transfer_objects/factory/excluding_fields.py"}], "after_files": [{"content": "from datetime import datetime\nfrom typing import List\nfrom uuid import UUID\n\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.orm import Mapped, mapped_column, relationship\nfrom typing_extensions import Annotated\n\nfrom litestar import Litestar, post\nfrom litestar.contrib.sqlalchemy.dto import SQLAlchemyDTO\nfrom litestar.dto.factory import DTOConfig, dto_field\n\nfrom .my_lib import Base\n\n\nclass Address(Base):\n street: Mapped[str]\n city: Mapped[str]\n state: Mapped[str]\n zip: Mapped[str]\n\n\nclass Pets(Base):\n name: Mapped[str]\n user_id: Mapped[UUID] = mapped_column(ForeignKey(\"user.id\"))\n\n\nclass User(Base):\n name: Mapped[str]\n password: Mapped[str] = mapped_column(info=dto_field(\"private\"))\n created_at: Mapped[datetime] = mapped_column(info=dto_field(\"read-only\"))\n address_id: Mapped[UUID] = mapped_column(ForeignKey(\"address.id\"), info=dto_field(\"private\"))\n address: Mapped[Address] = relationship(info=dto_field(\"read-only\"))\n pets: Mapped[List[Pets]] = relationship(info=dto_field(\"read-only\"))\n\n\nUserDTO = SQLAlchemyDTO[User]\nconfig = DTOConfig(\n exclude={\n \"id\",\n \"address.id\",\n \"address.street\",\n \"pets.0.id\",\n \"pets.0.user_id\",\n }\n)\nReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]\n\n\n@post(\"/users\", dto=UserDTO, return_dto=ReadUserDTO, sync_to_thread=False)\ndef create_user(data: User) -> User:\n data.created_at = datetime.min\n data.address = Address(street=\"123 Main St\", city=\"Anytown\", state=\"NY\", zip=\"12345\")\n data.pets = [Pets(id=1, name=\"Fido\"), Pets(id=2, name=\"Spot\")]\n return data\n\n\napp = Litestar(route_handlers=[create_user])\n\n# run: /users -H \"Content-Type: application/json\" -d '{\"name\":\"Litestar User\",\"password\":\"xyz\",\"created_at\":\"2023-04-24T00:00:00Z\"}'\n", "path": "docs/examples/data_transfer_objects/factory/excluding_fields.py"}]}
| 831 | 437 |
gh_patches_debug_4423
|
rasdani/github-patches
|
git_diff
|
elastic__ecs-1483
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove `ignore_above` setting when not indexing field
Certain ECS fields are type `keyword` but are also not:
* searchable (`index: false`)
* used in aggregations, sorting, or scripting (`doc_values: false`)
The ECS tooling's behavior is to set `ignore_above: 1024` for all `keyword` fields.
However, `ignore_above` should not be present when `index: false` and `doc_values: false` are set on a field.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/schema/cleaner.py`
Content:
```
1 import copy
2
3 from generators import ecs_helpers
4 from schema import visitor
5
6 # This script performs a few cleanup functions in place, within the deeply nested
7 # 'fields' structure passed to `clean(fields)`.
8 #
9 # What happens here:
10 #
11 # - check that mandatory attributes are present, without which we can't do much.
12 # - cleans things up, like stripping spaces, sorting arrays
13 # - makes lots of defaults explicit
14 # - pre-calculate a few additional helpful fields
15 # - converts shorthands into full representation (e.g. reuse locations)
16 #
17 # This script only deals with field sets themselves and the fields defined
18 # inside them. It doesn't perform field reuse, and therefore doesn't
19 # deal with final field names either.
20
21
22 def clean(fields, strict=False):
23 global strict_mode
24 strict_mode = strict
25 visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
26
27
28 # Schema level cleanup
29
30
31 def schema_cleanup(schema):
32 # Sanity check first
33 schema_mandatory_attributes(schema)
34 # trailing space cleanup
35 ecs_helpers.dict_clean_string_values(schema['schema_details'])
36 ecs_helpers.dict_clean_string_values(schema['field_details'])
37 # Some defaults
38 schema['schema_details'].setdefault('group', 2)
39 schema['schema_details'].setdefault('root', False)
40 schema['field_details'].setdefault('type', 'group')
41 schema['field_details'].setdefault('short', schema['field_details']['description'])
42 if 'reusable' in schema['schema_details']:
43 # order to perform chained reuses. Set to 1 if it needs to happen earlier.
44 schema['schema_details']['reusable'].setdefault('order', 2)
45 # Precalculate stuff. Those can't be set in the YAML.
46 if schema['schema_details']['root']:
47 schema['schema_details']['prefix'] = ''
48 else:
49 schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'
50 normalize_reuse_notation(schema)
51 # Final validity check if in strict mode
52 schema_assertions_and_warnings(schema)
53
54
55 SCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']
56
57
58 def schema_mandatory_attributes(schema):
59 """Ensures for the presence of the mandatory schema attributes and raises if any are missing"""
60 current_schema_attributes = sorted(list(schema['field_details'].keys()) +
61 list(schema['schema_details'].keys()))
62 missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)
63 if len(missing_attributes) > 0:
64 msg = "Schema {} is missing the following mandatory attributes: {}.\nFound these: {}".format(
65 schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)
66 raise ValueError(msg)
67 if 'reusable' in schema['schema_details']:
68 reuse_attributes = sorted(schema['schema_details']['reusable'].keys())
69 missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)
70 if len(missing_reuse_attributes) > 0:
71 msg = "Reusable schema {} is missing the following reuse attributes: {}.\nFound these: {}".format(
72 schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)
73 raise ValueError(msg)
74
75
76 def schema_assertions_and_warnings(schema):
77 """Additional checks on a fleshed out schema"""
78 single_line_short_description(schema, strict=strict_mode)
79 if 'beta' in schema['field_details']:
80 single_line_beta_description(schema, strict=strict_mode)
81 if 'reusable' in schema['schema_details']:
82 single_line_short_override_description(schema, strict=strict_mode)
83
84
85 def normalize_reuse_notation(schema):
86 """
87 Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.
88
89 When marking "user" as reusable under "destination" with the shorthand entry
90 `- destination`, this is expanded to the complete entry
91 `- { "at": "destination", "as": "user" }`.
92 The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.
93
94 The dictionary notation enables nesting a field set as a different name.
95 An example is nesting "process" fields to capture parent process details
96 at `process.parent.*`.
97 The dictionary notation `- { "at": "process", "as": "parent" }` will yield
98 fields such as `process.parent.pid`.
99 """
100 if 'reusable' not in schema['schema_details']:
101 return
102 schema_name = schema['field_details']['name']
103 reuse_entries = []
104 for reuse_entry in schema['schema_details']['reusable']['expected']:
105 if type(reuse_entry) is dict: # Already explicit
106 if 'at' in reuse_entry and 'as' in reuse_entry:
107 explicit_entry = reuse_entry
108 else:
109 raise ValueError("When specifying reusable expected locations for {} " +
110 "with the dictionary notation, keys 'as' and 'at' are required. " +
111 "Got {}.".format(schema_name, reuse_entry))
112 else: # Make it explicit
113 explicit_entry = {'at': reuse_entry, 'as': schema_name}
114 explicit_entry['full'] = explicit_entry['at'] + '.' + explicit_entry['as']
115 reuse_entries.append(explicit_entry)
116 schema['schema_details']['reusable']['expected'] = reuse_entries
117
118
119 # Field level cleanup
120
121
122 def field_cleanup(field):
123 field_mandatory_attributes(field)
124 if ecs_helpers.is_intermediate(field):
125 return
126 ecs_helpers.dict_clean_string_values(field['field_details'])
127 if 'allowed_values' in field['field_details']:
128 for allowed_value in field['field_details']['allowed_values']:
129 ecs_helpers.dict_clean_string_values(allowed_value)
130 field_defaults(field)
131 field_assertions_and_warnings(field)
132
133
134 def field_defaults(field):
135 field['field_details'].setdefault('short', field['field_details']['description'])
136 field['field_details'].setdefault('normalize', [])
137 field_or_multi_field_datatype_defaults(field['field_details'])
138 if 'multi_fields' in field['field_details']:
139 for mf in field['field_details']['multi_fields']:
140 field_or_multi_field_datatype_defaults(mf)
141 if 'name' not in mf:
142 mf['name'] = mf['type']
143
144
145 def field_or_multi_field_datatype_defaults(field_details):
146 """Sets datatype-related defaults on a canonical field or multi-field entries."""
147 if field_details['type'] == 'keyword':
148 field_details.setdefault('ignore_above', 1024)
149 if field_details['type'] == 'text':
150 field_details.setdefault('norms', False)
151 # wildcard needs the index param stripped
152 if field_details['type'] == 'wildcard':
153 field_details.pop('index', None)
154 if 'index' in field_details and not field_details['index']:
155 field_details.setdefault('doc_values', False)
156
157
158 FIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']
159 ACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']
160
161
162 def field_mandatory_attributes(field):
163 """Ensures for the presence of the mandatory field attributes and raises if any are missing"""
164 if ecs_helpers.is_intermediate(field):
165 return
166 current_field_attributes = sorted(field['field_details'].keys())
167 missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)
168
169 # `alias` fields require a target `path` attribute.
170 if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:
171 missing_attributes.append('path')
172 # `scaled_float` fields require a `scaling_factor` attribute.
173 if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:
174 missing_attributes.append('scaling_factor')
175
176 if len(missing_attributes) > 0:
177 msg = "Field is missing the following mandatory attributes: {}.\nFound these: {}.\nField details: {}"
178 raise ValueError(msg.format(', '.join(missing_attributes),
179 current_field_attributes, field))
180
181
182 def field_assertions_and_warnings(field):
183 """Additional checks on a fleshed out field"""
184 if not ecs_helpers.is_intermediate(field):
185 # check short description length if in strict mode
186 single_line_short_description(field, strict=strict_mode)
187 check_example_value(field, strict=strict_mode)
188 if 'beta' in field['field_details']:
189 single_line_beta_description(field, strict=strict_mode)
190 if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:
191 msg = "Invalid level for field '{}'.\nValue: {}\nAcceptable values: {}".format(
192 field['field_details']['name'], field['field_details']['level'],
193 ACCEPTABLE_FIELD_LEVELS)
194 raise ValueError(msg)
195
196 # Common
197
198
199 SHORT_LIMIT = 120
200
201
202 def single_line_short_check(short_to_check, short_name):
203 short_length = len(short_to_check)
204 if "\n" in short_to_check or short_length > SHORT_LIMIT:
205 msg = "Short descriptions must be single line, and under {} characters (current length: {}).\n".format(
206 SHORT_LIMIT, short_length)
207 msg += "Offending field or field set: {}\nShort description:\n {}".format(
208 short_name,
209 short_to_check)
210 return msg
211 return None
212
213
214 def single_line_short_description(schema_or_field, strict=True):
215 error = single_line_short_check(schema_or_field['field_details']['short'], schema_or_field['field_details']['name'])
216 if error:
217 if strict:
218 raise ValueError(error)
219 else:
220 ecs_helpers.strict_warning(error)
221
222
223 def single_line_short_override_description(schema_or_field, strict=True):
224 for field in schema_or_field['schema_details']['reusable']['expected']:
225 if not 'short_override' in field:
226 continue
227 error = single_line_short_check(field['short_override'], field['full'])
228 if error:
229 if strict:
230 raise ValueError(error)
231 else:
232 ecs_helpers.strict_warning(error)
233
234
235 def check_example_value(field, strict=True):
236 """
237 Checks if value of the example field is of type list or dict.
238 Fails or warns (depending on strict mode) if so.
239 """
240 example_value = field['field_details'].get('example', None)
241 if isinstance(example_value, (list, dict)):
242 name = field['field_details']['name']
243 msg = f"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation."
244 if strict:
245 raise ValueError(msg)
246 else:
247 ecs_helpers.strict_warning(msg)
248
249
250 def single_line_beta_description(schema_or_field, strict=True):
251 if "\n" in schema_or_field['field_details']['beta']:
252 msg = "Beta descriptions must be single line.\n"
253 msg += f"Offending field or field set: {schema_or_field['field_details']['name']}"
254 if strict:
255 raise ValueError(msg)
256 else:
257 ecs_helpers.strict_warning(msg)
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/schema/cleaner.py b/scripts/schema/cleaner.py
--- a/scripts/schema/cleaner.py
+++ b/scripts/schema/cleaner.py
@@ -153,6 +153,7 @@
field_details.pop('index', None)
if 'index' in field_details and not field_details['index']:
field_details.setdefault('doc_values', False)
+ field_details.pop('ignore_above', None)
FIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']
|
{"golden_diff": "diff --git a/scripts/schema/cleaner.py b/scripts/schema/cleaner.py\n--- a/scripts/schema/cleaner.py\n+++ b/scripts/schema/cleaner.py\n@@ -153,6 +153,7 @@\n field_details.pop('index', None)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n+ field_details.pop('ignore_above', None)\n \n \n FIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']\n", "issue": "Remove `ignore_above` setting when not indexing field\nCertain ECS fields are type `keyword` but are also not:\r\n\r\n* searchable (`index: false`)\r\n* used in aggregations, sorting, or scripting (`doc_values: false`)\r\n\r\nThe ECS tooling's behavior is to set `ignore_above: 1024` for all `keyword` fields.\r\n\r\nHowever, `ignore_above` should not be present when `index: false` and `doc_values: false` are set on a field.\r\n\n", "before_files": [{"content": "import copy\n\nfrom generators import ecs_helpers\nfrom schema import visitor\n\n# This script performs a few cleanup functions in place, within the deeply nested\n# 'fields' structure passed to `clean(fields)`.\n#\n# What happens here:\n#\n# - check that mandatory attributes are present, without which we can't do much.\n# - cleans things up, like stripping spaces, sorting arrays\n# - makes lots of defaults explicit\n# - pre-calculate a few additional helpful fields\n# - converts shorthands into full representation (e.g. reuse locations)\n#\n# This script only deals with field sets themselves and the fields defined\n# inside them. It doesn't perform field reuse, and therefore doesn't\n# deal with final field names either.\n\n\ndef clean(fields, strict=False):\n global strict_mode\n strict_mode = strict\n visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)\n\n\n# Schema level cleanup\n\n\ndef schema_cleanup(schema):\n # Sanity check first\n schema_mandatory_attributes(schema)\n # trailing space cleanup\n ecs_helpers.dict_clean_string_values(schema['schema_details'])\n ecs_helpers.dict_clean_string_values(schema['field_details'])\n # Some defaults\n schema['schema_details'].setdefault('group', 2)\n schema['schema_details'].setdefault('root', False)\n schema['field_details'].setdefault('type', 'group')\n schema['field_details'].setdefault('short', schema['field_details']['description'])\n if 'reusable' in schema['schema_details']:\n # order to perform chained reuses. Set to 1 if it needs to happen earlier.\n schema['schema_details']['reusable'].setdefault('order', 2)\n # Precalculate stuff. Those can't be set in the YAML.\n if schema['schema_details']['root']:\n schema['schema_details']['prefix'] = ''\n else:\n schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'\n normalize_reuse_notation(schema)\n # Final validity check if in strict mode\n schema_assertions_and_warnings(schema)\n\n\nSCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']\n\n\ndef schema_mandatory_attributes(schema):\n \"\"\"Ensures for the presence of the mandatory schema attributes and raises if any are missing\"\"\"\n current_schema_attributes = sorted(list(schema['field_details'].keys()) +\n list(schema['schema_details'].keys()))\n missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)\n if len(missing_attributes) > 0:\n msg = \"Schema {} is missing the following mandatory attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)\n raise ValueError(msg)\n if 'reusable' in schema['schema_details']:\n reuse_attributes = sorted(schema['schema_details']['reusable'].keys())\n missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)\n if len(missing_reuse_attributes) > 0:\n msg = \"Reusable schema {} is missing the following reuse attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)\n raise ValueError(msg)\n\n\ndef schema_assertions_and_warnings(schema):\n \"\"\"Additional checks on a fleshed out schema\"\"\"\n single_line_short_description(schema, strict=strict_mode)\n if 'beta' in schema['field_details']:\n single_line_beta_description(schema, strict=strict_mode)\n if 'reusable' in schema['schema_details']:\n single_line_short_override_description(schema, strict=strict_mode)\n\n\ndef normalize_reuse_notation(schema):\n \"\"\"\n Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.\n\n When marking \"user\" as reusable under \"destination\" with the shorthand entry\n `- destination`, this is expanded to the complete entry\n `- { \"at\": \"destination\", \"as\": \"user\" }`.\n The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.\n\n The dictionary notation enables nesting a field set as a different name.\n An example is nesting \"process\" fields to capture parent process details\n at `process.parent.*`.\n The dictionary notation `- { \"at\": \"process\", \"as\": \"parent\" }` will yield\n fields such as `process.parent.pid`.\n \"\"\"\n if 'reusable' not in schema['schema_details']:\n return\n schema_name = schema['field_details']['name']\n reuse_entries = []\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n if type(reuse_entry) is dict: # Already explicit\n if 'at' in reuse_entry and 'as' in reuse_entry:\n explicit_entry = reuse_entry\n else:\n raise ValueError(\"When specifying reusable expected locations for {} \" +\n \"with the dictionary notation, keys 'as' and 'at' are required. \" +\n \"Got {}.\".format(schema_name, reuse_entry))\n else: # Make it explicit\n explicit_entry = {'at': reuse_entry, 'as': schema_name}\n explicit_entry['full'] = explicit_entry['at'] + '.' + explicit_entry['as']\n reuse_entries.append(explicit_entry)\n schema['schema_details']['reusable']['expected'] = reuse_entries\n\n\n# Field level cleanup\n\n\ndef field_cleanup(field):\n field_mandatory_attributes(field)\n if ecs_helpers.is_intermediate(field):\n return\n ecs_helpers.dict_clean_string_values(field['field_details'])\n if 'allowed_values' in field['field_details']:\n for allowed_value in field['field_details']['allowed_values']:\n ecs_helpers.dict_clean_string_values(allowed_value)\n field_defaults(field)\n field_assertions_and_warnings(field)\n\n\ndef field_defaults(field):\n field['field_details'].setdefault('short', field['field_details']['description'])\n field['field_details'].setdefault('normalize', [])\n field_or_multi_field_datatype_defaults(field['field_details'])\n if 'multi_fields' in field['field_details']:\n for mf in field['field_details']['multi_fields']:\n field_or_multi_field_datatype_defaults(mf)\n if 'name' not in mf:\n mf['name'] = mf['type']\n\n\ndef field_or_multi_field_datatype_defaults(field_details):\n \"\"\"Sets datatype-related defaults on a canonical field or multi-field entries.\"\"\"\n if field_details['type'] == 'keyword':\n field_details.setdefault('ignore_above', 1024)\n if field_details['type'] == 'text':\n field_details.setdefault('norms', False)\n # wildcard needs the index param stripped\n if field_details['type'] == 'wildcard':\n field_details.pop('index', None)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n\n\nFIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']\nACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']\n\n\ndef field_mandatory_attributes(field):\n \"\"\"Ensures for the presence of the mandatory field attributes and raises if any are missing\"\"\"\n if ecs_helpers.is_intermediate(field):\n return\n current_field_attributes = sorted(field['field_details'].keys())\n missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)\n\n # `alias` fields require a target `path` attribute.\n if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:\n missing_attributes.append('path')\n # `scaled_float` fields require a `scaling_factor` attribute.\n if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:\n missing_attributes.append('scaling_factor')\n\n if len(missing_attributes) > 0:\n msg = \"Field is missing the following mandatory attributes: {}.\\nFound these: {}.\\nField details: {}\"\n raise ValueError(msg.format(', '.join(missing_attributes),\n current_field_attributes, field))\n\n\ndef field_assertions_and_warnings(field):\n \"\"\"Additional checks on a fleshed out field\"\"\"\n if not ecs_helpers.is_intermediate(field):\n # check short description length if in strict mode\n single_line_short_description(field, strict=strict_mode)\n check_example_value(field, strict=strict_mode)\n if 'beta' in field['field_details']:\n single_line_beta_description(field, strict=strict_mode)\n if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:\n msg = \"Invalid level for field '{}'.\\nValue: {}\\nAcceptable values: {}\".format(\n field['field_details']['name'], field['field_details']['level'],\n ACCEPTABLE_FIELD_LEVELS)\n raise ValueError(msg)\n\n# Common\n\n\nSHORT_LIMIT = 120\n\n\ndef single_line_short_check(short_to_check, short_name):\n short_length = len(short_to_check)\n if \"\\n\" in short_to_check or short_length > SHORT_LIMIT:\n msg = \"Short descriptions must be single line, and under {} characters (current length: {}).\\n\".format(\n SHORT_LIMIT, short_length)\n msg += \"Offending field or field set: {}\\nShort description:\\n {}\".format(\n short_name,\n short_to_check)\n return msg\n return None\n\n\ndef single_line_short_description(schema_or_field, strict=True):\n error = single_line_short_check(schema_or_field['field_details']['short'], schema_or_field['field_details']['name'])\n if error:\n if strict:\n raise ValueError(error)\n else:\n ecs_helpers.strict_warning(error)\n\n\ndef single_line_short_override_description(schema_or_field, strict=True):\n for field in schema_or_field['schema_details']['reusable']['expected']:\n if not 'short_override' in field:\n continue\n error = single_line_short_check(field['short_override'], field['full'])\n if error:\n if strict:\n raise ValueError(error)\n else:\n ecs_helpers.strict_warning(error)\n\n\ndef check_example_value(field, strict=True):\n \"\"\"\n Checks if value of the example field is of type list or dict.\n Fails or warns (depending on strict mode) if so.\n \"\"\"\n example_value = field['field_details'].get('example', None)\n if isinstance(example_value, (list, dict)):\n name = field['field_details']['name']\n msg = f\"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation.\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n\n\ndef single_line_beta_description(schema_or_field, strict=True):\n if \"\\n\" in schema_or_field['field_details']['beta']:\n msg = \"Beta descriptions must be single line.\\n\"\n msg += f\"Offending field or field set: {schema_or_field['field_details']['name']}\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n", "path": "scripts/schema/cleaner.py"}], "after_files": [{"content": "import copy\n\nfrom generators import ecs_helpers\nfrom schema import visitor\n\n# This script performs a few cleanup functions in place, within the deeply nested\n# 'fields' structure passed to `clean(fields)`.\n#\n# What happens here:\n#\n# - check that mandatory attributes are present, without which we can't do much.\n# - cleans things up, like stripping spaces, sorting arrays\n# - makes lots of defaults explicit\n# - pre-calculate a few additional helpful fields\n# - converts shorthands into full representation (e.g. reuse locations)\n#\n# This script only deals with field sets themselves and the fields defined\n# inside them. It doesn't perform field reuse, and therefore doesn't\n# deal with final field names either.\n\n\ndef clean(fields, strict=False):\n global strict_mode\n strict_mode = strict\n visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)\n\n\n# Schema level cleanup\n\n\ndef schema_cleanup(schema):\n # Sanity check first\n schema_mandatory_attributes(schema)\n # trailing space cleanup\n ecs_helpers.dict_clean_string_values(schema['schema_details'])\n ecs_helpers.dict_clean_string_values(schema['field_details'])\n # Some defaults\n schema['schema_details'].setdefault('group', 2)\n schema['schema_details'].setdefault('root', False)\n schema['field_details'].setdefault('type', 'group')\n schema['field_details'].setdefault('short', schema['field_details']['description'])\n if 'reusable' in schema['schema_details']:\n # order to perform chained reuses. Set to 1 if it needs to happen earlier.\n schema['schema_details']['reusable'].setdefault('order', 2)\n # Precalculate stuff. Those can't be set in the YAML.\n if schema['schema_details']['root']:\n schema['schema_details']['prefix'] = ''\n else:\n schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'\n normalize_reuse_notation(schema)\n # Final validity check if in strict mode\n schema_assertions_and_warnings(schema)\n\n\nSCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']\n\n\ndef schema_mandatory_attributes(schema):\n \"\"\"Ensures for the presence of the mandatory schema attributes and raises if any are missing\"\"\"\n current_schema_attributes = sorted(list(schema['field_details'].keys()) +\n list(schema['schema_details'].keys()))\n missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)\n if len(missing_attributes) > 0:\n msg = \"Schema {} is missing the following mandatory attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)\n raise ValueError(msg)\n if 'reusable' in schema['schema_details']:\n reuse_attributes = sorted(schema['schema_details']['reusable'].keys())\n missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)\n if len(missing_reuse_attributes) > 0:\n msg = \"Reusable schema {} is missing the following reuse attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)\n raise ValueError(msg)\n\n\ndef schema_assertions_and_warnings(schema):\n \"\"\"Additional checks on a fleshed out schema\"\"\"\n single_line_short_description(schema, strict=strict_mode)\n if 'beta' in schema['field_details']:\n single_line_beta_description(schema, strict=strict_mode)\n if 'reusable' in schema['schema_details']:\n single_line_short_override_description(schema, strict=strict_mode)\n\n\ndef normalize_reuse_notation(schema):\n \"\"\"\n Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.\n\n When marking \"user\" as reusable under \"destination\" with the shorthand entry\n `- destination`, this is expanded to the complete entry\n `- { \"at\": \"destination\", \"as\": \"user\" }`.\n The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.\n\n The dictionary notation enables nesting a field set as a different name.\n An example is nesting \"process\" fields to capture parent process details\n at `process.parent.*`.\n The dictionary notation `- { \"at\": \"process\", \"as\": \"parent\" }` will yield\n fields such as `process.parent.pid`.\n \"\"\"\n if 'reusable' not in schema['schema_details']:\n return\n schema_name = schema['field_details']['name']\n reuse_entries = []\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n if type(reuse_entry) is dict: # Already explicit\n if 'at' in reuse_entry and 'as' in reuse_entry:\n explicit_entry = reuse_entry\n else:\n raise ValueError(\"When specifying reusable expected locations for {} \" +\n \"with the dictionary notation, keys 'as' and 'at' are required. \" +\n \"Got {}.\".format(schema_name, reuse_entry))\n else: # Make it explicit\n explicit_entry = {'at': reuse_entry, 'as': schema_name}\n explicit_entry['full'] = explicit_entry['at'] + '.' + explicit_entry['as']\n reuse_entries.append(explicit_entry)\n schema['schema_details']['reusable']['expected'] = reuse_entries\n\n\n# Field level cleanup\n\n\ndef field_cleanup(field):\n field_mandatory_attributes(field)\n if ecs_helpers.is_intermediate(field):\n return\n ecs_helpers.dict_clean_string_values(field['field_details'])\n if 'allowed_values' in field['field_details']:\n for allowed_value in field['field_details']['allowed_values']:\n ecs_helpers.dict_clean_string_values(allowed_value)\n field_defaults(field)\n field_assertions_and_warnings(field)\n\n\ndef field_defaults(field):\n field['field_details'].setdefault('short', field['field_details']['description'])\n field['field_details'].setdefault('normalize', [])\n field_or_multi_field_datatype_defaults(field['field_details'])\n if 'multi_fields' in field['field_details']:\n for mf in field['field_details']['multi_fields']:\n field_or_multi_field_datatype_defaults(mf)\n if 'name' not in mf:\n mf['name'] = mf['type']\n\n\ndef field_or_multi_field_datatype_defaults(field_details):\n \"\"\"Sets datatype-related defaults on a canonical field or multi-field entries.\"\"\"\n if field_details['type'] == 'keyword':\n field_details.setdefault('ignore_above', 1024)\n if field_details['type'] == 'text':\n field_details.setdefault('norms', False)\n # wildcard needs the index param stripped\n if field_details['type'] == 'wildcard':\n field_details.pop('index', None)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n field_details.pop('ignore_above', None)\n\n\nFIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']\nACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']\n\n\ndef field_mandatory_attributes(field):\n \"\"\"Ensures for the presence of the mandatory field attributes and raises if any are missing\"\"\"\n if ecs_helpers.is_intermediate(field):\n return\n current_field_attributes = sorted(field['field_details'].keys())\n missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)\n\n # `alias` fields require a target `path` attribute.\n if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:\n missing_attributes.append('path')\n # `scaled_float` fields require a `scaling_factor` attribute.\n if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:\n missing_attributes.append('scaling_factor')\n\n if len(missing_attributes) > 0:\n msg = \"Field is missing the following mandatory attributes: {}.\\nFound these: {}.\\nField details: {}\"\n raise ValueError(msg.format(', '.join(missing_attributes),\n current_field_attributes, field))\n\n\ndef field_assertions_and_warnings(field):\n \"\"\"Additional checks on a fleshed out field\"\"\"\n if not ecs_helpers.is_intermediate(field):\n # check short description length if in strict mode\n single_line_short_description(field, strict=strict_mode)\n check_example_value(field, strict=strict_mode)\n if 'beta' in field['field_details']:\n single_line_beta_description(field, strict=strict_mode)\n if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:\n msg = \"Invalid level for field '{}'.\\nValue: {}\\nAcceptable values: {}\".format(\n field['field_details']['name'], field['field_details']['level'],\n ACCEPTABLE_FIELD_LEVELS)\n raise ValueError(msg)\n\n# Common\n\n\nSHORT_LIMIT = 120\n\n\ndef single_line_short_check(short_to_check, short_name):\n short_length = len(short_to_check)\n if \"\\n\" in short_to_check or short_length > SHORT_LIMIT:\n msg = \"Short descriptions must be single line, and under {} characters (current length: {}).\\n\".format(\n SHORT_LIMIT, short_length)\n msg += \"Offending field or field set: {}\\nShort description:\\n {}\".format(\n short_name,\n short_to_check)\n return msg\n return None\n\n\ndef single_line_short_description(schema_or_field, strict=True):\n error = single_line_short_check(schema_or_field['field_details']['short'], schema_or_field['field_details']['name'])\n if error:\n if strict:\n raise ValueError(error)\n else:\n ecs_helpers.strict_warning(error)\n\n\ndef single_line_short_override_description(schema_or_field, strict=True):\n for field in schema_or_field['schema_details']['reusable']['expected']:\n if not 'short_override' in field:\n continue\n error = single_line_short_check(field['short_override'], field['full'])\n if error:\n if strict:\n raise ValueError(error)\n else:\n ecs_helpers.strict_warning(error)\n\n\ndef check_example_value(field, strict=True):\n \"\"\"\n Checks if value of the example field is of type list or dict.\n Fails or warns (depending on strict mode) if so.\n \"\"\"\n example_value = field['field_details'].get('example', None)\n if isinstance(example_value, (list, dict)):\n name = field['field_details']['name']\n msg = f\"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation.\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n\n\ndef single_line_beta_description(schema_or_field, strict=True):\n if \"\\n\" in schema_or_field['field_details']['beta']:\n msg = \"Beta descriptions must be single line.\\n\"\n msg += f\"Offending field or field set: {schema_or_field['field_details']['name']}\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n", "path": "scripts/schema/cleaner.py"}]}
| 3,414 | 114 |
gh_patches_debug_34308
|
rasdani/github-patches
|
git_diff
|
ocadotechnology__aimmo-444
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix aimmo view tests
The new PR on branch `fix_staging` is breaking some tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `players/views.py`
Content:
```
1 import cPickle as pickle
2 import logging
3 import os
4
5 from django.contrib.auth.decorators import login_required
6 from django.core.exceptions import ValidationError
7 from django.core.urlresolvers import reverse
8 from django.http import HttpResponse, Http404
9 from django.http import JsonResponse
10 from django.shortcuts import redirect, render, get_object_or_404
11 from django.views.decorators.csrf import csrf_exempt
12 from django.views.decorators.http import require_http_methods
13 from django.views.generic import TemplateView
14
15 from models import Avatar, Game, LevelAttempt
16 from players import forms
17 from . import app_settings
18
19 LOGGER = logging.getLogger(__name__)
20
21
22 def _post_code_success_response(message):
23 return _create_response("SUCCESS", message)
24
25
26 def _create_response(status, message):
27 response = {
28 "status": status,
29 "message": message
30 }
31 return JsonResponse(response)
32
33
34 @login_required
35 def code(request, id):
36 game = get_object_or_404(Game, id=id)
37 if not game.can_user_play(request.user):
38 raise Http404
39 try:
40 avatar = game.avatar_set.get(owner=request.user)
41 except Avatar.DoesNotExist:
42 initial_code_file_name = os.path.join(
43 os.path.abspath(os.path.dirname(__file__)),
44 'avatar_examples/dumb_avatar.py',
45 )
46 with open(initial_code_file_name) as initial_code_file:
47 initial_code = initial_code_file.read()
48 avatar = Avatar.objects.create(owner=request.user, code=initial_code,
49 game_id=id)
50 if request.method == 'POST':
51 avatar.code = request.POST['code']
52 avatar.save()
53 return _post_code_success_response('Your code was saved!<br><br><a href="%s">Watch</a>' % reverse('aimmo/watch', kwargs={'id': game.id}))
54 else:
55 return HttpResponse(avatar.code)
56
57
58 def list_games(request):
59 response = {
60 game.pk:
61 {
62 'name': game.name,
63 'settings': pickle.dumps(game.settings_as_dict()),
64 } for game in Game.objects.exclude_inactive()
65 }
66 return JsonResponse(response)
67
68
69 def get_game(request, id):
70 game = get_object_or_404(Game, id=id)
71 response = {
72 'main': {
73 'parameters': [],
74 'main_avatar': None,
75 'users': [],
76 }
77 }
78 for avatar in game.avatar_set.all():
79 if avatar.owner_id == game.main_user_id:
80 response['main']['main_avatar'] = avatar.owner_id
81 response['main']['users'].append({
82 'id': avatar.owner_id,
83 'code': avatar.code,
84 })
85 return JsonResponse(response)
86
87
88 @csrf_exempt
89 @require_http_methods(['POST'])
90 def mark_game_complete(request, id):
91 game = get_object_or_404(Game, id=id)
92 game.completed = True
93 game.static_data = request.body
94 game.save()
95 return HttpResponse('Done!')
96
97
98 class ProgramView(TemplateView):
99 template_name = 'players/program.html'
100
101 def get_context_data(self, **kwargs):
102 context = super(ProgramView, self).get_context_data(**kwargs)
103 game = get_object_or_404(Game, id=self.kwargs['id'])
104 if not game.can_user_play(self.request.user):
105 raise Http404
106 context['game_id'] = int(self.kwargs['id'])
107 return context
108
109
110 def program_level(request, num):
111 try:
112 game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)
113 except Game.DoesNotExist:
114 LOGGER.debug('Adding level')
115 game = _add_and_return_level(num, request.user)
116 LOGGER.debug('Programming game with id %s', game.id)
117 return render(request, 'players/program.html', {'game_id': game.id})
118
119
120 def _render_game(request, game):
121 context = {
122 'current_user_player_key': request.user.pk,
123 'active': game.is_active,
124 'static_data': game.static_data or '{}',
125 }
126 context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION(game.id)
127 context['game_id'] = game.id
128 return render(request, 'players/viewer.html', context)
129
130
131 def watch_game(request, id):
132 game = get_object_or_404(Game, id=id)
133 if not game.can_user_play(request.user):
134 raise Http404
135 return _render_game(request, game)
136
137
138 def watch_level(request, num):
139 try:
140 game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)
141 except Game.DoesNotExist:
142 LOGGER.debug('Adding level')
143 game = _add_and_return_level(num, request.user)
144 LOGGER.debug('Displaying game with id %s', game.id)
145 return _render_game(request, game)
146
147
148 def _add_and_return_level(num, user):
149 game = Game(generator='Level'+num, name='Level '+num, public=False, main_user=user)
150 try:
151 game.save()
152 except ValidationError as e:
153 LOGGER.warn(e)
154 raise Http404
155 game.can_play = [user]
156 game.save()
157 level_attempt = LevelAttempt(game=game, user=user, level_number=num)
158 level_attempt.save()
159 return game
160
161
162 @login_required
163 def add_game(request):
164 if request.method == 'POST':
165 form = forms.AddGameForm(request.POST)
166 if form.is_valid():
167 game = form.save(commit=False)
168 game.generator = 'Main'
169 game.owner = request.user
170 game.save()
171 return redirect('aimmo/program', id=game.id)
172 else:
173 form = forms.AddGameForm()
174 return render(request, 'players/add_game.html', {'form': form})
175
```
Path: `players/app_settings.py`
Content:
```
1 from django.conf import settings
2
3 #: URL function for locating the game server, takes one parameter `game`
4 GAME_SERVER_LOCATION_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_LOCATION_FUNCTION', None)
5
6 MAX_LEVEL = 1
7
```
Path: `example_project/example_project/settings.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2015, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 """Django settings for example_project project."""
38 import subprocess
39
40 import os
41
42 ALLOWED_HOSTS = ['*']
43
44 DEBUG = True
45
46 DATABASES = {
47 'default': {
48 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql',
49 # 'sqlite3' or 'oracle'.
50 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'db.sqlite3'),
51 # Or path to database file if using sqlite3.
52 }
53 }
54
55 USE_I18N = True
56 USE_L10N = True
57
58 TIME_ZONE = 'Europe/London'
59 LANGUAGE_CODE = 'en-gb'
60 STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
61 STATIC_URL = '/static/'
62 SECRET_KEY = 'not-a-secret'
63
64 ROOT_URLCONF = 'django_autoconfig.autourlconf'
65
66 WSGI_APPLICATION = 'example_project.wsgi.application'
67
68 INSTALLED_APPS = (
69 'django.contrib.admin',
70 'django.contrib.auth',
71 'django.contrib.contenttypes',
72 'players',
73 'django_forms_bootstrap',
74 )
75
76 LOGGING = {
77 'version': 1,
78 'disable_existing_loggers': False,
79 'filters': {
80 'require_debug_false': {
81 '()': 'django.utils.log.RequireDebugFalse'
82 }
83 },
84 'handlers': {
85 'console': {
86 'level': 'DEBUG',
87 'class': 'logging.StreamHandler'
88 },
89 },
90 'loggers': {
91 'views': {
92 'handlers': ['console'],
93 'level': 'DEBUG'
94 },
95 }
96 }
97
98 LOGIN_URL = '/players/accounts/login/'
99
100 LOGIN_REDIRECT_URL = '/players/'
101
102 MIDDLEWARE_CLASSES = [
103 'django.contrib.sessions.middleware.SessionMiddleware',
104 'django.middleware.locale.LocaleMiddleware',
105 'django.middleware.common.CommonMiddleware',
106 'django.middleware.csrf.CsrfViewMiddleware',
107 'django.contrib.auth.middleware.AuthenticationMiddleware',
108 'django.contrib.messages.middleware.MessageMiddleware',
109 ]
110
111
112 def get_url(game):
113 if os.environ.get('AIMMO_MODE', '') == 'minikube':
114 output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',
115 'game-%s' % game, '--url'])
116 return 'http://dev.aimmo.codeforlife.education', '/game-%s' % game
117 else:
118 return 'http://localhost:%d' % (6001 + int(game) * 1000), ''
119
120
121 AIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url
122
123 try:
124 from example_project.local_settings import * # pylint: disable=E0611
125 except ImportError:
126 pass
127
128 from django_autoconfig import autoconfig
129 autoconfig.configure_settings(globals())
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py
--- a/example_project/example_project/settings.py
+++ b/example_project/example_project/settings.py
@@ -109,16 +109,25 @@
]
-def get_url(game):
+def get_game_url_base_and_path(game):
if os.environ.get('AIMMO_MODE', '') == 'minikube':
output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',
'game-%s' % game, '--url'])
- return 'http://dev.aimmo.codeforlife.education', '/game-%s' % game
+ return 'local.aimmo.codeforlife.education', '/game-%s' % game
else:
- return 'http://localhost:%d' % (6001 + int(game) * 1000), ''
+ return 'localhost', ''
-AIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url
+def get_game_port(game):
+ if os.environ.get('AIMMO_MODE', '') == 'minikube':
+ return 0
+
+ return 6001 + int(game) * 1000
+
+
+AIMMO_GAME_SERVER_URL_FUNCTION = get_game_url_base_and_path
+AIMMO_GAME_SERVER_PORT_FUNCTION = get_game_port
+AIMMO_GAME_SERVER_SSL_FLAG = False
try:
from example_project.local_settings import * # pylint: disable=E0611
diff --git a/players/app_settings.py b/players/app_settings.py
--- a/players/app_settings.py
+++ b/players/app_settings.py
@@ -1,6 +1,8 @@
from django.conf import settings
#: URL function for locating the game server, takes one parameter `game`
-GAME_SERVER_LOCATION_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_LOCATION_FUNCTION', None)
+GAME_SERVER_URL_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_URL_FUNCTION', None)
+GAME_SERVER_PORT_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_PORT_FUNCTION', None)
+GAME_SERVER_SSL_FLAG = getattr(settings, 'AIMMO_GAME_SERVER_SSL_FLAG', False)
MAX_LEVEL = 1
diff --git a/players/views.py b/players/views.py
--- a/players/views.py
+++ b/players/views.py
@@ -123,7 +123,9 @@
'active': game.is_active,
'static_data': game.static_data or '{}',
}
- context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION(game.id)
+ context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_URL_FUNCTION(game.id)
+ context['game_url_port'] = app_settings.GAME_SERVER_PORT_FUNCTION(game.id)
+ context['game_ssl_flag'] = app_settings.GAME_SERVER_SSL_FLAG
context['game_id'] = game.id
return render(request, 'players/viewer.html', context)
|
{"golden_diff": "diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py\n--- a/example_project/example_project/settings.py\n+++ b/example_project/example_project/settings.py\n@@ -109,16 +109,25 @@\n ]\n \n \n-def get_url(game):\n+def get_game_url_base_and_path(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\n output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',\n 'game-%s' % game, '--url'])\n- return 'http://dev.aimmo.codeforlife.education', '/game-%s' % game\n+ return 'local.aimmo.codeforlife.education', '/game-%s' % game\n else:\n- return 'http://localhost:%d' % (6001 + int(game) * 1000), ''\n+ return 'localhost', ''\n \n \n-AIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url\n+def get_game_port(game):\n+ if os.environ.get('AIMMO_MODE', '') == 'minikube':\n+ return 0\n+\n+ return 6001 + int(game) * 1000\n+\n+\n+AIMMO_GAME_SERVER_URL_FUNCTION = get_game_url_base_and_path\n+AIMMO_GAME_SERVER_PORT_FUNCTION = get_game_port\n+AIMMO_GAME_SERVER_SSL_FLAG = False\n \n try:\n from example_project.local_settings import * # pylint: disable=E0611\ndiff --git a/players/app_settings.py b/players/app_settings.py\n--- a/players/app_settings.py\n+++ b/players/app_settings.py\n@@ -1,6 +1,8 @@\n from django.conf import settings\n \n #: URL function for locating the game server, takes one parameter `game`\n-GAME_SERVER_LOCATION_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_LOCATION_FUNCTION', None)\n+GAME_SERVER_URL_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_URL_FUNCTION', None)\n+GAME_SERVER_PORT_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_PORT_FUNCTION', None)\n+GAME_SERVER_SSL_FLAG = getattr(settings, 'AIMMO_GAME_SERVER_SSL_FLAG', False)\n \n MAX_LEVEL = 1\ndiff --git a/players/views.py b/players/views.py\n--- a/players/views.py\n+++ b/players/views.py\n@@ -123,7 +123,9 @@\n 'active': game.is_active,\n 'static_data': game.static_data or '{}',\n }\n- context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION(game.id)\n+ context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_URL_FUNCTION(game.id)\n+ context['game_url_port'] = app_settings.GAME_SERVER_PORT_FUNCTION(game.id)\n+ context['game_ssl_flag'] = app_settings.GAME_SERVER_SSL_FLAG\n context['game_id'] = game.id\n return render(request, 'players/viewer.html', context)\n", "issue": "Fix aimmo view tests \nThe new PR on branch `fix_staging` is breaking some tests.\n", "before_files": [{"content": "import cPickle as pickle\nimport logging\nimport os\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ValidationError\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, Http404\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.generic import TemplateView\n\nfrom models import Avatar, Game, LevelAttempt\nfrom players import forms\nfrom . import app_settings\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef _post_code_success_response(message):\n return _create_response(\"SUCCESS\", message)\n\n\ndef _create_response(status, message):\n response = {\n \"status\": status,\n \"message\": message\n }\n return JsonResponse(response)\n\n\n@login_required\ndef code(request, id):\n game = get_object_or_404(Game, id=id)\n if not game.can_user_play(request.user):\n raise Http404\n try:\n avatar = game.avatar_set.get(owner=request.user)\n except Avatar.DoesNotExist:\n initial_code_file_name = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'avatar_examples/dumb_avatar.py',\n )\n with open(initial_code_file_name) as initial_code_file:\n initial_code = initial_code_file.read()\n avatar = Avatar.objects.create(owner=request.user, code=initial_code,\n game_id=id)\n if request.method == 'POST':\n avatar.code = request.POST['code']\n avatar.save()\n return _post_code_success_response('Your code was saved!<br><br><a href=\"%s\">Watch</a>' % reverse('aimmo/watch', kwargs={'id': game.id}))\n else:\n return HttpResponse(avatar.code)\n\n\ndef list_games(request):\n response = {\n game.pk:\n {\n 'name': game.name,\n 'settings': pickle.dumps(game.settings_as_dict()),\n } for game in Game.objects.exclude_inactive()\n }\n return JsonResponse(response)\n\n\ndef get_game(request, id):\n game = get_object_or_404(Game, id=id)\n response = {\n 'main': {\n 'parameters': [],\n 'main_avatar': None,\n 'users': [],\n }\n }\n for avatar in game.avatar_set.all():\n if avatar.owner_id == game.main_user_id:\n response['main']['main_avatar'] = avatar.owner_id\n response['main']['users'].append({\n 'id': avatar.owner_id,\n 'code': avatar.code,\n })\n return JsonResponse(response)\n\n\n@csrf_exempt\n@require_http_methods(['POST'])\ndef mark_game_complete(request, id):\n game = get_object_or_404(Game, id=id)\n game.completed = True\n game.static_data = request.body\n game.save()\n return HttpResponse('Done!')\n\n\nclass ProgramView(TemplateView):\n template_name = 'players/program.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProgramView, self).get_context_data(**kwargs)\n game = get_object_or_404(Game, id=self.kwargs['id'])\n if not game.can_user_play(self.request.user):\n raise Http404\n context['game_id'] = int(self.kwargs['id'])\n return context\n\n\ndef program_level(request, num):\n try:\n game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)\n except Game.DoesNotExist:\n LOGGER.debug('Adding level')\n game = _add_and_return_level(num, request.user)\n LOGGER.debug('Programming game with id %s', game.id)\n return render(request, 'players/program.html', {'game_id': game.id})\n\n\ndef _render_game(request, game):\n context = {\n 'current_user_player_key': request.user.pk,\n 'active': game.is_active,\n 'static_data': game.static_data or '{}',\n }\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION(game.id)\n context['game_id'] = game.id\n return render(request, 'players/viewer.html', context)\n\n\ndef watch_game(request, id):\n game = get_object_or_404(Game, id=id)\n if not game.can_user_play(request.user):\n raise Http404\n return _render_game(request, game)\n\n\ndef watch_level(request, num):\n try:\n game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)\n except Game.DoesNotExist:\n LOGGER.debug('Adding level')\n game = _add_and_return_level(num, request.user)\n LOGGER.debug('Displaying game with id %s', game.id)\n return _render_game(request, game)\n\n\ndef _add_and_return_level(num, user):\n game = Game(generator='Level'+num, name='Level '+num, public=False, main_user=user)\n try:\n game.save()\n except ValidationError as e:\n LOGGER.warn(e)\n raise Http404\n game.can_play = [user]\n game.save()\n level_attempt = LevelAttempt(game=game, user=user, level_number=num)\n level_attempt.save()\n return game\n\n\n@login_required\ndef add_game(request):\n if request.method == 'POST':\n form = forms.AddGameForm(request.POST)\n if form.is_valid():\n game = form.save(commit=False)\n game.generator = 'Main'\n game.owner = request.user\n game.save()\n return redirect('aimmo/program', id=game.id)\n else:\n form = forms.AddGameForm()\n return render(request, 'players/add_game.html', {'form': form})\n", "path": "players/views.py"}, {"content": "from django.conf import settings\n\n#: URL function for locating the game server, takes one parameter `game`\nGAME_SERVER_LOCATION_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_LOCATION_FUNCTION', None)\n\nMAX_LEVEL = 1\n", "path": "players/app_settings.py"}, {"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n\"\"\"Django settings for example_project project.\"\"\"\nimport subprocess\n\nimport os\n\nALLOWED_HOSTS = ['*']\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql',\n # 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'db.sqlite3'),\n # Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'players',\n 'django_forms_bootstrap',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\nLOGIN_URL = '/players/accounts/login/'\n\nLOGIN_REDIRECT_URL = '/players/'\n\nMIDDLEWARE_CLASSES = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n]\n\n\ndef get_url(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\n output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',\n 'game-%s' % game, '--url'])\n return 'http://dev.aimmo.codeforlife.education', '/game-%s' % game\n else:\n return 'http://localhost:%d' % (6001 + int(game) * 1000), ''\n\n\nAIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py"}], "after_files": [{"content": "import cPickle as pickle\nimport logging\nimport os\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ValidationError\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, Http404\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.generic import TemplateView\n\nfrom models import Avatar, Game, LevelAttempt\nfrom players import forms\nfrom . import app_settings\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef _post_code_success_response(message):\n return _create_response(\"SUCCESS\", message)\n\n\ndef _create_response(status, message):\n response = {\n \"status\": status,\n \"message\": message\n }\n return JsonResponse(response)\n\n\n@login_required\ndef code(request, id):\n game = get_object_or_404(Game, id=id)\n if not game.can_user_play(request.user):\n raise Http404\n try:\n avatar = game.avatar_set.get(owner=request.user)\n except Avatar.DoesNotExist:\n initial_code_file_name = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'avatar_examples/dumb_avatar.py',\n )\n with open(initial_code_file_name) as initial_code_file:\n initial_code = initial_code_file.read()\n avatar = Avatar.objects.create(owner=request.user, code=initial_code,\n game_id=id)\n if request.method == 'POST':\n avatar.code = request.POST['code']\n avatar.save()\n return _post_code_success_response('Your code was saved!<br><br><a href=\"%s\">Watch</a>' % reverse('aimmo/watch', kwargs={'id': game.id}))\n else:\n return HttpResponse(avatar.code)\n\n\ndef list_games(request):\n response = {\n game.pk:\n {\n 'name': game.name,\n 'settings': pickle.dumps(game.settings_as_dict()),\n } for game in Game.objects.exclude_inactive()\n }\n return JsonResponse(response)\n\n\ndef get_game(request, id):\n game = get_object_or_404(Game, id=id)\n response = {\n 'main': {\n 'parameters': [],\n 'main_avatar': None,\n 'users': [],\n }\n }\n for avatar in game.avatar_set.all():\n if avatar.owner_id == game.main_user_id:\n response['main']['main_avatar'] = avatar.owner_id\n response['main']['users'].append({\n 'id': avatar.owner_id,\n 'code': avatar.code,\n })\n return JsonResponse(response)\n\n\n@csrf_exempt\n@require_http_methods(['POST'])\ndef mark_game_complete(request, id):\n game = get_object_or_404(Game, id=id)\n game.completed = True\n game.static_data = request.body\n game.save()\n return HttpResponse('Done!')\n\n\nclass ProgramView(TemplateView):\n template_name = 'players/program.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProgramView, self).get_context_data(**kwargs)\n game = get_object_or_404(Game, id=self.kwargs['id'])\n if not game.can_user_play(self.request.user):\n raise Http404\n context['game_id'] = int(self.kwargs['id'])\n return context\n\n\ndef program_level(request, num):\n try:\n game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)\n except Game.DoesNotExist:\n LOGGER.debug('Adding level')\n game = _add_and_return_level(num, request.user)\n LOGGER.debug('Programming game with id %s', game.id)\n return render(request, 'players/program.html', {'game_id': game.id})\n\n\ndef _render_game(request, game):\n context = {\n 'current_user_player_key': request.user.pk,\n 'active': game.is_active,\n 'static_data': game.static_data or '{}',\n }\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_URL_FUNCTION(game.id)\n context['game_url_port'] = app_settings.GAME_SERVER_PORT_FUNCTION(game.id)\n context['game_ssl_flag'] = app_settings.GAME_SERVER_SSL_FLAG\n context['game_id'] = game.id\n return render(request, 'players/viewer.html', context)\n\n\ndef watch_game(request, id):\n game = get_object_or_404(Game, id=id)\n if not game.can_user_play(request.user):\n raise Http404\n return _render_game(request, game)\n\n\ndef watch_level(request, num):\n try:\n game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)\n except Game.DoesNotExist:\n LOGGER.debug('Adding level')\n game = _add_and_return_level(num, request.user)\n LOGGER.debug('Displaying game with id %s', game.id)\n return _render_game(request, game)\n\n\ndef _add_and_return_level(num, user):\n game = Game(generator='Level'+num, name='Level '+num, public=False, main_user=user)\n try:\n game.save()\n except ValidationError as e:\n LOGGER.warn(e)\n raise Http404\n game.can_play = [user]\n game.save()\n level_attempt = LevelAttempt(game=game, user=user, level_number=num)\n level_attempt.save()\n return game\n\n\n@login_required\ndef add_game(request):\n if request.method == 'POST':\n form = forms.AddGameForm(request.POST)\n if form.is_valid():\n game = form.save(commit=False)\n game.generator = 'Main'\n game.owner = request.user\n game.save()\n return redirect('aimmo/program', id=game.id)\n else:\n form = forms.AddGameForm()\n return render(request, 'players/add_game.html', {'form': form})\n", "path": "players/views.py"}, {"content": "from django.conf import settings\n\n#: URL function for locating the game server, takes one parameter `game`\nGAME_SERVER_URL_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_URL_FUNCTION', None)\nGAME_SERVER_PORT_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_PORT_FUNCTION', None)\nGAME_SERVER_SSL_FLAG = getattr(settings, 'AIMMO_GAME_SERVER_SSL_FLAG', False)\n\nMAX_LEVEL = 1\n", "path": "players/app_settings.py"}, {"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n\"\"\"Django settings for example_project project.\"\"\"\nimport subprocess\n\nimport os\n\nALLOWED_HOSTS = ['*']\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql',\n # 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'db.sqlite3'),\n # Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'players',\n 'django_forms_bootstrap',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\nLOGIN_URL = '/players/accounts/login/'\n\nLOGIN_REDIRECT_URL = '/players/'\n\nMIDDLEWARE_CLASSES = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n]\n\n\ndef get_game_url_base_and_path(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\n output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',\n 'game-%s' % game, '--url'])\n return 'local.aimmo.codeforlife.education', '/game-%s' % game\n else:\n return 'localhost', ''\n\n\ndef get_game_port(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\n return 0\n\n return 6001 + int(game) * 1000\n\n\nAIMMO_GAME_SERVER_URL_FUNCTION = get_game_url_base_and_path\nAIMMO_GAME_SERVER_PORT_FUNCTION = get_game_port\nAIMMO_GAME_SERVER_SSL_FLAG = False\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py"}]}
| 3,296 | 643 |
gh_patches_debug_36750
|
rasdani/github-patches
|
git_diff
|
feast-dev__feast-3761
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add materialize and materialize-incremental rest endpoints
**Is your feature request related to a problem? Please describe.**
In multi clustered environments when managing all cronjobs with cron orchestrator tools facing restricted network access to some resources. We use the Rest interface for most important CLI commands as materialize and materialize-incremental.
**Describe the solution you'd like**
adding two endpoints to Python server
**Additional context**
Also we can implement to go server too
Add materialize and materialize-incremental rest endpoints
**Is your feature request related to a problem? Please describe.**
In multi clustered environments when managing all cronjobs with cron orchestrator tools facing restricted network access to some resources. We use the Rest interface for most important CLI commands as materialize and materialize-incremental.
**Describe the solution you'd like**
adding two endpoints to Python server
**Additional context**
Also we can implement to go server too
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/feature_server.py`
Content:
```
1 import json
2 import traceback
3 import warnings
4
5 import gunicorn.app.base
6 import pandas as pd
7 from fastapi import FastAPI, HTTPException, Request, Response, status
8 from fastapi.logger import logger
9 from fastapi.params import Depends
10 from google.protobuf.json_format import MessageToDict, Parse
11 from pydantic import BaseModel
12
13 import feast
14 from feast import proto_json
15 from feast.data_source import PushMode
16 from feast.errors import PushSourceNotFoundException
17 from feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest
18
19
20 # TODO: deprecate this in favor of push features
21 class WriteToFeatureStoreRequest(BaseModel):
22 feature_view_name: str
23 df: dict
24 allow_registry_cache: bool = True
25
26
27 class PushFeaturesRequest(BaseModel):
28 push_source_name: str
29 df: dict
30 allow_registry_cache: bool = True
31 to: str = "online"
32
33
34 def get_app(store: "feast.FeatureStore"):
35 proto_json.patch()
36
37 app = FastAPI()
38
39 async def get_body(request: Request):
40 return await request.body()
41
42 @app.post("/get-online-features")
43 def get_online_features(body=Depends(get_body)):
44 try:
45 # Validate and parse the request data into GetOnlineFeaturesRequest Protobuf object
46 request_proto = GetOnlineFeaturesRequest()
47 Parse(body, request_proto)
48
49 # Initialize parameters for FeatureStore.get_online_features(...) call
50 if request_proto.HasField("feature_service"):
51 features = store.get_feature_service(
52 request_proto.feature_service, allow_cache=True
53 )
54 else:
55 features = list(request_proto.features.val)
56
57 full_feature_names = request_proto.full_feature_names
58
59 batch_sizes = [len(v.val) for v in request_proto.entities.values()]
60 num_entities = batch_sizes[0]
61 if any(batch_size != num_entities for batch_size in batch_sizes):
62 raise HTTPException(status_code=500, detail="Uneven number of columns")
63
64 response_proto = store._get_online_features(
65 features=features,
66 entity_values=request_proto.entities,
67 full_feature_names=full_feature_names,
68 native_entity_values=False,
69 ).proto
70
71 # Convert the Protobuf object to JSON and return it
72 return MessageToDict( # type: ignore
73 response_proto, preserving_proto_field_name=True, float_precision=18
74 )
75 except Exception as e:
76 # Print the original exception on the server side
77 logger.exception(traceback.format_exc())
78 # Raise HTTPException to return the error message to the client
79 raise HTTPException(status_code=500, detail=str(e))
80
81 @app.post("/push")
82 def push(body=Depends(get_body)):
83 try:
84 request = PushFeaturesRequest(**json.loads(body))
85 df = pd.DataFrame(request.df)
86 if request.to == "offline":
87 to = PushMode.OFFLINE
88 elif request.to == "online":
89 to = PushMode.ONLINE
90 elif request.to == "online_and_offline":
91 to = PushMode.ONLINE_AND_OFFLINE
92 else:
93 raise ValueError(
94 f"{request.to} is not a supported push format. Please specify one of these ['online', 'offline', 'online_and_offline']."
95 )
96 store.push(
97 push_source_name=request.push_source_name,
98 df=df,
99 allow_registry_cache=request.allow_registry_cache,
100 to=to,
101 )
102 except PushSourceNotFoundException as e:
103 # Print the original exception on the server side
104 logger.exception(traceback.format_exc())
105 # Raise HTTPException to return the error message to the client
106 raise HTTPException(status_code=422, detail=str(e))
107 except Exception as e:
108 # Print the original exception on the server side
109 logger.exception(traceback.format_exc())
110 # Raise HTTPException to return the error message to the client
111 raise HTTPException(status_code=500, detail=str(e))
112
113 @app.post("/write-to-online-store")
114 def write_to_online_store(body=Depends(get_body)):
115 warnings.warn(
116 "write_to_online_store is deprecated. Please consider using /push instead",
117 RuntimeWarning,
118 )
119 try:
120 request = WriteToFeatureStoreRequest(**json.loads(body))
121 df = pd.DataFrame(request.df)
122 store.write_to_online_store(
123 feature_view_name=request.feature_view_name,
124 df=df,
125 allow_registry_cache=request.allow_registry_cache,
126 )
127 except Exception as e:
128 # Print the original exception on the server side
129 logger.exception(traceback.format_exc())
130 # Raise HTTPException to return the error message to the client
131 raise HTTPException(status_code=500, detail=str(e))
132
133 @app.get("/health")
134 def health():
135 return Response(status_code=status.HTTP_200_OK)
136
137 return app
138
139
140 class FeastServeApplication(gunicorn.app.base.BaseApplication):
141 def __init__(self, store: "feast.FeatureStore", **options):
142 self._app = get_app(store=store)
143 self._options = options
144 super().__init__()
145
146 def load_config(self):
147 for key, value in self._options.items():
148 if key.lower() in self.cfg.settings and value is not None:
149 self.cfg.set(key.lower(), value)
150
151 self.cfg.set("worker_class", "uvicorn.workers.UvicornWorker")
152
153 def load(self):
154 return self._app
155
156
157 def start_server(
158 store: "feast.FeatureStore",
159 host: str,
160 port: int,
161 no_access_log: bool,
162 workers: int,
163 keep_alive_timeout: int,
164 ):
165 FeastServeApplication(
166 store=store,
167 bind=f"{host}:{port}",
168 accesslog=None if no_access_log else "-",
169 workers=workers,
170 keepalive=keep_alive_timeout,
171 ).run()
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/feast/feature_server.py b/sdk/python/feast/feature_server.py
--- a/sdk/python/feast/feature_server.py
+++ b/sdk/python/feast/feature_server.py
@@ -1,9 +1,11 @@
import json
import traceback
import warnings
+from typing import List, Optional
import gunicorn.app.base
import pandas as pd
+from dateutil import parser
from fastapi import FastAPI, HTTPException, Request, Response, status
from fastapi.logger import logger
from fastapi.params import Depends
@@ -11,7 +13,7 @@
from pydantic import BaseModel
import feast
-from feast import proto_json
+from feast import proto_json, utils
from feast.data_source import PushMode
from feast.errors import PushSourceNotFoundException
from feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest
@@ -31,6 +33,17 @@
to: str = "online"
+class MaterializeRequest(BaseModel):
+ start_ts: str
+ end_ts: str
+ feature_views: Optional[List[str]] = None
+
+
+class MaterializeIncrementalRequest(BaseModel):
+ end_ts: str
+ feature_views: Optional[List[str]] = None
+
+
def get_app(store: "feast.FeatureStore"):
proto_json.patch()
@@ -134,6 +147,34 @@
def health():
return Response(status_code=status.HTTP_200_OK)
+ @app.post("/materialize")
+ def materialize(body=Depends(get_body)):
+ try:
+ request = MaterializeRequest(**json.loads(body))
+ store.materialize(
+ utils.make_tzaware(parser.parse(request.start_ts)),
+ utils.make_tzaware(parser.parse(request.end_ts)),
+ request.feature_views,
+ )
+ except Exception as e:
+ # Print the original exception on the server side
+ logger.exception(traceback.format_exc())
+ # Raise HTTPException to return the error message to the client
+ raise HTTPException(status_code=500, detail=str(e))
+
+ @app.post("/materialize-incremental")
+ def materialize_incremental(body=Depends(get_body)):
+ try:
+ request = MaterializeIncrementalRequest(**json.loads(body))
+ store.materialize_incremental(
+ utils.make_tzaware(parser.parse(request.end_ts)), request.feature_views
+ )
+ except Exception as e:
+ # Print the original exception on the server side
+ logger.exception(traceback.format_exc())
+ # Raise HTTPException to return the error message to the client
+ raise HTTPException(status_code=500, detail=str(e))
+
return app
|
{"golden_diff": "diff --git a/sdk/python/feast/feature_server.py b/sdk/python/feast/feature_server.py\n--- a/sdk/python/feast/feature_server.py\n+++ b/sdk/python/feast/feature_server.py\n@@ -1,9 +1,11 @@\n import json\n import traceback\n import warnings\n+from typing import List, Optional\n \n import gunicorn.app.base\n import pandas as pd\n+from dateutil import parser\n from fastapi import FastAPI, HTTPException, Request, Response, status\n from fastapi.logger import logger\n from fastapi.params import Depends\n@@ -11,7 +13,7 @@\n from pydantic import BaseModel\n \n import feast\n-from feast import proto_json\n+from feast import proto_json, utils\n from feast.data_source import PushMode\n from feast.errors import PushSourceNotFoundException\n from feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest\n@@ -31,6 +33,17 @@\n to: str = \"online\"\n \n \n+class MaterializeRequest(BaseModel):\n+ start_ts: str\n+ end_ts: str\n+ feature_views: Optional[List[str]] = None\n+\n+\n+class MaterializeIncrementalRequest(BaseModel):\n+ end_ts: str\n+ feature_views: Optional[List[str]] = None\n+\n+\n def get_app(store: \"feast.FeatureStore\"):\n proto_json.patch()\n \n@@ -134,6 +147,34 @@\n def health():\n return Response(status_code=status.HTTP_200_OK)\n \n+ @app.post(\"/materialize\")\n+ def materialize(body=Depends(get_body)):\n+ try:\n+ request = MaterializeRequest(**json.loads(body))\n+ store.materialize(\n+ utils.make_tzaware(parser.parse(request.start_ts)),\n+ utils.make_tzaware(parser.parse(request.end_ts)),\n+ request.feature_views,\n+ )\n+ except Exception as e:\n+ # Print the original exception on the server side\n+ logger.exception(traceback.format_exc())\n+ # Raise HTTPException to return the error message to the client\n+ raise HTTPException(status_code=500, detail=str(e))\n+\n+ @app.post(\"/materialize-incremental\")\n+ def materialize_incremental(body=Depends(get_body)):\n+ try:\n+ request = MaterializeIncrementalRequest(**json.loads(body))\n+ store.materialize_incremental(\n+ utils.make_tzaware(parser.parse(request.end_ts)), request.feature_views\n+ )\n+ except Exception as e:\n+ # Print the original exception on the server side\n+ logger.exception(traceback.format_exc())\n+ # Raise HTTPException to return the error message to the client\n+ raise HTTPException(status_code=500, detail=str(e))\n+\n return app\n", "issue": "Add materialize and materialize-incremental rest endpoints\n**Is your feature request related to a problem? Please describe.**\r\nIn multi clustered environments when managing all cronjobs with cron orchestrator tools facing restricted network access to some resources. We use the Rest interface for most important CLI commands as materialize and materialize-incremental.\r\n\r\n**Describe the solution you'd like**\r\nadding two endpoints to Python server\r\n\r\n**Additional context**\r\nAlso we can implement to go server too\r\n\nAdd materialize and materialize-incremental rest endpoints\n**Is your feature request related to a problem? Please describe.**\r\nIn multi clustered environments when managing all cronjobs with cron orchestrator tools facing restricted network access to some resources. We use the Rest interface for most important CLI commands as materialize and materialize-incremental.\r\n\r\n**Describe the solution you'd like**\r\nadding two endpoints to Python server\r\n\r\n**Additional context**\r\nAlso we can implement to go server too\r\n\n", "before_files": [{"content": "import json\nimport traceback\nimport warnings\n\nimport gunicorn.app.base\nimport pandas as pd\nfrom fastapi import FastAPI, HTTPException, Request, Response, status\nfrom fastapi.logger import logger\nfrom fastapi.params import Depends\nfrom google.protobuf.json_format import MessageToDict, Parse\nfrom pydantic import BaseModel\n\nimport feast\nfrom feast import proto_json\nfrom feast.data_source import PushMode\nfrom feast.errors import PushSourceNotFoundException\nfrom feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest\n\n\n# TODO: deprecate this in favor of push features\nclass WriteToFeatureStoreRequest(BaseModel):\n feature_view_name: str\n df: dict\n allow_registry_cache: bool = True\n\n\nclass PushFeaturesRequest(BaseModel):\n push_source_name: str\n df: dict\n allow_registry_cache: bool = True\n to: str = \"online\"\n\n\ndef get_app(store: \"feast.FeatureStore\"):\n proto_json.patch()\n\n app = FastAPI()\n\n async def get_body(request: Request):\n return await request.body()\n\n @app.post(\"/get-online-features\")\n def get_online_features(body=Depends(get_body)):\n try:\n # Validate and parse the request data into GetOnlineFeaturesRequest Protobuf object\n request_proto = GetOnlineFeaturesRequest()\n Parse(body, request_proto)\n\n # Initialize parameters for FeatureStore.get_online_features(...) call\n if request_proto.HasField(\"feature_service\"):\n features = store.get_feature_service(\n request_proto.feature_service, allow_cache=True\n )\n else:\n features = list(request_proto.features.val)\n\n full_feature_names = request_proto.full_feature_names\n\n batch_sizes = [len(v.val) for v in request_proto.entities.values()]\n num_entities = batch_sizes[0]\n if any(batch_size != num_entities for batch_size in batch_sizes):\n raise HTTPException(status_code=500, detail=\"Uneven number of columns\")\n\n response_proto = store._get_online_features(\n features=features,\n entity_values=request_proto.entities,\n full_feature_names=full_feature_names,\n native_entity_values=False,\n ).proto\n\n # Convert the Protobuf object to JSON and return it\n return MessageToDict( # type: ignore\n response_proto, preserving_proto_field_name=True, float_precision=18\n )\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.post(\"/push\")\n def push(body=Depends(get_body)):\n try:\n request = PushFeaturesRequest(**json.loads(body))\n df = pd.DataFrame(request.df)\n if request.to == \"offline\":\n to = PushMode.OFFLINE\n elif request.to == \"online\":\n to = PushMode.ONLINE\n elif request.to == \"online_and_offline\":\n to = PushMode.ONLINE_AND_OFFLINE\n else:\n raise ValueError(\n f\"{request.to} is not a supported push format. Please specify one of these ['online', 'offline', 'online_and_offline'].\"\n )\n store.push(\n push_source_name=request.push_source_name,\n df=df,\n allow_registry_cache=request.allow_registry_cache,\n to=to,\n )\n except PushSourceNotFoundException as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=422, detail=str(e))\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.post(\"/write-to-online-store\")\n def write_to_online_store(body=Depends(get_body)):\n warnings.warn(\n \"write_to_online_store is deprecated. Please consider using /push instead\",\n RuntimeWarning,\n )\n try:\n request = WriteToFeatureStoreRequest(**json.loads(body))\n df = pd.DataFrame(request.df)\n store.write_to_online_store(\n feature_view_name=request.feature_view_name,\n df=df,\n allow_registry_cache=request.allow_registry_cache,\n )\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.get(\"/health\")\n def health():\n return Response(status_code=status.HTTP_200_OK)\n\n return app\n\n\nclass FeastServeApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, store: \"feast.FeatureStore\", **options):\n self._app = get_app(store=store)\n self._options = options\n super().__init__()\n\n def load_config(self):\n for key, value in self._options.items():\n if key.lower() in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n\n self.cfg.set(\"worker_class\", \"uvicorn.workers.UvicornWorker\")\n\n def load(self):\n return self._app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n no_access_log: bool,\n workers: int,\n keep_alive_timeout: int,\n):\n FeastServeApplication(\n store=store,\n bind=f\"{host}:{port}\",\n accesslog=None if no_access_log else \"-\",\n workers=workers,\n keepalive=keep_alive_timeout,\n ).run()\n", "path": "sdk/python/feast/feature_server.py"}], "after_files": [{"content": "import json\nimport traceback\nimport warnings\nfrom typing import List, Optional\n\nimport gunicorn.app.base\nimport pandas as pd\nfrom dateutil import parser\nfrom fastapi import FastAPI, HTTPException, Request, Response, status\nfrom fastapi.logger import logger\nfrom fastapi.params import Depends\nfrom google.protobuf.json_format import MessageToDict, Parse\nfrom pydantic import BaseModel\n\nimport feast\nfrom feast import proto_json, utils\nfrom feast.data_source import PushMode\nfrom feast.errors import PushSourceNotFoundException\nfrom feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest\n\n\n# TODO: deprecate this in favor of push features\nclass WriteToFeatureStoreRequest(BaseModel):\n feature_view_name: str\n df: dict\n allow_registry_cache: bool = True\n\n\nclass PushFeaturesRequest(BaseModel):\n push_source_name: str\n df: dict\n allow_registry_cache: bool = True\n to: str = \"online\"\n\n\nclass MaterializeRequest(BaseModel):\n start_ts: str\n end_ts: str\n feature_views: Optional[List[str]] = None\n\n\nclass MaterializeIncrementalRequest(BaseModel):\n end_ts: str\n feature_views: Optional[List[str]] = None\n\n\ndef get_app(store: \"feast.FeatureStore\"):\n proto_json.patch()\n\n app = FastAPI()\n\n async def get_body(request: Request):\n return await request.body()\n\n @app.post(\"/get-online-features\")\n def get_online_features(body=Depends(get_body)):\n try:\n # Validate and parse the request data into GetOnlineFeaturesRequest Protobuf object\n request_proto = GetOnlineFeaturesRequest()\n Parse(body, request_proto)\n\n # Initialize parameters for FeatureStore.get_online_features(...) call\n if request_proto.HasField(\"feature_service\"):\n features = store.get_feature_service(\n request_proto.feature_service, allow_cache=True\n )\n else:\n features = list(request_proto.features.val)\n\n full_feature_names = request_proto.full_feature_names\n\n batch_sizes = [len(v.val) for v in request_proto.entities.values()]\n num_entities = batch_sizes[0]\n if any(batch_size != num_entities for batch_size in batch_sizes):\n raise HTTPException(status_code=500, detail=\"Uneven number of columns\")\n\n response_proto = store._get_online_features(\n features=features,\n entity_values=request_proto.entities,\n full_feature_names=full_feature_names,\n native_entity_values=False,\n ).proto\n\n # Convert the Protobuf object to JSON and return it\n return MessageToDict( # type: ignore\n response_proto, preserving_proto_field_name=True, float_precision=18\n )\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.post(\"/push\")\n def push(body=Depends(get_body)):\n try:\n request = PushFeaturesRequest(**json.loads(body))\n df = pd.DataFrame(request.df)\n if request.to == \"offline\":\n to = PushMode.OFFLINE\n elif request.to == \"online\":\n to = PushMode.ONLINE\n elif request.to == \"online_and_offline\":\n to = PushMode.ONLINE_AND_OFFLINE\n else:\n raise ValueError(\n f\"{request.to} is not a supported push format. Please specify one of these ['online', 'offline', 'online_and_offline'].\"\n )\n store.push(\n push_source_name=request.push_source_name,\n df=df,\n allow_registry_cache=request.allow_registry_cache,\n to=to,\n )\n except PushSourceNotFoundException as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=422, detail=str(e))\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.post(\"/write-to-online-store\")\n def write_to_online_store(body=Depends(get_body)):\n warnings.warn(\n \"write_to_online_store is deprecated. Please consider using /push instead\",\n RuntimeWarning,\n )\n try:\n request = WriteToFeatureStoreRequest(**json.loads(body))\n df = pd.DataFrame(request.df)\n store.write_to_online_store(\n feature_view_name=request.feature_view_name,\n df=df,\n allow_registry_cache=request.allow_registry_cache,\n )\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.get(\"/health\")\n def health():\n return Response(status_code=status.HTTP_200_OK)\n\n @app.post(\"/materialize\")\n def materialize(body=Depends(get_body)):\n try:\n request = MaterializeRequest(**json.loads(body))\n store.materialize(\n utils.make_tzaware(parser.parse(request.start_ts)),\n utils.make_tzaware(parser.parse(request.end_ts)),\n request.feature_views,\n )\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.post(\"/materialize-incremental\")\n def materialize_incremental(body=Depends(get_body)):\n try:\n request = MaterializeIncrementalRequest(**json.loads(body))\n store.materialize_incremental(\n utils.make_tzaware(parser.parse(request.end_ts)), request.feature_views\n )\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n return app\n\n\nclass FeastServeApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, store: \"feast.FeatureStore\", **options):\n self._app = get_app(store=store)\n self._options = options\n super().__init__()\n\n def load_config(self):\n for key, value in self._options.items():\n if key.lower() in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n\n self.cfg.set(\"worker_class\", \"uvicorn.workers.UvicornWorker\")\n\n def load(self):\n return self._app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n no_access_log: bool,\n workers: int,\n keep_alive_timeout: int,\n):\n FeastServeApplication(\n store=store,\n bind=f\"{host}:{port}\",\n accesslog=None if no_access_log else \"-\",\n workers=workers,\n keepalive=keep_alive_timeout,\n ).run()\n", "path": "sdk/python/feast/feature_server.py"}]}
| 2,116 | 609 |
gh_patches_debug_18488
|
rasdani/github-patches
|
git_diff
|
facebookresearch__hydra-1054
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] [Hydra-Submitit-Launcher] Add support for passing `additional_parameters` as supported by Submititi
# 🚀 Feature Request
When calling [Submitit](https://github.com/facebookincubator/submitit) directly (not the plugin), one can pass an optional dictionary argument called [`additional_parameters`](https://github.com/facebookincubator/submitit/blob/e6fc6b2532a2ea480f08747816f14c53f526ef04/submitit/slurm/slurm.py#L432) to add parameters which are not currently available in submitit.
This argument can not be passed when using the submitit-plugin. Example error:
```
Key 'additional_parameters' not in 'SlurmQueueConf'
full_key: hydra.launcher.additional_parameters
reference_type=Optional[SlurmQueueConf]
object_type=SlurmQueueConf
```
We should add support for passing this `additional_parameters` dict via the plugin aswell.
## Motivation
Same as above.
## Pitch
Add support for the dict in [SlurmQueueConf](https://github.com/facebookresearch/hydra/blob/master/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py#L31) and pass to submitit.AutoExecutor [here](https://github.com/facebookresearch/hydra/blob/dfbebc34020ad3d4bf1ab061565408740e62eb22/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/submitit_launcher.py#L105)
**Are you willing to open a pull request?** (See [CONTRIBUTING](../../CONTRIBUTING.md)): Yes
## Additional context
Since this change is quite small, is there any reason it wasnt done in the first iteration of the plugin?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from dataclasses import dataclass
3 from typing import Optional
4
5 from hydra.core.config_store import ConfigStore
6
7
8 @dataclass
9 class BaseQueueConf:
10 """Configuration shared by all executors"""
11
12 submitit_folder: str = "${hydra.sweep.dir}/.submitit/%j"
13
14 # maximum time for the job in minutes
15 timeout_min: int = 60
16 # number of cpus to use for each task
17 cpus_per_task: int = 1
18 # number of gpus to use on each node
19 gpus_per_node: int = 0
20 # number of tasks to spawn on each node
21 tasks_per_node: int = 1
22 # memory to reserve for the job on each node (in GB)
23 mem_gb: int = 4
24 # number of nodes to use for the job
25 nodes: int = 1
26 # name of the job
27 name: str = "${hydra.job.name}"
28
29
30 @dataclass
31 class SlurmQueueConf(BaseQueueConf):
32 """Slurm configuration overrides and specific parameters"""
33
34 _target_: str = (
35 "hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher"
36 )
37
38 # Params are used to configure sbatch, for more info check:
39 # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py
40
41 # Following parameters are slurm specific
42 # More information: https://slurm.schedmd.com/sbatch.html
43 #
44 # slurm partition to use on the cluster
45 partition: Optional[str] = None
46 comment: Optional[str] = None
47 constraint: Optional[str] = None
48 exclude: Optional[str] = None
49
50 # Following parameters are submitit specifics
51 #
52 # USR1 signal delay before timeout
53 signal_delay_s: int = 120
54 # Maximum number of retries on job timeout.
55 # Change this only after you confirmed your code can handle re-submission
56 # by properly resuming from the latest stored checkpoint.
57 # check the following for more info on slurm_max_num_timeout
58 # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md
59 max_num_timeout: int = 0
60
61
62 @dataclass
63 class LocalQueueConf(BaseQueueConf):
64 _target_: str = (
65 "hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher"
66 )
67
68
69 # finally, register two different choices:
70 ConfigStore.instance().store(
71 group="hydra/launcher",
72 name="submitit_local",
73 node=LocalQueueConf(),
74 provider="submitit_launcher",
75 )
76
77
78 ConfigStore.instance().store(
79 group="hydra/launcher",
80 name="submitit_slurm",
81 node=SlurmQueueConf(),
82 provider="submitit_launcher",
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
@@ -1,6 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from dataclasses import dataclass
-from typing import Optional
+from dataclasses import dataclass, field
+from typing import Any, Dict, Optional
from hydra.core.config_store import ConfigStore
@@ -57,6 +57,9 @@
# check the following for more info on slurm_max_num_timeout
# https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md
max_num_timeout: int = 0
+ # Useful to add parameters which are not currently available in the plugin.
+ # Eg: {"mail-user": "blublu@fb.com", "mail-type": "BEGIN"}
+ additional_parameters: Dict[str, Any] = field(default_factory=dict)
@dataclass
|
{"golden_diff": "diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n@@ -1,6 +1,6 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n-from dataclasses import dataclass\n-from typing import Optional\n+from dataclasses import dataclass, field\n+from typing import Any, Dict, Optional\n \n from hydra.core.config_store import ConfigStore\n \n@@ -57,6 +57,9 @@\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n+ # Useful to add parameters which are not currently available in the plugin.\n+ # Eg: {\"mail-user\": \"blublu@fb.com\", \"mail-type\": \"BEGIN\"}\n+ additional_parameters: Dict[str, Any] = field(default_factory=dict)\n \n \n @dataclass\n", "issue": "[Feature Request] [Hydra-Submitit-Launcher] Add support for passing `additional_parameters` as supported by Submititi\n# \ud83d\ude80 Feature Request\r\n\r\nWhen calling [Submitit](https://github.com/facebookincubator/submitit) directly (not the plugin), one can pass an optional dictionary argument called [`additional_parameters`](https://github.com/facebookincubator/submitit/blob/e6fc6b2532a2ea480f08747816f14c53f526ef04/submitit/slurm/slurm.py#L432) to add parameters which are not currently available in submitit.\r\n\r\nThis argument can not be passed when using the submitit-plugin. Example error:\r\n\r\n```\r\nKey 'additional_parameters' not in 'SlurmQueueConf'\r\n full_key: hydra.launcher.additional_parameters\r\n reference_type=Optional[SlurmQueueConf]\r\n object_type=SlurmQueueConf\r\n```\r\n\r\nWe should add support for passing this `additional_parameters` dict via the plugin aswell.\r\n\r\n## Motivation\r\n\r\nSame as above.\r\n\r\n## Pitch\r\n\r\nAdd support for the dict in [SlurmQueueConf](https://github.com/facebookresearch/hydra/blob/master/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py#L31) and pass to submitit.AutoExecutor [here](https://github.com/facebookresearch/hydra/blob/dfbebc34020ad3d4bf1ab061565408740e62eb22/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/submitit_launcher.py#L105)\r\n\r\n**Are you willing to open a pull request?** (See [CONTRIBUTING](../../CONTRIBUTING.md)): Yes\r\n\r\n## Additional context\r\n\r\nSince this change is quite small, is there any reason it wasnt done in the first iteration of the plugin?\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass BaseQueueConf:\n \"\"\"Configuration shared by all executors\"\"\"\n\n submitit_folder: str = \"${hydra.sweep.dir}/.submitit/%j\"\n\n # maximum time for the job in minutes\n timeout_min: int = 60\n # number of cpus to use for each task\n cpus_per_task: int = 1\n # number of gpus to use on each node\n gpus_per_node: int = 0\n # number of tasks to spawn on each node\n tasks_per_node: int = 1\n # memory to reserve for the job on each node (in GB)\n mem_gb: int = 4\n # number of nodes to use for the job\n nodes: int = 1\n # name of the job\n name: str = \"${hydra.job.name}\"\n\n\n@dataclass\nclass SlurmQueueConf(BaseQueueConf):\n \"\"\"Slurm configuration overrides and specific parameters\"\"\"\n\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher\"\n )\n\n # Params are used to configure sbatch, for more info check:\n # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py\n\n # Following parameters are slurm specific\n # More information: https://slurm.schedmd.com/sbatch.html\n #\n # slurm partition to use on the cluster\n partition: Optional[str] = None\n comment: Optional[str] = None\n constraint: Optional[str] = None\n exclude: Optional[str] = None\n\n # Following parameters are submitit specifics\n #\n # USR1 signal delay before timeout\n signal_delay_s: int = 120\n # Maximum number of retries on job timeout.\n # Change this only after you confirmed your code can handle re-submission\n # by properly resuming from the latest stored checkpoint.\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n\n\n@dataclass\nclass LocalQueueConf(BaseQueueConf):\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher\"\n )\n\n\n# finally, register two different choices:\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_local\",\n node=LocalQueueConf(),\n provider=\"submitit_launcher\",\n)\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_slurm\",\n node=SlurmQueueConf(),\n provider=\"submitit_launcher\",\n)\n", "path": "plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, Optional\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass BaseQueueConf:\n \"\"\"Configuration shared by all executors\"\"\"\n\n submitit_folder: str = \"${hydra.sweep.dir}/.submitit/%j\"\n\n # maximum time for the job in minutes\n timeout_min: int = 60\n # number of cpus to use for each task\n cpus_per_task: int = 1\n # number of gpus to use on each node\n gpus_per_node: int = 0\n # number of tasks to spawn on each node\n tasks_per_node: int = 1\n # memory to reserve for the job on each node (in GB)\n mem_gb: int = 4\n # number of nodes to use for the job\n nodes: int = 1\n # name of the job\n name: str = \"${hydra.job.name}\"\n\n\n@dataclass\nclass SlurmQueueConf(BaseQueueConf):\n \"\"\"Slurm configuration overrides and specific parameters\"\"\"\n\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher\"\n )\n\n # Params are used to configure sbatch, for more info check:\n # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py\n\n # Following parameters are slurm specific\n # More information: https://slurm.schedmd.com/sbatch.html\n #\n # slurm partition to use on the cluster\n partition: Optional[str] = None\n comment: Optional[str] = None\n constraint: Optional[str] = None\n exclude: Optional[str] = None\n\n # Following parameters are submitit specifics\n #\n # USR1 signal delay before timeout\n signal_delay_s: int = 120\n # Maximum number of retries on job timeout.\n # Change this only after you confirmed your code can handle re-submission\n # by properly resuming from the latest stored checkpoint.\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n # Useful to add parameters which are not currently available in the plugin.\n # Eg: {\"mail-user\": \"blublu@fb.com\", \"mail-type\": \"BEGIN\"}\n additional_parameters: Dict[str, Any] = field(default_factory=dict)\n\n\n@dataclass\nclass LocalQueueConf(BaseQueueConf):\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher\"\n )\n\n\n# finally, register two different choices:\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_local\",\n node=LocalQueueConf(),\n provider=\"submitit_launcher\",\n)\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_slurm\",\n node=SlurmQueueConf(),\n provider=\"submitit_launcher\",\n)\n", "path": "plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py"}]}
| 1,497 | 276 |
gh_patches_debug_15154
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-5926
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Two models one code name
### Willingness to contribute
Yes. I can contribute a fix for this bug independently.
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: custom code
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: 20.04
- **MLflow installed from (source or binary)**: source
- **MLflow version (run ``mlflow --version``)**: 1.23.1
- **Python version**: 3.9
- **yarn version, if running the dev UI**: N/A
### Describe the problem
When two mlflow models have the same name for the code packaged with them, for example: a preprocessing code name `prep_features` was packaged with two different mlflow models. If the first mlflow model is loaded, then the second one is also loaded, python will only load the `prep_features` module once. This leads to problems if `prep_features` for the second model is different from the first model.
### Tracking information
_No response_
### Code to reproduce issue
prep_features. py change 1 to 2 when creating custom_model2 with the 2m1cn.py script
```python
def preprocess_features():
return 1 # 2 for custom_model2
```
2m1cn.py use this file to create custom_model1 and custom_model2
```python
import mlflow.pyfunc
class CM(mlflow.pyfunc.PythonModel):
def __init__(self):
pass
def predict(self, context, model_input):
from prep_features import preprocess_features
return preprocess_features()
# Construct and save the model
model_path = "custom_model1" # "custom_model2"
code_path = ["prep_features.py"]
custom_model = CM()
mlflow.pyfunc.save_model(path=model_path, python_model=custom_model, code_path=code_path)
```
test.py now use this file to test your created models
```python
# Evaluate the models
import mlflow.pyfunc
import pandas as pd
model_input = pd.DataFrame([range(10)])
loaded_model1 = mlflow.pyfunc.load_model("custom_model1")
print(f"Model1: {loaded_model1.predict(model_input)}")
loaded_model2 = mlflow.pyfunc.load_model("custom_model2")
print(f"Model2: {loaded_model2.predict(model_input)}")
```
The output should be 1 for custom_model1 and 2 for custom_model 2. But because of the bug the output is 1 for both.
### Other info / logs
_No response_
### What component(s) does this bug affect?
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [X] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs
- [ ] `area/server-infra`: MLflow Tracking server backend
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
### What interface(s) does this bug affect?
- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
### What language(s) does this bug affect?
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
### What integration(s) does this bug affect?
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/utils/model_utils.py`
Content:
```
1 import os
2 import sys
3
4 from mlflow.exceptions import MlflowException
5 from mlflow.models import Model
6 from mlflow.models.model import MLMODEL_FILE_NAME
7 from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST, RESOURCE_ALREADY_EXISTS
8 from mlflow.tracking.artifact_utils import _download_artifact_from_uri
9 from mlflow.utils.uri import append_to_uri_path
10 from mlflow.utils.file_utils import _copy_file_or_tree
11
12 FLAVOR_CONFIG_CODE = "code"
13
14
15 def _get_flavor_configuration(model_path, flavor_name):
16 """
17 Obtains the configuration for the specified flavor from the specified
18 MLflow model path. If the model does not contain the specified flavor,
19 an exception will be thrown.
20
21 :param model_path: The path to the root directory of the MLflow model for which to load
22 the specified flavor configuration.
23 :param flavor_name: The name of the flavor configuration to load.
24 :return: The flavor configuration as a dictionary.
25 """
26 model_configuration_path = os.path.join(model_path, MLMODEL_FILE_NAME)
27 if not os.path.exists(model_configuration_path):
28 raise MlflowException(
29 'Could not find an "{model_file}" configuration file at "{model_path}"'.format(
30 model_file=MLMODEL_FILE_NAME, model_path=model_path
31 ),
32 RESOURCE_DOES_NOT_EXIST,
33 )
34
35 model_conf = Model.load(model_configuration_path)
36 if flavor_name not in model_conf.flavors:
37 raise MlflowException(
38 'Model does not have the "{flavor_name}" flavor'.format(flavor_name=flavor_name),
39 RESOURCE_DOES_NOT_EXIST,
40 )
41 conf = model_conf.flavors[flavor_name]
42 return conf
43
44
45 def _get_flavor_configuration_from_uri(model_uri, flavor_name):
46 """
47 Obtains the configuration for the specified flavor from the specified
48 MLflow model uri. If the model does not contain the specified flavor,
49 an exception will be thrown.
50
51 :param model_uri: The path to the root directory of the MLflow model for which to load
52 the specified flavor configuration.
53 :param flavor_name: The name of the flavor configuration to load.
54 :return: The flavor configuration as a dictionary.
55 """
56 try:
57 ml_model_file = _download_artifact_from_uri(
58 artifact_uri=append_to_uri_path(model_uri, MLMODEL_FILE_NAME)
59 )
60 except Exception as ex:
61 raise MlflowException(
62 'Failed to download an "{model_file}" model file from "{model_uri}": {ex}'.format(
63 model_file=MLMODEL_FILE_NAME, model_uri=model_uri, ex=ex
64 ),
65 RESOURCE_DOES_NOT_EXIST,
66 )
67 model_conf = Model.load(ml_model_file)
68 if flavor_name not in model_conf.flavors:
69 raise MlflowException(
70 'Model does not have the "{flavor_name}" flavor'.format(flavor_name=flavor_name),
71 RESOURCE_DOES_NOT_EXIST,
72 )
73 return model_conf.flavors[flavor_name]
74
75
76 def _get_code_dirs(src_code_path, dst_code_path=None):
77 """
78 Obtains the names of the subdirectories contained under the specified source code
79 path and joins them with the specified destination code path.
80 :param src_code_path: The path of the source code directory for which to list subdirectories.
81 :param dst_code_path: The destination directory path to which subdirectory names should be
82 joined.
83 """
84 if not dst_code_path:
85 dst_code_path = src_code_path
86 return [
87 (os.path.join(dst_code_path, x))
88 for x in os.listdir(src_code_path)
89 if os.path.isdir(os.path.join(src_code_path, x)) and not x == "__pycache__"
90 ]
91
92
93 def _validate_code_paths(code_paths):
94 if code_paths is not None:
95 if not isinstance(code_paths, list):
96 raise TypeError("Argument code_paths should be a list, not {}".format(type(code_paths)))
97
98
99 def _validate_and_copy_code_paths(code_paths, path, default_subpath="code"):
100 """
101 Validates that a code path is a valid list and copies the code paths to a directory. This
102 can later be used to log custom code as an artifact.
103
104 :param code_paths: A list of files or directories containing code that should be logged
105 as artifacts
106 :param path: The local model path.
107 :param default_subpath: The default directory name used to store code artifacts.
108 """
109 _validate_code_paths(code_paths)
110 if code_paths is not None:
111 code_dir_subpath = default_subpath
112 for code_path in code_paths:
113 _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)
114 else:
115 code_dir_subpath = None
116 return code_dir_subpath
117
118
119 def _add_code_to_system_path(code_path):
120 sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
121
122
123 def _validate_and_prepare_target_save_path(path):
124 if os.path.exists(path) and any(os.scandir(path)):
125 raise MlflowException(
126 message="Path '{}' already exists and is not empty".format(path),
127 error_code=RESOURCE_ALREADY_EXISTS,
128 )
129
130 os.makedirs(path, exist_ok=True)
131
132
133 def _add_code_from_conf_to_system_path(local_path, conf, code_key=FLAVOR_CONFIG_CODE):
134 """
135 Checks if any code_paths were logged with the model in the flavor conf and prepends
136 the directory to the system path.
137
138 :param local_path: The local path containing model artifacts.
139 :param conf: The flavor-specific conf that should contain the FLAVOR_CONFIG_CODE
140 key, which specifies the directory containing custom code logged as artifacts.
141 :param code_key: The key used by the flavor to indicate custom code artifacts.
142 By default this is FLAVOR_CONFIG_CODE.
143 """
144 if code_key in conf and conf[code_key]:
145 code_path = os.path.join(local_path, conf[code_key])
146 _add_code_to_system_path(code_path)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlflow/utils/model_utils.py b/mlflow/utils/model_utils.py
--- a/mlflow/utils/model_utils.py
+++ b/mlflow/utils/model_utils.py
@@ -1,5 +1,6 @@
import os
import sys
+from pathlib import Path
from mlflow.exceptions import MlflowException
from mlflow.models import Model
@@ -118,6 +119,13 @@
def _add_code_to_system_path(code_path):
sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
+ # Delete cached modules so they will get reloaded anew from the correct code path
+ # Otherwise python will use the cached modules
+ modules = [
+ p.stem for p in Path(code_path).rglob("*.py") if p.is_file() and p.name != "__init__.py"
+ ]
+ for module in modules:
+ sys.modules.pop(module, None)
def _validate_and_prepare_target_save_path(path):
|
{"golden_diff": "diff --git a/mlflow/utils/model_utils.py b/mlflow/utils/model_utils.py\n--- a/mlflow/utils/model_utils.py\n+++ b/mlflow/utils/model_utils.py\n@@ -1,5 +1,6 @@\n import os\n import sys\n+from pathlib import Path\n \n from mlflow.exceptions import MlflowException\n from mlflow.models import Model\n@@ -118,6 +119,13 @@\n \n def _add_code_to_system_path(code_path):\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n+ # Delete cached modules so they will get reloaded anew from the correct code path\n+ # Otherwise python will use the cached modules\n+ modules = [\n+ p.stem for p in Path(code_path).rglob(\"*.py\") if p.is_file() and p.name != \"__init__.py\"\n+ ]\n+ for module in modules:\n+ sys.modules.pop(module, None)\n \n \n def _validate_and_prepare_target_save_path(path):\n", "issue": "Two models one code name \n### Willingness to contribute\r\n\r\nYes. I can contribute a fix for this bug independently.\r\n\r\n### System information\r\n\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: custom code\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: 20.04\r\n- **MLflow installed from (source or binary)**: source\r\n- **MLflow version (run ``mlflow --version``)**: 1.23.1\r\n- **Python version**: 3.9\r\n- **yarn version, if running the dev UI**: N/A\r\n\r\n\r\n### Describe the problem\r\n\r\nWhen two mlflow models have the same name for the code packaged with them, for example: a preprocessing code name `prep_features` was packaged with two different mlflow models. If the first mlflow model is loaded, then the second one is also loaded, python will only load the `prep_features` module once. This leads to problems if `prep_features` for the second model is different from the first model.\r\n\r\n\r\n\r\n### Tracking information\r\n\r\n_No response_\r\n\r\n### Code to reproduce issue\r\n\r\nprep_features. py change 1 to 2 when creating custom_model2 with the 2m1cn.py script\r\n```python\r\ndef preprocess_features():\r\n return 1 # 2 for custom_model2\r\n```\r\n2m1cn.py use this file to create custom_model1 and custom_model2 \r\n```python\r\nimport mlflow.pyfunc\r\n\r\nclass CM(mlflow.pyfunc.PythonModel):\r\n def __init__(self):\r\n pass\r\n def predict(self, context, model_input):\r\n from prep_features import preprocess_features\r\n return preprocess_features()\r\n\r\n# Construct and save the model\r\nmodel_path = \"custom_model1\" # \"custom_model2\"\r\ncode_path = [\"prep_features.py\"]\r\ncustom_model = CM()\r\nmlflow.pyfunc.save_model(path=model_path, python_model=custom_model, code_path=code_path)\r\n```\r\n\r\ntest.py now use this file to test your created models\r\n```python\r\n# Evaluate the models\r\nimport mlflow.pyfunc\r\nimport pandas as pd\r\nmodel_input = pd.DataFrame([range(10)])\r\nloaded_model1 = mlflow.pyfunc.load_model(\"custom_model1\")\r\nprint(f\"Model1: {loaded_model1.predict(model_input)}\")\r\nloaded_model2 = mlflow.pyfunc.load_model(\"custom_model2\")\r\nprint(f\"Model2: {loaded_model2.predict(model_input)}\")\r\n```\r\n\r\nThe output should be 1 for custom_model1 and 2 for custom_model 2. But because of the bug the output is 1 for both.\r\n\r\n### Other info / logs\r\n\r\n_No response_\r\n\r\n### What component(s) does this bug affect?\r\n\r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [X] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\r\n- [ ] `area/server-infra`: MLflow Tracking server backend\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\n### What interface(s) does this bug affect?\r\n\r\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\n### What language(s) does this bug affect?\r\n\r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\n### What integration(s) does this bug affect?\r\n\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "import os\nimport sys\n\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model\nfrom mlflow.models.model import MLMODEL_FILE_NAME\nfrom mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST, RESOURCE_ALREADY_EXISTS\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.uri import append_to_uri_path\nfrom mlflow.utils.file_utils import _copy_file_or_tree\n\nFLAVOR_CONFIG_CODE = \"code\"\n\n\ndef _get_flavor_configuration(model_path, flavor_name):\n \"\"\"\n Obtains the configuration for the specified flavor from the specified\n MLflow model path. If the model does not contain the specified flavor,\n an exception will be thrown.\n\n :param model_path: The path to the root directory of the MLflow model for which to load\n the specified flavor configuration.\n :param flavor_name: The name of the flavor configuration to load.\n :return: The flavor configuration as a dictionary.\n \"\"\"\n model_configuration_path = os.path.join(model_path, MLMODEL_FILE_NAME)\n if not os.path.exists(model_configuration_path):\n raise MlflowException(\n 'Could not find an \"{model_file}\" configuration file at \"{model_path}\"'.format(\n model_file=MLMODEL_FILE_NAME, model_path=model_path\n ),\n RESOURCE_DOES_NOT_EXIST,\n )\n\n model_conf = Model.load(model_configuration_path)\n if flavor_name not in model_conf.flavors:\n raise MlflowException(\n 'Model does not have the \"{flavor_name}\" flavor'.format(flavor_name=flavor_name),\n RESOURCE_DOES_NOT_EXIST,\n )\n conf = model_conf.flavors[flavor_name]\n return conf\n\n\ndef _get_flavor_configuration_from_uri(model_uri, flavor_name):\n \"\"\"\n Obtains the configuration for the specified flavor from the specified\n MLflow model uri. If the model does not contain the specified flavor,\n an exception will be thrown.\n\n :param model_uri: The path to the root directory of the MLflow model for which to load\n the specified flavor configuration.\n :param flavor_name: The name of the flavor configuration to load.\n :return: The flavor configuration as a dictionary.\n \"\"\"\n try:\n ml_model_file = _download_artifact_from_uri(\n artifact_uri=append_to_uri_path(model_uri, MLMODEL_FILE_NAME)\n )\n except Exception as ex:\n raise MlflowException(\n 'Failed to download an \"{model_file}\" model file from \"{model_uri}\": {ex}'.format(\n model_file=MLMODEL_FILE_NAME, model_uri=model_uri, ex=ex\n ),\n RESOURCE_DOES_NOT_EXIST,\n )\n model_conf = Model.load(ml_model_file)\n if flavor_name not in model_conf.flavors:\n raise MlflowException(\n 'Model does not have the \"{flavor_name}\" flavor'.format(flavor_name=flavor_name),\n RESOURCE_DOES_NOT_EXIST,\n )\n return model_conf.flavors[flavor_name]\n\n\ndef _get_code_dirs(src_code_path, dst_code_path=None):\n \"\"\"\n Obtains the names of the subdirectories contained under the specified source code\n path and joins them with the specified destination code path.\n :param src_code_path: The path of the source code directory for which to list subdirectories.\n :param dst_code_path: The destination directory path to which subdirectory names should be\n joined.\n \"\"\"\n if not dst_code_path:\n dst_code_path = src_code_path\n return [\n (os.path.join(dst_code_path, x))\n for x in os.listdir(src_code_path)\n if os.path.isdir(os.path.join(src_code_path, x)) and not x == \"__pycache__\"\n ]\n\n\ndef _validate_code_paths(code_paths):\n if code_paths is not None:\n if not isinstance(code_paths, list):\n raise TypeError(\"Argument code_paths should be a list, not {}\".format(type(code_paths)))\n\n\ndef _validate_and_copy_code_paths(code_paths, path, default_subpath=\"code\"):\n \"\"\"\n Validates that a code path is a valid list and copies the code paths to a directory. This\n can later be used to log custom code as an artifact.\n\n :param code_paths: A list of files or directories containing code that should be logged\n as artifacts\n :param path: The local model path.\n :param default_subpath: The default directory name used to store code artifacts.\n \"\"\"\n _validate_code_paths(code_paths)\n if code_paths is not None:\n code_dir_subpath = default_subpath\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)\n else:\n code_dir_subpath = None\n return code_dir_subpath\n\n\ndef _add_code_to_system_path(code_path):\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n\n\ndef _validate_and_prepare_target_save_path(path):\n if os.path.exists(path) and any(os.scandir(path)):\n raise MlflowException(\n message=\"Path '{}' already exists and is not empty\".format(path),\n error_code=RESOURCE_ALREADY_EXISTS,\n )\n\n os.makedirs(path, exist_ok=True)\n\n\ndef _add_code_from_conf_to_system_path(local_path, conf, code_key=FLAVOR_CONFIG_CODE):\n \"\"\"\n Checks if any code_paths were logged with the model in the flavor conf and prepends\n the directory to the system path.\n\n :param local_path: The local path containing model artifacts.\n :param conf: The flavor-specific conf that should contain the FLAVOR_CONFIG_CODE\n key, which specifies the directory containing custom code logged as artifacts.\n :param code_key: The key used by the flavor to indicate custom code artifacts.\n By default this is FLAVOR_CONFIG_CODE.\n \"\"\"\n if code_key in conf and conf[code_key]:\n code_path = os.path.join(local_path, conf[code_key])\n _add_code_to_system_path(code_path)\n", "path": "mlflow/utils/model_utils.py"}], "after_files": [{"content": "import os\nimport sys\nfrom pathlib import Path\n\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model\nfrom mlflow.models.model import MLMODEL_FILE_NAME\nfrom mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST, RESOURCE_ALREADY_EXISTS\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.uri import append_to_uri_path\nfrom mlflow.utils.file_utils import _copy_file_or_tree\n\nFLAVOR_CONFIG_CODE = \"code\"\n\n\ndef _get_flavor_configuration(model_path, flavor_name):\n \"\"\"\n Obtains the configuration for the specified flavor from the specified\n MLflow model path. If the model does not contain the specified flavor,\n an exception will be thrown.\n\n :param model_path: The path to the root directory of the MLflow model for which to load\n the specified flavor configuration.\n :param flavor_name: The name of the flavor configuration to load.\n :return: The flavor configuration as a dictionary.\n \"\"\"\n model_configuration_path = os.path.join(model_path, MLMODEL_FILE_NAME)\n if not os.path.exists(model_configuration_path):\n raise MlflowException(\n 'Could not find an \"{model_file}\" configuration file at \"{model_path}\"'.format(\n model_file=MLMODEL_FILE_NAME, model_path=model_path\n ),\n RESOURCE_DOES_NOT_EXIST,\n )\n\n model_conf = Model.load(model_configuration_path)\n if flavor_name not in model_conf.flavors:\n raise MlflowException(\n 'Model does not have the \"{flavor_name}\" flavor'.format(flavor_name=flavor_name),\n RESOURCE_DOES_NOT_EXIST,\n )\n conf = model_conf.flavors[flavor_name]\n return conf\n\n\ndef _get_flavor_configuration_from_uri(model_uri, flavor_name):\n \"\"\"\n Obtains the configuration for the specified flavor from the specified\n MLflow model uri. If the model does not contain the specified flavor,\n an exception will be thrown.\n\n :param model_uri: The path to the root directory of the MLflow model for which to load\n the specified flavor configuration.\n :param flavor_name: The name of the flavor configuration to load.\n :return: The flavor configuration as a dictionary.\n \"\"\"\n try:\n ml_model_file = _download_artifact_from_uri(\n artifact_uri=append_to_uri_path(model_uri, MLMODEL_FILE_NAME)\n )\n except Exception as ex:\n raise MlflowException(\n 'Failed to download an \"{model_file}\" model file from \"{model_uri}\": {ex}'.format(\n model_file=MLMODEL_FILE_NAME, model_uri=model_uri, ex=ex\n ),\n RESOURCE_DOES_NOT_EXIST,\n )\n model_conf = Model.load(ml_model_file)\n if flavor_name not in model_conf.flavors:\n raise MlflowException(\n 'Model does not have the \"{flavor_name}\" flavor'.format(flavor_name=flavor_name),\n RESOURCE_DOES_NOT_EXIST,\n )\n return model_conf.flavors[flavor_name]\n\n\ndef _get_code_dirs(src_code_path, dst_code_path=None):\n \"\"\"\n Obtains the names of the subdirectories contained under the specified source code\n path and joins them with the specified destination code path.\n :param src_code_path: The path of the source code directory for which to list subdirectories.\n :param dst_code_path: The destination directory path to which subdirectory names should be\n joined.\n \"\"\"\n if not dst_code_path:\n dst_code_path = src_code_path\n return [\n (os.path.join(dst_code_path, x))\n for x in os.listdir(src_code_path)\n if os.path.isdir(os.path.join(src_code_path, x)) and not x == \"__pycache__\"\n ]\n\n\ndef _validate_code_paths(code_paths):\n if code_paths is not None:\n if not isinstance(code_paths, list):\n raise TypeError(\"Argument code_paths should be a list, not {}\".format(type(code_paths)))\n\n\ndef _validate_and_copy_code_paths(code_paths, path, default_subpath=\"code\"):\n \"\"\"\n Validates that a code path is a valid list and copies the code paths to a directory. This\n can later be used to log custom code as an artifact.\n\n :param code_paths: A list of files or directories containing code that should be logged\n as artifacts\n :param path: The local model path.\n :param default_subpath: The default directory name used to store code artifacts.\n \"\"\"\n _validate_code_paths(code_paths)\n if code_paths is not None:\n code_dir_subpath = default_subpath\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)\n else:\n code_dir_subpath = None\n return code_dir_subpath\n\n\ndef _add_code_to_system_path(code_path):\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n # Delete cached modules so they will get reloaded anew from the correct code path\n # Otherwise python will use the cached modules\n modules = [\n p.stem for p in Path(code_path).rglob(\"*.py\") if p.is_file() and p.name != \"__init__.py\"\n ]\n for module in modules:\n sys.modules.pop(module, None)\n\n\ndef _validate_and_prepare_target_save_path(path):\n if os.path.exists(path) and any(os.scandir(path)):\n raise MlflowException(\n message=\"Path '{}' already exists and is not empty\".format(path),\n error_code=RESOURCE_ALREADY_EXISTS,\n )\n\n os.makedirs(path, exist_ok=True)\n\n\ndef _add_code_from_conf_to_system_path(local_path, conf, code_key=FLAVOR_CONFIG_CODE):\n \"\"\"\n Checks if any code_paths were logged with the model in the flavor conf and prepends\n the directory to the system path.\n\n :param local_path: The local path containing model artifacts.\n :param conf: The flavor-specific conf that should contain the FLAVOR_CONFIG_CODE\n key, which specifies the directory containing custom code logged as artifacts.\n :param code_key: The key used by the flavor to indicate custom code artifacts.\n By default this is FLAVOR_CONFIG_CODE.\n \"\"\"\n if code_key in conf and conf[code_key]:\n code_path = os.path.join(local_path, conf[code_key])\n _add_code_to_system_path(code_path)\n", "path": "mlflow/utils/model_utils.py"}]}
| 2,833 | 217 |
gh_patches_debug_3636
|
rasdani/github-patches
|
git_diff
|
safe-global__safe-config-service-68
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add /check endpoint
This endpoint should be used to check if the server is alive and ready to receive requests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/config/urls.py`
Content:
```
1 from django.contrib import admin
2 from django.urls import include, path
3
4 urlpatterns = [
5 path("api/v1/", include("safe_apps.urls", namespace="v1")),
6 path("admin/", admin.site.urls),
7 ]
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/config/urls.py b/src/config/urls.py
--- a/src/config/urls.py
+++ b/src/config/urls.py
@@ -1,7 +1,9 @@
from django.contrib import admin
+from django.http import HttpResponse
from django.urls import include, path
urlpatterns = [
path("api/v1/", include("safe_apps.urls", namespace="v1")),
path("admin/", admin.site.urls),
+ path("check/", lambda request: HttpResponse("Ok"), name="check"),
]
|
{"golden_diff": "diff --git a/src/config/urls.py b/src/config/urls.py\n--- a/src/config/urls.py\n+++ b/src/config/urls.py\n@@ -1,7 +1,9 @@\n from django.contrib import admin\n+from django.http import HttpResponse\n from django.urls import include, path\n \n urlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"admin/\", admin.site.urls),\n+ path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\n ]\n", "issue": "Add /check endpoint\nThis endpoint should be used to check if the server is alive and ready to receive requests\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.urls import include, path\n\nurlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"admin/\", admin.site.urls),\n]\n", "path": "src/config/urls.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.urls import include, path\n\nurlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"admin/\", admin.site.urls),\n path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\n]\n", "path": "src/config/urls.py"}]}
| 336 | 111 |
gh_patches_debug_24160
|
rasdani/github-patches
|
git_diff
|
voxel51__fiftyone-712
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOCUMENTATION] Add notebook usage options to tutorials/recipes
Analogous to the notebooks in the TF docs, eg https://www.tensorflow.org/guide/data, we should add buttons to the top of the notebooks in our docs (currently, recipes and tutorials) that allow users to
- Run in Colab
- View source on GitHub
- Download notebook
Per the [raw source](https://raw.githubusercontent.com/tensorflow/docs/master/site/en/guide/data.ipynb) of the [github instance](https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb) of the TF example above, it looks like they're achieving this by adding an HTML cell with content as follows:
```html
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/data"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 """
2 Sphinx configuration file.
3
4 For a full list of available options, see:
5 https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 | Copyright 2017-2020, Voxel51, Inc.
8 | `voxel51.com <https://voxel51.com/>`_
9 |
10 """
11 import os
12 import re
13 import sys
14
15 sys.path.insert(0, os.path.abspath("."))
16
17 from custom_directives import (
18 CustomButtonDirective,
19 CustomCalloutItemDirective,
20 CustomCardItemDirective,
21 )
22 from redirects import generate_redirects
23
24 import fiftyone.constants as foc
25
26
27 with open("../../setup.py") as f:
28 setup_version = re.search(r'VERSION = "(.+?)"', f.read()).group(1)
29
30 if setup_version != foc.VERSION:
31 raise RuntimeError(
32 "FiftyOne version in setup.py (%r) does not match installed version "
33 "(%r). If this is a dev install, reinstall with `pip install -e .` "
34 "and try again." % (setup_version, foc.VERSION)
35 )
36
37
38 # -- Path setup --------------------------------------------------------------
39
40 # If extensions (or modules to document with autodoc) are in another directory,
41 # add these directories to sys.path here. If the directory is relative to the
42 # documentation root, use os.path.abspath to make it absolute, like shown here.
43 #
44
45
46 # -- Project information -----------------------------------------------------
47
48 project = "FiftyOne"
49 copyright = foc.COPYRIGHT
50 author = foc.AUTHOR
51 release = foc.VERSION
52
53
54 # -- General configuration ---------------------------------------------------
55
56 # Add any Sphinx extension module names here, as strings. They can be
57 # extensions coming with Sphinx (named "sphinx.ext.*") or your custom
58 # ones.
59 extensions = [
60 "sphinx.ext.autodoc",
61 "sphinx.ext.intersphinx",
62 "sphinx.ext.napoleon",
63 "nbsphinx",
64 "sphinx_tabs.tabs",
65 "sphinx_copybutton",
66 "autodocsumm",
67 ]
68
69 # Types of class members to generate documentation for.
70 autodoc_default_options = {
71 "members": True,
72 "inherited-members": True,
73 "member-order": "bysource",
74 "autosummary": True,
75 "autosummary-no-nesting": True,
76 }
77 autodoc_inherit_docstrings = True
78 autoclass_content = "class"
79
80 # Add any paths that contain templates here, relative to this directory.
81 templates_path = ["_templates"]
82
83 # The suffix(es) of source filenames.
84 # You can specify multiple suffix as a list of strings.
85 source_suffix = [".rst", ".md"]
86
87 # List of patterns, relative to source directory, that match files and
88 # directories to ignore when looking for source files.
89 # This pattern also affects html_static_path and html_extra_path.
90 exclude_patterns = ["_includes"]
91
92 # A string of reStructuredText that will be included at the beginning of every
93 # source file that is read
94 rst_prolog = """
95 .. include:: /_includes/substitutions.rst
96 """
97
98 # Disable nbshinx loading require.js - this breaks the pytorch theme's
99 # scrolling handling, and we don't appear to have any notebook content that
100 # requires it
101 nbsphinx_requirejs_path = ""
102
103 # Adds a link to download the notebook to the built HTML
104 nbsphinx_prolog = """
105
106 .. note::
107
108 Download notebook:
109 :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`
110
111 """
112
113 # Path to the redirects file, relative to `source/`
114 redirects_file = "redirects"
115
116 # -- Options for HTML output -------------------------------------------------
117
118 # The theme to use for HTML and HTML Help pages. See the documentation for
119 # a list of builtin themes.
120 #
121 html_theme = "pytorch_sphinx_theme"
122 html_theme_path = ["../theme"]
123 html_theme_options = {
124 "pytorch_project": "docs",
125 }
126
127 # Add any paths that contain custom static files (such as style sheets) here,
128 # relative to this directory. They are copied after the builtin static files,
129 # so a file named "default.css" will overwrite the builtin "default.css".
130 html_static_path = ["_static"]
131
132 # These paths are either relative to html_static_path
133 # or fully qualified paths (eg. https://...)
134 html_css_files = ["css/voxel51-website.css", "css/custom.css"]
135 html_js_files = ["js/voxel51-website.js", "js/custom.js"]
136
137 # Prevent RST source files from being included in output
138 html_copy_source = False
139
140 html_context = {
141 "address_main_line1": "410 N 4th Ave, 3rd Floor",
142 "address_main_line2": "Ann Arbor, MI 48104",
143 "phone_main": "+1 734-489-1134",
144 "email_info": "info@voxel51.com",
145 # Links - copied from website config
146 "link_blog": "https://blog.voxel51.com/",
147 "link_contactus": "mailto:solutions@voxel51.com?subject=[Voxel51]%20Contact%20us",
148 "link_docs_fiftyone": "https://voxel51.com/docs/fiftyone/",
149 "link_fiftyone": "https://voxel51.com/fiftyone/",
150 "link_fiftyone_tutorials": "https://voxel51.com/docs/fiftyone/tutorials/index.html",
151 "link_fiftyone_examples": "https://github.com/voxel51/fiftyone-examples",
152 "link_github": "https://github.com/",
153 "link_home": "https://voxel51.com/",
154 "link_linkedin": "https://www.linkedin.com/in/",
155 "link_ourstory": "https://voxel51.com/ourstory/",
156 "link_press": "https://voxel51.com/press/",
157 "link_privacypolicy": "https://voxel51.com/privacy/",
158 "link_schedulecall": "mailto:solutions@voxel51.com?subject=[Voxel51]%20Schedule%20a%20call",
159 "link_scheduledemo": "https://meetings.hubspot.com/michael908",
160 "link_termsofservice": "https://voxel51.com/terms/",
161 "link_twitter": "https://twitter.com/",
162 "link_usecase_advertising": "https://voxel51.com/usecases/advertising/",
163 "link_usecase_auto": "https://voxel51.com/usecases/automotive/",
164 "link_usecase_research": "https://voxel51.com/usecases/research/",
165 "link_usecases": "https://voxel51.com/usecases/",
166 "link_usecases_entry": "https://voxel51.com/usecases/automotive/",
167 "link_voxel51_facebook": "https://www.facebook.com/voxel51/",
168 "link_voxel51_github": "https://github.com/voxel51/",
169 "link_voxel51_linkedin": "https://www.linkedin.com/company/voxel51/",
170 "link_voxel51_slack": "https://join.slack.com/t/fiftyone-users/shared_invite/zt-gtpmm76o-9AjvzNPBOzevBySKzt02gg",
171 "link_voxel51_twitter": "https://twitter.com/voxel51",
172 "link_voxel51_medium": "https://medium.com/voxel51/",
173 # Banner - set to None or empty string to disable
174 # "banner_link": "https://us02web.zoom.us/meeting/register/tZ0kd-GppzsrHNeJEEYcCKsE_t_lI_7iB6Sy",
175 # "banner_message": "Register for <b>the biweekly FiftyOne webinar series</b> on Wednesdays at 1PM EST!",
176 "banner_link": None,
177 "banner_message": None,
178 }
179
180 # -- Custom app setup --------------------------------------------------------
181
182
183 def setup(app):
184 # Generate page redirects
185 app.add_config_value("redirects_file", "redirects", "env")
186 app.connect("builder-inited", generate_redirects)
187
188 # Custom directives
189 app.add_directive("custombutton", CustomButtonDirective)
190 app.add_directive("customcalloutitem", CustomCalloutItemDirective)
191 app.add_directive("customcarditem", CustomCardItemDirective)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -100,15 +100,35 @@
# requires it
nbsphinx_requirejs_path = ""
-# Adds a link to download the notebook to the built HTML
+# Adds helpful external links to the built HTML
+ref = os.environ.get("FO_DOCS_VERSION_REF", "develop")
nbsphinx_prolog = """
-.. note::
-
- Download notebook:
- :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`
-
-"""
+.. raw:: html
+
+ <table class="fo-notebook-links" align="left">
+ <td>
+ <a target="_blank" href="https://colab.research.google.com/github/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}">
+ <img src="../_static/images/logo/colab-logo-32px.png"/>Run in Google Colab
+ </a>
+ </td>
+ <td>
+ <a target="_blank" href="https://github.com/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}">
+ <img src="../_static/images/logo/github-logo-32px.png"/>View source on GitHub
+ </a>
+ </td>
+ <td>
+ <a target="_blank" href="https://gitcdn.link/repo/voxel51/fiftyone/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}" download>
+ <img src="../_static/images/logo/download-logo-32px.png"/>Download notebook
+ </a>
+ </td>
+ </table>
+
+""" % (
+ ref,
+ ref,
+ ref,
+)
# Path to the redirects file, relative to `source/`
redirects_file = "redirects"
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -100,15 +100,35 @@\n # requires it\n nbsphinx_requirejs_path = \"\"\n \n-# Adds a link to download the notebook to the built HTML\n+# Adds helpful external links to the built HTML\n+ref = os.environ.get(\"FO_DOCS_VERSION_REF\", \"develop\")\n nbsphinx_prolog = \"\"\"\n \n-.. note::\n-\n- Download notebook:\n- :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`\n-\n-\"\"\"\n+.. raw:: html\n+\n+ <table class=\"fo-notebook-links\" align=\"left\">\n+ <td>\n+ <a target=\"_blank\" href=\"https://colab.research.google.com/github/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\">\n+ <img src=\"../_static/images/logo/colab-logo-32px.png\"/>Run in Google Colab\n+ </a>\n+ </td>\n+ <td>\n+ <a target=\"_blank\" href=\"https://github.com/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\">\n+ <img src=\"../_static/images/logo/github-logo-32px.png\"/>View source on GitHub\n+ </a>\n+ </td>\n+ <td>\n+ <a target=\"_blank\" href=\"https://gitcdn.link/repo/voxel51/fiftyone/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\" download>\n+ <img src=\"../_static/images/logo/download-logo-32px.png\"/>Download notebook\n+ </a>\n+ </td>\n+ </table>\n+\n+\"\"\" % (\n+ ref,\n+ ref,\n+ ref,\n+)\n \n # Path to the redirects file, relative to `source/`\n redirects_file = \"redirects\"\n", "issue": "[DOCUMENTATION] Add notebook usage options to tutorials/recipes\nAnalogous to the notebooks in the TF docs, eg https://www.tensorflow.org/guide/data, we should add buttons to the top of the notebooks in our docs (currently, recipes and tutorials) that allow users to\r\n- Run in Colab\r\n- View source on GitHub\r\n- Download notebook\r\n\r\nPer the [raw source](https://raw.githubusercontent.com/tensorflow/docs/master/site/en/guide/data.ipynb) of the [github instance](https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb) of the TF example above, it looks like they're achieving this by adding an HTML cell with content as follows:\r\n\r\n```html\r\n<table class=\"tfo-notebook-buttons\" align=\"left\">\r\n <td>\r\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/data\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\r\n </td>\r\n <td>\r\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/data.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\r\n </td>\r\n <td>\r\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\r\n </td>\r\n <td>\r\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/data.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\r\n </td>\r\n</table>\r\n```\n", "before_files": [{"content": "\"\"\"\nSphinx configuration file.\n\nFor a full list of available options, see:\nhttps://www.sphinx-doc.org/en/master/usage/configuration.html\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\nfrom custom_directives import (\n CustomButtonDirective,\n CustomCalloutItemDirective,\n CustomCardItemDirective,\n)\nfrom redirects import generate_redirects\n\nimport fiftyone.constants as foc\n\n\nwith open(\"../../setup.py\") as f:\n setup_version = re.search(r'VERSION = \"(.+?)\"', f.read()).group(1)\n\nif setup_version != foc.VERSION:\n raise RuntimeError(\n \"FiftyOne version in setup.py (%r) does not match installed version \"\n \"(%r). If this is a dev install, reinstall with `pip install -e .` \"\n \"and try again.\" % (setup_version, foc.VERSION)\n )\n\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"FiftyOne\"\ncopyright = foc.COPYRIGHT\nauthor = foc.AUTHOR\nrelease = foc.VERSION\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"nbsphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinx_copybutton\",\n \"autodocsumm\",\n]\n\n# Types of class members to generate documentation for.\nautodoc_default_options = {\n \"members\": True,\n \"inherited-members\": True,\n \"member-order\": \"bysource\",\n \"autosummary\": True,\n \"autosummary-no-nesting\": True,\n}\nautodoc_inherit_docstrings = True\nautoclass_content = \"class\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of strings.\nsource_suffix = [\".rst\", \".md\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_includes\"]\n\n# A string of reStructuredText that will be included at the beginning of every\n# source file that is read\nrst_prolog = \"\"\"\n.. include:: /_includes/substitutions.rst\n\"\"\"\n\n# Disable nbshinx loading require.js - this breaks the pytorch theme's\n# scrolling handling, and we don't appear to have any notebook content that\n# requires it\nnbsphinx_requirejs_path = \"\"\n\n# Adds a link to download the notebook to the built HTML\nnbsphinx_prolog = \"\"\"\n\n.. note::\n\n Download notebook:\n :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`\n\n\"\"\"\n\n# Path to the redirects file, relative to `source/`\nredirects_file = \"redirects\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [\"../theme\"]\nhtml_theme_options = {\n \"pytorch_project\": \"docs\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# These paths are either relative to html_static_path\n# or fully qualified paths (eg. https://...)\nhtml_css_files = [\"css/voxel51-website.css\", \"css/custom.css\"]\nhtml_js_files = [\"js/voxel51-website.js\", \"js/custom.js\"]\n\n# Prevent RST source files from being included in output\nhtml_copy_source = False\n\nhtml_context = {\n \"address_main_line1\": \"410 N 4th Ave, 3rd Floor\",\n \"address_main_line2\": \"Ann Arbor, MI 48104\",\n \"phone_main\": \"+1 734-489-1134\",\n \"email_info\": \"info@voxel51.com\",\n # Links - copied from website config\n \"link_blog\": \"https://blog.voxel51.com/\",\n \"link_contactus\": \"mailto:solutions@voxel51.com?subject=[Voxel51]%20Contact%20us\",\n \"link_docs_fiftyone\": \"https://voxel51.com/docs/fiftyone/\",\n \"link_fiftyone\": \"https://voxel51.com/fiftyone/\",\n \"link_fiftyone_tutorials\": \"https://voxel51.com/docs/fiftyone/tutorials/index.html\",\n \"link_fiftyone_examples\": \"https://github.com/voxel51/fiftyone-examples\",\n \"link_github\": \"https://github.com/\",\n \"link_home\": \"https://voxel51.com/\",\n \"link_linkedin\": \"https://www.linkedin.com/in/\",\n \"link_ourstory\": \"https://voxel51.com/ourstory/\",\n \"link_press\": \"https://voxel51.com/press/\",\n \"link_privacypolicy\": \"https://voxel51.com/privacy/\",\n \"link_schedulecall\": \"mailto:solutions@voxel51.com?subject=[Voxel51]%20Schedule%20a%20call\",\n \"link_scheduledemo\": \"https://meetings.hubspot.com/michael908\",\n \"link_termsofservice\": \"https://voxel51.com/terms/\",\n \"link_twitter\": \"https://twitter.com/\",\n \"link_usecase_advertising\": \"https://voxel51.com/usecases/advertising/\",\n \"link_usecase_auto\": \"https://voxel51.com/usecases/automotive/\",\n \"link_usecase_research\": \"https://voxel51.com/usecases/research/\",\n \"link_usecases\": \"https://voxel51.com/usecases/\",\n \"link_usecases_entry\": \"https://voxel51.com/usecases/automotive/\",\n \"link_voxel51_facebook\": \"https://www.facebook.com/voxel51/\",\n \"link_voxel51_github\": \"https://github.com/voxel51/\",\n \"link_voxel51_linkedin\": \"https://www.linkedin.com/company/voxel51/\",\n \"link_voxel51_slack\": \"https://join.slack.com/t/fiftyone-users/shared_invite/zt-gtpmm76o-9AjvzNPBOzevBySKzt02gg\",\n \"link_voxel51_twitter\": \"https://twitter.com/voxel51\",\n \"link_voxel51_medium\": \"https://medium.com/voxel51/\",\n # Banner - set to None or empty string to disable\n # \"banner_link\": \"https://us02web.zoom.us/meeting/register/tZ0kd-GppzsrHNeJEEYcCKsE_t_lI_7iB6Sy\",\n # \"banner_message\": \"Register for <b>the biweekly FiftyOne webinar series</b> on Wednesdays at 1PM EST!\",\n \"banner_link\": None,\n \"banner_message\": None,\n}\n\n# -- Custom app setup --------------------------------------------------------\n\n\ndef setup(app):\n # Generate page redirects\n app.add_config_value(\"redirects_file\", \"redirects\", \"env\")\n app.connect(\"builder-inited\", generate_redirects)\n\n # Custom directives\n app.add_directive(\"custombutton\", CustomButtonDirective)\n app.add_directive(\"customcalloutitem\", CustomCalloutItemDirective)\n app.add_directive(\"customcarditem\", CustomCardItemDirective)\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "\"\"\"\nSphinx configuration file.\n\nFor a full list of available options, see:\nhttps://www.sphinx-doc.org/en/master/usage/configuration.html\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\nfrom custom_directives import (\n CustomButtonDirective,\n CustomCalloutItemDirective,\n CustomCardItemDirective,\n)\nfrom redirects import generate_redirects\n\nimport fiftyone.constants as foc\n\n\nwith open(\"../../setup.py\") as f:\n setup_version = re.search(r'VERSION = \"(.+?)\"', f.read()).group(1)\n\nif setup_version != foc.VERSION:\n raise RuntimeError(\n \"FiftyOne version in setup.py (%r) does not match installed version \"\n \"(%r). If this is a dev install, reinstall with `pip install -e .` \"\n \"and try again.\" % (setup_version, foc.VERSION)\n )\n\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"FiftyOne\"\ncopyright = foc.COPYRIGHT\nauthor = foc.AUTHOR\nrelease = foc.VERSION\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"nbsphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinx_copybutton\",\n \"autodocsumm\",\n]\n\n# Types of class members to generate documentation for.\nautodoc_default_options = {\n \"members\": True,\n \"inherited-members\": True,\n \"member-order\": \"bysource\",\n \"autosummary\": True,\n \"autosummary-no-nesting\": True,\n}\nautodoc_inherit_docstrings = True\nautoclass_content = \"class\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of strings.\nsource_suffix = [\".rst\", \".md\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_includes\"]\n\n# A string of reStructuredText that will be included at the beginning of every\n# source file that is read\nrst_prolog = \"\"\"\n.. include:: /_includes/substitutions.rst\n\"\"\"\n\n# Disable nbshinx loading require.js - this breaks the pytorch theme's\n# scrolling handling, and we don't appear to have any notebook content that\n# requires it\nnbsphinx_requirejs_path = \"\"\n\n# Adds helpful external links to the built HTML\nref = os.environ.get(\"FO_DOCS_VERSION_REF\", \"develop\")\nnbsphinx_prolog = \"\"\"\n\n.. raw:: html\n\n <table class=\"fo-notebook-links\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\">\n <img src=\"../_static/images/logo/colab-logo-32px.png\"/>Run in Google Colab\n </a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\">\n <img src=\"../_static/images/logo/github-logo-32px.png\"/>View source on GitHub\n </a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://gitcdn.link/repo/voxel51/fiftyone/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\" download>\n <img src=\"../_static/images/logo/download-logo-32px.png\"/>Download notebook\n </a>\n </td>\n </table>\n\n\"\"\" % (\n ref,\n ref,\n ref,\n)\n\n# Path to the redirects file, relative to `source/`\nredirects_file = \"redirects\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [\"../theme\"]\nhtml_theme_options = {\n \"pytorch_project\": \"docs\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# These paths are either relative to html_static_path\n# or fully qualified paths (eg. https://...)\nhtml_css_files = [\"css/voxel51-website.css\", \"css/custom.css\"]\nhtml_js_files = [\"js/voxel51-website.js\", \"js/custom.js\"]\n\n# Prevent RST source files from being included in output\nhtml_copy_source = False\n\nhtml_context = {\n \"address_main_line1\": \"410 N 4th Ave, 3rd Floor\",\n \"address_main_line2\": \"Ann Arbor, MI 48104\",\n \"phone_main\": \"+1 734-489-1134\",\n \"email_info\": \"info@voxel51.com\",\n # Links - copied from website config\n \"link_blog\": \"https://blog.voxel51.com/\",\n \"link_contactus\": \"mailto:solutions@voxel51.com?subject=[Voxel51]%20Contact%20us\",\n \"link_docs_fiftyone\": \"https://voxel51.com/docs/fiftyone/\",\n \"link_fiftyone\": \"https://voxel51.com/fiftyone/\",\n \"link_fiftyone_tutorials\": \"https://voxel51.com/docs/fiftyone/tutorials/index.html\",\n \"link_fiftyone_examples\": \"https://github.com/voxel51/fiftyone-examples\",\n \"link_github\": \"https://github.com/\",\n \"link_home\": \"https://voxel51.com/\",\n \"link_linkedin\": \"https://www.linkedin.com/in/\",\n \"link_ourstory\": \"https://voxel51.com/ourstory/\",\n \"link_press\": \"https://voxel51.com/press/\",\n \"link_privacypolicy\": \"https://voxel51.com/privacy/\",\n \"link_schedulecall\": \"mailto:solutions@voxel51.com?subject=[Voxel51]%20Schedule%20a%20call\",\n \"link_scheduledemo\": \"https://meetings.hubspot.com/michael908\",\n \"link_termsofservice\": \"https://voxel51.com/terms/\",\n \"link_twitter\": \"https://twitter.com/\",\n \"link_usecase_advertising\": \"https://voxel51.com/usecases/advertising/\",\n \"link_usecase_auto\": \"https://voxel51.com/usecases/automotive/\",\n \"link_usecase_research\": \"https://voxel51.com/usecases/research/\",\n \"link_usecases\": \"https://voxel51.com/usecases/\",\n \"link_usecases_entry\": \"https://voxel51.com/usecases/automotive/\",\n \"link_voxel51_facebook\": \"https://www.facebook.com/voxel51/\",\n \"link_voxel51_github\": \"https://github.com/voxel51/\",\n \"link_voxel51_linkedin\": \"https://www.linkedin.com/company/voxel51/\",\n \"link_voxel51_slack\": \"https://join.slack.com/t/fiftyone-users/shared_invite/zt-gtpmm76o-9AjvzNPBOzevBySKzt02gg\",\n \"link_voxel51_twitter\": \"https://twitter.com/voxel51\",\n \"link_voxel51_medium\": \"https://medium.com/voxel51/\",\n # Banner - set to None or empty string to disable\n # \"banner_link\": \"https://us02web.zoom.us/meeting/register/tZ0kd-GppzsrHNeJEEYcCKsE_t_lI_7iB6Sy\",\n # \"banner_message\": \"Register for <b>the biweekly FiftyOne webinar series</b> on Wednesdays at 1PM EST!\",\n \"banner_link\": None,\n \"banner_message\": None,\n}\n\n# -- Custom app setup --------------------------------------------------------\n\n\ndef setup(app):\n # Generate page redirects\n app.add_config_value(\"redirects_file\", \"redirects\", \"env\")\n app.connect(\"builder-inited\", generate_redirects)\n\n # Custom directives\n app.add_directive(\"custombutton\", CustomButtonDirective)\n app.add_directive(\"customcalloutitem\", CustomCalloutItemDirective)\n app.add_directive(\"customcarditem\", CustomCardItemDirective)\n", "path": "docs/source/conf.py"}]}
| 3,004 | 462 |
gh_patches_debug_43055
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-10279
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clean up previously generated CSV files
<!--
Instructions:
* Fill out the sections below, replace …'s with information about your issue
* Use the 'preview' function above this text box to verify formatting before submitting
-->
## Observed behavior
When a new CSV file is generated for session and summary logs, the previously generated files that are stored in the user's database are no longer accessible for download in the UI. A record of the new CSV file request is saved to the logger model `GenerateCSVLogRequest` which contains the facility id, log type (session or summary), the selected start date, the selected end date, and the date that the request was made. There should only ever be one record for each facility and log type combination. The information in this modal is used to scan the database folder `log_exports` and retrieve the previously generated file for download.
Note: For session and summary log files, the file naming convention has been updated to include a date-range selection, with the dates formatted as `YYYY-MM-DD`.
Previous naming convention:
- `{facility_name}_{last 4 digits of facility ID}_{log export type}.csv`
Current naming convention:
- `{facility_name}_{last 4 digits of facility ID}_{log export type}_from_{start date}_to_{end date}.csv`
## Expected behavior
We should remove any files that do not have a record in the database. One way to achieve this is by scanning the folder `log_export`, which contains all exported log files, using the predictable file naming convention, and cross-referencing them with the file records stored in the database. Any session or summary files that do not have a corresponding record in the database should be deleted.
With a 1:1 correlation with the `GenerateCSVLogRequest` record, the database should only contain one exported file for each session and summary log type. A new async task should be created for performing this cleanup, and can be queued whenever a new log export is generated. If the task has a predictable id, it can be ensured that there is only one cleanup task going at once.
More details on the task system can be found [here](https://kolibri-dev.readthedocs.io/en/develop/backend_architecture/tasks.html).
## User-facing consequences
Ensures the database is free from redundant files and optimizes the user's database storage.
## Acceptance Criteria
- [ ] All previously generated session and summary log file exports that lack a corresponding record in the database have been deleted.
- [ ] The previously generated CSV file should be automatically removed from the database upon generating a new one.
- [ ] New python code should be covered by unit tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/logger/tasks.py`
Content:
```
1 import os
2
3 from django.core.management import call_command
4 from rest_framework import serializers
5
6 from kolibri.core.auth.models import Facility
7 from kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES
8 from kolibri.core.tasks.decorators import register_task
9 from kolibri.core.tasks.permissions import IsAdminForJob
10 from kolibri.core.tasks.validation import JobValidator
11 from kolibri.utils import conf
12
13
14 def get_filepath(log_type, facility_id, start_date, end_date):
15 facility = Facility.objects.get(id=facility_id)
16 logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
17 if not os.path.isdir(logs_dir):
18 os.mkdir(logs_dir)
19 filepath = os.path.join(
20 logs_dir,
21 CSV_EXPORT_FILENAMES[log_type].format(
22 facility.name, facility.id[:4], start_date[:10], end_date[:10]
23 ),
24 )
25 return filepath
26
27
28 class ExportLogCSVValidator(JobValidator):
29 facility = serializers.PrimaryKeyRelatedField(
30 queryset=Facility.objects.all(), required=False
31 )
32 start_date = serializers.CharField()
33 end_date = serializers.CharField()
34 locale = serializers.CharField(required=False)
35
36 def validate(self, data):
37 facility = data.get("facility", None)
38 start_date = data.get("start_date", None)
39 end_date = data.get("end_date", None)
40 locale = data.get("locale", None)
41
42 if facility is None and "user" in self.context:
43 facility = self.context["user"].facility
44 elif facility is None:
45 raise serializers.ValidationError(
46 "Facility must be specified when no user is available."
47 )
48 if not start_date or not end_date:
49 raise serializers.ValidationError(
50 "Start {} and End {} date values are required.".format(
51 start_date, end_date
52 )
53 )
54 kwargs = {
55 "facility": facility.id,
56 "start_date": start_date,
57 "end_date": end_date,
58 "locale": locale,
59 }
60 return {
61 "facility_id": facility.id,
62 "kwargs": kwargs,
63 "args": [facility.id],
64 }
65
66
67 def _exportlogcsv(log_type, facility_id, start_date, end_date, locale):
68 filepath = get_filepath(log_type, facility_id, start_date, end_date)
69 call_command(
70 "exportlogs",
71 log_type=log_type,
72 output_file=filepath,
73 facility=facility_id,
74 overwrite=True,
75 start_date=start_date,
76 end_date=end_date,
77 locale=locale,
78 )
79
80
81 @register_task(
82 validator=ExportLogCSVValidator,
83 track_progress=True,
84 permission_classes=[IsAdminForJob],
85 )
86 def exportsessionlogcsv(facility_id, **kwargs):
87 """
88 Dumps in csv format the content session logs.
89
90 :param: facility.
91 """
92 _exportlogcsv(
93 "session",
94 facility_id,
95 kwargs.get("start_date"),
96 kwargs.get("end_date"),
97 kwargs.get("locale"),
98 )
99
100
101 @register_task(
102 validator=ExportLogCSVValidator,
103 track_progress=True,
104 permission_classes=[IsAdminForJob],
105 )
106 def exportsummarylogcsv(facility_id, **kwargs):
107 """
108 Dumps in csv format the content summary logs.
109
110 :param: facility.
111 """
112 _exportlogcsv(
113 "summary",
114 facility_id,
115 kwargs.get("start_date"),
116 kwargs.get("end_date"),
117 kwargs.get("locale"),
118 )
119
```
Path: `kolibri/core/logger/management/commands/exportlogs.py`
Content:
```
1 import logging
2 import ntpath
3 import os
4
5 from dateutil import parser
6 from django.conf import settings
7 from django.core.management.base import CommandError
8 from django.utils import translation
9
10 from kolibri.core.auth.constants.commands_errors import FILE_WRITE_ERROR
11 from kolibri.core.auth.constants.commands_errors import INVALID
12 from kolibri.core.auth.constants.commands_errors import MESSAGES
13 from kolibri.core.auth.constants.commands_errors import NO_FACILITY
14 from kolibri.core.auth.models import Facility
15 from kolibri.core.logger.csv_export import classes_info
16 from kolibri.core.logger.csv_export import csv_file_generator
17 from kolibri.core.logger.models import GenerateCSVLogRequest
18 from kolibri.core.tasks.management.commands.base import AsyncCommand
19 from kolibri.core.tasks.utils import get_current_job
20 from kolibri.utils.time_utils import local_now
21
22 logger = logging.getLogger(__name__)
23
24
25 class Command(AsyncCommand):
26 def add_arguments(self, parser):
27 parser.add_argument(
28 "-O",
29 "--output-file",
30 action="store",
31 dest="output_file",
32 default=None,
33 type=str,
34 help="The generated file will be saved with this name",
35 )
36 parser.add_argument(
37 "-l",
38 "--log-type",
39 action="store",
40 dest="log_type",
41 default="session",
42 choices=classes_info.keys(),
43 help='Log type to be exported. Valid values are "session" and "summary".',
44 )
45 parser.add_argument(
46 "-w",
47 "--overwrite",
48 action="store_true",
49 dest="overwrite",
50 default=False,
51 help="Allows overwritten of the exported file in case it exists",
52 )
53 parser.add_argument(
54 "--facility",
55 action="store",
56 type=str,
57 help="Facility id to import the users into",
58 )
59 parser.add_argument(
60 "--locale",
61 action="store",
62 type=str,
63 default=None,
64 help="Code of the language for the messages to be translated",
65 )
66 parser.add_argument(
67 "--start_date",
68 action="store",
69 dest="start_date",
70 type=str,
71 help="Start date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS",
72 )
73 parser.add_argument(
74 "--end_date",
75 action="store",
76 dest="end_date",
77 type=str,
78 help="End date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS",
79 )
80
81 def get_facility(self, options):
82 if options["facility"]:
83 default_facility = Facility.objects.get(pk=options["facility"])
84 else:
85 default_facility = Facility.get_default_facility()
86
87 return default_facility
88
89 def validate_date(self, date_str):
90 try:
91 return bool(parser.parse(date_str))
92 except ValueError:
93 return False
94
95 def handle_async(self, *args, **options):
96
97 # set language for the translation of the messages
98 locale = settings.LANGUAGE_CODE if not options["locale"] else options["locale"]
99 translation.activate(locale)
100 self.overall_error = ""
101 job = get_current_job()
102
103 start_date = options["start_date"]
104 end_date = options["end_date"]
105
106 facility = self.get_facility(options)
107 if not facility:
108 self.overall_error = str(MESSAGES[NO_FACILITY])
109
110 elif not self.validate_date(start_date):
111 self.overall_error = str(MESSAGES[INVALID]).format("start_date")
112
113 elif not self.validate_date(end_date):
114 self.overall_error = str(MESSAGES[INVALID]).format("end_date")
115
116 else:
117 log_type = options["log_type"]
118
119 log_info = classes_info[log_type]
120
121 if options["output_file"] is None:
122 filename = log_info["filename"].format(
123 facility.name, facility.id[:4], start_date[:10], end_date[:10]
124 )
125 else:
126 filename = options["output_file"]
127
128 filepath = os.path.join(os.getcwd(), filename)
129
130 queryset = log_info["queryset"]
131
132 total_rows = queryset.count()
133
134 with self.start_progress(total=total_rows) as progress_update:
135 try:
136 for row in csv_file_generator(
137 facility,
138 log_type,
139 filepath,
140 start_date=start_date,
141 end_date=end_date,
142 overwrite=options["overwrite"],
143 ):
144 progress_update(1)
145 except (ValueError, IOError) as e:
146 self.overall_error = str(MESSAGES[FILE_WRITE_ERROR].format(e))
147
148 if job:
149 job.extra_metadata["overall_error"] = self.overall_error
150 self.job.extra_metadata["filename"] = ntpath.basename(filepath)
151 job.save_meta()
152 else:
153 if self.overall_error:
154 raise CommandError(self.overall_error)
155 else:
156 logger.info(
157 "Created csv file {} with {} lines".format(filepath, total_rows)
158 )
159
160 translation.deactivate()
161
162 # create or update record of log request
163 GenerateCSVLogRequest.objects.update_or_create(
164 log_type=log_type,
165 facility=facility,
166 defaults={
167 "selected_start_date": parser.parse(start_date),
168 "selected_end_date": parser.parse(end_date),
169 "date_requested": local_now(),
170 },
171 )
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/core/logger/management/commands/exportlogs.py b/kolibri/core/logger/management/commands/exportlogs.py
--- a/kolibri/core/logger/management/commands/exportlogs.py
+++ b/kolibri/core/logger/management/commands/exportlogs.py
@@ -15,6 +15,7 @@
from kolibri.core.logger.csv_export import classes_info
from kolibri.core.logger.csv_export import csv_file_generator
from kolibri.core.logger.models import GenerateCSVLogRequest
+from kolibri.core.logger.tasks import log_exports_cleanup
from kolibri.core.tasks.management.commands.base import AsyncCommand
from kolibri.core.tasks.utils import get_current_job
from kolibri.utils.time_utils import local_now
@@ -169,3 +170,4 @@
"date_requested": local_now(),
},
)
+ log_exports_cleanup.enqueue()
diff --git a/kolibri/core/logger/tasks.py b/kolibri/core/logger/tasks.py
--- a/kolibri/core/logger/tasks.py
+++ b/kolibri/core/logger/tasks.py
@@ -3,13 +3,19 @@
from django.core.management import call_command
from rest_framework import serializers
+from kolibri.core.auth.management.commands.bulkexportusers import (
+ CSV_EXPORT_FILENAMES as USER_CSV_EXPORT_FILENAMES,
+)
from kolibri.core.auth.models import Facility
from kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES
+from kolibri.core.logger.models import GenerateCSVLogRequest
from kolibri.core.tasks.decorators import register_task
from kolibri.core.tasks.permissions import IsAdminForJob
from kolibri.core.tasks.validation import JobValidator
from kolibri.utils import conf
+LOGS_CLEANUP_JOB_ID = "18"
+
def get_filepath(log_type, facility_id, start_date, end_date):
facility = Facility.objects.get(id=facility_id)
@@ -25,6 +31,53 @@
return filepath
+def get_valid_logs_csv_filenames():
+ """
+ Returns a set of valid filenames that should exist
+ based on the objects stored in GenerateCSVLogRequest.
+ Any other files except these filenames should be removed.
+ """
+ valid_filenames_set = set()
+ log_requests = GenerateCSVLogRequest.objects.all()
+ for log_request in log_requests:
+ full_path = get_filepath(
+ log_request.log_type,
+ log_request.facility_id,
+ log_request.selected_start_date.strftime("%Y-%m-%d"),
+ log_request.selected_end_date.strftime("%Y-%m-%d"),
+ )
+ valid_filenames_set.add(os.path.basename(full_path))
+ return valid_filenames_set
+
+
+def get_valid_users_csv_filenames():
+ """
+ Returns a set of valid filenames that should exist
+ based on the objects stored in Facility.
+ """
+ valid_filenames_set = set()
+ facilities = Facility.objects.values("id", "name").all()
+ for facility in facilities:
+ file_name = USER_CSV_EXPORT_FILENAMES["user"].format(
+ facility["name"], facility["id"][:4]
+ )
+ valid_filenames_set.add(file_name)
+ return valid_filenames_set
+
+
+def get_valid_filenames():
+ """
+ Returns a union set of valid filenames
+ for log exports and users csv exports.
+ These filenames are valid and will not be
+ cleaned from log_exports_cleanup.
+ """
+ valid_logs_filenames = get_valid_logs_csv_filenames()
+ valid_users_filenames = get_valid_users_csv_filenames()
+ valid_filenames_set = valid_logs_filenames.union(valid_users_filenames)
+ return valid_filenames_set
+
+
class ExportLogCSVValidator(JobValidator):
facility = serializers.PrimaryKeyRelatedField(
queryset=Facility.objects.all(), required=False
@@ -116,3 +169,18 @@
kwargs.get("end_date"),
kwargs.get("locale"),
)
+
+
+@register_task(job_id=LOGS_CLEANUP_JOB_ID)
+def log_exports_cleanup():
+ """
+ Cleanup log_exports csv files that does not have
+ related reocord in GenerateCSVLogRequest model
+ """
+ logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
+ if not os.path.isdir(logs_dir):
+ return
+ valid_filenames_set = get_valid_filenames()
+ for filename in os.listdir(logs_dir):
+ if filename not in valid_filenames_set:
+ os.remove(os.path.join(logs_dir, filename))
|
{"golden_diff": "diff --git a/kolibri/core/logger/management/commands/exportlogs.py b/kolibri/core/logger/management/commands/exportlogs.py\n--- a/kolibri/core/logger/management/commands/exportlogs.py\n+++ b/kolibri/core/logger/management/commands/exportlogs.py\n@@ -15,6 +15,7 @@\n from kolibri.core.logger.csv_export import classes_info\n from kolibri.core.logger.csv_export import csv_file_generator\n from kolibri.core.logger.models import GenerateCSVLogRequest\n+from kolibri.core.logger.tasks import log_exports_cleanup\n from kolibri.core.tasks.management.commands.base import AsyncCommand\n from kolibri.core.tasks.utils import get_current_job\n from kolibri.utils.time_utils import local_now\n@@ -169,3 +170,4 @@\n \"date_requested\": local_now(),\n },\n )\n+ log_exports_cleanup.enqueue()\ndiff --git a/kolibri/core/logger/tasks.py b/kolibri/core/logger/tasks.py\n--- a/kolibri/core/logger/tasks.py\n+++ b/kolibri/core/logger/tasks.py\n@@ -3,13 +3,19 @@\n from django.core.management import call_command\n from rest_framework import serializers\n \n+from kolibri.core.auth.management.commands.bulkexportusers import (\n+ CSV_EXPORT_FILENAMES as USER_CSV_EXPORT_FILENAMES,\n+)\n from kolibri.core.auth.models import Facility\n from kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES\n+from kolibri.core.logger.models import GenerateCSVLogRequest\n from kolibri.core.tasks.decorators import register_task\n from kolibri.core.tasks.permissions import IsAdminForJob\n from kolibri.core.tasks.validation import JobValidator\n from kolibri.utils import conf\n \n+LOGS_CLEANUP_JOB_ID = \"18\"\n+\n \n def get_filepath(log_type, facility_id, start_date, end_date):\n facility = Facility.objects.get(id=facility_id)\n@@ -25,6 +31,53 @@\n return filepath\n \n \n+def get_valid_logs_csv_filenames():\n+ \"\"\"\n+ Returns a set of valid filenames that should exist\n+ based on the objects stored in GenerateCSVLogRequest.\n+ Any other files except these filenames should be removed.\n+ \"\"\"\n+ valid_filenames_set = set()\n+ log_requests = GenerateCSVLogRequest.objects.all()\n+ for log_request in log_requests:\n+ full_path = get_filepath(\n+ log_request.log_type,\n+ log_request.facility_id,\n+ log_request.selected_start_date.strftime(\"%Y-%m-%d\"),\n+ log_request.selected_end_date.strftime(\"%Y-%m-%d\"),\n+ )\n+ valid_filenames_set.add(os.path.basename(full_path))\n+ return valid_filenames_set\n+\n+\n+def get_valid_users_csv_filenames():\n+ \"\"\"\n+ Returns a set of valid filenames that should exist\n+ based on the objects stored in Facility.\n+ \"\"\"\n+ valid_filenames_set = set()\n+ facilities = Facility.objects.values(\"id\", \"name\").all()\n+ for facility in facilities:\n+ file_name = USER_CSV_EXPORT_FILENAMES[\"user\"].format(\n+ facility[\"name\"], facility[\"id\"][:4]\n+ )\n+ valid_filenames_set.add(file_name)\n+ return valid_filenames_set\n+\n+\n+def get_valid_filenames():\n+ \"\"\"\n+ Returns a union set of valid filenames\n+ for log exports and users csv exports.\n+ These filenames are valid and will not be\n+ cleaned from log_exports_cleanup.\n+ \"\"\"\n+ valid_logs_filenames = get_valid_logs_csv_filenames()\n+ valid_users_filenames = get_valid_users_csv_filenames()\n+ valid_filenames_set = valid_logs_filenames.union(valid_users_filenames)\n+ return valid_filenames_set\n+\n+\n class ExportLogCSVValidator(JobValidator):\n facility = serializers.PrimaryKeyRelatedField(\n queryset=Facility.objects.all(), required=False\n@@ -116,3 +169,18 @@\n kwargs.get(\"end_date\"),\n kwargs.get(\"locale\"),\n )\n+\n+\n+@register_task(job_id=LOGS_CLEANUP_JOB_ID)\n+def log_exports_cleanup():\n+ \"\"\"\n+ Cleanup log_exports csv files that does not have\n+ related reocord in GenerateCSVLogRequest model\n+ \"\"\"\n+ logs_dir = os.path.join(conf.KOLIBRI_HOME, \"log_export\")\n+ if not os.path.isdir(logs_dir):\n+ return\n+ valid_filenames_set = get_valid_filenames()\n+ for filename in os.listdir(logs_dir):\n+ if filename not in valid_filenames_set:\n+ os.remove(os.path.join(logs_dir, filename))\n", "issue": "Clean up previously generated CSV files\n<!--\r\nInstructions:\r\n * Fill out the sections below, replace \u2026's with information about your issue\r\n * Use the 'preview' function above this text box to verify formatting before submitting\r\n-->\r\n\r\n## Observed behavior\r\nWhen a new CSV file is generated for session and summary logs, the previously generated files that are stored in the user's database are no longer accessible for download in the UI. A record of the new CSV file request is saved to the logger model `GenerateCSVLogRequest` which contains the facility id, log type (session or summary), the selected start date, the selected end date, and the date that the request was made. There should only ever be one record for each facility and log type combination. The information in this modal is used to scan the database folder `log_exports` and retrieve the previously generated file for download.\r\n\r\nNote: For session and summary log files, the file naming convention has been updated to include a date-range selection, with the dates formatted as `YYYY-MM-DD`.\r\nPrevious naming convention:\r\n- `{facility_name}_{last 4 digits of facility ID}_{log export type}.csv`\r\n\r\nCurrent naming convention:\r\n- `{facility_name}_{last 4 digits of facility ID}_{log export type}_from_{start date}_to_{end date}.csv`\r\n\r\n## Expected behavior\r\nWe should remove any files that do not have a record in the database. One way to achieve this is by scanning the folder `log_export`, which contains all exported log files, using the predictable file naming convention, and cross-referencing them with the file records stored in the database. Any session or summary files that do not have a corresponding record in the database should be deleted.\r\n\r\nWith a 1:1 correlation with the `GenerateCSVLogRequest` record, the database should only contain one exported file for each session and summary log type. A new async task should be created for performing this cleanup, and can be queued whenever a new log export is generated. If the task has a predictable id, it can be ensured that there is only one cleanup task going at once.\r\n\r\nMore details on the task system can be found [here](https://kolibri-dev.readthedocs.io/en/develop/backend_architecture/tasks.html).\r\n\r\n## User-facing consequences\r\nEnsures the database is free from redundant files and optimizes the user's database storage.\r\n\r\n## Acceptance Criteria\r\n\r\n- [ ] All previously generated session and summary log file exports that lack a corresponding record in the database have been deleted.\r\n- [ ] The previously generated CSV file should be automatically removed from the database upon generating a new one.\r\n- [ ] New python code should be covered by unit tests.\r\n\n", "before_files": [{"content": "import os\n\nfrom django.core.management import call_command\nfrom rest_framework import serializers\n\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES\nfrom kolibri.core.tasks.decorators import register_task\nfrom kolibri.core.tasks.permissions import IsAdminForJob\nfrom kolibri.core.tasks.validation import JobValidator\nfrom kolibri.utils import conf\n\n\ndef get_filepath(log_type, facility_id, start_date, end_date):\n facility = Facility.objects.get(id=facility_id)\n logs_dir = os.path.join(conf.KOLIBRI_HOME, \"log_export\")\n if not os.path.isdir(logs_dir):\n os.mkdir(logs_dir)\n filepath = os.path.join(\n logs_dir,\n CSV_EXPORT_FILENAMES[log_type].format(\n facility.name, facility.id[:4], start_date[:10], end_date[:10]\n ),\n )\n return filepath\n\n\nclass ExportLogCSVValidator(JobValidator):\n facility = serializers.PrimaryKeyRelatedField(\n queryset=Facility.objects.all(), required=False\n )\n start_date = serializers.CharField()\n end_date = serializers.CharField()\n locale = serializers.CharField(required=False)\n\n def validate(self, data):\n facility = data.get(\"facility\", None)\n start_date = data.get(\"start_date\", None)\n end_date = data.get(\"end_date\", None)\n locale = data.get(\"locale\", None)\n\n if facility is None and \"user\" in self.context:\n facility = self.context[\"user\"].facility\n elif facility is None:\n raise serializers.ValidationError(\n \"Facility must be specified when no user is available.\"\n )\n if not start_date or not end_date:\n raise serializers.ValidationError(\n \"Start {} and End {} date values are required.\".format(\n start_date, end_date\n )\n )\n kwargs = {\n \"facility\": facility.id,\n \"start_date\": start_date,\n \"end_date\": end_date,\n \"locale\": locale,\n }\n return {\n \"facility_id\": facility.id,\n \"kwargs\": kwargs,\n \"args\": [facility.id],\n }\n\n\ndef _exportlogcsv(log_type, facility_id, start_date, end_date, locale):\n filepath = get_filepath(log_type, facility_id, start_date, end_date)\n call_command(\n \"exportlogs\",\n log_type=log_type,\n output_file=filepath,\n facility=facility_id,\n overwrite=True,\n start_date=start_date,\n end_date=end_date,\n locale=locale,\n )\n\n\n@register_task(\n validator=ExportLogCSVValidator,\n track_progress=True,\n permission_classes=[IsAdminForJob],\n)\ndef exportsessionlogcsv(facility_id, **kwargs):\n \"\"\"\n Dumps in csv format the content session logs.\n\n :param: facility.\n \"\"\"\n _exportlogcsv(\n \"session\",\n facility_id,\n kwargs.get(\"start_date\"),\n kwargs.get(\"end_date\"),\n kwargs.get(\"locale\"),\n )\n\n\n@register_task(\n validator=ExportLogCSVValidator,\n track_progress=True,\n permission_classes=[IsAdminForJob],\n)\ndef exportsummarylogcsv(facility_id, **kwargs):\n \"\"\"\n Dumps in csv format the content summary logs.\n\n :param: facility.\n \"\"\"\n _exportlogcsv(\n \"summary\",\n facility_id,\n kwargs.get(\"start_date\"),\n kwargs.get(\"end_date\"),\n kwargs.get(\"locale\"),\n )\n", "path": "kolibri/core/logger/tasks.py"}, {"content": "import logging\nimport ntpath\nimport os\n\nfrom dateutil import parser\nfrom django.conf import settings\nfrom django.core.management.base import CommandError\nfrom django.utils import translation\n\nfrom kolibri.core.auth.constants.commands_errors import FILE_WRITE_ERROR\nfrom kolibri.core.auth.constants.commands_errors import INVALID\nfrom kolibri.core.auth.constants.commands_errors import MESSAGES\nfrom kolibri.core.auth.constants.commands_errors import NO_FACILITY\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.logger.csv_export import classes_info\nfrom kolibri.core.logger.csv_export import csv_file_generator\nfrom kolibri.core.logger.models import GenerateCSVLogRequest\nfrom kolibri.core.tasks.management.commands.base import AsyncCommand\nfrom kolibri.core.tasks.utils import get_current_job\nfrom kolibri.utils.time_utils import local_now\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"-O\",\n \"--output-file\",\n action=\"store\",\n dest=\"output_file\",\n default=None,\n type=str,\n help=\"The generated file will be saved with this name\",\n )\n parser.add_argument(\n \"-l\",\n \"--log-type\",\n action=\"store\",\n dest=\"log_type\",\n default=\"session\",\n choices=classes_info.keys(),\n help='Log type to be exported. Valid values are \"session\" and \"summary\".',\n )\n parser.add_argument(\n \"-w\",\n \"--overwrite\",\n action=\"store_true\",\n dest=\"overwrite\",\n default=False,\n help=\"Allows overwritten of the exported file in case it exists\",\n )\n parser.add_argument(\n \"--facility\",\n action=\"store\",\n type=str,\n help=\"Facility id to import the users into\",\n )\n parser.add_argument(\n \"--locale\",\n action=\"store\",\n type=str,\n default=None,\n help=\"Code of the language for the messages to be translated\",\n )\n parser.add_argument(\n \"--start_date\",\n action=\"store\",\n dest=\"start_date\",\n type=str,\n help=\"Start date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS\",\n )\n parser.add_argument(\n \"--end_date\",\n action=\"store\",\n dest=\"end_date\",\n type=str,\n help=\"End date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS\",\n )\n\n def get_facility(self, options):\n if options[\"facility\"]:\n default_facility = Facility.objects.get(pk=options[\"facility\"])\n else:\n default_facility = Facility.get_default_facility()\n\n return default_facility\n\n def validate_date(self, date_str):\n try:\n return bool(parser.parse(date_str))\n except ValueError:\n return False\n\n def handle_async(self, *args, **options):\n\n # set language for the translation of the messages\n locale = settings.LANGUAGE_CODE if not options[\"locale\"] else options[\"locale\"]\n translation.activate(locale)\n self.overall_error = \"\"\n job = get_current_job()\n\n start_date = options[\"start_date\"]\n end_date = options[\"end_date\"]\n\n facility = self.get_facility(options)\n if not facility:\n self.overall_error = str(MESSAGES[NO_FACILITY])\n\n elif not self.validate_date(start_date):\n self.overall_error = str(MESSAGES[INVALID]).format(\"start_date\")\n\n elif not self.validate_date(end_date):\n self.overall_error = str(MESSAGES[INVALID]).format(\"end_date\")\n\n else:\n log_type = options[\"log_type\"]\n\n log_info = classes_info[log_type]\n\n if options[\"output_file\"] is None:\n filename = log_info[\"filename\"].format(\n facility.name, facility.id[:4], start_date[:10], end_date[:10]\n )\n else:\n filename = options[\"output_file\"]\n\n filepath = os.path.join(os.getcwd(), filename)\n\n queryset = log_info[\"queryset\"]\n\n total_rows = queryset.count()\n\n with self.start_progress(total=total_rows) as progress_update:\n try:\n for row in csv_file_generator(\n facility,\n log_type,\n filepath,\n start_date=start_date,\n end_date=end_date,\n overwrite=options[\"overwrite\"],\n ):\n progress_update(1)\n except (ValueError, IOError) as e:\n self.overall_error = str(MESSAGES[FILE_WRITE_ERROR].format(e))\n\n if job:\n job.extra_metadata[\"overall_error\"] = self.overall_error\n self.job.extra_metadata[\"filename\"] = ntpath.basename(filepath)\n job.save_meta()\n else:\n if self.overall_error:\n raise CommandError(self.overall_error)\n else:\n logger.info(\n \"Created csv file {} with {} lines\".format(filepath, total_rows)\n )\n\n translation.deactivate()\n\n # create or update record of log request\n GenerateCSVLogRequest.objects.update_or_create(\n log_type=log_type,\n facility=facility,\n defaults={\n \"selected_start_date\": parser.parse(start_date),\n \"selected_end_date\": parser.parse(end_date),\n \"date_requested\": local_now(),\n },\n )\n", "path": "kolibri/core/logger/management/commands/exportlogs.py"}], "after_files": [{"content": "import os\n\nfrom django.core.management import call_command\nfrom rest_framework import serializers\n\nfrom kolibri.core.auth.management.commands.bulkexportusers import (\n CSV_EXPORT_FILENAMES as USER_CSV_EXPORT_FILENAMES,\n)\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES\nfrom kolibri.core.logger.models import GenerateCSVLogRequest\nfrom kolibri.core.tasks.decorators import register_task\nfrom kolibri.core.tasks.permissions import IsAdminForJob\nfrom kolibri.core.tasks.validation import JobValidator\nfrom kolibri.utils import conf\n\nLOGS_CLEANUP_JOB_ID = \"18\"\n\n\ndef get_filepath(log_type, facility_id, start_date, end_date):\n facility = Facility.objects.get(id=facility_id)\n logs_dir = os.path.join(conf.KOLIBRI_HOME, \"log_export\")\n if not os.path.isdir(logs_dir):\n os.mkdir(logs_dir)\n filepath = os.path.join(\n logs_dir,\n CSV_EXPORT_FILENAMES[log_type].format(\n facility.name, facility.id[:4], start_date[:10], end_date[:10]\n ),\n )\n return filepath\n\n\ndef get_valid_logs_csv_filenames():\n \"\"\"\n Returns a set of valid filenames that should exist\n based on the objects stored in GenerateCSVLogRequest.\n Any other files except these filenames should be removed.\n \"\"\"\n valid_filenames_set = set()\n log_requests = GenerateCSVLogRequest.objects.all()\n for log_request in log_requests:\n full_path = get_filepath(\n log_request.log_type,\n log_request.facility_id,\n log_request.selected_start_date.strftime(\"%Y-%m-%d\"),\n log_request.selected_end_date.strftime(\"%Y-%m-%d\"),\n )\n valid_filenames_set.add(os.path.basename(full_path))\n return valid_filenames_set\n\n\ndef get_valid_users_csv_filenames():\n \"\"\"\n Returns a set of valid filenames that should exist\n based on the objects stored in Facility.\n \"\"\"\n valid_filenames_set = set()\n facilities = Facility.objects.values(\"id\", \"name\").all()\n for facility in facilities:\n file_name = USER_CSV_EXPORT_FILENAMES[\"user\"].format(\n facility[\"name\"], facility[\"id\"][:4]\n )\n valid_filenames_set.add(file_name)\n return valid_filenames_set\n\n\ndef get_valid_filenames():\n \"\"\"\n Returns a union set of valid filenames\n for log exports and users csv exports.\n These filenames are valid and will not be\n cleaned from log_exports_cleanup.\n \"\"\"\n valid_logs_filenames = get_valid_logs_csv_filenames()\n valid_users_filenames = get_valid_users_csv_filenames()\n valid_filenames_set = valid_logs_filenames.union(valid_users_filenames)\n return valid_filenames_set\n\n\nclass ExportLogCSVValidator(JobValidator):\n facility = serializers.PrimaryKeyRelatedField(\n queryset=Facility.objects.all(), required=False\n )\n start_date = serializers.CharField()\n end_date = serializers.CharField()\n locale = serializers.CharField(required=False)\n\n def validate(self, data):\n facility = data.get(\"facility\", None)\n start_date = data.get(\"start_date\", None)\n end_date = data.get(\"end_date\", None)\n locale = data.get(\"locale\", None)\n\n if facility is None and \"user\" in self.context:\n facility = self.context[\"user\"].facility\n elif facility is None:\n raise serializers.ValidationError(\n \"Facility must be specified when no user is available.\"\n )\n if not start_date or not end_date:\n raise serializers.ValidationError(\n \"Start {} and End {} date values are required.\".format(\n start_date, end_date\n )\n )\n kwargs = {\n \"facility\": facility.id,\n \"start_date\": start_date,\n \"end_date\": end_date,\n \"locale\": locale,\n }\n return {\n \"facility_id\": facility.id,\n \"kwargs\": kwargs,\n \"args\": [facility.id],\n }\n\n\ndef _exportlogcsv(log_type, facility_id, start_date, end_date, locale):\n filepath = get_filepath(log_type, facility_id, start_date, end_date)\n call_command(\n \"exportlogs\",\n log_type=log_type,\n output_file=filepath,\n facility=facility_id,\n overwrite=True,\n start_date=start_date,\n end_date=end_date,\n locale=locale,\n )\n\n\n@register_task(\n validator=ExportLogCSVValidator,\n track_progress=True,\n permission_classes=[IsAdminForJob],\n)\ndef exportsessionlogcsv(facility_id, **kwargs):\n \"\"\"\n Dumps in csv format the content session logs.\n\n :param: facility.\n \"\"\"\n _exportlogcsv(\n \"session\",\n facility_id,\n kwargs.get(\"start_date\"),\n kwargs.get(\"end_date\"),\n kwargs.get(\"locale\"),\n )\n\n\n@register_task(\n validator=ExportLogCSVValidator,\n track_progress=True,\n permission_classes=[IsAdminForJob],\n)\ndef exportsummarylogcsv(facility_id, **kwargs):\n \"\"\"\n Dumps in csv format the content summary logs.\n\n :param: facility.\n \"\"\"\n _exportlogcsv(\n \"summary\",\n facility_id,\n kwargs.get(\"start_date\"),\n kwargs.get(\"end_date\"),\n kwargs.get(\"locale\"),\n )\n\n\n@register_task(job_id=LOGS_CLEANUP_JOB_ID)\ndef log_exports_cleanup():\n \"\"\"\n Cleanup log_exports csv files that does not have\n related reocord in GenerateCSVLogRequest model\n \"\"\"\n logs_dir = os.path.join(conf.KOLIBRI_HOME, \"log_export\")\n if not os.path.isdir(logs_dir):\n return\n valid_filenames_set = get_valid_filenames()\n for filename in os.listdir(logs_dir):\n if filename not in valid_filenames_set:\n os.remove(os.path.join(logs_dir, filename))\n", "path": "kolibri/core/logger/tasks.py"}, {"content": "import logging\nimport ntpath\nimport os\n\nfrom dateutil import parser\nfrom django.conf import settings\nfrom django.core.management.base import CommandError\nfrom django.utils import translation\n\nfrom kolibri.core.auth.constants.commands_errors import FILE_WRITE_ERROR\nfrom kolibri.core.auth.constants.commands_errors import INVALID\nfrom kolibri.core.auth.constants.commands_errors import MESSAGES\nfrom kolibri.core.auth.constants.commands_errors import NO_FACILITY\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.logger.csv_export import classes_info\nfrom kolibri.core.logger.csv_export import csv_file_generator\nfrom kolibri.core.logger.models import GenerateCSVLogRequest\nfrom kolibri.core.logger.tasks import log_exports_cleanup\nfrom kolibri.core.tasks.management.commands.base import AsyncCommand\nfrom kolibri.core.tasks.utils import get_current_job\nfrom kolibri.utils.time_utils import local_now\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"-O\",\n \"--output-file\",\n action=\"store\",\n dest=\"output_file\",\n default=None,\n type=str,\n help=\"The generated file will be saved with this name\",\n )\n parser.add_argument(\n \"-l\",\n \"--log-type\",\n action=\"store\",\n dest=\"log_type\",\n default=\"session\",\n choices=classes_info.keys(),\n help='Log type to be exported. Valid values are \"session\" and \"summary\".',\n )\n parser.add_argument(\n \"-w\",\n \"--overwrite\",\n action=\"store_true\",\n dest=\"overwrite\",\n default=False,\n help=\"Allows overwritten of the exported file in case it exists\",\n )\n parser.add_argument(\n \"--facility\",\n action=\"store\",\n type=str,\n help=\"Facility id to import the users into\",\n )\n parser.add_argument(\n \"--locale\",\n action=\"store\",\n type=str,\n default=None,\n help=\"Code of the language for the messages to be translated\",\n )\n parser.add_argument(\n \"--start_date\",\n action=\"store\",\n dest=\"start_date\",\n type=str,\n help=\"Start date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS\",\n )\n parser.add_argument(\n \"--end_date\",\n action=\"store\",\n dest=\"end_date\",\n type=str,\n help=\"End date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS\",\n )\n\n def get_facility(self, options):\n if options[\"facility\"]:\n default_facility = Facility.objects.get(pk=options[\"facility\"])\n else:\n default_facility = Facility.get_default_facility()\n\n return default_facility\n\n def validate_date(self, date_str):\n try:\n return bool(parser.parse(date_str))\n except ValueError:\n return False\n\n def handle_async(self, *args, **options):\n\n # set language for the translation of the messages\n locale = settings.LANGUAGE_CODE if not options[\"locale\"] else options[\"locale\"]\n translation.activate(locale)\n self.overall_error = \"\"\n job = get_current_job()\n\n start_date = options[\"start_date\"]\n end_date = options[\"end_date\"]\n\n facility = self.get_facility(options)\n if not facility:\n self.overall_error = str(MESSAGES[NO_FACILITY])\n\n elif not self.validate_date(start_date):\n self.overall_error = str(MESSAGES[INVALID]).format(\"start_date\")\n\n elif not self.validate_date(end_date):\n self.overall_error = str(MESSAGES[INVALID]).format(\"end_date\")\n\n else:\n log_type = options[\"log_type\"]\n\n log_info = classes_info[log_type]\n\n if options[\"output_file\"] is None:\n filename = log_info[\"filename\"].format(\n facility.name, facility.id[:4], start_date[:10], end_date[:10]\n )\n else:\n filename = options[\"output_file\"]\n\n filepath = os.path.join(os.getcwd(), filename)\n\n queryset = log_info[\"queryset\"]\n\n total_rows = queryset.count()\n\n with self.start_progress(total=total_rows) as progress_update:\n try:\n for row in csv_file_generator(\n facility,\n log_type,\n filepath,\n start_date=start_date,\n end_date=end_date,\n overwrite=options[\"overwrite\"],\n ):\n progress_update(1)\n except (ValueError, IOError) as e:\n self.overall_error = str(MESSAGES[FILE_WRITE_ERROR].format(e))\n\n if job:\n job.extra_metadata[\"overall_error\"] = self.overall_error\n self.job.extra_metadata[\"filename\"] = ntpath.basename(filepath)\n job.save_meta()\n else:\n if self.overall_error:\n raise CommandError(self.overall_error)\n else:\n logger.info(\n \"Created csv file {} with {} lines\".format(filepath, total_rows)\n )\n\n translation.deactivate()\n\n # create or update record of log request\n GenerateCSVLogRequest.objects.update_or_create(\n log_type=log_type,\n facility=facility,\n defaults={\n \"selected_start_date\": parser.parse(start_date),\n \"selected_end_date\": parser.parse(end_date),\n \"date_requested\": local_now(),\n },\n )\n log_exports_cleanup.enqueue()\n", "path": "kolibri/core/logger/management/commands/exportlogs.py"}]}
| 3,363 | 974 |
gh_patches_debug_34281
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-830
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
zh_CN (and possible other providers) return invalid results for domain_name
Brief summary of the issue goes here.
### Steps to reproduce
f = Faker('zh_CN')
f.domain_name()
>he.cn
### Expected behavior
it should return valid domain names
### Actual behavior
he.cn is not a valid domain. he is in the list of second-level domains (https://en.wikipedia.org/wiki/.cn#Second-level_domains_of_provinces) and will cause failures in places where domains are being validated.
it's unclear where this change should/could be made after briefly reviewing the source code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/internet/zh_CN/__init__.py`
Content:
```
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from .. import Provider as InternetProvider
4 from faker.utils.decorators import slugify
5
6
7 class Provider(InternetProvider):
8 user_name_formats = (
9 '{{last_romanized_name}}.{{first_romanized_name}}',
10 '{{first_romanized_name}}.{{last_romanized_name}}',
11 '{{first_romanized_name}}##',
12 '?{{last_romanized_name}}',
13 )
14 tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')
15
16 @slugify
17 def domain_word(self):
18 return self.generator.format('last_romanized_name')
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/internet/zh_CN/__init__.py b/faker/providers/internet/zh_CN/__init__.py
--- a/faker/providers/internet/zh_CN/__init__.py
+++ b/faker/providers/internet/zh_CN/__init__.py
@@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import unicode_literals
+from collections import OrderedDict
from .. import Provider as InternetProvider
from faker.utils.decorators import slugify
@@ -11,8 +12,54 @@
'{{first_romanized_name}}##',
'?{{last_romanized_name}}',
)
- tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')
+
+ tlds = OrderedDict((
+ ('cn', 0.8),
+ ('net', 0.1),
+ ('com', 0.05),
+ ('org', 0.05),
+ ))
+
+ second_level_domains = ('ac', 'com', 'edu', 'gov', 'mil', 'net', 'org',
+ 'ah', 'bj', 'cq', 'fj', 'gd', 'gs', 'gz', 'gx',
+ 'ha', 'hb', 'he', 'hi', 'hk', 'hl', 'hn', 'jl',
+ 'js', 'jx', 'ln', 'mo', 'nm', 'nx', 'qh', 'sc',
+ 'sd', 'sh', 'sn', 'sx', 'tj', 'xj', 'xz', 'yn', 'zj')
+
+ domain_formats = (
+ '##', '??',
+ '{{first_romanized_name}}',
+ '{{last_romanized_name}}',
+ '{{first_romanized_name}}{{last_romanized_name}}',
+ '{{last_romanized_name}}{{last_romanized_name}}',
+ '{{first_romanized_name}}{{first_romanized_name}}',
+ )
@slugify
def domain_word(self):
- return self.generator.format('last_romanized_name')
+ pattern = self.random_element(self.domain_formats)
+ if '#' in pattern or '?' in pattern:
+ return self.bothify(pattern)
+ else:
+ return self.generator.parse(pattern)
+
+ def domain_name(self, levels=1):
+ if levels < 1:
+ raise ValueError("levels must be greater than or equal to 1")
+ if levels == 1:
+ domain_word = self.domain_word()
+ # Avoids he.cn as seen in issue #687
+ while domain_word in self.second_level_domains:
+ domain_word = self.domain_word()
+ return domain_word + '.' + self.tld()
+ elif levels == 2:
+ my_tld = self.tld()
+ my_second_level = ''
+ if my_tld == 'cn':
+ my_second_level = self.random_element(self.second_level_domains)
+ else:
+ my_second_level = self.domain_word()
+ return self.domain_word() + '.' + my_second_level + '.' + my_tld
+ else:
+ return self.domain_word() + '.' + self.domain_name(levels - 1)
+
|
{"golden_diff": "diff --git a/faker/providers/internet/zh_CN/__init__.py b/faker/providers/internet/zh_CN/__init__.py\n--- a/faker/providers/internet/zh_CN/__init__.py\n+++ b/faker/providers/internet/zh_CN/__init__.py\n@@ -1,5 +1,6 @@\n # coding=utf-8\n from __future__ import unicode_literals\n+from collections import OrderedDict\n from .. import Provider as InternetProvider\n from faker.utils.decorators import slugify\n \n@@ -11,8 +12,54 @@\n '{{first_romanized_name}}##',\n '?{{last_romanized_name}}',\n )\n- tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')\n+\n+ tlds = OrderedDict((\n+ ('cn', 0.8),\n+ ('net', 0.1),\n+ ('com', 0.05),\n+ ('org', 0.05),\n+ ))\n+\n+ second_level_domains = ('ac', 'com', 'edu', 'gov', 'mil', 'net', 'org',\n+ 'ah', 'bj', 'cq', 'fj', 'gd', 'gs', 'gz', 'gx',\n+ 'ha', 'hb', 'he', 'hi', 'hk', 'hl', 'hn', 'jl',\n+ 'js', 'jx', 'ln', 'mo', 'nm', 'nx', 'qh', 'sc',\n+ 'sd', 'sh', 'sn', 'sx', 'tj', 'xj', 'xz', 'yn', 'zj')\n+\n+ domain_formats = (\n+ '##', '??',\n+ '{{first_romanized_name}}',\n+ '{{last_romanized_name}}',\n+ '{{first_romanized_name}}{{last_romanized_name}}',\n+ '{{last_romanized_name}}{{last_romanized_name}}',\n+ '{{first_romanized_name}}{{first_romanized_name}}',\n+ )\n \n @slugify\n def domain_word(self):\n- return self.generator.format('last_romanized_name')\n+ pattern = self.random_element(self.domain_formats)\n+ if '#' in pattern or '?' in pattern:\n+ return self.bothify(pattern)\n+ else:\n+ return self.generator.parse(pattern)\n+\n+ def domain_name(self, levels=1):\n+ if levels < 1:\n+ raise ValueError(\"levels must be greater than or equal to 1\")\n+ if levels == 1:\n+ domain_word = self.domain_word()\n+ # Avoids he.cn as seen in issue #687\n+ while domain_word in self.second_level_domains:\n+ domain_word = self.domain_word()\n+ return domain_word + '.' + self.tld()\n+ elif levels == 2:\n+ my_tld = self.tld()\n+ my_second_level = ''\n+ if my_tld == 'cn':\n+ my_second_level = self.random_element(self.second_level_domains)\n+ else:\n+ my_second_level = self.domain_word()\n+ return self.domain_word() + '.' + my_second_level + '.' + my_tld\n+ else:\n+ return self.domain_word() + '.' + self.domain_name(levels - 1)\n+\n", "issue": "zh_CN (and possible other providers) return invalid results for domain_name\nBrief summary of the issue goes here.\r\n\r\n### Steps to reproduce\r\n\r\nf = Faker('zh_CN')\r\nf.domain_name()\r\n>he.cn\r\n\r\n### Expected behavior\r\n\r\nit should return valid domain names\r\n\r\n### Actual behavior\r\n\r\nhe.cn is not a valid domain. he is in the list of second-level domains (https://en.wikipedia.org/wiki/.cn#Second-level_domains_of_provinces) and will cause failures in places where domains are being validated.\r\n\r\nit's unclear where this change should/could be made after briefly reviewing the source code.\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\nfrom faker.utils.decorators import slugify\n\n\nclass Provider(InternetProvider):\n user_name_formats = (\n '{{last_romanized_name}}.{{first_romanized_name}}',\n '{{first_romanized_name}}.{{last_romanized_name}}',\n '{{first_romanized_name}}##',\n '?{{last_romanized_name}}',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')\n\n @slugify\n def domain_word(self):\n return self.generator.format('last_romanized_name')\n", "path": "faker/providers/internet/zh_CN/__init__.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\nfrom .. import Provider as InternetProvider\nfrom faker.utils.decorators import slugify\n\n\nclass Provider(InternetProvider):\n user_name_formats = (\n '{{last_romanized_name}}.{{first_romanized_name}}',\n '{{first_romanized_name}}.{{last_romanized_name}}',\n '{{first_romanized_name}}##',\n '?{{last_romanized_name}}',\n )\n\n tlds = OrderedDict((\n ('cn', 0.8),\n ('net', 0.1),\n ('com', 0.05),\n ('org', 0.05),\n ))\n\n second_level_domains = ('ac', 'com', 'edu', 'gov', 'mil', 'net', 'org',\n 'ah', 'bj', 'cq', 'fj', 'gd', 'gs', 'gz', 'gx',\n 'ha', 'hb', 'he', 'hi', 'hk', 'hl', 'hn', 'jl',\n 'js', 'jx', 'ln', 'mo', 'nm', 'nx', 'qh', 'sc',\n 'sd', 'sh', 'sn', 'sx', 'tj', 'xj', 'xz', 'yn', 'zj')\n\n domain_formats = (\n '##', '??',\n '{{first_romanized_name}}',\n '{{last_romanized_name}}',\n '{{first_romanized_name}}{{last_romanized_name}}',\n '{{last_romanized_name}}{{last_romanized_name}}',\n '{{first_romanized_name}}{{first_romanized_name}}',\n )\n\n @slugify\n def domain_word(self):\n pattern = self.random_element(self.domain_formats)\n if '#' in pattern or '?' in pattern:\n return self.bothify(pattern)\n else:\n return self.generator.parse(pattern)\n\n def domain_name(self, levels=1):\n if levels < 1:\n raise ValueError(\"levels must be greater than or equal to 1\")\n if levels == 1:\n domain_word = self.domain_word()\n # Avoids he.cn as seen in issue #687\n while domain_word in self.second_level_domains:\n domain_word = self.domain_word()\n return domain_word + '.' + self.tld()\n elif levels == 2:\n my_tld = self.tld()\n my_second_level = ''\n if my_tld == 'cn':\n my_second_level = self.random_element(self.second_level_domains)\n else:\n my_second_level = self.domain_word()\n return self.domain_word() + '.' + my_second_level + '.' + my_tld\n else:\n return self.domain_word() + '.' + self.domain_name(levels - 1)\n\n", "path": "faker/providers/internet/zh_CN/__init__.py"}]}
| 570 | 724 |
gh_patches_debug_20196
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-8465
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failing to load sqlalchemy-plugin in pyinstaller'ed exe
This line of code works perfectly fine while running as a file from spyder.
engine = sqlalchemy.create_engine('teradata://uid:pwd@DBinstance?driver=Teradata')
However, after making an exe file using pyinstaller, I get the following error while running the exe:
sqlalchemy.exc.NoSuchModuleError: Can't load plugin:sqlalchemy.dialects:teradata
This looks to be an exe-creation-issue in loading libraries. Has anyone encountered a similar issue and was able to resolve? Any pointers would be appreciated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-sqlalchemy.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2023, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import re
13 import importlib.util
14
15 from PyInstaller import isolated
16 from PyInstaller.lib.modulegraph.modulegraph import SourceModule
17 from PyInstaller.utils.hooks import check_requirement, logger
18
19 # 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.
20 excludedimports = ['sqlalchemy.testing']
21
22 # Include most common database bindings some database bindings are detected and include some are not. We should
23 # explicitly include database backends.
24 hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']
25
26 if check_requirement('sqlalchemy >= 1.4'):
27 hiddenimports.append("sqlalchemy.sql.default_comparator")
28
29
30 @isolated.decorate
31 def _get_dialect_modules(module_name):
32 import importlib
33 module = importlib.import_module(module_name)
34 return [f"{module_name}.{submodule_name}" for submodule_name in module.__all__]
35
36
37 # In SQLAlchemy >= 0.6, the "sqlalchemy.dialects" package provides dialects.
38 # In SQLAlchemy <= 0.5, the "sqlalchemy.databases" package provides dialects.
39 if check_requirement('sqlalchemy >= 0.6'):
40 hiddenimports += _get_dialect_modules("sqlalchemy.dialects")
41 else:
42 hiddenimports += _get_dialect_modules("sqlalchemy.databases")
43
44
45 def hook(hook_api):
46 """
47 SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This decorator does imports. E.g.:
48
49 @util.dependencies("sqlalchemy.sql.schema")
50
51 This hook scans for included SQLAlchemy modules and then scans those modules for any util.dependencies and marks
52 those modules as hidden imports.
53 """
54
55 if not check_requirement('sqlalchemy >= 0.9'):
56 return
57
58 # this parser is very simplistic but seems to catch all cases as of V1.1
59 depend_regex = re.compile(r'@util.dependencies\([\'"](.*?)[\'"]\)')
60
61 hidden_imports_set = set()
62 known_imports = set()
63 for node in hook_api.module_graph.iter_graph(start=hook_api.module):
64 if isinstance(node, SourceModule) and node.identifier.startswith('sqlalchemy.'):
65 known_imports.add(node.identifier)
66
67 # Read the source...
68 with open(node.filename, 'rb') as f:
69 source_code = f.read()
70 source_code = importlib.util.decode_source(source_code)
71
72 # ... and scan it
73 for match in depend_regex.findall(source_code):
74 hidden_imports_set.add(match)
75
76 hidden_imports_set -= known_imports
77 if len(hidden_imports_set):
78 logger.info(" Found %d sqlalchemy hidden imports", len(hidden_imports_set))
79 hook_api.add_imports(*list(hidden_imports_set))
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/hooks/hook-sqlalchemy.py b/PyInstaller/hooks/hook-sqlalchemy.py
--- a/PyInstaller/hooks/hook-sqlalchemy.py
+++ b/PyInstaller/hooks/hook-sqlalchemy.py
@@ -14,7 +14,9 @@
from PyInstaller import isolated
from PyInstaller.lib.modulegraph.modulegraph import SourceModule
-from PyInstaller.utils.hooks import check_requirement, logger
+from PyInstaller.utils.hooks import check_requirement, collect_entry_point, logger
+
+datas = []
# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.
excludedimports = ['sqlalchemy.testing']
@@ -41,6 +43,13 @@
else:
hiddenimports += _get_dialect_modules("sqlalchemy.databases")
+# Collect additional dialects and plugins that are registered via entry-points, under assumption that they are available
+# in the build environment for a reason (i.e., they are used).
+for entry_point_name in ('sqlalchemy.dialects', 'sqlalchemy.plugins'):
+ ep_datas, ep_hiddenimports = collect_entry_point(entry_point_name)
+ datas += ep_datas
+ hiddenimports += ep_hiddenimports
+
def hook(hook_api):
"""
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-sqlalchemy.py b/PyInstaller/hooks/hook-sqlalchemy.py\n--- a/PyInstaller/hooks/hook-sqlalchemy.py\n+++ b/PyInstaller/hooks/hook-sqlalchemy.py\n@@ -14,7 +14,9 @@\n \n from PyInstaller import isolated\n from PyInstaller.lib.modulegraph.modulegraph import SourceModule\n-from PyInstaller.utils.hooks import check_requirement, logger\n+from PyInstaller.utils.hooks import check_requirement, collect_entry_point, logger\n+\n+datas = []\n \n # 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\n excludedimports = ['sqlalchemy.testing']\n@@ -41,6 +43,13 @@\n else:\n hiddenimports += _get_dialect_modules(\"sqlalchemy.databases\")\n \n+# Collect additional dialects and plugins that are registered via entry-points, under assumption that they are available\n+# in the build environment for a reason (i.e., they are used).\n+for entry_point_name in ('sqlalchemy.dialects', 'sqlalchemy.plugins'):\n+ ep_datas, ep_hiddenimports = collect_entry_point(entry_point_name)\n+ datas += ep_datas\n+ hiddenimports += ep_hiddenimports\n+\n \n def hook(hook_api):\n \"\"\"\n", "issue": "Failing to load sqlalchemy-plugin in pyinstaller'ed exe\nThis line of code works perfectly fine while running as a file from spyder.\r\n\r\nengine = sqlalchemy.create_engine('teradata://uid:pwd@DBinstance?driver=Teradata')\r\nHowever, after making an exe file using pyinstaller, I get the following error while running the exe:\r\n\r\nsqlalchemy.exc.NoSuchModuleError: Can't load plugin:sqlalchemy.dialects:teradata\r\n\r\nThis looks to be an exe-creation-issue in loading libraries. Has anyone encountered a similar issue and was able to resolve? Any pointers would be appreciated.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport re\nimport importlib.util\n\nfrom PyInstaller import isolated\nfrom PyInstaller.lib.modulegraph.modulegraph import SourceModule\nfrom PyInstaller.utils.hooks import check_requirement, logger\n\n# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\nexcludedimports = ['sqlalchemy.testing']\n\n# Include most common database bindings some database bindings are detected and include some are not. We should\n# explicitly include database backends.\nhiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']\n\nif check_requirement('sqlalchemy >= 1.4'):\n hiddenimports.append(\"sqlalchemy.sql.default_comparator\")\n\n\n@isolated.decorate\ndef _get_dialect_modules(module_name):\n import importlib\n module = importlib.import_module(module_name)\n return [f\"{module_name}.{submodule_name}\" for submodule_name in module.__all__]\n\n\n# In SQLAlchemy >= 0.6, the \"sqlalchemy.dialects\" package provides dialects.\n# In SQLAlchemy <= 0.5, the \"sqlalchemy.databases\" package provides dialects.\nif check_requirement('sqlalchemy >= 0.6'):\n hiddenimports += _get_dialect_modules(\"sqlalchemy.dialects\")\nelse:\n hiddenimports += _get_dialect_modules(\"sqlalchemy.databases\")\n\n\ndef hook(hook_api):\n \"\"\"\n SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This decorator does imports. E.g.:\n\n @util.dependencies(\"sqlalchemy.sql.schema\")\n\n This hook scans for included SQLAlchemy modules and then scans those modules for any util.dependencies and marks\n those modules as hidden imports.\n \"\"\"\n\n if not check_requirement('sqlalchemy >= 0.9'):\n return\n\n # this parser is very simplistic but seems to catch all cases as of V1.1\n depend_regex = re.compile(r'@util.dependencies\\([\\'\"](.*?)[\\'\"]\\)')\n\n hidden_imports_set = set()\n known_imports = set()\n for node in hook_api.module_graph.iter_graph(start=hook_api.module):\n if isinstance(node, SourceModule) and node.identifier.startswith('sqlalchemy.'):\n known_imports.add(node.identifier)\n\n # Read the source...\n with open(node.filename, 'rb') as f:\n source_code = f.read()\n source_code = importlib.util.decode_source(source_code)\n\n # ... and scan it\n for match in depend_regex.findall(source_code):\n hidden_imports_set.add(match)\n\n hidden_imports_set -= known_imports\n if len(hidden_imports_set):\n logger.info(\" Found %d sqlalchemy hidden imports\", len(hidden_imports_set))\n hook_api.add_imports(*list(hidden_imports_set))\n", "path": "PyInstaller/hooks/hook-sqlalchemy.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport re\nimport importlib.util\n\nfrom PyInstaller import isolated\nfrom PyInstaller.lib.modulegraph.modulegraph import SourceModule\nfrom PyInstaller.utils.hooks import check_requirement, collect_entry_point, logger\n\ndatas = []\n\n# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\nexcludedimports = ['sqlalchemy.testing']\n\n# Include most common database bindings some database bindings are detected and include some are not. We should\n# explicitly include database backends.\nhiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']\n\nif check_requirement('sqlalchemy >= 1.4'):\n hiddenimports.append(\"sqlalchemy.sql.default_comparator\")\n\n\n@isolated.decorate\ndef _get_dialect_modules(module_name):\n import importlib\n module = importlib.import_module(module_name)\n return [f\"{module_name}.{submodule_name}\" for submodule_name in module.__all__]\n\n\n# In SQLAlchemy >= 0.6, the \"sqlalchemy.dialects\" package provides dialects.\n# In SQLAlchemy <= 0.5, the \"sqlalchemy.databases\" package provides dialects.\nif check_requirement('sqlalchemy >= 0.6'):\n hiddenimports += _get_dialect_modules(\"sqlalchemy.dialects\")\nelse:\n hiddenimports += _get_dialect_modules(\"sqlalchemy.databases\")\n\n# Collect additional dialects and plugins that are registered via entry-points, under assumption that they are available\n# in the build environment for a reason (i.e., they are used).\nfor entry_point_name in ('sqlalchemy.dialects', 'sqlalchemy.plugins'):\n ep_datas, ep_hiddenimports = collect_entry_point(entry_point_name)\n datas += ep_datas\n hiddenimports += ep_hiddenimports\n\n\ndef hook(hook_api):\n \"\"\"\n SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This decorator does imports. E.g.:\n\n @util.dependencies(\"sqlalchemy.sql.schema\")\n\n This hook scans for included SQLAlchemy modules and then scans those modules for any util.dependencies and marks\n those modules as hidden imports.\n \"\"\"\n\n if not check_requirement('sqlalchemy >= 0.9'):\n return\n\n # this parser is very simplistic but seems to catch all cases as of V1.1\n depend_regex = re.compile(r'@util.dependencies\\([\\'\"](.*?)[\\'\"]\\)')\n\n hidden_imports_set = set()\n known_imports = set()\n for node in hook_api.module_graph.iter_graph(start=hook_api.module):\n if isinstance(node, SourceModule) and node.identifier.startswith('sqlalchemy.'):\n known_imports.add(node.identifier)\n\n # Read the source...\n with open(node.filename, 'rb') as f:\n source_code = f.read()\n source_code = importlib.util.decode_source(source_code)\n\n # ... and scan it\n for match in depend_regex.findall(source_code):\n hidden_imports_set.add(match)\n\n hidden_imports_set -= known_imports\n if len(hidden_imports_set):\n logger.info(\" Found %d sqlalchemy hidden imports\", len(hidden_imports_set))\n hook_api.add_imports(*list(hidden_imports_set))\n", "path": "PyInstaller/hooks/hook-sqlalchemy.py"}]}
| 1,225 | 271 |
gh_patches_debug_9333
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-6638
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Intermittent duplicate key errors when creating default sysadmin
**CKAN version**
CKAN 2.9.x. Does not appear to affect 2.8.
**Describe the bug**
When our continuous integration tests initialise a test CKAN 2.9 instance, they sometimes encounter a duplicate key exception when CKAN is attempting to create the 'default' sysadmin account. There does not appear to be any specific pattern to when the errors occur; re-running the build may resolve it, or not. This did not occur on CKAN 2.8.
**Steps to reproduce**
Our test scripts run the following CLI commands:
ckan -c /etc/ckan/default/production.ini db clean --yes
ckan -c /etc/ckan/default/production.ini db init
ckan -c /etc/ckan/default/production.ini comments initdb
ckan -c /etc/ckan/default/production.ini comments updatedb
ckan -c /etc/ckan/default/production.ini comments init_notifications_db
**Additional details**
Sample failed build:
https://github.com/qld-gov-au/ckanext-ytp-comments/runs/4480610398?check_suite_focus=true
(Search for "already exists" to locate the stack trace)
NB Although the error actually occurred on that run during a `ckanext-ytp-comments` command, it was not specific to the extension, as shown by an excerpt from the stack trace:
ckan_1 | File "/app/ckan/default/src/ckan/ckan/cli/cli.py", line 102, in _init_ckan_config
ckan_1 | ctx.obj = CkanCommand(value)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/cli/cli.py", line 52, in __init__
ckan_1 | self.app = make_app(self.config)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/config/middleware/__init__.py", line 56, in make_app
ckan_1 | load_environment(conf)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/config/environment.py", line 123, in load_environment
ckan_1 | p.load_all()
ckan_1 | File "/app/ckan/default/src/ckan/ckan/plugins/core.py", line 161, in load_all
ckan_1 | unload_all()
ckan_1 | File "/app/ckan/default/src/ckan/ckan/plugins/core.py", line 208, in unload_all
ckan_1 | unload(*reversed(_PLUGINS))
ckan_1 | File "/app/ckan/default/src/ckan/ckan/plugins/core.py", line 236, in unload
ckan_1 | plugins_update()
ckan_1 | File "/app/ckan/default/src/ckan/ckan/plugins/core.py", line 153, in plugins_update
ckan_1 | environment.update_config()
ckan_1 | File "/app/ckan/default/src/ckan/ckan/config/environment.py", line 322, in update_config
ckan_1 | logic.get_action('get_site_user')({'ignore_auth': True}, None)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/logic/__init__.py", line 477, in wrapped
ckan_1 | result = _action(context, data_dict, **kw)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/logic/action/get.py", line 2398, in get_site_user
ckan_1 | model.Session.flush()
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/config/environment.py`
Content:
```
1 # encoding: utf-8
2
3 '''CKAN environment configuration'''
4 import os
5 import logging
6 import warnings
7 import pytz
8
9 import sqlalchemy
10
11 import ckan.model as model
12 import ckan.plugins as p
13 import ckan.lib.plugins as lib_plugins
14 import ckan.lib.helpers as helpers
15 import ckan.lib.app_globals as app_globals
16 from ckan.lib.redis import is_redis_available
17 import ckan.lib.search as search
18 import ckan.logic as logic
19 import ckan.authz as authz
20 from ckan.lib.webassets_tools import webassets_init
21 from ckan.lib.i18n import build_js_translations
22
23 from ckan.common import config, config_declaration
24 from ckan.exceptions import CkanConfigurationException
25 log = logging.getLogger(__name__)
26
27 # Suppress benign warning 'Unbuilt egg for setuptools'
28 warnings.simplefilter('ignore', UserWarning)
29
30
31 def load_environment(conf):
32 """
33 Configure the Pylons environment via the ``pylons.config`` object. This
34 code should only need to be run once.
35 """
36 os.environ['CKAN_CONFIG'] = conf['__file__']
37
38 valid_base_public_folder_names = ['public']
39 static_files = conf.get('ckan.base_public_folder', 'public')
40 conf['ckan.base_public_folder'] = static_files
41
42 if static_files not in valid_base_public_folder_names:
43 raise CkanConfigurationException(
44 'You provided an invalid value for ckan.base_public_folder. '
45 'Possible values are: "public".'
46 )
47
48 log.info('Loading static files from %s' % static_files)
49
50 # Initialize main CKAN config object
51 config.update(conf)
52
53 # Setup the SQLAlchemy database engine
54 # Suppress a couple of sqlalchemy warnings
55 msgs = ['^Unicode type received non-unicode bind param value',
56 "^Did not recognize type 'BIGINT' of column 'size'",
57 "^Did not recognize type 'tsvector' of column 'search_vector'"
58 ]
59 for msg in msgs:
60 warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)
61
62 # load all CKAN plugins
63 p.load_all()
64
65 # Check Redis availability
66 if not is_redis_available():
67 log.critical('Could not connect to Redis.')
68
69 app_globals.reset()
70
71 # issue #3260: remove idle transaction
72 # Session that was used for getting all config params nor committed,
73 # neither removed and we have idle connection as result
74 model.Session.commit()
75
76 # Build JavaScript translations. Must be done after plugins have
77 # been loaded.
78 build_js_translations()
79
80
81 # A mapping of config settings that can be overridden by env vars.
82 # Note: Do not remove the following lines, they are used in the docs
83 # Start CONFIG_FROM_ENV_VARS
84 CONFIG_FROM_ENV_VARS = {
85 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',
86 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',
87 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',
88 'ckan.redis.url': 'CKAN_REDIS_URL',
89 'solr_url': 'CKAN_SOLR_URL',
90 'solr_user': 'CKAN_SOLR_USER',
91 'solr_password': 'CKAN_SOLR_PASSWORD',
92 'ckan.site_id': 'CKAN_SITE_ID',
93 'ckan.site_url': 'CKAN_SITE_URL',
94 'ckan.storage_path': 'CKAN_STORAGE_PATH',
95 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',
96 'smtp.server': 'CKAN_SMTP_SERVER',
97 'smtp.starttls': 'CKAN_SMTP_STARTTLS',
98 'smtp.user': 'CKAN_SMTP_USER',
99 'smtp.password': 'CKAN_SMTP_PASSWORD',
100 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',
101 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'
102 }
103 # End CONFIG_FROM_ENV_VARS
104
105
106 def update_config():
107 ''' This code needs to be run when the config is changed to take those
108 changes into account. It is called whenever a plugin is loaded as the
109 plugin might have changed the config values (for instance it might
110 change ckan.site_url) '''
111
112 config_declaration.setup()
113 config_declaration.make_safe(config)
114 config_declaration.normalize(config)
115
116 webassets_init()
117
118 for plugin in p.PluginImplementations(p.IConfigurer):
119 # must do update in place as this does not work:
120 # config = plugin.update_config(config)
121 plugin.update_config(config)
122
123 # Set whitelisted env vars on config object
124 # This is set up before globals are initialized
125
126 ckan_db = os.environ.get('CKAN_DB', None)
127 if ckan_db:
128 msg = 'Setting CKAN_DB as an env var is deprecated and will be' \
129 ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'
130 log.warn(msg)
131 config['sqlalchemy.url'] = ckan_db
132
133 for option in CONFIG_FROM_ENV_VARS:
134 from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
135 if from_env:
136 config[option] = from_env
137
138 if config.get_value("config.mode") == "strict":
139 _, errors = config_declaration.validate(config)
140 if errors:
141 msg = "\n".join(
142 "{}: {}".format(key, "; ".join(issues))
143 for key, issues in errors.items()
144 )
145 raise CkanConfigurationException(msg)
146
147 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
148
149 site_url = config.get_value('ckan.site_url')
150 if not site_url:
151 raise RuntimeError(
152 'ckan.site_url is not configured and it must have a value.'
153 ' Please amend your .ini file.')
154 if not site_url.lower().startswith('http'):
155 raise RuntimeError(
156 'ckan.site_url should be a full URL, including the schema '
157 '(http or https)')
158 # Remove backslash from site_url if present
159 config['ckan.site_url'] = site_url.rstrip('/')
160
161 display_timezone = config.get_value('ckan.display_timezone')
162 if (display_timezone and
163 display_timezone != 'server' and
164 display_timezone not in pytz.all_timezones):
165 raise CkanConfigurationException(
166 "ckan.display_timezone is not 'server' or a valid timezone"
167 )
168
169 # Init SOLR settings and check if the schema is compatible
170 # from ckan.lib.search import SolrSettings, check_solr_schema_version
171
172 # lib.search is imported here as we need the config enabled and parsed
173 search.SolrSettings.init(config.get_value('solr_url'),
174 config.get_value('solr_user'),
175 config.get_value('solr_password'))
176 search.check_solr_schema_version()
177
178 lib_plugins.reset_package_plugins()
179 lib_plugins.register_package_plugins()
180 lib_plugins.reset_group_plugins()
181 lib_plugins.register_group_plugins()
182
183 # initialise the globals
184 app_globals.app_globals._init()
185
186 helpers.load_plugin_helpers()
187
188 # Templates and CSS loading from configuration
189 valid_base_templates_folder_names = ['templates']
190 templates = config.get_value('ckan.base_templates_folder')
191 config['ckan.base_templates_folder'] = templates
192
193 if templates not in valid_base_templates_folder_names:
194 raise CkanConfigurationException(
195 'You provided an invalid value for ckan.base_templates_folder. '
196 'Possible values are: "templates".'
197 )
198
199 jinja2_templates_path = os.path.join(root, templates)
200 log.info('Loading templates from %s' % jinja2_templates_path)
201 template_paths = [jinja2_templates_path]
202
203 extra_template_paths = config.get_value('extra_template_paths')
204 if extra_template_paths:
205 # must be first for them to override defaults
206 template_paths = extra_template_paths.split(',') + template_paths
207 config['computed_template_paths'] = template_paths
208
209 # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
210 # to eliminate database errors due to stale pooled connections
211 config.setdefault('sqlalchemy.pool_pre_ping', True)
212 # Initialize SQLAlchemy
213 engine = sqlalchemy.engine_from_config(config)
214 model.init_model(engine)
215
216 for plugin in p.PluginImplementations(p.IConfigurable):
217 plugin.configure(config)
218
219 # clear other caches
220 logic.clear_actions_cache()
221 logic.clear_validators_cache()
222 authz.clear_auth_functions_cache()
223
224 # Here we create the site user if they are not already in the database
225 try:
226 logic.get_action('get_site_user')({'ignore_auth': True}, None)
227 except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
228 # The database is not yet initialised. It happens in `ckan db init`
229 pass
230
231 # Close current session and open database connections to ensure a clean
232 # clean environment even if an error occurs later on
233 model.Session.remove()
234 model.Session.bind.dispose()
235
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckan/config/environment.py b/ckan/config/environment.py
--- a/ckan/config/environment.py
+++ b/ckan/config/environment.py
@@ -227,6 +227,9 @@
except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
# The database is not yet initialised. It happens in `ckan db init`
pass
+ except sqlalchemy.exc.IntegrityError:
+ # Race condition, user already exists.
+ pass
# Close current session and open database connections to ensure a clean
# clean environment even if an error occurs later on
|
{"golden_diff": "diff --git a/ckan/config/environment.py b/ckan/config/environment.py\n--- a/ckan/config/environment.py\n+++ b/ckan/config/environment.py\n@@ -227,6 +227,9 @@\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # The database is not yet initialised. It happens in `ckan db init`\n pass\n+ except sqlalchemy.exc.IntegrityError:\n+ # Race condition, user already exists.\n+ pass\n \n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n", "issue": "Intermittent duplicate key errors when creating default sysadmin\n**CKAN version**\r\n\r\nCKAN 2.9.x. Does not appear to affect 2.8.\r\n\r\n**Describe the bug**\r\n\r\nWhen our continuous integration tests initialise a test CKAN 2.9 instance, they sometimes encounter a duplicate key exception when CKAN is attempting to create the 'default' sysadmin account. There does not appear to be any specific pattern to when the errors occur; re-running the build may resolve it, or not. This did not occur on CKAN 2.8.\r\n\r\n**Steps to reproduce**\r\n\r\nOur test scripts run the following CLI commands:\r\n\r\nckan -c /etc/ckan/default/production.ini db clean --yes\r\nckan -c /etc/ckan/default/production.ini db init\r\nckan -c /etc/ckan/default/production.ini comments initdb\r\nckan -c /etc/ckan/default/production.ini comments updatedb\r\nckan -c /etc/ckan/default/production.ini comments init_notifications_db\r\n\r\n**Additional details**\r\nSample failed build:\r\n\r\nhttps://github.com/qld-gov-au/ckanext-ytp-comments/runs/4480610398?check_suite_focus=true\r\n\r\n(Search for \"already exists\" to locate the stack trace)\r\n\r\nNB Although the error actually occurred on that run during a `ckanext-ytp-comments` command, it was not specific to the extension, as shown by an excerpt from the stack trace:\r\n\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/cli/cli.py\", line 102, in _init_ckan_config\r\n ckan_1 | ctx.obj = CkanCommand(value)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/cli/cli.py\", line 52, in __init__\r\n ckan_1 | self.app = make_app(self.config)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/config/middleware/__init__.py\", line 56, in make_app\r\n ckan_1 | load_environment(conf)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/config/environment.py\", line 123, in load_environment\r\n ckan_1 | p.load_all()\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/plugins/core.py\", line 161, in load_all\r\n ckan_1 | unload_all()\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/plugins/core.py\", line 208, in unload_all\r\n ckan_1 | unload(*reversed(_PLUGINS))\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/plugins/core.py\", line 236, in unload\r\n ckan_1 | plugins_update()\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/plugins/core.py\", line 153, in plugins_update\r\n ckan_1 | environment.update_config()\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/config/environment.py\", line 322, in update_config\r\n ckan_1 | logic.get_action('get_site_user')({'ignore_auth': True}, None)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/logic/__init__.py\", line 477, in wrapped\r\n ckan_1 | result = _action(context, data_dict, **kw)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/logic/action/get.py\", line 2398, in get_site_user\r\n ckan_1 | model.Session.flush()\n", "before_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nimport pytz\n\nimport sqlalchemy\n\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.plugins as lib_plugins\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nfrom ckan.lib.webassets_tools import webassets_init\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import config, config_declaration\nfrom ckan.exceptions import CkanConfigurationException\nlog = logging.getLogger(__name__)\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n os.environ['CKAN_CONFIG'] = conf['__file__']\n\n valid_base_public_folder_names = ['public']\n static_files = conf.get('ckan.base_public_folder', 'public')\n conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n\n # Initialize main CKAN config object\n config.update(conf)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'solr_user': 'CKAN_SOLR_USER',\n 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n config_declaration.setup()\n config_declaration.make_safe(config)\n config_declaration.normalize(config)\n\n webassets_init()\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n if config.get_value(\"config.mode\") == \"strict\":\n _, errors = config_declaration.validate(config)\n if errors:\n msg = \"\\n\".join(\n \"{}: {}\".format(key, \"; \".join(issues))\n for key, issues in errors.items()\n )\n raise CkanConfigurationException(msg)\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get_value('ckan.site_url')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n # Remove backslash from site_url if present\n config['ckan.site_url'] = site_url.rstrip('/')\n\n display_timezone = config.get_value('ckan.display_timezone')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get_value('solr_url'),\n config.get_value('solr_user'),\n config.get_value('solr_password'))\n search.check_solr_schema_version()\n\n lib_plugins.reset_package_plugins()\n lib_plugins.register_package_plugins()\n lib_plugins.reset_group_plugins()\n lib_plugins.register_group_plugins()\n\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates']\n templates = config.get_value('ckan.base_templates_folder')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. '\n 'Possible values are: \"templates\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get_value('extra_template_paths')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\n # to eliminate database errors due to stale pooled connections\n config.setdefault('sqlalchemy.pool_pre_ping', True)\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config)\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # The database is not yet initialised. It happens in `ckan db init`\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}], "after_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nimport pytz\n\nimport sqlalchemy\n\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.plugins as lib_plugins\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nfrom ckan.lib.webassets_tools import webassets_init\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import config, config_declaration\nfrom ckan.exceptions import CkanConfigurationException\nlog = logging.getLogger(__name__)\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n os.environ['CKAN_CONFIG'] = conf['__file__']\n\n valid_base_public_folder_names = ['public']\n static_files = conf.get('ckan.base_public_folder', 'public')\n conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n\n # Initialize main CKAN config object\n config.update(conf)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'solr_user': 'CKAN_SOLR_USER',\n 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n config_declaration.setup()\n config_declaration.make_safe(config)\n config_declaration.normalize(config)\n\n webassets_init()\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n if config.get_value(\"config.mode\") == \"strict\":\n _, errors = config_declaration.validate(config)\n if errors:\n msg = \"\\n\".join(\n \"{}: {}\".format(key, \"; \".join(issues))\n for key, issues in errors.items()\n )\n raise CkanConfigurationException(msg)\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get_value('ckan.site_url')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n # Remove backslash from site_url if present\n config['ckan.site_url'] = site_url.rstrip('/')\n\n display_timezone = config.get_value('ckan.display_timezone')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get_value('solr_url'),\n config.get_value('solr_user'),\n config.get_value('solr_password'))\n search.check_solr_schema_version()\n\n lib_plugins.reset_package_plugins()\n lib_plugins.register_package_plugins()\n lib_plugins.reset_group_plugins()\n lib_plugins.register_group_plugins()\n\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates']\n templates = config.get_value('ckan.base_templates_folder')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. '\n 'Possible values are: \"templates\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get_value('extra_template_paths')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\n # to eliminate database errors due to stale pooled connections\n config.setdefault('sqlalchemy.pool_pre_ping', True)\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config)\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # The database is not yet initialised. It happens in `ckan db init`\n pass\n except sqlalchemy.exc.IntegrityError:\n # Race condition, user already exists.\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}]}
| 3,657 | 137 |
gh_patches_debug_25836
|
rasdani/github-patches
|
git_diff
|
redis__redis-py-1735
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for FT.AGGREGATE with LOAD *
redis-py should support recent changes to RediSearch. For reference, see the following: RediSearch/RediSearch#2301
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redis/commands/search/aggregation.py`
Content:
```
1 FIELDNAME = object()
2
3
4 class Limit:
5 def __init__(self, offset=0, count=0):
6 self.offset = offset
7 self.count = count
8
9 def build_args(self):
10 if self.count:
11 return ["LIMIT", str(self.offset), str(self.count)]
12 else:
13 return []
14
15
16 class Reducer:
17 """
18 Base reducer object for all reducers.
19
20 See the `redisearch.reducers` module for the actual reducers.
21 """
22
23 NAME = None
24
25 def __init__(self, *args):
26 self._args = args
27 self._field = None
28 self._alias = None
29
30 def alias(self, alias):
31 """
32 Set the alias for this reducer.
33
34 ### Parameters
35
36 - **alias**: The value of the alias for this reducer. If this is the
37 special value `aggregation.FIELDNAME` then this reducer will be
38 aliased using the same name as the field upon which it operates.
39 Note that using `FIELDNAME` is only possible on reducers which
40 operate on a single field value.
41
42 This method returns the `Reducer` object making it suitable for
43 chaining.
44 """
45 if alias is FIELDNAME:
46 if not self._field:
47 raise ValueError("Cannot use FIELDNAME alias with no field")
48 # Chop off initial '@'
49 alias = self._field[1:]
50 self._alias = alias
51 return self
52
53 @property
54 def args(self):
55 return self._args
56
57
58 class SortDirection:
59 """
60 This special class is used to indicate sort direction.
61 """
62
63 DIRSTRING = None
64
65 def __init__(self, field):
66 self.field = field
67
68
69 class Asc(SortDirection):
70 """
71 Indicate that the given field should be sorted in ascending order
72 """
73
74 DIRSTRING = "ASC"
75
76
77 class Desc(SortDirection):
78 """
79 Indicate that the given field should be sorted in descending order
80 """
81
82 DIRSTRING = "DESC"
83
84
85 class AggregateRequest:
86 """
87 Aggregation request which can be passed to `Client.aggregate`.
88 """
89
90 def __init__(self, query="*"):
91 """
92 Create an aggregation request. This request may then be passed to
93 `client.aggregate()`.
94
95 In order for the request to be usable, it must contain at least one
96 group.
97
98 - **query** Query string for filtering records.
99
100 All member methods (except `build_args()`)
101 return the object itself, making them useful for chaining.
102 """
103 self._query = query
104 self._aggregateplan = []
105 self._loadfields = []
106 self._limit = Limit()
107 self._max = 0
108 self._with_schema = False
109 self._verbatim = False
110 self._cursor = []
111
112 def load(self, *fields):
113 """
114 Indicate the fields to be returned in the response. These fields are
115 returned in addition to any others implicitly specified.
116
117 ### Parameters
118
119 - **fields**: One or more fields in the format of `@field`
120 """
121 self._loadfields.extend(fields)
122 return self
123
124 def group_by(self, fields, *reducers):
125 """
126 Specify by which fields to group the aggregation.
127
128 ### Parameters
129
130 - **fields**: Fields to group by. This can either be a single string,
131 or a list of strings. both cases, the field should be specified as
132 `@field`.
133 - **reducers**: One or more reducers. Reducers may be found in the
134 `aggregation` module.
135 """
136 fields = [fields] if isinstance(fields, str) else fields
137 reducers = [reducers] if isinstance(reducers, Reducer) else reducers
138
139 ret = ["GROUPBY", str(len(fields)), *fields]
140 for reducer in reducers:
141 ret += ["REDUCE", reducer.NAME, str(len(reducer.args))]
142 ret.extend(reducer.args)
143 if reducer._alias is not None:
144 ret += ["AS", reducer._alias]
145
146 self._aggregateplan.extend(ret)
147 return self
148
149 def apply(self, **kwexpr):
150 """
151 Specify one or more projection expressions to add to each result
152
153 ### Parameters
154
155 - **kwexpr**: One or more key-value pairs for a projection. The key is
156 the alias for the projection, and the value is the projection
157 expression itself, for example `apply(square_root="sqrt(@foo)")`
158 """
159 for alias, expr in kwexpr.items():
160 ret = ["APPLY", expr]
161 if alias is not None:
162 ret += ["AS", alias]
163 self._aggregateplan.extend(ret)
164
165 return self
166
167 def limit(self, offset, num):
168 """
169 Sets the limit for the most recent group or query.
170
171 If no group has been defined yet (via `group_by()`) then this sets
172 the limit for the initial pool of results from the query. Otherwise,
173 this limits the number of items operated on from the previous group.
174
175 Setting a limit on the initial search results may be useful when
176 attempting to execute an aggregation on a sample of a large data set.
177
178 ### Parameters
179
180 - **offset**: Result offset from which to begin paging
181 - **num**: Number of results to return
182
183
184 Example of sorting the initial results:
185
186 ```
187 AggregateRequest("@sale_amount:[10000, inf]")\
188 .limit(0, 10)\
189 .group_by("@state", r.count())
190 ```
191
192 Will only group by the states found in the first 10 results of the
193 query `@sale_amount:[10000, inf]`. On the other hand,
194
195 ```
196 AggregateRequest("@sale_amount:[10000, inf]")\
197 .limit(0, 1000)\
198 .group_by("@state", r.count()\
199 .limit(0, 10)
200 ```
201
202 Will group all the results matching the query, but only return the
203 first 10 groups.
204
205 If you only wish to return a *top-N* style query, consider using
206 `sort_by()` instead.
207
208 """
209 self._limit = Limit(offset, num)
210 return self
211
212 def sort_by(self, *fields, **kwargs):
213 """
214 Indicate how the results should be sorted. This can also be used for
215 *top-N* style queries
216
217 ### Parameters
218
219 - **fields**: The fields by which to sort. This can be either a single
220 field or a list of fields. If you wish to specify order, you can
221 use the `Asc` or `Desc` wrapper classes.
222 - **max**: Maximum number of results to return. This can be
223 used instead of `LIMIT` and is also faster.
224
225
226 Example of sorting by `foo` ascending and `bar` descending:
227
228 ```
229 sort_by(Asc("@foo"), Desc("@bar"))
230 ```
231
232 Return the top 10 customers:
233
234 ```
235 AggregateRequest()\
236 .group_by("@customer", r.sum("@paid").alias(FIELDNAME))\
237 .sort_by(Desc("@paid"), max=10)
238 ```
239 """
240 if isinstance(fields, (str, SortDirection)):
241 fields = [fields]
242
243 fields_args = []
244 for f in fields:
245 if isinstance(f, SortDirection):
246 fields_args += [f.field, f.DIRSTRING]
247 else:
248 fields_args += [f]
249
250 ret = ["SORTBY", str(len(fields_args))]
251 ret.extend(fields_args)
252 max = kwargs.get("max", 0)
253 if max > 0:
254 ret += ["MAX", str(max)]
255
256 self._aggregateplan.extend(ret)
257 return self
258
259 def filter(self, expressions):
260 """
261 Specify filter for post-query results using predicates relating to
262 values in the result set.
263
264 ### Parameters
265
266 - **fields**: Fields to group by. This can either be a single string,
267 or a list of strings.
268 """
269 if isinstance(expressions, str):
270 expressions = [expressions]
271
272 for expression in expressions:
273 self._aggregateplan.extend(["FILTER", expression])
274
275 return self
276
277 def with_schema(self):
278 """
279 If set, the `schema` property will contain a list of `[field, type]`
280 entries in the result object.
281 """
282 self._with_schema = True
283 return self
284
285 def verbatim(self):
286 self._verbatim = True
287 return self
288
289 def cursor(self, count=0, max_idle=0.0):
290 args = ["WITHCURSOR"]
291 if count:
292 args += ["COUNT", str(count)]
293 if max_idle:
294 args += ["MAXIDLE", str(max_idle * 1000)]
295 self._cursor = args
296 return self
297
298 def build_args(self):
299 # @foo:bar ...
300 ret = [self._query]
301
302 if self._with_schema:
303 ret.append("WITHSCHEMA")
304
305 if self._verbatim:
306 ret.append("VERBATIM")
307
308 if self._cursor:
309 ret += self._cursor
310
311 if self._loadfields:
312 ret.append("LOAD")
313 ret.append(str(len(self._loadfields)))
314 ret.extend(self._loadfields)
315
316 ret.extend(self._aggregateplan)
317
318 ret += self._limit.build_args()
319
320 return ret
321
322
323 class Cursor:
324 def __init__(self, cid):
325 self.cid = cid
326 self.max_idle = 0
327 self.count = 0
328
329 def build_args(self):
330 args = [str(self.cid)]
331 if self.max_idle:
332 args += ["MAXIDLE", str(self.max_idle)]
333 if self.count:
334 args += ["COUNT", str(self.count)]
335 return args
336
337
338 class AggregateResult:
339 def __init__(self, rows, cursor, schema):
340 self.rows = rows
341 self.cursor = cursor
342 self.schema = schema
343
344 def __repr__(self):
345 cid = self.cursor.cid if self.cursor else -1
346 return (
347 f"<{self.__class__.__name__} at 0x{id(self):x} "
348 f"Rows={len(self.rows)}, Cursor={cid}>"
349 )
350
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redis/commands/search/aggregation.py b/redis/commands/search/aggregation.py
--- a/redis/commands/search/aggregation.py
+++ b/redis/commands/search/aggregation.py
@@ -103,6 +103,7 @@
self._query = query
self._aggregateplan = []
self._loadfields = []
+ self._loadall = False
self._limit = Limit()
self._max = 0
self._with_schema = False
@@ -116,9 +117,13 @@
### Parameters
- - **fields**: One or more fields in the format of `@field`
+ - **fields**: If fields not specified, all the fields will be loaded.
+ Otherwise, fields should be given in the format of `@field`.
"""
- self._loadfields.extend(fields)
+ if fields:
+ self._loadfields.extend(fields)
+ else:
+ self._loadall = True
return self
def group_by(self, fields, *reducers):
@@ -308,7 +313,10 @@
if self._cursor:
ret += self._cursor
- if self._loadfields:
+ if self._loadall:
+ ret.append("LOAD")
+ ret.append("*")
+ elif self._loadfields:
ret.append("LOAD")
ret.append(str(len(self._loadfields)))
ret.extend(self._loadfields)
|
{"golden_diff": "diff --git a/redis/commands/search/aggregation.py b/redis/commands/search/aggregation.py\n--- a/redis/commands/search/aggregation.py\n+++ b/redis/commands/search/aggregation.py\n@@ -103,6 +103,7 @@\n self._query = query\n self._aggregateplan = []\n self._loadfields = []\n+ self._loadall = False\n self._limit = Limit()\n self._max = 0\n self._with_schema = False\n@@ -116,9 +117,13 @@\n \n ### Parameters\n \n- - **fields**: One or more fields in the format of `@field`\n+ - **fields**: If fields not specified, all the fields will be loaded.\n+ Otherwise, fields should be given in the format of `@field`.\n \"\"\"\n- self._loadfields.extend(fields)\n+ if fields:\n+ self._loadfields.extend(fields)\n+ else:\n+ self._loadall = True\n return self\n \n def group_by(self, fields, *reducers):\n@@ -308,7 +313,10 @@\n if self._cursor:\n ret += self._cursor\n \n- if self._loadfields:\n+ if self._loadall:\n+ ret.append(\"LOAD\")\n+ ret.append(\"*\")\n+ elif self._loadfields:\n ret.append(\"LOAD\")\n ret.append(str(len(self._loadfields)))\n ret.extend(self._loadfields)\n", "issue": "Add support for FT.AGGREGATE with LOAD *\nredis-py should support recent changes to RediSearch. For reference, see the following: RediSearch/RediSearch#2301\n", "before_files": [{"content": "FIELDNAME = object()\n\n\nclass Limit:\n def __init__(self, offset=0, count=0):\n self.offset = offset\n self.count = count\n\n def build_args(self):\n if self.count:\n return [\"LIMIT\", str(self.offset), str(self.count)]\n else:\n return []\n\n\nclass Reducer:\n \"\"\"\n Base reducer object for all reducers.\n\n See the `redisearch.reducers` module for the actual reducers.\n \"\"\"\n\n NAME = None\n\n def __init__(self, *args):\n self._args = args\n self._field = None\n self._alias = None\n\n def alias(self, alias):\n \"\"\"\n Set the alias for this reducer.\n\n ### Parameters\n\n - **alias**: The value of the alias for this reducer. If this is the\n special value `aggregation.FIELDNAME` then this reducer will be\n aliased using the same name as the field upon which it operates.\n Note that using `FIELDNAME` is only possible on reducers which\n operate on a single field value.\n\n This method returns the `Reducer` object making it suitable for\n chaining.\n \"\"\"\n if alias is FIELDNAME:\n if not self._field:\n raise ValueError(\"Cannot use FIELDNAME alias with no field\")\n # Chop off initial '@'\n alias = self._field[1:]\n self._alias = alias\n return self\n\n @property\n def args(self):\n return self._args\n\n\nclass SortDirection:\n \"\"\"\n This special class is used to indicate sort direction.\n \"\"\"\n\n DIRSTRING = None\n\n def __init__(self, field):\n self.field = field\n\n\nclass Asc(SortDirection):\n \"\"\"\n Indicate that the given field should be sorted in ascending order\n \"\"\"\n\n DIRSTRING = \"ASC\"\n\n\nclass Desc(SortDirection):\n \"\"\"\n Indicate that the given field should be sorted in descending order\n \"\"\"\n\n DIRSTRING = \"DESC\"\n\n\nclass AggregateRequest:\n \"\"\"\n Aggregation request which can be passed to `Client.aggregate`.\n \"\"\"\n\n def __init__(self, query=\"*\"):\n \"\"\"\n Create an aggregation request. This request may then be passed to\n `client.aggregate()`.\n\n In order for the request to be usable, it must contain at least one\n group.\n\n - **query** Query string for filtering records.\n\n All member methods (except `build_args()`)\n return the object itself, making them useful for chaining.\n \"\"\"\n self._query = query\n self._aggregateplan = []\n self._loadfields = []\n self._limit = Limit()\n self._max = 0\n self._with_schema = False\n self._verbatim = False\n self._cursor = []\n\n def load(self, *fields):\n \"\"\"\n Indicate the fields to be returned in the response. These fields are\n returned in addition to any others implicitly specified.\n\n ### Parameters\n\n - **fields**: One or more fields in the format of `@field`\n \"\"\"\n self._loadfields.extend(fields)\n return self\n\n def group_by(self, fields, *reducers):\n \"\"\"\n Specify by which fields to group the aggregation.\n\n ### Parameters\n\n - **fields**: Fields to group by. This can either be a single string,\n or a list of strings. both cases, the field should be specified as\n `@field`.\n - **reducers**: One or more reducers. Reducers may be found in the\n `aggregation` module.\n \"\"\"\n fields = [fields] if isinstance(fields, str) else fields\n reducers = [reducers] if isinstance(reducers, Reducer) else reducers\n\n ret = [\"GROUPBY\", str(len(fields)), *fields]\n for reducer in reducers:\n ret += [\"REDUCE\", reducer.NAME, str(len(reducer.args))]\n ret.extend(reducer.args)\n if reducer._alias is not None:\n ret += [\"AS\", reducer._alias]\n\n self._aggregateplan.extend(ret)\n return self\n\n def apply(self, **kwexpr):\n \"\"\"\n Specify one or more projection expressions to add to each result\n\n ### Parameters\n\n - **kwexpr**: One or more key-value pairs for a projection. The key is\n the alias for the projection, and the value is the projection\n expression itself, for example `apply(square_root=\"sqrt(@foo)\")`\n \"\"\"\n for alias, expr in kwexpr.items():\n ret = [\"APPLY\", expr]\n if alias is not None:\n ret += [\"AS\", alias]\n self._aggregateplan.extend(ret)\n\n return self\n\n def limit(self, offset, num):\n \"\"\"\n Sets the limit for the most recent group or query.\n\n If no group has been defined yet (via `group_by()`) then this sets\n the limit for the initial pool of results from the query. Otherwise,\n this limits the number of items operated on from the previous group.\n\n Setting a limit on the initial search results may be useful when\n attempting to execute an aggregation on a sample of a large data set.\n\n ### Parameters\n\n - **offset**: Result offset from which to begin paging\n - **num**: Number of results to return\n\n\n Example of sorting the initial results:\n\n ```\n AggregateRequest(\"@sale_amount:[10000, inf]\")\\\n .limit(0, 10)\\\n .group_by(\"@state\", r.count())\n ```\n\n Will only group by the states found in the first 10 results of the\n query `@sale_amount:[10000, inf]`. On the other hand,\n\n ```\n AggregateRequest(\"@sale_amount:[10000, inf]\")\\\n .limit(0, 1000)\\\n .group_by(\"@state\", r.count()\\\n .limit(0, 10)\n ```\n\n Will group all the results matching the query, but only return the\n first 10 groups.\n\n If you only wish to return a *top-N* style query, consider using\n `sort_by()` instead.\n\n \"\"\"\n self._limit = Limit(offset, num)\n return self\n\n def sort_by(self, *fields, **kwargs):\n \"\"\"\n Indicate how the results should be sorted. This can also be used for\n *top-N* style queries\n\n ### Parameters\n\n - **fields**: The fields by which to sort. This can be either a single\n field or a list of fields. If you wish to specify order, you can\n use the `Asc` or `Desc` wrapper classes.\n - **max**: Maximum number of results to return. This can be\n used instead of `LIMIT` and is also faster.\n\n\n Example of sorting by `foo` ascending and `bar` descending:\n\n ```\n sort_by(Asc(\"@foo\"), Desc(\"@bar\"))\n ```\n\n Return the top 10 customers:\n\n ```\n AggregateRequest()\\\n .group_by(\"@customer\", r.sum(\"@paid\").alias(FIELDNAME))\\\n .sort_by(Desc(\"@paid\"), max=10)\n ```\n \"\"\"\n if isinstance(fields, (str, SortDirection)):\n fields = [fields]\n\n fields_args = []\n for f in fields:\n if isinstance(f, SortDirection):\n fields_args += [f.field, f.DIRSTRING]\n else:\n fields_args += [f]\n\n ret = [\"SORTBY\", str(len(fields_args))]\n ret.extend(fields_args)\n max = kwargs.get(\"max\", 0)\n if max > 0:\n ret += [\"MAX\", str(max)]\n\n self._aggregateplan.extend(ret)\n return self\n\n def filter(self, expressions):\n \"\"\"\n Specify filter for post-query results using predicates relating to\n values in the result set.\n\n ### Parameters\n\n - **fields**: Fields to group by. This can either be a single string,\n or a list of strings.\n \"\"\"\n if isinstance(expressions, str):\n expressions = [expressions]\n\n for expression in expressions:\n self._aggregateplan.extend([\"FILTER\", expression])\n\n return self\n\n def with_schema(self):\n \"\"\"\n If set, the `schema` property will contain a list of `[field, type]`\n entries in the result object.\n \"\"\"\n self._with_schema = True\n return self\n\n def verbatim(self):\n self._verbatim = True\n return self\n\n def cursor(self, count=0, max_idle=0.0):\n args = [\"WITHCURSOR\"]\n if count:\n args += [\"COUNT\", str(count)]\n if max_idle:\n args += [\"MAXIDLE\", str(max_idle * 1000)]\n self._cursor = args\n return self\n\n def build_args(self):\n # @foo:bar ...\n ret = [self._query]\n\n if self._with_schema:\n ret.append(\"WITHSCHEMA\")\n\n if self._verbatim:\n ret.append(\"VERBATIM\")\n\n if self._cursor:\n ret += self._cursor\n\n if self._loadfields:\n ret.append(\"LOAD\")\n ret.append(str(len(self._loadfields)))\n ret.extend(self._loadfields)\n\n ret.extend(self._aggregateplan)\n\n ret += self._limit.build_args()\n\n return ret\n\n\nclass Cursor:\n def __init__(self, cid):\n self.cid = cid\n self.max_idle = 0\n self.count = 0\n\n def build_args(self):\n args = [str(self.cid)]\n if self.max_idle:\n args += [\"MAXIDLE\", str(self.max_idle)]\n if self.count:\n args += [\"COUNT\", str(self.count)]\n return args\n\n\nclass AggregateResult:\n def __init__(self, rows, cursor, schema):\n self.rows = rows\n self.cursor = cursor\n self.schema = schema\n\n def __repr__(self):\n cid = self.cursor.cid if self.cursor else -1\n return (\n f\"<{self.__class__.__name__} at 0x{id(self):x} \"\n f\"Rows={len(self.rows)}, Cursor={cid}>\"\n )\n", "path": "redis/commands/search/aggregation.py"}], "after_files": [{"content": "FIELDNAME = object()\n\n\nclass Limit:\n def __init__(self, offset=0, count=0):\n self.offset = offset\n self.count = count\n\n def build_args(self):\n if self.count:\n return [\"LIMIT\", str(self.offset), str(self.count)]\n else:\n return []\n\n\nclass Reducer:\n \"\"\"\n Base reducer object for all reducers.\n\n See the `redisearch.reducers` module for the actual reducers.\n \"\"\"\n\n NAME = None\n\n def __init__(self, *args):\n self._args = args\n self._field = None\n self._alias = None\n\n def alias(self, alias):\n \"\"\"\n Set the alias for this reducer.\n\n ### Parameters\n\n - **alias**: The value of the alias for this reducer. If this is the\n special value `aggregation.FIELDNAME` then this reducer will be\n aliased using the same name as the field upon which it operates.\n Note that using `FIELDNAME` is only possible on reducers which\n operate on a single field value.\n\n This method returns the `Reducer` object making it suitable for\n chaining.\n \"\"\"\n if alias is FIELDNAME:\n if not self._field:\n raise ValueError(\"Cannot use FIELDNAME alias with no field\")\n # Chop off initial '@'\n alias = self._field[1:]\n self._alias = alias\n return self\n\n @property\n def args(self):\n return self._args\n\n\nclass SortDirection:\n \"\"\"\n This special class is used to indicate sort direction.\n \"\"\"\n\n DIRSTRING = None\n\n def __init__(self, field):\n self.field = field\n\n\nclass Asc(SortDirection):\n \"\"\"\n Indicate that the given field should be sorted in ascending order\n \"\"\"\n\n DIRSTRING = \"ASC\"\n\n\nclass Desc(SortDirection):\n \"\"\"\n Indicate that the given field should be sorted in descending order\n \"\"\"\n\n DIRSTRING = \"DESC\"\n\n\nclass AggregateRequest:\n \"\"\"\n Aggregation request which can be passed to `Client.aggregate`.\n \"\"\"\n\n def __init__(self, query=\"*\"):\n \"\"\"\n Create an aggregation request. This request may then be passed to\n `client.aggregate()`.\n\n In order for the request to be usable, it must contain at least one\n group.\n\n - **query** Query string for filtering records.\n\n All member methods (except `build_args()`)\n return the object itself, making them useful for chaining.\n \"\"\"\n self._query = query\n self._aggregateplan = []\n self._loadfields = []\n self._loadall = False\n self._limit = Limit()\n self._max = 0\n self._with_schema = False\n self._verbatim = False\n self._cursor = []\n\n def load(self, *fields):\n \"\"\"\n Indicate the fields to be returned in the response. These fields are\n returned in addition to any others implicitly specified.\n\n ### Parameters\n\n - **fields**: If fields not specified, all the fields will be loaded.\n Otherwise, fields should be given in the format of `@field`.\n \"\"\"\n if fields:\n self._loadfields.extend(fields)\n else:\n self._loadall = True\n return self\n\n def group_by(self, fields, *reducers):\n \"\"\"\n Specify by which fields to group the aggregation.\n\n ### Parameters\n\n - **fields**: Fields to group by. This can either be a single string,\n or a list of strings. both cases, the field should be specified as\n `@field`.\n - **reducers**: One or more reducers. Reducers may be found in the\n `aggregation` module.\n \"\"\"\n fields = [fields] if isinstance(fields, str) else fields\n reducers = [reducers] if isinstance(reducers, Reducer) else reducers\n\n ret = [\"GROUPBY\", str(len(fields)), *fields]\n for reducer in reducers:\n ret += [\"REDUCE\", reducer.NAME, str(len(reducer.args))]\n ret.extend(reducer.args)\n if reducer._alias is not None:\n ret += [\"AS\", reducer._alias]\n\n self._aggregateplan.extend(ret)\n return self\n\n def apply(self, **kwexpr):\n \"\"\"\n Specify one or more projection expressions to add to each result\n\n ### Parameters\n\n - **kwexpr**: One or more key-value pairs for a projection. The key is\n the alias for the projection, and the value is the projection\n expression itself, for example `apply(square_root=\"sqrt(@foo)\")`\n \"\"\"\n for alias, expr in kwexpr.items():\n ret = [\"APPLY\", expr]\n if alias is not None:\n ret += [\"AS\", alias]\n self._aggregateplan.extend(ret)\n\n return self\n\n def limit(self, offset, num):\n \"\"\"\n Sets the limit for the most recent group or query.\n\n If no group has been defined yet (via `group_by()`) then this sets\n the limit for the initial pool of results from the query. Otherwise,\n this limits the number of items operated on from the previous group.\n\n Setting a limit on the initial search results may be useful when\n attempting to execute an aggregation on a sample of a large data set.\n\n ### Parameters\n\n - **offset**: Result offset from which to begin paging\n - **num**: Number of results to return\n\n\n Example of sorting the initial results:\n\n ```\n AggregateRequest(\"@sale_amount:[10000, inf]\")\\\n .limit(0, 10)\\\n .group_by(\"@state\", r.count())\n ```\n\n Will only group by the states found in the first 10 results of the\n query `@sale_amount:[10000, inf]`. On the other hand,\n\n ```\n AggregateRequest(\"@sale_amount:[10000, inf]\")\\\n .limit(0, 1000)\\\n .group_by(\"@state\", r.count()\\\n .limit(0, 10)\n ```\n\n Will group all the results matching the query, but only return the\n first 10 groups.\n\n If you only wish to return a *top-N* style query, consider using\n `sort_by()` instead.\n\n \"\"\"\n self._limit = Limit(offset, num)\n return self\n\n def sort_by(self, *fields, **kwargs):\n \"\"\"\n Indicate how the results should be sorted. This can also be used for\n *top-N* style queries\n\n ### Parameters\n\n - **fields**: The fields by which to sort. This can be either a single\n field or a list of fields. If you wish to specify order, you can\n use the `Asc` or `Desc` wrapper classes.\n - **max**: Maximum number of results to return. This can be\n used instead of `LIMIT` and is also faster.\n\n\n Example of sorting by `foo` ascending and `bar` descending:\n\n ```\n sort_by(Asc(\"@foo\"), Desc(\"@bar\"))\n ```\n\n Return the top 10 customers:\n\n ```\n AggregateRequest()\\\n .group_by(\"@customer\", r.sum(\"@paid\").alias(FIELDNAME))\\\n .sort_by(Desc(\"@paid\"), max=10)\n ```\n \"\"\"\n if isinstance(fields, (str, SortDirection)):\n fields = [fields]\n\n fields_args = []\n for f in fields:\n if isinstance(f, SortDirection):\n fields_args += [f.field, f.DIRSTRING]\n else:\n fields_args += [f]\n\n ret = [\"SORTBY\", str(len(fields_args))]\n ret.extend(fields_args)\n max = kwargs.get(\"max\", 0)\n if max > 0:\n ret += [\"MAX\", str(max)]\n\n self._aggregateplan.extend(ret)\n return self\n\n def filter(self, expressions):\n \"\"\"\n Specify filter for post-query results using predicates relating to\n values in the result set.\n\n ### Parameters\n\n - **fields**: Fields to group by. This can either be a single string,\n or a list of strings.\n \"\"\"\n if isinstance(expressions, str):\n expressions = [expressions]\n\n for expression in expressions:\n self._aggregateplan.extend([\"FILTER\", expression])\n\n return self\n\n def with_schema(self):\n \"\"\"\n If set, the `schema` property will contain a list of `[field, type]`\n entries in the result object.\n \"\"\"\n self._with_schema = True\n return self\n\n def verbatim(self):\n self._verbatim = True\n return self\n\n def cursor(self, count=0, max_idle=0.0):\n args = [\"WITHCURSOR\"]\n if count:\n args += [\"COUNT\", str(count)]\n if max_idle:\n args += [\"MAXIDLE\", str(max_idle * 1000)]\n self._cursor = args\n return self\n\n def build_args(self):\n # @foo:bar ...\n ret = [self._query]\n\n if self._with_schema:\n ret.append(\"WITHSCHEMA\")\n\n if self._verbatim:\n ret.append(\"VERBATIM\")\n\n if self._cursor:\n ret += self._cursor\n\n if self._loadall:\n ret.append(\"LOAD\")\n ret.append(\"*\")\n elif self._loadfields:\n ret.append(\"LOAD\")\n ret.append(str(len(self._loadfields)))\n ret.extend(self._loadfields)\n\n ret.extend(self._aggregateplan)\n\n ret += self._limit.build_args()\n\n return ret\n\n\nclass Cursor:\n def __init__(self, cid):\n self.cid = cid\n self.max_idle = 0\n self.count = 0\n\n def build_args(self):\n args = [str(self.cid)]\n if self.max_idle:\n args += [\"MAXIDLE\", str(self.max_idle)]\n if self.count:\n args += [\"COUNT\", str(self.count)]\n return args\n\n\nclass AggregateResult:\n def __init__(self, rows, cursor, schema):\n self.rows = rows\n self.cursor = cursor\n self.schema = schema\n\n def __repr__(self):\n cid = self.cursor.cid if self.cursor else -1\n return (\n f\"<{self.__class__.__name__} at 0x{id(self):x} \"\n f\"Rows={len(self.rows)}, Cursor={cid}>\"\n )\n", "path": "redis/commands/search/aggregation.py"}]}
| 3,492 | 325 |
gh_patches_debug_5297
|
rasdani/github-patches
|
git_diff
|
cornellius-gp__gpytorch-1012
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] IndexError when GridInterpolationKernel wraps a PeriodicKernel
# 🐛 Bug
I tried to adapt the [KISS-GP for 1D data tutorial](https://gpytorch.readthedocs.io/en/latest/examples/02_Scalable_Exact_GPs/KISSGP_Regression.html#KISS-GP-for-1D-Data) to use a `PeriodicKernel` instead of an `RBFKernel`.
However, trying to evaluate the MLL fails with an `IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)`, thrown from [grid_kernel.py: 133](https://github.com/cornellius-gp/gpytorch/blob/0317b121ebaaa921a7851a6af4f2219ff18eeaf0/gpytorch/kernels/grid_kernel.py#L133).
It seems to me this can only be a bug, as an RBF kernel and a Periodic kernel are really not very different semantically?
## To reproduce
** Code snippet to reproduce **
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 7 13:52:48 2020
@author: val
"""
import math
import torch
import gpytorch
#%matplotlib inline
train_x = torch.linspace(0, 1, 1000)
train_y = torch.sin(train_x * (4 * math.pi) + torch.randn(train_x.size()) * 0.2)
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that. Here we are using a grid that has the same number of points as the training data (a ratio of 1.0). Performance can be sensitive to this parameter, so you may want to adjust it for your own problem on a validation set.
grid_size = gpytorch.utils.grid.choose_grid_size(train_x,1.0)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
## NOTE the only difference from the 1D KISS-GP tutorial is that the RBFKernel got replaced with a PeriodicKernel()
gpytorch.kernels.PeriodicKernel(), #gpytorch.kernels.RBFKernel(),
grid_size=grid_size, num_dims=1
)
)
#self.covar_module = gpytorch.kernels.PeriodicKernel()
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
training_iterations = 1
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam([
{'params': model.parameters()}, # Includes GaussianLikelihood parameters
], lr=0.1)
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(training_iterations):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y) ## NOTE fails here.
loss.backward()
optimizer.step()
```
** Stack trace/error message **
```
Traceback (most recent call last):
File "<ipython-input-13-effc04c4ab77>", line 61, in <module>
loss = -mll(output, train_y) ## NOTE fails here.
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py", line 24, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py", line 51, in forward
res = output.log_prob(target)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py", line 135, in log_prob
inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 1038, in inv_quad_logdet
args = self.representation()
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 1268, in representation
representation += list(arg.representation())
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 311, in representation
return self.evaluate_kernel().representation()
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/utils/memoize.py", line 34, in g
add_to_cache(self, cache_name, method(self, *args, **kwargs))
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 278, in evaluate_kernel
res = self.kernel(x1, x2, diag=False, last_dim_is_batch=self.last_dim_is_batch, **self.params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/kernel.py", line 395, in __call__
res = super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py", line 24, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/scale_kernel.py", line 90, in forward
orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 177, in forward
base_lazy_tsr = lazify(self._inducing_forward(last_dim_is_batch=last_dim_is_batch, **params))
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 143, in _inducing_forward
return super().forward(self.grid, self.grid, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py", line 133, in forward
covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py", line 133, in <listcomp>
covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)
```
## Expected Behavior
I expected the training loop to terminate successfully, without throwing errors.
## System information
**Please complete the following information:**
- GPyTorch Version: 1.0.0
- PyTorch Version: 1.3.1
- Computer OS macOS High Sierra 10.13.3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/kernels/periodic_kernel.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import math
4
5 import torch
6
7 from ..constraints import Positive
8 from .kernel import Kernel
9
10
11 class PeriodicKernel(Kernel):
12 r""" Computes a covariance matrix based on the periodic kernel
13 between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
14
15 .. math::
16
17 \begin{equation*}
18 k_{\text{Periodic}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left(
19 \frac{2 \sin^2 \left( \pi \Vert \mathbf{x_1} - \mathbf{x_2} \Vert_1 / p \right) }
20 { \ell^2 } \right)
21 \end{equation*}
22
23 where
24
25 * :math:`p` is the periord length parameter.
26 * :math:`\ell` is a lengthscale parameter.
27
28 .. note::
29
30 This kernel does not have an `outputscale` parameter. To add a scaling parameter,
31 decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.
32
33 .. note::
34
35 This kernel does not have an ARD lengthscale option.
36
37 Args:
38 :attr:`batch_shape` (torch.Size, optional):
39 Set this if you want a separate lengthscale for each
40 batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.
41 :attr:`active_dims` (tuple of ints, optional):
42 Set this if you want to compute the covariance of only a few input dimensions. The ints
43 corresponds to the indices of the dimensions. Default: `None`.
44 :attr:`period_length_prior` (Prior, optional):
45 Set this if you want to apply a prior to the period length parameter. Default: `None`.
46 :attr:`lengthscale_prior` (Prior, optional):
47 Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
48 :attr:`lengthscale_constraint` (Constraint, optional):
49 Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.
50 :attr:`period_length_constraint` (Constraint, optional):
51 Set this if you want to apply a constraint to the value of the period length. Default: `Positive`.
52 :attr:`eps` (float):
53 The minimum value that the lengthscale/period length can take
54 (prevents divide by zero errors). Default: `1e-6`.
55
56 Attributes:
57 :attr:`lengthscale` (Tensor):
58 The lengthscale parameter. Size = `*batch_shape x 1 x 1`.
59 :attr:`period_length` (Tensor):
60 The period length parameter. Size = `*batch_shape x 1 x 1`.
61
62 Example:
63 >>> x = torch.randn(10, 5)
64 >>> # Non-batch: Simple option
65 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
66 >>>
67 >>> batch_x = torch.randn(2, 10, 5)
68 >>> # Batch: Simple option
69 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
70 >>> # Batch: different lengthscale for each batch
71 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_size=2))
72 >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)
73 """
74
75 has_lengthscale = True
76
77 def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):
78 super(PeriodicKernel, self).__init__(**kwargs)
79 if period_length_constraint is None:
80 period_length_constraint = Positive()
81
82 self.register_parameter(
83 name="raw_period_length", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))
84 )
85
86 if period_length_prior is not None:
87 self.register_prior(
88 "period_length_prior",
89 period_length_prior,
90 lambda: self.period_length,
91 lambda v: self._set_period_length(v),
92 )
93
94 self.register_constraint("raw_period_length", period_length_constraint)
95
96 @property
97 def period_length(self):
98 return self.raw_period_length_constraint.transform(self.raw_period_length)
99
100 @period_length.setter
101 def period_length(self, value):
102 self._set_period_length(value)
103
104 def _set_period_length(self, value):
105 if not torch.is_tensor(value):
106 value = torch.as_tensor(value).to(self.raw_period_length)
107 self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))
108
109 def forward(self, x1, x2, diag=False, **params):
110 x1_ = x1.div(self.period_length)
111 x2_ = x2.div(self.period_length)
112 diff = self.covar_dist(x1_, x2_, diag=diag, **params)
113 res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
114 if diff.ndimension() == 2 or diag:
115 res = res.squeeze(0)
116 return res
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gpytorch/kernels/periodic_kernel.py b/gpytorch/kernels/periodic_kernel.py
--- a/gpytorch/kernels/periodic_kernel.py
+++ b/gpytorch/kernels/periodic_kernel.py
@@ -111,6 +111,6 @@
x2_ = x2.div(self.period_length)
diff = self.covar_dist(x1_, x2_, diag=diag, **params)
res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
- if diff.ndimension() == 2 or diag:
+ if diag:
res = res.squeeze(0)
return res
|
{"golden_diff": "diff --git a/gpytorch/kernels/periodic_kernel.py b/gpytorch/kernels/periodic_kernel.py\n--- a/gpytorch/kernels/periodic_kernel.py\n+++ b/gpytorch/kernels/periodic_kernel.py\n@@ -111,6 +111,6 @@\n x2_ = x2.div(self.period_length)\n diff = self.covar_dist(x1_, x2_, diag=diag, **params)\n res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()\n- if diff.ndimension() == 2 or diag:\n+ if diag:\n res = res.squeeze(0)\n return res\n", "issue": "[Bug] IndexError when GridInterpolationKernel wraps a PeriodicKernel\n# \ud83d\udc1b Bug\r\n\r\nI tried to adapt the [KISS-GP for 1D data tutorial](https://gpytorch.readthedocs.io/en/latest/examples/02_Scalable_Exact_GPs/KISSGP_Regression.html#KISS-GP-for-1D-Data) to use a `PeriodicKernel` instead of an `RBFKernel`.\r\n\r\nHowever, trying to evaluate the MLL fails with an `IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)`, thrown from [grid_kernel.py: 133](https://github.com/cornellius-gp/gpytorch/blob/0317b121ebaaa921a7851a6af4f2219ff18eeaf0/gpytorch/kernels/grid_kernel.py#L133).\r\n\r\nIt seems to me this can only be a bug, as an RBF kernel and a Periodic kernel are really not very different semantically?\r\n\r\n## To reproduce\r\n\r\n** Code snippet to reproduce **\r\n\r\n```python\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 7 13:52:48 2020\r\n\r\n@author: val\r\n\"\"\"\r\n\r\n\r\nimport math\r\nimport torch\r\nimport gpytorch\r\n\r\n#%matplotlib inline\r\n\r\ntrain_x = torch.linspace(0, 1, 1000)\r\ntrain_y = torch.sin(train_x * (4 * math.pi) + torch.randn(train_x.size()) * 0.2)\r\n\r\nclass GPRegressionModel(gpytorch.models.ExactGP):\r\n def __init__(self, train_x, train_y, likelihood):\r\n super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)\r\n\r\n # SKI requires a grid size hyperparameter. This util can help with that. Here we are using a grid that has the same number of points as the training data (a ratio of 1.0). Performance can be sensitive to this parameter, so you may want to adjust it for your own problem on a validation set.\r\n grid_size = gpytorch.utils.grid.choose_grid_size(train_x,1.0)\r\n\r\n self.mean_module = gpytorch.means.ConstantMean()\r\n self.covar_module = gpytorch.kernels.ScaleKernel(\r\n gpytorch.kernels.GridInterpolationKernel( \r\n ## NOTE the only difference from the 1D KISS-GP tutorial is that the RBFKernel got replaced with a PeriodicKernel()\r\n gpytorch.kernels.PeriodicKernel(), #gpytorch.kernels.RBFKernel(),\r\n grid_size=grid_size, num_dims=1\r\n )\r\n )\r\n #self.covar_module = gpytorch.kernels.PeriodicKernel() \r\n\r\n def forward(self, x):\r\n mean_x = self.mean_module(x)\r\n covar_x = self.covar_module(x)\r\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\r\n\r\n\r\nlikelihood = gpytorch.likelihoods.GaussianLikelihood()\r\nmodel = GPRegressionModel(train_x, train_y, likelihood)\r\n\r\ntraining_iterations = 1\r\n\r\n# Find optimal model hyperparameters\r\nmodel.train()\r\nlikelihood.train()\r\n\r\n# Use the adam optimizer\r\noptimizer = torch.optim.Adam([\r\n {'params': model.parameters()}, # Includes GaussianLikelihood parameters\r\n], lr=0.1)\r\n\r\n# \"Loss\" for GPs - the marginal log likelihood\r\nmll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\r\n\r\nfor i in range(training_iterations):\r\n optimizer.zero_grad()\r\n output = model(train_x)\r\n loss = -mll(output, train_y) ## NOTE fails here.\r\n loss.backward()\r\n optimizer.step()\r\n```\r\n\r\n** Stack trace/error message **\r\n\r\n```\r\nTraceback (most recent call last):\r\n\r\n File \"<ipython-input-13-effc04c4ab77>\", line 61, in <module>\r\n loss = -mll(output, train_y) ## NOTE fails here.\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py\", line 24, in __call__\r\n outputs = self.forward(*inputs, **kwargs)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py\", line 51, in forward\r\n res = output.log_prob(target)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py\", line 135, in log_prob\r\n inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py\", line 1038, in inv_quad_logdet\r\n args = self.representation()\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py\", line 1268, in representation\r\n representation += list(arg.representation())\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py\", line 311, in representation\r\n return self.evaluate_kernel().representation()\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/utils/memoize.py\", line 34, in g\r\n add_to_cache(self, cache_name, method(self, *args, **kwargs))\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py\", line 278, in evaluate_kernel\r\n res = self.kernel(x1, x2, diag=False, last_dim_is_batch=self.last_dim_is_batch, **self.params)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/kernel.py\", line 395, in __call__\r\n res = super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py\", line 24, in __call__\r\n outputs = self.forward(*inputs, **kwargs)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/scale_kernel.py\", line 90, in forward\r\n orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py\", line 177, in forward\r\n base_lazy_tsr = lazify(self._inducing_forward(last_dim_is_batch=last_dim_is_batch, **params))\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py\", line 143, in _inducing_forward\r\n return super().forward(self.grid, self.grid, last_dim_is_batch=last_dim_is_batch, **params)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py\", line 133, in forward\r\n covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py\", line 133, in <listcomp>\r\n covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]\r\n\r\nIndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)\r\n```\r\n\r\n## Expected Behavior\r\n\r\nI expected the training loop to terminate successfully, without throwing errors.\r\n\r\n\r\n## System information\r\n\r\n**Please complete the following information:**\r\n- GPyTorch Version: 1.0.0\r\n- PyTorch Version: 1.3.1\r\n- Computer OS macOS High Sierra 10.13.3\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport math\n\nimport torch\n\nfrom ..constraints import Positive\nfrom .kernel import Kernel\n\n\nclass PeriodicKernel(Kernel):\n r\"\"\" Computes a covariance matrix based on the periodic kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}`:\n\n .. math::\n\n \\begin{equation*}\n k_{\\text{Periodic}}(\\mathbf{x_1}, \\mathbf{x_2}) = \\exp \\left(\n \\frac{2 \\sin^2 \\left( \\pi \\Vert \\mathbf{x_1} - \\mathbf{x_2} \\Vert_1 / p \\right) }\n { \\ell^2 } \\right)\n \\end{equation*}\n\n where\n\n * :math:`p` is the periord length parameter.\n * :math:`\\ell` is a lengthscale parameter.\n\n .. note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n .. note::\n\n This kernel does not have an ARD lengthscale option.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`period_length_prior` (Prior, optional):\n Set this if you want to apply a prior to the period length parameter. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`lengthscale_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.\n :attr:`period_length_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the period length. Default: `Positive`.\n :attr:`eps` (float):\n The minimum value that the lengthscale/period length can take\n (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. Size = `*batch_shape x 1 x 1`.\n :attr:`period_length` (Tensor):\n The period length parameter. Size = `*batch_shape x 1 x 1`.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_size=2))\n >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)\n \"\"\"\n\n has_lengthscale = True\n\n def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):\n super(PeriodicKernel, self).__init__(**kwargs)\n if period_length_constraint is None:\n period_length_constraint = Positive()\n\n self.register_parameter(\n name=\"raw_period_length\", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))\n )\n\n if period_length_prior is not None:\n self.register_prior(\n \"period_length_prior\",\n period_length_prior,\n lambda: self.period_length,\n lambda v: self._set_period_length(v),\n )\n\n self.register_constraint(\"raw_period_length\", period_length_constraint)\n\n @property\n def period_length(self):\n return self.raw_period_length_constraint.transform(self.raw_period_length)\n\n @period_length.setter\n def period_length(self, value):\n self._set_period_length(value)\n\n def _set_period_length(self, value):\n if not torch.is_tensor(value):\n value = torch.as_tensor(value).to(self.raw_period_length)\n self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))\n\n def forward(self, x1, x2, diag=False, **params):\n x1_ = x1.div(self.period_length)\n x2_ = x2.div(self.period_length)\n diff = self.covar_dist(x1_, x2_, diag=diag, **params)\n res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()\n if diff.ndimension() == 2 or diag:\n res = res.squeeze(0)\n return res\n", "path": "gpytorch/kernels/periodic_kernel.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport math\n\nimport torch\n\nfrom ..constraints import Positive\nfrom .kernel import Kernel\n\n\nclass PeriodicKernel(Kernel):\n r\"\"\" Computes a covariance matrix based on the periodic kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}`:\n\n .. math::\n\n \\begin{equation*}\n k_{\\text{Periodic}}(\\mathbf{x_1}, \\mathbf{x_2}) = \\exp \\left(\n \\frac{2 \\sin^2 \\left( \\pi \\Vert \\mathbf{x_1} - \\mathbf{x_2} \\Vert_1 / p \\right) }\n { \\ell^2 } \\right)\n \\end{equation*}\n\n where\n\n * :math:`p` is the periord length parameter.\n * :math:`\\ell` is a lengthscale parameter.\n\n .. note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n .. note::\n\n This kernel does not have an ARD lengthscale option.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`period_length_prior` (Prior, optional):\n Set this if you want to apply a prior to the period length parameter. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`lengthscale_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.\n :attr:`period_length_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the period length. Default: `Positive`.\n :attr:`eps` (float):\n The minimum value that the lengthscale/period length can take\n (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. Size = `*batch_shape x 1 x 1`.\n :attr:`period_length` (Tensor):\n The period length parameter. Size = `*batch_shape x 1 x 1`.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_size=2))\n >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)\n \"\"\"\n\n has_lengthscale = True\n\n def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):\n super(PeriodicKernel, self).__init__(**kwargs)\n if period_length_constraint is None:\n period_length_constraint = Positive()\n\n self.register_parameter(\n name=\"raw_period_length\", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))\n )\n\n if period_length_prior is not None:\n self.register_prior(\n \"period_length_prior\",\n period_length_prior,\n lambda: self.period_length,\n lambda v: self._set_period_length(v),\n )\n\n self.register_constraint(\"raw_period_length\", period_length_constraint)\n\n @property\n def period_length(self):\n return self.raw_period_length_constraint.transform(self.raw_period_length)\n\n @period_length.setter\n def period_length(self, value):\n self._set_period_length(value)\n\n def _set_period_length(self, value):\n if not torch.is_tensor(value):\n value = torch.as_tensor(value).to(self.raw_period_length)\n self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))\n\n def forward(self, x1, x2, diag=False, **params):\n x1_ = x1.div(self.period_length)\n x2_ = x2.div(self.period_length)\n diff = self.covar_dist(x1_, x2_, diag=diag, **params)\n res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()\n if diag:\n res = res.squeeze(0)\n return res\n", "path": "gpytorch/kernels/periodic_kernel.py"}]}
| 3,490 | 153 |
gh_patches_debug_21
|
rasdani/github-patches
|
git_diff
|
sbi-dev__sbi-31
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move tests to top folder
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sbi/dummytests/whateverstuff.py`
Content:
```
1 # content of test_compute.py
2
3
4 def test_compute(param1):
5 assert param1 < 4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sbi/dummytests/whateverstuff.py b/sbi/dummytests/whateverstuff.py
deleted file mode 100644
--- a/sbi/dummytests/whateverstuff.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# content of test_compute.py
-
-
-def test_compute(param1):
- assert param1 < 4
\ No newline at end of file
|
{"golden_diff": "diff --git a/sbi/dummytests/whateverstuff.py b/sbi/dummytests/whateverstuff.py\ndeleted file mode 100644\n--- a/sbi/dummytests/whateverstuff.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-# content of test_compute.py\n-\n-\n-def test_compute(param1):\n- assert param1 < 4\n\\ No newline at end of file\n", "issue": "Move tests to top folder\n\n", "before_files": [{"content": "# content of test_compute.py\n\n\ndef test_compute(param1):\n assert param1 < 4", "path": "sbi/dummytests/whateverstuff.py"}], "after_files": [{"content": null, "path": "sbi/dummytests/whateverstuff.py"}]}
| 296 | 95 |
gh_patches_debug_4101
|
rasdani/github-patches
|
git_diff
|
pwr-Solaar__Solaar-1001
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In Linux Mint 20, Solaar suddenly won't start
I have installed Solaar on my Linux Mint 20 O/S and was working fine till this morning when Solaar suddenly will not start. Have no idea why.
- Solaar version: solaar 1.0.4
- Distribution: Linux Mint 20 XFCE
- Kernel version (ex. `uname -srmo`): `Linux 5.4.0-53-generic x86_64 GNU/Linux`
- Output of `solaar show`:
(solaar:28696): dbind-WARNING **: 22:33:44.353: Couldn't register with accessibility bus: Did not receive a reply. Possible causes include: the remote application did not send a reply, the message bus security policy blocked the reply, the reply timeout expired, or the network connection was broken.
Unifying Receiver
Device path : /dev/hidraw1
USB id : 046d:C52B
Serial : 43799195
Firmware : 12.11.B0032
Bootloader : 04.16
Other : AA.AA
Has 1 paired device(s) out of a maximum of 6.
Notifications: wireless, software present (0x000900)
Device activity counters: 2=128
2: ERGO M575 Trackball
Device path : /dev/hidraw6
WPID : 4096
Codename : ERGO M575
Kind : mouse
Protocol : HID++ 4.5
Polling rate : 8 ms (125Hz)
Serial number: 0EB362BF
Model ID: B02740960000
Unit ID: 0EB362BF
Bootloader: BOT 40.00.B0009
Firmware: MPM 26.00.B0009
Other:
The power switch is located on the base.
Supports 26 HID++ 2.0 features:
0: ROOT {0000}
1: FEATURE SET {0001}
2: DEVICE FW VERSION {0003}
Firmware: Bootloader BOT 40.00.B0009 0000ABF1CB8B01
Firmware: Firmware MPM 26.00.B0009 4096ABF1CB8B01
Firmware: Other
Unit ID: 0EB362BF Model ID: B02740960000 Transport IDs: {'btleid': 'B027', 'wpid': '4096'}
3: DEVICE NAME {0005}
Name: ERGO M575 Trackball
Kind: trackball
4: WIRELESS DEVICE STATUS {1D4B}
5: RESET {0020}
6: DEVICE FRIENDLY NAME {0007}
7: UNIFIED BATTERY {1004}
8: REPROG CONTROLS V4 {1B04}
Key/Button Actions: {'80': 80, '81': 81, '82': 82, '86': 86, '83': 83}
Key/Button Diversion: {'82': 0, '86': 0, '83': 0, '215': 0}
9: ADJUSTABLE DPI {2201}
Sensitivity (DPI): 400
10: unknown:2250 {2250}
11: DFUCONTROL SIGNED {00C2}
12: DEVICE RESET {1802} internal, hidden
13: unknown:1803 {1803} internal, hidden
14: CONFIG DEVICE PROPS {1806} internal, hidden
15: unknown:1812 {1812} internal, hidden
16: OOBSTATE {1805} internal, hidden
17: unknown:1830 {1830} internal, hidden
18: unknown:1890 {1890} internal, hidden
19: unknown:1891 {1891} internal, hidden
20: unknown:18A1 {18A1} internal, hidden
21: unknown:1E00 {1E00} hidden
22: unknown:1EB0 {1EB0} internal, hidden
23: unknown:1861 {1861} internal, hidden
24: unknown:1E22 {1E22} internal, hidden
25: unknown:1F03 {1F03} internal, hidden
Has 6 reprogrammable keys:
0: Left Button , default: Left Click => Left Click
mse, reprogrammable, analytics key events, pos:0, group:1, group mask:g1
reporting: default
1: Right Button , default: Right Click => Right Click
mse, reprogrammable, analytics key events, pos:0, group:1, group mask:g1
reporting: default
2: Middle Button , default: Mouse Middle Button => Mouse Middle Button
mse, reprogrammable, divertable, persistently divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2
reporting: default
3: Forward Button , default: Mouse Forward Button => Mouse Forward Button
mse, reprogrammable, divertable, persistently divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2
reporting: default
4: Back Button , default: Mouse Back Button => Mouse Back Button
mse, reprogrammable, divertable, persistently divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2
reporting: default
5: Virtual Gesture Button , default: Virtual Gesture Button => Virtual Gesture Button
divertable, virtual, raw XY, force raw XY, pos:0, group:3, group mask:empty
reporting: default
Battery: 100%, discharging.
Nano Receiver
Device path : /dev/hidraw3
USB id : 046d:C534
Serial : None
Firmware : 29.01.B0016
Has 2 paired device(s) out of a maximum of 2.
Has 18 successful pairing(s) remaining.
Notifications: wireless, software present (0x000900)
1: Wireless Keyboard MK270
Device path : /dev/hidraw4
WPID : 4023
Codename : MK270
Kind : keyboard
Protocol : HID++ 2.0
Polling rate : 20 ms (50Hz)
Serial number: 00000000
Model ID: 000000000000
Unit ID: 00000000
Firmware: RQK 49.00.B0029
Supports 18 HID++ 2.0 features:
0: ROOT {0000}
1: FEATURE SET {0001}
2: DEVICE FW VERSION {0003}
Firmware: Firmware RQK 49.00.B0029 4023
Unit ID: 00000000 Model ID: 000000000000 Transport IDs: {}
3: DEVICE NAME {0005}
Name: Wireless Keyboard
Kind: keyboard
4: BATTERY STATUS {1000}
Battery: 30%, discharging, next level 5%.
5: REPROG CONTROLS {1B00}
6: WIRELESS DEVICE STATUS {1D4B}
7: FN INVERSION {40A0}
Swap Fx function: False
8: ENCRYPTION {4100}
9: KEYBOARD LAYOUT {4520}
10: unknown:1810 {1810} internal, hidden
11: unknown:1830 {1830} internal, hidden
12: unknown:1890 {1890} internal, hidden
13: unknown:18A0 {18A0} internal, hidden
14: unknown:18B0 {18B0} internal, hidden
15: unknown:1DF3 {1DF3} internal, hidden
16: unknown:1E00 {1E00} hidden
17: unknown:1868 {1868} internal, hidden
Has 11 reprogrammable keys:
0: My Home => HomePage is FN, FN sensitive, reprogrammable
1: Mail => Email is FN, FN sensitive, reprogrammable
2: Search => Search is FN, FN sensitive, reprogrammable
3: Calculator => Calculator is FN, FN sensitive, reprogrammable
4: Media Player => Music is FN, FN sensitive, reprogrammable
5: Previous => Previous is FN, FN sensitive
6: Play/Pause => Play/Pause is FN, FN sensitive
7: Next => Next is FN, FN sensitive
8: Mute => Mute is FN, FN sensitive
9: Volume Down => Volume Down is FN, FN sensitive
10: Volume Up => Volume Up is FN, FN sensitive
Battery: 30%, discharging, next level 5%.
2: Wireless Mouse M185 new
Device path : /dev/hidraw5
WPID : 4054
Codename : M185n
Kind : mouse
Protocol : HID++ 4.5
Polling rate : 8 ms (125Hz)
Serial number: 00000000
The power switch is located on the base.
Battery: unknown (device is offline).
```
**Describe the bug**
Solaar will Not show icon on task bar and if I run it in terminal I get this:
(solaar:28445): dbind-WARNING **: 22:30:07.881: Couldn't register with accessibility bus: Did not receive a reply. Possible causes include: the remote application did not send a reply, the message bus security policy blocked the reply, the reply timeout expired, or the network connection was broken.
Solaar depends on a udev file that is not present
For more information see the Solaar installation directions
at https://pwr-solaar.github.io/Solaar/installation
solaar: error: Traceback (most recent call last):
File "/usr/share/solaar/lib/solaar/gtk.py", line 153, in main
import solaar.listener as listener
File "/usr/share/solaar/lib/solaar/listener.py", line 32, in <module>
from logitech_receiver import notifications as _notifications
File "/usr/share/solaar/lib/logitech_receiver/notifications.py", line 30, in <module>
from . import diversion as _diversion
File "/usr/share/solaar/lib/logitech_receiver/diversion.py", line 30, in <module>
from pynput import keyboard as _keyboard
ModuleNotFoundError: No module named 'pynput'
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'terminal (or try to start Solaar)'
2. Type solaar in terminal
3. See above error.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
I have NO Idea why Solaar stopped showing the Panel Icon, or opening the GUI in terminal/icon mode. Unless some other update has messed with it. I even removed it completely in Synaptic and re-Installed it.
Never done this before, so I hope this helps.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 from glob import glob as _glob
4
5 try:
6 from setuptools import setup
7 except ImportError:
8 from distutils.core import setup
9
10 # from solaar import NAME, __version__
11 __version__ = '1.0.4'
12 NAME = 'Solaar'
13
14
15 def _data_files():
16 from os.path import dirname as _dirname
17
18 yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')
19 yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')
20 yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']
21
22 for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
23 yield _dirname(mo), [mo]
24
25 yield 'share/applications', ['share/applications/solaar.desktop']
26 yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']
27
28 del _dirname
29
30
31 setup(
32 name=NAME.lower(),
33 version=__version__,
34 description='Linux devices manager for the Logitech Unifying Receiver.',
35 long_description='''
36 Solaar is a Linux device manager for Logitech's Unifying Receiver peripherals.
37 It is able to pair/unpair devices with the receiver, for many devices show
38 battery status, and show and modify some of the modifiable features of devices.
39 '''.strip(),
40 author='Daniel Pavel',
41 license='GPLv2',
42 url='http://pwr-solaar.github.io/Solaar/',
43 classifiers=[
44 'Development Status :: 4 - Beta',
45 'Environment :: X11 Applications :: GTK',
46 'Environment :: Console',
47 'Intended Audience :: End Users/Desktop',
48 'License :: DFSG approved',
49 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
50 'Natural Language :: English',
51 'Programming Language :: Python :: 3 :: Only',
52 'Operating System :: POSIX :: Linux',
53 'Topic :: Utilities',
54 ],
55 platforms=['linux'],
56
57 # sudo apt install python-gi python3-gi \
58 # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1
59 # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],
60 python_requires='>=3.6',
61 install_requires=[
62 'pyudev (>= 0.13)',
63 ],
64 package_dir={'': 'lib'},
65 packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
66 data_files=list(_data_files()),
67 scripts=_glob('bin/*'),
68 )
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -60,6 +60,10 @@
python_requires='>=3.6',
install_requires=[
'pyudev (>= 0.13)',
+ 'PyYAML (>= 5.1)',
+ 'python-xlib (>= 0.27)',
+ 'pynput (>= 1.7.0)',
+ 'psutil (>= 5.7.3)',
],
package_dir={'': 'lib'},
packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -60,6 +60,10 @@\n python_requires='>=3.6',\n install_requires=[\n 'pyudev (>= 0.13)',\n+ 'PyYAML (>= 5.1)',\n+ 'python-xlib (>= 0.27)',\n+ 'pynput (>= 1.7.0)',\n+ 'psutil (>= 5.7.3)',\n ],\n package_dir={'': 'lib'},\n packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n", "issue": "In Linux Mint 20, Solaar suddenly won't start\nI have installed Solaar on my Linux Mint 20 O/S and was working fine till this morning when Solaar suddenly will not start. Have no idea why.\r\n\r\n- Solaar version: solaar 1.0.4\r\n- Distribution: Linux Mint 20 XFCE\r\n- Kernel version (ex. `uname -srmo`): `Linux 5.4.0-53-generic x86_64 GNU/Linux`\r\n- Output of `solaar show`:\r\n(solaar:28696): dbind-WARNING **: 22:33:44.353: Couldn't register with accessibility bus: Did not receive a reply. Possible causes include: the remote application did not send a reply, the message bus security policy blocked the reply, the reply timeout expired, or the network connection was broken.\r\nUnifying Receiver\r\n Device path : /dev/hidraw1\r\n USB id : 046d:C52B\r\n Serial : 43799195\r\n Firmware : 12.11.B0032\r\n Bootloader : 04.16\r\n Other : AA.AA\r\n Has 1 paired device(s) out of a maximum of 6.\r\n Notifications: wireless, software present (0x000900)\r\n Device activity counters: 2=128\r\n\r\n 2: ERGO M575 Trackball\r\n Device path : /dev/hidraw6\r\n WPID : 4096\r\n Codename : ERGO M575\r\n Kind : mouse\r\n Protocol : HID++ 4.5\r\n Polling rate : 8 ms (125Hz)\r\n Serial number: 0EB362BF\r\n Model ID: B02740960000\r\n Unit ID: 0EB362BF\r\n Bootloader: BOT 40.00.B0009\r\n Firmware: MPM 26.00.B0009\r\n Other: \r\n The power switch is located on the base.\r\n Supports 26 HID++ 2.0 features:\r\n 0: ROOT {0000} \r\n 1: FEATURE SET {0001} \r\n 2: DEVICE FW VERSION {0003} \r\n Firmware: Bootloader BOT 40.00.B0009 0000ABF1CB8B01\r\n Firmware: Firmware MPM 26.00.B0009 4096ABF1CB8B01\r\n Firmware: Other \r\n Unit ID: 0EB362BF Model ID: B02740960000 Transport IDs: {'btleid': 'B027', 'wpid': '4096'}\r\n 3: DEVICE NAME {0005} \r\n Name: ERGO M575 Trackball\r\n Kind: trackball\r\n 4: WIRELESS DEVICE STATUS {1D4B} \r\n 5: RESET {0020} \r\n 6: DEVICE FRIENDLY NAME {0007} \r\n 7: UNIFIED BATTERY {1004} \r\n 8: REPROG CONTROLS V4 {1B04} \r\n Key/Button Actions: {'80': 80, '81': 81, '82': 82, '86': 86, '83': 83}\r\n Key/Button Diversion: {'82': 0, '86': 0, '83': 0, '215': 0}\r\n 9: ADJUSTABLE DPI {2201} \r\n Sensitivity (DPI): 400\r\n 10: unknown:2250 {2250} \r\n 11: DFUCONTROL SIGNED {00C2} \r\n 12: DEVICE RESET {1802} internal, hidden\r\n 13: unknown:1803 {1803} internal, hidden\r\n 14: CONFIG DEVICE PROPS {1806} internal, hidden\r\n 15: unknown:1812 {1812} internal, hidden\r\n 16: OOBSTATE {1805} internal, hidden\r\n 17: unknown:1830 {1830} internal, hidden\r\n 18: unknown:1890 {1890} internal, hidden\r\n 19: unknown:1891 {1891} internal, hidden\r\n 20: unknown:18A1 {18A1} internal, hidden\r\n 21: unknown:1E00 {1E00} hidden\r\n 22: unknown:1EB0 {1EB0} internal, hidden\r\n 23: unknown:1861 {1861} internal, hidden\r\n 24: unknown:1E22 {1E22} internal, hidden\r\n 25: unknown:1F03 {1F03} internal, hidden\r\n Has 6 reprogrammable keys:\r\n 0: Left Button , default: Left Click => Left Click \r\n mse, reprogrammable, analytics key events, pos:0, group:1, group mask:g1\r\n reporting: default\r\n 1: Right Button , default: Right Click => Right Click \r\n mse, reprogrammable, analytics key events, pos:0, group:1, group mask:g1\r\n reporting: default\r\n 2: Middle Button , default: Mouse Middle Button => Mouse Middle Button \r\n mse, reprogrammable, divertable, persistently divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2\r\n reporting: default\r\n 3: Forward Button , default: Mouse Forward Button => Mouse Forward Button \r\n mse, reprogrammable, divertable, persistently divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2\r\n reporting: default\r\n 4: Back Button , default: Mouse Back Button => Mouse Back Button \r\n mse, reprogrammable, divertable, persistently divertable, raw XY, analytics key events, pos:0, group:2, group mask:g1,g2\r\n reporting: default\r\n 5: Virtual Gesture Button , default: Virtual Gesture Button => Virtual Gesture Button \r\n divertable, virtual, raw XY, force raw XY, pos:0, group:3, group mask:empty\r\n reporting: default\r\n Battery: 100%, discharging.\r\n\r\nNano Receiver\r\n Device path : /dev/hidraw3\r\n USB id : 046d:C534\r\n Serial : None\r\n Firmware : 29.01.B0016\r\n Has 2 paired device(s) out of a maximum of 2.\r\n Has 18 successful pairing(s) remaining.\r\n Notifications: wireless, software present (0x000900)\r\n\r\n 1: Wireless Keyboard MK270\r\n Device path : /dev/hidraw4\r\n WPID : 4023\r\n Codename : MK270\r\n Kind : keyboard\r\n Protocol : HID++ 2.0\r\n Polling rate : 20 ms (50Hz)\r\n Serial number: 00000000\r\n Model ID: 000000000000\r\n Unit ID: 00000000\r\n Firmware: RQK 49.00.B0029\r\n Supports 18 HID++ 2.0 features:\r\n 0: ROOT {0000} \r\n 1: FEATURE SET {0001} \r\n 2: DEVICE FW VERSION {0003} \r\n Firmware: Firmware RQK 49.00.B0029 4023\r\n Unit ID: 00000000 Model ID: 000000000000 Transport IDs: {}\r\n 3: DEVICE NAME {0005} \r\n Name: Wireless Keyboard \r\n Kind: keyboard\r\n 4: BATTERY STATUS {1000} \r\n Battery: 30%, discharging, next level 5%.\r\n 5: REPROG CONTROLS {1B00} \r\n 6: WIRELESS DEVICE STATUS {1D4B} \r\n 7: FN INVERSION {40A0} \r\n Swap Fx function: False\r\n 8: ENCRYPTION {4100} \r\n 9: KEYBOARD LAYOUT {4520} \r\n 10: unknown:1810 {1810} internal, hidden\r\n 11: unknown:1830 {1830} internal, hidden\r\n 12: unknown:1890 {1890} internal, hidden\r\n 13: unknown:18A0 {18A0} internal, hidden\r\n 14: unknown:18B0 {18B0} internal, hidden\r\n 15: unknown:1DF3 {1DF3} internal, hidden\r\n 16: unknown:1E00 {1E00} hidden\r\n 17: unknown:1868 {1868} internal, hidden\r\n Has 11 reprogrammable keys:\r\n 0: My Home => HomePage is FN, FN sensitive, reprogrammable\r\n 1: Mail => Email is FN, FN sensitive, reprogrammable\r\n 2: Search => Search is FN, FN sensitive, reprogrammable\r\n 3: Calculator => Calculator is FN, FN sensitive, reprogrammable\r\n 4: Media Player => Music is FN, FN sensitive, reprogrammable\r\n 5: Previous => Previous is FN, FN sensitive\r\n 6: Play/Pause => Play/Pause is FN, FN sensitive\r\n 7: Next => Next is FN, FN sensitive\r\n 8: Mute => Mute is FN, FN sensitive\r\n 9: Volume Down => Volume Down is FN, FN sensitive\r\n 10: Volume Up => Volume Up is FN, FN sensitive\r\n Battery: 30%, discharging, next level 5%.\r\n\r\n 2: Wireless Mouse M185 new\r\n Device path : /dev/hidraw5\r\n WPID : 4054\r\n Codename : M185n\r\n Kind : mouse\r\n Protocol : HID++ 4.5\r\n Polling rate : 8 ms (125Hz)\r\n Serial number: 00000000\r\n The power switch is located on the base.\r\n Battery: unknown (device is offline).\r\n\r\n```\r\n\r\n**Describe the bug**\r\nSolaar will Not show icon on task bar and if I run it in terminal I get this:\r\n(solaar:28445): dbind-WARNING **: 22:30:07.881: Couldn't register with accessibility bus: Did not receive a reply. Possible causes include: the remote application did not send a reply, the message bus security policy blocked the reply, the reply timeout expired, or the network connection was broken.\r\nSolaar depends on a udev file that is not present\r\nFor more information see the Solaar installation directions\r\nat https://pwr-solaar.github.io/Solaar/installation\r\nsolaar: error: Traceback (most recent call last):\r\n File \"/usr/share/solaar/lib/solaar/gtk.py\", line 153, in main\r\n import solaar.listener as listener\r\n File \"/usr/share/solaar/lib/solaar/listener.py\", line 32, in <module>\r\n from logitech_receiver import notifications as _notifications\r\n File \"/usr/share/solaar/lib/logitech_receiver/notifications.py\", line 30, in <module>\r\n from . import diversion as _diversion\r\n File \"/usr/share/solaar/lib/logitech_receiver/diversion.py\", line 30, in <module>\r\n from pynput import keyboard as _keyboard\r\nModuleNotFoundError: No module named 'pynput'\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'terminal (or try to start Solaar)'\r\n2. Type solaar in terminal\r\n3. See above error.\r\n\r\n\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Additional context**\r\nI have NO Idea why Solaar stopped showing the Panel Icon, or opening the GUI in terminal/icon mode. Unless some other update has messed with it. I even removed it completely in Synaptic and re-Installed it.\r\n\r\nNever done this before, so I hope this helps.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# from solaar import NAME, __version__\n__version__ = '1.0.4'\nNAME = 'Solaar'\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux devices manager for the Logitech Unifying Receiver.',\n long_description='''\nSolaar is a Linux device manager for Logitech's Unifying Receiver peripherals.\nIt is able to pair/unpair devices with the receiver, for many devices show\nbattery status, and show and modify some of the modifiable features of devices.\n'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.6',\n install_requires=[\n 'pyudev (>= 0.13)',\n ],\n package_dir={'': 'lib'},\n packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# from solaar import NAME, __version__\n__version__ = '1.0.4'\nNAME = 'Solaar'\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux devices manager for the Logitech Unifying Receiver.',\n long_description='''\nSolaar is a Linux device manager for Logitech's Unifying Receiver peripherals.\nIt is able to pair/unpair devices with the receiver, for many devices show\nbattery status, and show and modify some of the modifiable features of devices.\n'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.6',\n install_requires=[\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 5.1)',\n 'python-xlib (>= 0.27)',\n 'pynput (>= 1.7.0)',\n 'psutil (>= 5.7.3)',\n ],\n package_dir={'': 'lib'},\n packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}]}
| 4,034 | 152 |
gh_patches_debug_8681
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-2809
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WarpedVRT issue when alpha
In https://github.com/rasterio/rasterio/blob/d008e8aab38afd5d14d98499847754bd0eeaa501/rasterio/_warp.pyx#L978-L979
`dst_alpha` will always be set to `0` when `add_alpha` is False.
This cause an issue when we already have an alpha band in the data. For some reason the output data will then be totally different.
#### Show image difference
```python
#### Comparison between rasterio / WarpedVRT and GDALWarpedVRT
```python
import os
import rasterio
from rasterio.vrt import WarpedVRT
# Pure Rasterio Read
with rasterio.open("fake.tif") as src:
alpha = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(6,))
arr = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(1,))
print("Rasterio - mean: ", arr[alpha > 0].mean())
# Use WarpedVRT but without reprojection
with rasterio.open("fake.tif") as src:
with WarpedVRT(src, add_alpha=False) as vrt:
alpha = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(6,))
arr = vrt.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(1,))
print("WarpedVRT - mean: ", arr[alpha > 0].mean())
# Create a WarpedVRT using GDAL
os.system("gdalwarp -of VRT fake.tif fake.vrt -overwrite")
with rasterio.open("fake.vrt") as src:
alpha = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(6,))
arr = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(1,))
print("GDAL - mean: ", arr[alpha > 0].mean())
>>> Rasterio - mean: 10000
>>> WarpedVRT - mean: 39
>>> GDAL - mean: 10000
```
First reported in https://github.com/developmentseed/titiler/discussions/629
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/vrt.py`
Content:
```
1 """rasterio.vrt: a module concerned with GDAL VRTs"""
2
3 import xml.etree.ElementTree as ET
4
5 import rasterio
6 from rasterio._warp import WarpedVRTReaderBase
7 from rasterio.dtypes import _gdal_typename
8 from rasterio.enums import MaskFlags
9 from rasterio._path import _parse_path
10 from rasterio.transform import TransformMethodsMixin
11 from rasterio.windows import WindowMethodsMixin
12
13
14 class WarpedVRT(WarpedVRTReaderBase, WindowMethodsMixin,
15 TransformMethodsMixin):
16 """A virtual warped dataset.
17
18 Abstracts the details of raster warping and allows access to data
19 that is reprojected when read.
20
21 This class is backed by an in-memory GDAL VRTWarpedDataset VRT file.
22
23 Parameters
24 ----------
25 src_dataset : dataset object
26 The warp source.
27 src_crs : CRS or str, optional
28 Overrides the coordinate reference system of `src_dataset`.
29 src_transfrom : Affine, optional
30 Overrides the transform of `src_dataset`.
31 src_nodata : float, optional
32 Overrides the nodata value of `src_dataset`, which is the
33 default.
34 crs : CRS or str, optional
35 The coordinate reference system at the end of the warp
36 operation. Default: the crs of `src_dataset`. dst_crs was
37 a deprecated alias for this parameter.
38 transform : Affine, optional
39 The transform for the virtual dataset. Default: will be
40 computed from the attributes of `src_dataset`. dst_transform
41 was a deprecated alias for this parameter.
42 height, width: int, optional
43 The dimensions of the virtual dataset. Defaults: will be
44 computed from the attributes of `src_dataset`. dst_height
45 and dst_width were deprecated alias for these parameters.
46 nodata : float, optional
47 Nodata value for the virtual dataset. Default: the nodata
48 value of `src_dataset` or 0.0. dst_nodata was a deprecated
49 alias for this parameter.
50 resampling : Resampling, optional
51 Warp resampling algorithm. Default: `Resampling.nearest`.
52 tolerance : float, optional
53 The maximum error tolerance in input pixels when
54 approximating the warp transformation. Default: 0.125,
55 or one-eigth of a pixel.
56 src_alpha : int, optional
57 Index of a source band to use as an alpha band for warping.
58 add_alpha : bool, optional
59 Whether to add an alpha masking band to the virtual dataset.
60 Default: False. This option will cause deletion of the VRT
61 nodata value.
62 init_dest_nodata : bool, optional
63 Whether or not to initialize output to `nodata`. Default:
64 True.
65 warp_mem_limit : int, optional
66 The warp operation's memory limit in MB. The default (0)
67 means 64 MB with GDAL 2.2.
68 dtype : str, optional
69 The working data type for warp operation and output.
70 warp_extras : dict
71 GDAL extra warp options. See
72 https://gdal.org/doxygen/structGDALWarpOptions.html.
73
74 Attributes
75 ----------
76 src_dataset : dataset
77 The dataset object to be virtually warped.
78 resampling : int
79 One of the values from rasterio.enums.Resampling. The default is
80 `Resampling.nearest`.
81 tolerance : float
82 The maximum error tolerance in input pixels when approximating
83 the warp transformation. The default is 0.125.
84 src_nodata: int or float, optional
85 The source nodata value. Pixels with this value will not be
86 used for interpolation. If not set, it will be default to the
87 nodata value of the source image, if available.
88 dst_nodata: int or float, optional
89 The nodata value used to initialize the destination; it will
90 remain in all areas not covered by the reprojected source.
91 Defaults to the value of src_nodata, or 0 (gdal default).
92 working_dtype : str, optional
93 The working data type for warp operation and output.
94 warp_extras : dict
95 GDAL extra warp options. See
96 https://gdal.org/doxygen/structGDALWarpOptions.html.
97
98 Examples
99 --------
100
101 >>> with rasterio.open('tests/data/RGB.byte.tif') as src:
102 ... with WarpedVRT(src, crs='EPSG:3857') as vrt:
103 ... data = vrt.read()
104
105 """
106
107 def __repr__(self):
108 return "<{} WarpedVRT name='{}' mode='{}'>".format(
109 self.closed and 'closed' or 'open', self.name, self.mode)
110
111 def __enter__(self):
112 self.start()
113 return self
114
115 def __exit__(self, *args, **kwargs):
116 if not self._closed:
117 self.close()
118
119 def __del__(self):
120 if not self._closed:
121 self.close()
122
123
124 def _boundless_vrt_doc(
125 src_dataset, nodata=None, background=None, hidenodata=False,
126 width=None, height=None, transform=None, masked=False):
127 """Make a VRT XML document.
128
129 Parameters
130 ----------
131 src_dataset : Dataset
132 The dataset to wrap.
133 background : int or float, optional
134 The background fill value for the boundless VRT.
135 masked : bool
136 If True, the src_dataset is replaced by its valid data mask.
137
138 Returns
139 -------
140 str
141 An XML text string.
142 """
143
144 nodata = nodata or src_dataset.nodata
145 width = width or src_dataset.width
146 height = height or src_dataset.height
147 transform = transform or src_dataset.transform
148
149 vrtdataset = ET.Element('VRTDataset')
150 vrtdataset.attrib['rasterYSize'] = str(height)
151 vrtdataset.attrib['rasterXSize'] = str(width)
152 srs = ET.SubElement(vrtdataset, 'SRS')
153 srs.text = src_dataset.crs.wkt if src_dataset.crs else ""
154 geotransform = ET.SubElement(vrtdataset, 'GeoTransform')
155 geotransform.text = ','.join([str(v) for v in transform.to_gdal()])
156
157 for bidx, ci, block_shape, dtype in zip(src_dataset.indexes, src_dataset.colorinterp, src_dataset.block_shapes, src_dataset.dtypes):
158 vrtrasterband = ET.SubElement(vrtdataset, 'VRTRasterBand')
159 vrtrasterband.attrib['dataType'] = _gdal_typename(dtype)
160 vrtrasterband.attrib['band'] = str(bidx)
161
162 if background is not None or nodata is not None:
163 nodatavalue = ET.SubElement(vrtrasterband, 'NoDataValue')
164 nodatavalue.text = str(background or nodata)
165
166 if hidenodata:
167 hidenodatavalue = ET.SubElement(vrtrasterband, 'HideNoDataValue')
168 hidenodatavalue.text = "1"
169
170 colorinterp = ET.SubElement(vrtrasterband, 'ColorInterp')
171 colorinterp.text = ci.name.capitalize()
172
173 complexsource = ET.SubElement(vrtrasterband, 'ComplexSource')
174 sourcefilename = ET.SubElement(complexsource, 'SourceFilename')
175 sourcefilename.attrib['relativeToVRT'] = "0"
176 sourcefilename.attrib["shared"] = "0"
177 sourcefilename.text = _parse_path(src_dataset.name).as_vsi()
178 sourceband = ET.SubElement(complexsource, 'SourceBand')
179 sourceband.text = str(bidx)
180 sourceproperties = ET.SubElement(complexsource, 'SourceProperties')
181 sourceproperties.attrib['RasterXSize'] = str(width)
182 sourceproperties.attrib['RasterYSize'] = str(height)
183 sourceproperties.attrib['dataType'] = _gdal_typename(dtype)
184 sourceproperties.attrib['BlockYSize'] = str(block_shape[0])
185 sourceproperties.attrib['BlockXSize'] = str(block_shape[1])
186 srcrect = ET.SubElement(complexsource, 'SrcRect')
187 srcrect.attrib['xOff'] = '0'
188 srcrect.attrib['yOff'] = '0'
189 srcrect.attrib['xSize'] = str(src_dataset.width)
190 srcrect.attrib['ySize'] = str(src_dataset.height)
191 dstrect = ET.SubElement(complexsource, 'DstRect')
192 dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)
193 dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)
194 dstrect.attrib['xSize'] = str(src_dataset.width * src_dataset.transform.a / transform.a)
195 dstrect.attrib['ySize'] = str(src_dataset.height * src_dataset.transform.e / transform.e)
196
197 if src_dataset.nodata is not None:
198 nodata_elem = ET.SubElement(complexsource, 'NODATA')
199 nodata_elem.text = str(src_dataset.nodata)
200
201 if src_dataset.options is not None:
202 openoptions = ET.SubElement(complexsource, 'OpenOptions')
203 for ookey, oovalue in src_dataset.options.items():
204 ooi = ET.SubElement(openoptions, 'OOI')
205 ooi.attrib['key'] = str(ookey)
206 ooi.text = str(oovalue)
207
208 # Effectively replaces all values of the source dataset with
209 # 255. Due to GDAL optimizations, the source dataset will not
210 # be read, so we get a performance improvement.
211 if masked:
212 scaleratio = ET.SubElement(complexsource, 'ScaleRatio')
213 scaleratio.text = '0'
214 scaleoffset = ET.SubElement(complexsource, 'ScaleOffset')
215 scaleoffset.text = '255'
216
217 if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums):
218 maskband = ET.SubElement(vrtdataset, 'MaskBand')
219 vrtrasterband = ET.SubElement(maskband, 'VRTRasterBand')
220 vrtrasterband.attrib['dataType'] = 'Byte'
221
222 simplesource = ET.SubElement(vrtrasterband, 'SimpleSource')
223 sourcefilename = ET.SubElement(simplesource, 'SourceFilename')
224 sourcefilename.attrib['relativeToVRT'] = "0"
225 sourcefilename.attrib["shared"] = "0"
226 sourcefilename.text = _parse_path(src_dataset.name).as_vsi()
227
228 sourceband = ET.SubElement(simplesource, 'SourceBand')
229 sourceband.text = 'mask,1'
230 sourceproperties = ET.SubElement(simplesource, 'SourceProperties')
231 sourceproperties.attrib['RasterXSize'] = str(width)
232 sourceproperties.attrib['RasterYSize'] = str(height)
233 sourceproperties.attrib['dataType'] = 'Byte'
234 sourceproperties.attrib['BlockYSize'] = str(block_shape[0])
235 sourceproperties.attrib['BlockXSize'] = str(block_shape[1])
236 srcrect = ET.SubElement(simplesource, 'SrcRect')
237 srcrect.attrib['xOff'] = '0'
238 srcrect.attrib['yOff'] = '0'
239 srcrect.attrib['xSize'] = str(src_dataset.width)
240 srcrect.attrib['ySize'] = str(src_dataset.height)
241 dstrect = ET.SubElement(simplesource, 'DstRect')
242 dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)
243 dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)
244 dstrect.attrib['xSize'] = str(src_dataset.width)
245 dstrect.attrib['ySize'] = str(src_dataset.height)
246
247 return ET.tostring(vrtdataset).decode('ascii')
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/vrt.py b/rasterio/vrt.py
--- a/rasterio/vrt.py
+++ b/rasterio/vrt.py
@@ -55,6 +55,8 @@
or one-eigth of a pixel.
src_alpha : int, optional
Index of a source band to use as an alpha band for warping.
+ dst_alpha : int, optional
+ Index of a destination band to use as an alpha band for warping.
add_alpha : bool, optional
Whether to add an alpha masking band to the virtual dataset.
Default: False. This option will cause deletion of the VRT
|
{"golden_diff": "diff --git a/rasterio/vrt.py b/rasterio/vrt.py\n--- a/rasterio/vrt.py\n+++ b/rasterio/vrt.py\n@@ -55,6 +55,8 @@\n or one-eigth of a pixel.\n src_alpha : int, optional\n Index of a source band to use as an alpha band for warping.\n+ dst_alpha : int, optional\n+ Index of a destination band to use as an alpha band for warping.\n add_alpha : bool, optional\n Whether to add an alpha masking band to the virtual dataset.\n Default: False. This option will cause deletion of the VRT\n", "issue": "WarpedVRT issue when alpha \nIn https://github.com/rasterio/rasterio/blob/d008e8aab38afd5d14d98499847754bd0eeaa501/rasterio/_warp.pyx#L978-L979\r\n`dst_alpha` will always be set to `0` when `add_alpha` is False. \r\n\r\nThis cause an issue when we already have an alpha band in the data. For some reason the output data will then be totally different.\r\n\r\n#### Show image difference \r\n\r\n```python\r\n#### Comparison between rasterio / WarpedVRT and GDALWarpedVRT\r\n\r\n```python\r\nimport os\r\nimport rasterio\r\nfrom rasterio.vrt import WarpedVRT\r\n\r\n# Pure Rasterio Read\r\nwith rasterio.open(\"fake.tif\") as src:\r\n alpha = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(6,))\r\n arr = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(1,))\r\n print(\"Rasterio - mean: \", arr[alpha > 0].mean())\r\n\r\n# Use WarpedVRT but without reprojection\r\nwith rasterio.open(\"fake.tif\") as src:\r\n with WarpedVRT(src, add_alpha=False) as vrt:\r\n alpha = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(6,))\r\n arr = vrt.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(1,))\r\n print(\"WarpedVRT - mean: \", arr[alpha > 0].mean())\r\n\r\n\r\n# Create a WarpedVRT using GDAL\r\nos.system(\"gdalwarp -of VRT fake.tif fake.vrt -overwrite\")\r\n\r\nwith rasterio.open(\"fake.vrt\") as src:\r\n alpha = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(6,))\r\n arr = src.read(out_shape=(1, int(src.height/10), int(src.width/10)), indexes=(1,))\r\n print(\"GDAL - mean: \", arr[alpha > 0].mean())\r\n>>> Rasterio - mean: 10000\r\n>>> WarpedVRT - mean: 39\r\n>>> GDAL - mean: 10000\r\n```\r\n\r\nFirst reported in https://github.com/developmentseed/titiler/discussions/629\n", "before_files": [{"content": "\"\"\"rasterio.vrt: a module concerned with GDAL VRTs\"\"\"\n\nimport xml.etree.ElementTree as ET\n\nimport rasterio\nfrom rasterio._warp import WarpedVRTReaderBase\nfrom rasterio.dtypes import _gdal_typename\nfrom rasterio.enums import MaskFlags\nfrom rasterio._path import _parse_path\nfrom rasterio.transform import TransformMethodsMixin\nfrom rasterio.windows import WindowMethodsMixin\n\n\nclass WarpedVRT(WarpedVRTReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"A virtual warped dataset.\n\n Abstracts the details of raster warping and allows access to data\n that is reprojected when read.\n\n This class is backed by an in-memory GDAL VRTWarpedDataset VRT file.\n\n Parameters\n ----------\n src_dataset : dataset object\n The warp source.\n src_crs : CRS or str, optional\n Overrides the coordinate reference system of `src_dataset`.\n src_transfrom : Affine, optional\n Overrides the transform of `src_dataset`.\n src_nodata : float, optional\n Overrides the nodata value of `src_dataset`, which is the\n default.\n crs : CRS or str, optional\n The coordinate reference system at the end of the warp\n operation. Default: the crs of `src_dataset`. dst_crs was\n a deprecated alias for this parameter.\n transform : Affine, optional\n The transform for the virtual dataset. Default: will be\n computed from the attributes of `src_dataset`. dst_transform\n was a deprecated alias for this parameter.\n height, width: int, optional\n The dimensions of the virtual dataset. Defaults: will be\n computed from the attributes of `src_dataset`. dst_height\n and dst_width were deprecated alias for these parameters.\n nodata : float, optional\n Nodata value for the virtual dataset. Default: the nodata\n value of `src_dataset` or 0.0. dst_nodata was a deprecated\n alias for this parameter.\n resampling : Resampling, optional\n Warp resampling algorithm. Default: `Resampling.nearest`.\n tolerance : float, optional\n The maximum error tolerance in input pixels when\n approximating the warp transformation. Default: 0.125,\n or one-eigth of a pixel.\n src_alpha : int, optional\n Index of a source band to use as an alpha band for warping.\n add_alpha : bool, optional\n Whether to add an alpha masking band to the virtual dataset.\n Default: False. This option will cause deletion of the VRT\n nodata value.\n init_dest_nodata : bool, optional\n Whether or not to initialize output to `nodata`. Default:\n True.\n warp_mem_limit : int, optional\n The warp operation's memory limit in MB. The default (0)\n means 64 MB with GDAL 2.2.\n dtype : str, optional\n The working data type for warp operation and output.\n warp_extras : dict\n GDAL extra warp options. See\n https://gdal.org/doxygen/structGDALWarpOptions.html.\n\n Attributes\n ----------\n src_dataset : dataset\n The dataset object to be virtually warped.\n resampling : int\n One of the values from rasterio.enums.Resampling. The default is\n `Resampling.nearest`.\n tolerance : float\n The maximum error tolerance in input pixels when approximating\n the warp transformation. The default is 0.125.\n src_nodata: int or float, optional\n The source nodata value. Pixels with this value will not be\n used for interpolation. If not set, it will be default to the\n nodata value of the source image, if available.\n dst_nodata: int or float, optional\n The nodata value used to initialize the destination; it will\n remain in all areas not covered by the reprojected source.\n Defaults to the value of src_nodata, or 0 (gdal default).\n working_dtype : str, optional\n The working data type for warp operation and output.\n warp_extras : dict\n GDAL extra warp options. See\n https://gdal.org/doxygen/structGDALWarpOptions.html.\n\n Examples\n --------\n\n >>> with rasterio.open('tests/data/RGB.byte.tif') as src:\n ... with WarpedVRT(src, crs='EPSG:3857') as vrt:\n ... data = vrt.read()\n\n \"\"\"\n\n def __repr__(self):\n return \"<{} WarpedVRT name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, *args, **kwargs):\n if not self._closed:\n self.close()\n\n def __del__(self):\n if not self._closed:\n self.close()\n\n\ndef _boundless_vrt_doc(\n src_dataset, nodata=None, background=None, hidenodata=False,\n width=None, height=None, transform=None, masked=False):\n \"\"\"Make a VRT XML document.\n\n Parameters\n ----------\n src_dataset : Dataset\n The dataset to wrap.\n background : int or float, optional\n The background fill value for the boundless VRT.\n masked : bool\n If True, the src_dataset is replaced by its valid data mask.\n\n Returns\n -------\n str\n An XML text string.\n \"\"\"\n\n nodata = nodata or src_dataset.nodata\n width = width or src_dataset.width\n height = height or src_dataset.height\n transform = transform or src_dataset.transform\n\n vrtdataset = ET.Element('VRTDataset')\n vrtdataset.attrib['rasterYSize'] = str(height)\n vrtdataset.attrib['rasterXSize'] = str(width)\n srs = ET.SubElement(vrtdataset, 'SRS')\n srs.text = src_dataset.crs.wkt if src_dataset.crs else \"\"\n geotransform = ET.SubElement(vrtdataset, 'GeoTransform')\n geotransform.text = ','.join([str(v) for v in transform.to_gdal()])\n\n for bidx, ci, block_shape, dtype in zip(src_dataset.indexes, src_dataset.colorinterp, src_dataset.block_shapes, src_dataset.dtypes):\n vrtrasterband = ET.SubElement(vrtdataset, 'VRTRasterBand')\n vrtrasterband.attrib['dataType'] = _gdal_typename(dtype)\n vrtrasterband.attrib['band'] = str(bidx)\n\n if background is not None or nodata is not None:\n nodatavalue = ET.SubElement(vrtrasterband, 'NoDataValue')\n nodatavalue.text = str(background or nodata)\n\n if hidenodata:\n hidenodatavalue = ET.SubElement(vrtrasterband, 'HideNoDataValue')\n hidenodatavalue.text = \"1\"\n\n colorinterp = ET.SubElement(vrtrasterband, 'ColorInterp')\n colorinterp.text = ci.name.capitalize()\n\n complexsource = ET.SubElement(vrtrasterband, 'ComplexSource')\n sourcefilename = ET.SubElement(complexsource, 'SourceFilename')\n sourcefilename.attrib['relativeToVRT'] = \"0\"\n sourcefilename.attrib[\"shared\"] = \"0\"\n sourcefilename.text = _parse_path(src_dataset.name).as_vsi()\n sourceband = ET.SubElement(complexsource, 'SourceBand')\n sourceband.text = str(bidx)\n sourceproperties = ET.SubElement(complexsource, 'SourceProperties')\n sourceproperties.attrib['RasterXSize'] = str(width)\n sourceproperties.attrib['RasterYSize'] = str(height)\n sourceproperties.attrib['dataType'] = _gdal_typename(dtype)\n sourceproperties.attrib['BlockYSize'] = str(block_shape[0])\n sourceproperties.attrib['BlockXSize'] = str(block_shape[1])\n srcrect = ET.SubElement(complexsource, 'SrcRect')\n srcrect.attrib['xOff'] = '0'\n srcrect.attrib['yOff'] = '0'\n srcrect.attrib['xSize'] = str(src_dataset.width)\n srcrect.attrib['ySize'] = str(src_dataset.height)\n dstrect = ET.SubElement(complexsource, 'DstRect')\n dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)\n dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)\n dstrect.attrib['xSize'] = str(src_dataset.width * src_dataset.transform.a / transform.a)\n dstrect.attrib['ySize'] = str(src_dataset.height * src_dataset.transform.e / transform.e)\n\n if src_dataset.nodata is not None:\n nodata_elem = ET.SubElement(complexsource, 'NODATA')\n nodata_elem.text = str(src_dataset.nodata)\n\n if src_dataset.options is not None:\n openoptions = ET.SubElement(complexsource, 'OpenOptions')\n for ookey, oovalue in src_dataset.options.items():\n ooi = ET.SubElement(openoptions, 'OOI')\n ooi.attrib['key'] = str(ookey)\n ooi.text = str(oovalue)\n\n # Effectively replaces all values of the source dataset with\n # 255. Due to GDAL optimizations, the source dataset will not\n # be read, so we get a performance improvement.\n if masked:\n scaleratio = ET.SubElement(complexsource, 'ScaleRatio')\n scaleratio.text = '0'\n scaleoffset = ET.SubElement(complexsource, 'ScaleOffset')\n scaleoffset.text = '255'\n\n if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums):\n maskband = ET.SubElement(vrtdataset, 'MaskBand')\n vrtrasterband = ET.SubElement(maskband, 'VRTRasterBand')\n vrtrasterband.attrib['dataType'] = 'Byte'\n\n simplesource = ET.SubElement(vrtrasterband, 'SimpleSource')\n sourcefilename = ET.SubElement(simplesource, 'SourceFilename')\n sourcefilename.attrib['relativeToVRT'] = \"0\"\n sourcefilename.attrib[\"shared\"] = \"0\"\n sourcefilename.text = _parse_path(src_dataset.name).as_vsi()\n\n sourceband = ET.SubElement(simplesource, 'SourceBand')\n sourceband.text = 'mask,1'\n sourceproperties = ET.SubElement(simplesource, 'SourceProperties')\n sourceproperties.attrib['RasterXSize'] = str(width)\n sourceproperties.attrib['RasterYSize'] = str(height)\n sourceproperties.attrib['dataType'] = 'Byte'\n sourceproperties.attrib['BlockYSize'] = str(block_shape[0])\n sourceproperties.attrib['BlockXSize'] = str(block_shape[1])\n srcrect = ET.SubElement(simplesource, 'SrcRect')\n srcrect.attrib['xOff'] = '0'\n srcrect.attrib['yOff'] = '0'\n srcrect.attrib['xSize'] = str(src_dataset.width)\n srcrect.attrib['ySize'] = str(src_dataset.height)\n dstrect = ET.SubElement(simplesource, 'DstRect')\n dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)\n dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)\n dstrect.attrib['xSize'] = str(src_dataset.width)\n dstrect.attrib['ySize'] = str(src_dataset.height)\n\n return ET.tostring(vrtdataset).decode('ascii')\n", "path": "rasterio/vrt.py"}], "after_files": [{"content": "\"\"\"rasterio.vrt: a module concerned with GDAL VRTs\"\"\"\n\nimport xml.etree.ElementTree as ET\n\nimport rasterio\nfrom rasterio._warp import WarpedVRTReaderBase\nfrom rasterio.dtypes import _gdal_typename\nfrom rasterio.enums import MaskFlags\nfrom rasterio._path import _parse_path\nfrom rasterio.transform import TransformMethodsMixin\nfrom rasterio.windows import WindowMethodsMixin\n\n\nclass WarpedVRT(WarpedVRTReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"A virtual warped dataset.\n\n Abstracts the details of raster warping and allows access to data\n that is reprojected when read.\n\n This class is backed by an in-memory GDAL VRTWarpedDataset VRT file.\n\n Parameters\n ----------\n src_dataset : dataset object\n The warp source.\n src_crs : CRS or str, optional\n Overrides the coordinate reference system of `src_dataset`.\n src_transfrom : Affine, optional\n Overrides the transform of `src_dataset`.\n src_nodata : float, optional\n Overrides the nodata value of `src_dataset`, which is the\n default.\n crs : CRS or str, optional\n The coordinate reference system at the end of the warp\n operation. Default: the crs of `src_dataset`. dst_crs was\n a deprecated alias for this parameter.\n transform : Affine, optional\n The transform for the virtual dataset. Default: will be\n computed from the attributes of `src_dataset`. dst_transform\n was a deprecated alias for this parameter.\n height, width: int, optional\n The dimensions of the virtual dataset. Defaults: will be\n computed from the attributes of `src_dataset`. dst_height\n and dst_width were deprecated alias for these parameters.\n nodata : float, optional\n Nodata value for the virtual dataset. Default: the nodata\n value of `src_dataset` or 0.0. dst_nodata was a deprecated\n alias for this parameter.\n resampling : Resampling, optional\n Warp resampling algorithm. Default: `Resampling.nearest`.\n tolerance : float, optional\n The maximum error tolerance in input pixels when\n approximating the warp transformation. Default: 0.125,\n or one-eigth of a pixel.\n src_alpha : int, optional\n Index of a source band to use as an alpha band for warping.\n dst_alpha : int, optional\n Index of a destination band to use as an alpha band for warping.\n add_alpha : bool, optional\n Whether to add an alpha masking band to the virtual dataset.\n Default: False. This option will cause deletion of the VRT\n nodata value.\n init_dest_nodata : bool, optional\n Whether or not to initialize output to `nodata`. Default:\n True.\n warp_mem_limit : int, optional\n The warp operation's memory limit in MB. The default (0)\n means 64 MB with GDAL 2.2.\n dtype : str, optional\n The working data type for warp operation and output.\n warp_extras : dict\n GDAL extra warp options. See\n https://gdal.org/doxygen/structGDALWarpOptions.html.\n\n Attributes\n ----------\n src_dataset : dataset\n The dataset object to be virtually warped.\n resampling : int\n One of the values from rasterio.enums.Resampling. The default is\n `Resampling.nearest`.\n tolerance : float\n The maximum error tolerance in input pixels when approximating\n the warp transformation. The default is 0.125.\n src_nodata: int or float, optional\n The source nodata value. Pixels with this value will not be\n used for interpolation. If not set, it will be default to the\n nodata value of the source image, if available.\n dst_nodata: int or float, optional\n The nodata value used to initialize the destination; it will\n remain in all areas not covered by the reprojected source.\n Defaults to the value of src_nodata, or 0 (gdal default).\n working_dtype : str, optional\n The working data type for warp operation and output.\n warp_extras : dict\n GDAL extra warp options. See\n https://gdal.org/doxygen/structGDALWarpOptions.html.\n\n Examples\n --------\n\n >>> with rasterio.open('tests/data/RGB.byte.tif') as src:\n ... with WarpedVRT(src, crs='EPSG:3857') as vrt:\n ... data = vrt.read()\n\n \"\"\"\n\n def __repr__(self):\n return \"<{} WarpedVRT name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, *args, **kwargs):\n if not self._closed:\n self.close()\n\n def __del__(self):\n if not self._closed:\n self.close()\n\n\ndef _boundless_vrt_doc(\n src_dataset, nodata=None, background=None, hidenodata=False,\n width=None, height=None, transform=None, masked=False):\n \"\"\"Make a VRT XML document.\n\n Parameters\n ----------\n src_dataset : Dataset\n The dataset to wrap.\n background : int or float, optional\n The background fill value for the boundless VRT.\n masked : bool\n If True, the src_dataset is replaced by its valid data mask.\n\n Returns\n -------\n str\n An XML text string.\n \"\"\"\n\n nodata = nodata or src_dataset.nodata\n width = width or src_dataset.width\n height = height or src_dataset.height\n transform = transform or src_dataset.transform\n\n vrtdataset = ET.Element('VRTDataset')\n vrtdataset.attrib['rasterYSize'] = str(height)\n vrtdataset.attrib['rasterXSize'] = str(width)\n srs = ET.SubElement(vrtdataset, 'SRS')\n srs.text = src_dataset.crs.wkt if src_dataset.crs else \"\"\n geotransform = ET.SubElement(vrtdataset, 'GeoTransform')\n geotransform.text = ','.join([str(v) for v in transform.to_gdal()])\n\n for bidx, ci, block_shape, dtype in zip(src_dataset.indexes, src_dataset.colorinterp, src_dataset.block_shapes, src_dataset.dtypes):\n vrtrasterband = ET.SubElement(vrtdataset, 'VRTRasterBand')\n vrtrasterband.attrib['dataType'] = _gdal_typename(dtype)\n vrtrasterband.attrib['band'] = str(bidx)\n\n if background is not None or nodata is not None:\n nodatavalue = ET.SubElement(vrtrasterband, 'NoDataValue')\n nodatavalue.text = str(background or nodata)\n\n if hidenodata:\n hidenodatavalue = ET.SubElement(vrtrasterband, 'HideNoDataValue')\n hidenodatavalue.text = \"1\"\n\n colorinterp = ET.SubElement(vrtrasterband, 'ColorInterp')\n colorinterp.text = ci.name.capitalize()\n\n complexsource = ET.SubElement(vrtrasterband, 'ComplexSource')\n sourcefilename = ET.SubElement(complexsource, 'SourceFilename')\n sourcefilename.attrib['relativeToVRT'] = \"0\"\n sourcefilename.attrib[\"shared\"] = \"0\"\n sourcefilename.text = _parse_path(src_dataset.name).as_vsi()\n sourceband = ET.SubElement(complexsource, 'SourceBand')\n sourceband.text = str(bidx)\n sourceproperties = ET.SubElement(complexsource, 'SourceProperties')\n sourceproperties.attrib['RasterXSize'] = str(width)\n sourceproperties.attrib['RasterYSize'] = str(height)\n sourceproperties.attrib['dataType'] = _gdal_typename(dtype)\n sourceproperties.attrib['BlockYSize'] = str(block_shape[0])\n sourceproperties.attrib['BlockXSize'] = str(block_shape[1])\n srcrect = ET.SubElement(complexsource, 'SrcRect')\n srcrect.attrib['xOff'] = '0'\n srcrect.attrib['yOff'] = '0'\n srcrect.attrib['xSize'] = str(src_dataset.width)\n srcrect.attrib['ySize'] = str(src_dataset.height)\n dstrect = ET.SubElement(complexsource, 'DstRect')\n dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)\n dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)\n dstrect.attrib['xSize'] = str(src_dataset.width * src_dataset.transform.a / transform.a)\n dstrect.attrib['ySize'] = str(src_dataset.height * src_dataset.transform.e / transform.e)\n\n if src_dataset.nodata is not None:\n nodata_elem = ET.SubElement(complexsource, 'NODATA')\n nodata_elem.text = str(src_dataset.nodata)\n\n if src_dataset.options is not None:\n openoptions = ET.SubElement(complexsource, 'OpenOptions')\n for ookey, oovalue in src_dataset.options.items():\n ooi = ET.SubElement(openoptions, 'OOI')\n ooi.attrib['key'] = str(ookey)\n ooi.text = str(oovalue)\n\n # Effectively replaces all values of the source dataset with\n # 255. Due to GDAL optimizations, the source dataset will not\n # be read, so we get a performance improvement.\n if masked:\n scaleratio = ET.SubElement(complexsource, 'ScaleRatio')\n scaleratio.text = '0'\n scaleoffset = ET.SubElement(complexsource, 'ScaleOffset')\n scaleoffset.text = '255'\n\n if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums):\n maskband = ET.SubElement(vrtdataset, 'MaskBand')\n vrtrasterband = ET.SubElement(maskband, 'VRTRasterBand')\n vrtrasterband.attrib['dataType'] = 'Byte'\n\n simplesource = ET.SubElement(vrtrasterband, 'SimpleSource')\n sourcefilename = ET.SubElement(simplesource, 'SourceFilename')\n sourcefilename.attrib['relativeToVRT'] = \"0\"\n sourcefilename.attrib[\"shared\"] = \"0\"\n sourcefilename.text = _parse_path(src_dataset.name).as_vsi()\n\n sourceband = ET.SubElement(simplesource, 'SourceBand')\n sourceband.text = 'mask,1'\n sourceproperties = ET.SubElement(simplesource, 'SourceProperties')\n sourceproperties.attrib['RasterXSize'] = str(width)\n sourceproperties.attrib['RasterYSize'] = str(height)\n sourceproperties.attrib['dataType'] = 'Byte'\n sourceproperties.attrib['BlockYSize'] = str(block_shape[0])\n sourceproperties.attrib['BlockXSize'] = str(block_shape[1])\n srcrect = ET.SubElement(simplesource, 'SrcRect')\n srcrect.attrib['xOff'] = '0'\n srcrect.attrib['yOff'] = '0'\n srcrect.attrib['xSize'] = str(src_dataset.width)\n srcrect.attrib['ySize'] = str(src_dataset.height)\n dstrect = ET.SubElement(simplesource, 'DstRect')\n dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)\n dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)\n dstrect.attrib['xSize'] = str(src_dataset.width)\n dstrect.attrib['ySize'] = str(src_dataset.height)\n\n return ET.tostring(vrtdataset).decode('ascii')\n", "path": "rasterio/vrt.py"}]}
| 4,009 | 143 |
gh_patches_debug_42
|
rasdani/github-patches
|
git_diff
|
wright-group__WrightTools-221
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
make setup.py an executable script
Change file permissions and reformat the shebang line
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # !/usr/bin/env python
2
3 import os
4 from setuptools import setup, find_packages
5
6
7 def package_files(directory):
8 paths = []
9 for (path, directories, filenames) in os.walk(directory):
10 for filename in filenames:
11 paths.append(os.path.join('..', path, filename))
12 return paths
13
14
15 here = os.path.abspath(os.path.dirname(__file__))
16
17 extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))
18 extra_files.append(os.path.join(here, 'CONTRIBUTORS'))
19 extra_files.append(os.path.join(here, 'LICENSE'))
20 extra_files.append(os.path.join(here, 'README.rst'))
21 extra_files.append(os.path.join(here, 'requirements.txt'))
22 extra_files.append(os.path.join(here, 'VERSION'))
23 extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))
24
25 with open(os.path.join(here, 'requirements.txt')) as f:
26 required = f.read().splitlines()
27
28 with open(os.path.join(here, 'VERSION')) as version_file:
29 version = version_file.read().strip()
30
31 setup(
32 name='WrightTools',
33 packages=find_packages(),
34 package_data={'': extra_files},
35 setup_requires=['pytest-runner'],
36 tests_require=['pytest'],
37 install_requires=required,
38 extras_require={'docs': ['sphinx-gallery>=0.1.9']},
39 version=version,
40 description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',
41 author='Blaise Thompson',
42 author_email='blaise@untzag.com',
43 license='MIT',
44 url='http://wright.tools',
45 keywords='spectroscopy science multidimensional visualization',
46 classifiers=['Development Status :: 5 - Production/Stable',
47 'Intended Audience :: Science/Research',
48 'License :: OSI Approved :: MIT License',
49 'Natural Language :: English',
50 'Programming Language :: Python :: 2',
51 'Programming Language :: Python :: 2.7',
52 'Programming Language :: Python :: 3',
53 'Programming Language :: Python :: 3.3',
54 'Programming Language :: Python :: 3.4',
55 'Programming Language :: Python :: 3.5',
56 'Topic :: Scientific/Engineering']
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-# !/usr/bin/env python
+#! /usr/bin/env python
import os
from setuptools import setup, find_packages
|
{"golden_diff": "diff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,4 @@\n-# !/usr/bin/env python\n+#! /usr/bin/env python\n \n import os\n from setuptools import setup, find_packages\n", "issue": "make setup.py an executable script\nChange file permissions and reformat the shebang line\n", "before_files": [{"content": "# !/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='blaise@untzag.com',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}], "after_files": [{"content": "#! /usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='blaise@untzag.com',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}]}
| 858 | 78 |
gh_patches_debug_8151
|
rasdani/github-patches
|
git_diff
|
google__turbinia-826
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Jupyter FileExtractionTask task fails
```
2021-02-25 17:17:55 [INFO] Execution of [['sudo', 'image_export.py', '--logfile', '/evidence/1614273470-4a7632dfe76c428797ce0ddeafc684bf-FileArtifactExtractionTask/4a7632dfe76c428797ce0ddeafc684bf.log', '-w', '/evidence/1614273470-4a7632dfe76c428797ce0ddeafc684bf-FileArtifactExtractionTask/export', '--partitions', 'all', '--artifact_filters', 'JupyterConfigFile', '-d', '/dev/loop1']] failed with status 1
2021-02-25 17:17:55 [INFO] image_export.py failed for artifact JupyterConfigFile.
```
https://github.com/google/turbinia/pull/766/checks?check_run_id=1981113701#step:9:842
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/workers/analysis/jupyter.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2020 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Task for analysing Jupyter."""
16
17 from __future__ import unicode_literals
18
19 import os
20 import re
21
22 from turbinia import TurbiniaException
23 from turbinia.evidence import EvidenceState as state
24 from turbinia.evidence import ReportText
25 from turbinia.lib import text_formatter as fmt
26 from turbinia.workers import TurbiniaTask
27 from turbinia.workers import Priority
28
29
30 class JupyterAnalysisTask(TurbiniaTask):
31 """Task to analyze a Jupyter Notebook config."""
32
33 REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]
34
35 def run(self, evidence, result):
36 """Run the Jupyter worker.
37
38 Args:
39 evidence (Evidence object): The evidence to process
40 result (TurbiniaTaskResult): The object to place task results into.
41
42 Returns:
43 TurbiniaTaskResult object.
44 """
45
46 # Where to store the resulting output file.
47 output_file_name = 'jupyter_analysis.txt'
48 output_file_path = os.path.join(self.output_dir, output_file_name)
49
50 # What type of evidence we should output.
51 output_evidence = ReportText(source_path=output_file_path)
52
53 # Read the config file.
54
55 jupyter_config = open(evidence.local_path, 'r').read()
56
57 # Extract the config and return the report
58 (report, priority, summary) = self.analyse_config(jupyter_config)
59 output_evidence.text_data = report
60 result.report_priority = priority
61 result.report_data = report
62
63 # Write the report to the output file.
64 with open(output_file_path, 'w') as fh:
65 fh.write(output_evidence.text_data.encode('utf8'))
66 fh.write('\n'.encode('utf8'))
67
68 # Add the resulting evidence to the result object.
69 result.add_evidence(output_evidence, evidence.config)
70 result.close(self, success=True, status=summary)
71
72 return result
73
74 def analyse_config(self, jupyter_config):
75 """Extract security related configs from Jupyter configuration files.
76
77 Args:
78 config (str): configuration file content.
79
80 Returns:
81 Tuple(
82 report_text(str): The report data
83 report_priority(int): The priority of the report (0 - 100)
84 summary(str): A summary of the report (used for task status)
85 )
86 """
87 findings = []
88 num_misconfigs = 0
89 for line in jupyter_config.split('\n'):
90
91 if all(x in line for x in ['disable_check_xsrf', 'True']):
92 findings.append(fmt.bullet('XSRF protection is disabled.'))
93 num_misconfigs += 1
94 continue
95 if all(x in line for x in ['allow_root', 'True']):
96 findings.append(fmt.bullet('Juypter Notebook allowed to run as root.'))
97 num_misconfigs += 1
98 continue
99 if 'NotebookApp.password' in line:
100 if all(x in line for x in ['required', 'False']):
101 findings.append(
102 fmt.bullet(
103 'Password is not required to access this Jupyter Notebook.'))
104 num_misconfigs += 1
105 continue
106 if 'required' not in line:
107 password_hash = line.split('=')
108 if len(password_hash) > 1:
109 if password_hash[1].strip() == "''":
110 findings.append(
111 fmt.bullet(
112 'There is no password set for this Jupyter Notebook.'))
113 num_misconfigs += 1
114 if all(x in line for x in ['allow_remote_access', 'True']):
115 findings.append(
116 fmt.bullet('Remote access is enabled on this Jupyter Notebook.'))
117 num_misconfigs += 1
118 continue
119
120 if findings:
121 summary = 'Insecure Jupyter Notebook configuration found. Total misconfigs: {}'.format(
122 num_misconfigs)
123 findings.insert(0, fmt.heading4(fmt.bold(summary)))
124 report = '\n'.join(findings)
125 return (report, Priority.HIGH, summary)
126
127 report = 'No issues found in Jupyter Notebook configuration.'
128 return (report, Priority.LOW, report)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/turbinia/workers/analysis/jupyter.py b/turbinia/workers/analysis/jupyter.py
--- a/turbinia/workers/analysis/jupyter.py
+++ b/turbinia/workers/analysis/jupyter.py
@@ -61,9 +61,9 @@
result.report_data = report
# Write the report to the output file.
- with open(output_file_path, 'w') as fh:
- fh.write(output_evidence.text_data.encode('utf8'))
- fh.write('\n'.encode('utf8'))
+ with open(output_file_path, 'w', encoding='utf-8') as fh:
+ fh.write(output_evidence.text_data)
+ fh.write('\n')
# Add the resulting evidence to the result object.
result.add_evidence(output_evidence, evidence.config)
|
{"golden_diff": "diff --git a/turbinia/workers/analysis/jupyter.py b/turbinia/workers/analysis/jupyter.py\n--- a/turbinia/workers/analysis/jupyter.py\n+++ b/turbinia/workers/analysis/jupyter.py\n@@ -61,9 +61,9 @@\n result.report_data = report\n \n # Write the report to the output file.\n- with open(output_file_path, 'w') as fh:\n- fh.write(output_evidence.text_data.encode('utf8'))\n- fh.write('\\n'.encode('utf8'))\n+ with open(output_file_path, 'w', encoding='utf-8') as fh:\n+ fh.write(output_evidence.text_data)\n+ fh.write('\\n')\n \n # Add the resulting evidence to the result object.\n result.add_evidence(output_evidence, evidence.config)\n", "issue": "Jupyter FileExtractionTask task fails\n```\r\n2021-02-25 17:17:55 [INFO] Execution of [['sudo', 'image_export.py', '--logfile', '/evidence/1614273470-4a7632dfe76c428797ce0ddeafc684bf-FileArtifactExtractionTask/4a7632dfe76c428797ce0ddeafc684bf.log', '-w', '/evidence/1614273470-4a7632dfe76c428797ce0ddeafc684bf-FileArtifactExtractionTask/export', '--partitions', 'all', '--artifact_filters', 'JupyterConfigFile', '-d', '/dev/loop1']] failed with status 1\r\n2021-02-25 17:17:55 [INFO] image_export.py failed for artifact JupyterConfigFile.\r\n```\r\n\r\nhttps://github.com/google/turbinia/pull/766/checks?check_run_id=1981113701#step:9:842\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2020 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for analysing Jupyter.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom turbinia import TurbiniaException\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import ReportText\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.workers import Priority\n\n\nclass JupyterAnalysisTask(TurbiniaTask):\n \"\"\"Task to analyze a Jupyter Notebook config.\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def run(self, evidence, result):\n \"\"\"Run the Jupyter worker.\n\n Args:\n evidence (Evidence object): The evidence to process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n # Where to store the resulting output file.\n output_file_name = 'jupyter_analysis.txt'\n output_file_path = os.path.join(self.output_dir, output_file_name)\n\n # What type of evidence we should output.\n output_evidence = ReportText(source_path=output_file_path)\n\n # Read the config file.\n\n jupyter_config = open(evidence.local_path, 'r').read()\n\n # Extract the config and return the report\n (report, priority, summary) = self.analyse_config(jupyter_config)\n output_evidence.text_data = report\n result.report_priority = priority\n result.report_data = report\n\n # Write the report to the output file.\n with open(output_file_path, 'w') as fh:\n fh.write(output_evidence.text_data.encode('utf8'))\n fh.write('\\n'.encode('utf8'))\n\n # Add the resulting evidence to the result object.\n result.add_evidence(output_evidence, evidence.config)\n result.close(self, success=True, status=summary)\n\n return result\n\n def analyse_config(self, jupyter_config):\n \"\"\"Extract security related configs from Jupyter configuration files.\n\n Args:\n config (str): configuration file content.\n\n Returns:\n Tuple(\n report_text(str): The report data\n report_priority(int): The priority of the report (0 - 100)\n summary(str): A summary of the report (used for task status)\n )\n \"\"\"\n findings = []\n num_misconfigs = 0\n for line in jupyter_config.split('\\n'):\n\n if all(x in line for x in ['disable_check_xsrf', 'True']):\n findings.append(fmt.bullet('XSRF protection is disabled.'))\n num_misconfigs += 1\n continue\n if all(x in line for x in ['allow_root', 'True']):\n findings.append(fmt.bullet('Juypter Notebook allowed to run as root.'))\n num_misconfigs += 1\n continue\n if 'NotebookApp.password' in line:\n if all(x in line for x in ['required', 'False']):\n findings.append(\n fmt.bullet(\n 'Password is not required to access this Jupyter Notebook.'))\n num_misconfigs += 1\n continue\n if 'required' not in line:\n password_hash = line.split('=')\n if len(password_hash) > 1:\n if password_hash[1].strip() == \"''\":\n findings.append(\n fmt.bullet(\n 'There is no password set for this Jupyter Notebook.'))\n num_misconfigs += 1\n if all(x in line for x in ['allow_remote_access', 'True']):\n findings.append(\n fmt.bullet('Remote access is enabled on this Jupyter Notebook.'))\n num_misconfigs += 1\n continue\n\n if findings:\n summary = 'Insecure Jupyter Notebook configuration found. Total misconfigs: {}'.format(\n num_misconfigs)\n findings.insert(0, fmt.heading4(fmt.bold(summary)))\n report = '\\n'.join(findings)\n return (report, Priority.HIGH, summary)\n\n report = 'No issues found in Jupyter Notebook configuration.'\n return (report, Priority.LOW, report)\n", "path": "turbinia/workers/analysis/jupyter.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2020 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for analysing Jupyter.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom turbinia import TurbiniaException\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import ReportText\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.workers import Priority\n\n\nclass JupyterAnalysisTask(TurbiniaTask):\n \"\"\"Task to analyze a Jupyter Notebook config.\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def run(self, evidence, result):\n \"\"\"Run the Jupyter worker.\n\n Args:\n evidence (Evidence object): The evidence to process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n # Where to store the resulting output file.\n output_file_name = 'jupyter_analysis.txt'\n output_file_path = os.path.join(self.output_dir, output_file_name)\n\n # What type of evidence we should output.\n output_evidence = ReportText(source_path=output_file_path)\n\n # Read the config file.\n\n jupyter_config = open(evidence.local_path, 'r').read()\n\n # Extract the config and return the report\n (report, priority, summary) = self.analyse_config(jupyter_config)\n output_evidence.text_data = report\n result.report_priority = priority\n result.report_data = report\n\n # Write the report to the output file.\n with open(output_file_path, 'w', encoding='utf-8') as fh:\n fh.write(output_evidence.text_data)\n fh.write('\\n')\n\n # Add the resulting evidence to the result object.\n result.add_evidence(output_evidence, evidence.config)\n result.close(self, success=True, status=summary)\n\n return result\n\n def analyse_config(self, jupyter_config):\n \"\"\"Extract security related configs from Jupyter configuration files.\n\n Args:\n config (str): configuration file content.\n\n Returns:\n Tuple(\n report_text(str): The report data\n report_priority(int): The priority of the report (0 - 100)\n summary(str): A summary of the report (used for task status)\n )\n \"\"\"\n findings = []\n num_misconfigs = 0\n for line in jupyter_config.split('\\n'):\n\n if all(x in line for x in ['disable_check_xsrf', 'True']):\n findings.append(fmt.bullet('XSRF protection is disabled.'))\n num_misconfigs += 1\n continue\n if all(x in line for x in ['allow_root', 'True']):\n findings.append(fmt.bullet('Juypter Notebook allowed to run as root.'))\n num_misconfigs += 1\n continue\n if 'NotebookApp.password' in line:\n if all(x in line for x in ['required', 'False']):\n findings.append(\n fmt.bullet(\n 'Password is not required to access this Jupyter Notebook.'))\n num_misconfigs += 1\n continue\n if 'required' not in line:\n password_hash = line.split('=')\n if len(password_hash) > 1:\n if password_hash[1].strip() == \"''\":\n findings.append(\n fmt.bullet(\n 'There is no password set for this Jupyter Notebook.'))\n num_misconfigs += 1\n if all(x in line for x in ['allow_remote_access', 'True']):\n findings.append(\n fmt.bullet('Remote access is enabled on this Jupyter Notebook.'))\n num_misconfigs += 1\n continue\n\n if findings:\n summary = 'Insecure Jupyter Notebook configuration found. Total misconfigs: {}'.format(\n num_misconfigs)\n findings.insert(0, fmt.heading4(fmt.bold(summary)))\n report = '\\n'.join(findings)\n return (report, Priority.HIGH, summary)\n\n report = 'No issues found in Jupyter Notebook configuration.'\n return (report, Priority.LOW, report)\n", "path": "turbinia/workers/analysis/jupyter.py"}]}
| 1,859 | 186 |
gh_patches_debug_33837
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-9248
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gravatar,initials doesn't fallback when gravatar.com is unreachable
**Describe the bug**
On System -> Settings -> Avatar section, there is this blurb of text:
`Multiple values can be set, comma-separated, and authentik will fallback to the next mode when no avatar could be found. For example, setting this to gravatar,initials will attempt to get an avatar from Gravatar, and if the user has not configured on there, it will fallback to a generated avatar.`
However, if authentik is deployed in an environment that doesn't allow a network connection to gravatar.com, i.e. firewall or similar, even the setting "gravatar,initials" leaves each user's avatar as a broken thumbnail, and initials are never retrieved. Further, each GET of a user ends up being really slow. i.e. retrieving 150k users by paginating through /core/users/ takes 12 hours.
Once changing the setting to "initials" only, retrieving 150k users takes about 2 minutes, and users have proper initials avatars.
**To Reproduce**
Set global Avatar setting to gravatar,initials and block outgoing connections to gravatar.com
**Expected behavior**
Failback to using initials setting
**Version and Deployment (please complete the following information):**
- authentik version: [e.g. 2024.2.2]
- Deployment: [e.g. custom helm]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/lib/avatars.py`
Content:
```
1 """Avatar utils"""
2
3 from base64 import b64encode
4 from functools import cache as funccache
5 from hashlib import md5
6 from typing import TYPE_CHECKING
7 from urllib.parse import urlencode
8
9 from django.core.cache import cache
10 from django.http import HttpRequest, HttpResponseNotFound
11 from django.templatetags.static import static
12 from lxml import etree # nosec
13 from lxml.etree import Element, SubElement # nosec
14 from requests.exceptions import RequestException
15
16 from authentik.lib.config import get_path_from_dict
17 from authentik.lib.utils.http import get_http_session
18 from authentik.tenants.utils import get_current_tenant
19
20 if TYPE_CHECKING:
21 from authentik.core.models import User
22
23 GRAVATAR_URL = "https://secure.gravatar.com"
24 DEFAULT_AVATAR = static("dist/assets/images/user_default.png")
25 CACHE_KEY_GRAVATAR = "goauthentik.io/lib/avatars/"
26
27 SVG_XML_NS = "http://www.w3.org/2000/svg"
28 SVG_NS_MAP = {None: SVG_XML_NS}
29 # Match fonts used in web UI
30 SVG_FONTS = [
31 "'RedHatText'",
32 "'Overpass'",
33 "overpass",
34 "helvetica",
35 "arial",
36 "sans-serif",
37 ]
38
39
40 def avatar_mode_none(user: "User", mode: str) -> str | None:
41 """No avatar"""
42 return DEFAULT_AVATAR
43
44
45 def avatar_mode_attribute(user: "User", mode: str) -> str | None:
46 """Avatars based on a user attribute"""
47 avatar = get_path_from_dict(user.attributes, mode[11:], default=None)
48 return avatar
49
50
51 def avatar_mode_gravatar(user: "User", mode: str) -> str | None:
52 """Gravatar avatars"""
53 # gravatar uses md5 for their URLs, so md5 can't be avoided
54 mail_hash = md5(user.email.lower().encode("utf-8")).hexdigest() # nosec
55 parameters = [("size", "158"), ("rating", "g"), ("default", "404")]
56 gravatar_url = f"{GRAVATAR_URL}/avatar/{mail_hash}?{urlencode(parameters, doseq=True)}"
57
58 full_key = CACHE_KEY_GRAVATAR + mail_hash
59 if cache.has_key(full_key):
60 cache.touch(full_key)
61 return cache.get(full_key)
62
63 try:
64 # Since we specify a default of 404, do a HEAD request
65 # (HEAD since we don't need the body)
66 # so if that returns a 404, move onto the next mode
67 res = get_http_session().head(gravatar_url, timeout=5)
68 if res.status_code == HttpResponseNotFound.status_code:
69 cache.set(full_key, None)
70 return None
71 res.raise_for_status()
72 except RequestException:
73 return gravatar_url
74 cache.set(full_key, gravatar_url)
75 return gravatar_url
76
77
78 def generate_colors(text: str) -> tuple[str, str]:
79 """Generate colours based on `text`"""
80 color = int(md5(text.lower().encode("utf-8")).hexdigest(), 16) % 0xFFFFFF # nosec
81
82 # Get a (somewhat arbitrarily) reduced scope of colors
83 # to avoid too dark or light backgrounds
84 blue = min(max((color) & 0xFF, 55), 200)
85 green = min(max((color >> 8) & 0xFF, 55), 200)
86 red = min(max((color >> 16) & 0xFF, 55), 200)
87 bg_hex = f"{red:02x}{green:02x}{blue:02x}"
88 # Contrasting text color (https://stackoverflow.com/a/3943023)
89 text_hex = (
90 "000" if (red * 0.299 + green * 0.587 + blue * 0.114) > 186 else "fff" # noqa: PLR2004
91 )
92 return bg_hex, text_hex
93
94
95 @funccache
96 def generate_avatar_from_name(
97 name: str,
98 length: int = 2,
99 size: int = 64,
100 rounded: bool = False,
101 font_size: float = 0.4375,
102 bold: bool = False,
103 uppercase: bool = True,
104 ) -> str:
105 """ "Generate an avatar with initials in SVG format.
106
107 Inspired from: https://github.com/LasseRafn/ui-avatars
108 """
109 name_parts = name.split()
110 # Only abbreviate first and last name
111 if len(name_parts) > 2: # noqa: PLR2004
112 name_parts = [name_parts[0], name_parts[-1]]
113
114 if len(name_parts) == 1:
115 initials = name_parts[0][:length]
116 else:
117 initials = "".join([part[0] for part in name_parts[:-1]])
118 initials += name_parts[-1]
119 initials = initials[:length]
120
121 bg_hex, text_hex = generate_colors(name)
122
123 half_size = size // 2
124 shape = "circle" if rounded else "rect"
125 font_weight = "600" if bold else "400"
126
127 root_element: Element = Element(f"{{{SVG_XML_NS}}}svg", nsmap=SVG_NS_MAP)
128 root_element.attrib["width"] = f"{size}px"
129 root_element.attrib["height"] = f"{size}px"
130 root_element.attrib["viewBox"] = f"0 0 {size} {size}"
131 root_element.attrib["version"] = "1.1"
132
133 shape = SubElement(root_element, f"{{{SVG_XML_NS}}}{shape}", nsmap=SVG_NS_MAP)
134 shape.attrib["fill"] = f"#{bg_hex}"
135 shape.attrib["cx"] = f"{half_size}"
136 shape.attrib["cy"] = f"{half_size}"
137 shape.attrib["width"] = f"{size}"
138 shape.attrib["height"] = f"{size}"
139 shape.attrib["r"] = f"{half_size}"
140
141 text = SubElement(root_element, f"{{{SVG_XML_NS}}}text", nsmap=SVG_NS_MAP)
142 text.attrib["x"] = "50%"
143 text.attrib["y"] = "50%"
144 text.attrib["style"] = (
145 f"color: #{text_hex}; " "line-height: 1; " f"font-family: {','.join(SVG_FONTS)}; "
146 )
147 text.attrib["fill"] = f"#{text_hex}"
148 text.attrib["alignment-baseline"] = "middle"
149 text.attrib["dominant-baseline"] = "middle"
150 text.attrib["text-anchor"] = "middle"
151 text.attrib["font-size"] = f"{round(size * font_size)}"
152 text.attrib["font-weight"] = f"{font_weight}"
153 text.attrib["dy"] = ".1em"
154 text.text = initials if not uppercase else initials.upper()
155
156 return etree.tostring(root_element).decode()
157
158
159 def avatar_mode_generated(user: "User", mode: str) -> str | None:
160 """Wrapper that converts generated avatar to base64 svg"""
161 # By default generate based off of user's display name
162 name = user.name.strip()
163 if name == "":
164 # Fallback to username
165 name = user.username.strip()
166 # If we still don't have anything, fallback to `a k`
167 if name == "":
168 name = "a k"
169 svg = generate_avatar_from_name(name)
170 return f"data:image/svg+xml;base64,{b64encode(svg.encode('utf-8')).decode('utf-8')}"
171
172
173 def avatar_mode_url(user: "User", mode: str) -> str | None:
174 """Format url"""
175 mail_hash = md5(user.email.lower().encode("utf-8")).hexdigest() # nosec
176 return mode % {
177 "username": user.username,
178 "mail_hash": mail_hash,
179 "upn": user.attributes.get("upn", ""),
180 }
181
182
183 def get_avatar(user: "User", request: HttpRequest | None = None) -> str:
184 """Get avatar with configured mode"""
185 mode_map = {
186 "none": avatar_mode_none,
187 "initials": avatar_mode_generated,
188 "gravatar": avatar_mode_gravatar,
189 }
190 tenant = None
191 if request:
192 tenant = request.tenant
193 else:
194 tenant = get_current_tenant()
195 modes: str = tenant.avatars
196 for mode in modes.split(","):
197 avatar = None
198 if mode in mode_map:
199 avatar = mode_map[mode](user, mode)
200 elif mode.startswith("attributes."):
201 avatar = avatar_mode_attribute(user, mode)
202 elif "://" in mode:
203 avatar = avatar_mode_url(user, mode)
204 if avatar:
205 return avatar
206 return avatar_mode_none(user, modes)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/lib/avatars.py b/authentik/lib/avatars.py
--- a/authentik/lib/avatars.py
+++ b/authentik/lib/avatars.py
@@ -11,7 +11,7 @@
from django.templatetags.static import static
from lxml import etree # nosec
from lxml.etree import Element, SubElement # nosec
-from requests.exceptions import RequestException
+from requests.exceptions import ConnectionError, HTTPError, RequestException, Timeout
from authentik.lib.config import get_path_from_dict
from authentik.lib.utils.http import get_http_session
@@ -23,6 +23,8 @@
GRAVATAR_URL = "https://secure.gravatar.com"
DEFAULT_AVATAR = static("dist/assets/images/user_default.png")
CACHE_KEY_GRAVATAR = "goauthentik.io/lib/avatars/"
+CACHE_KEY_GRAVATAR_AVAILABLE = "goauthentik.io/lib/avatars/gravatar_available"
+GRAVATAR_STATUS_TTL_SECONDS = 60 * 60 * 8 # 8 Hours
SVG_XML_NS = "http://www.w3.org/2000/svg"
SVG_NS_MAP = {None: SVG_XML_NS}
@@ -50,6 +52,9 @@
def avatar_mode_gravatar(user: "User", mode: str) -> str | None:
"""Gravatar avatars"""
+ if not cache.get(CACHE_KEY_GRAVATAR_AVAILABLE, True):
+ return None
+
# gravatar uses md5 for their URLs, so md5 can't be avoided
mail_hash = md5(user.email.lower().encode("utf-8")).hexdigest() # nosec
parameters = [("size", "158"), ("rating", "g"), ("default", "404")]
@@ -69,6 +74,8 @@
cache.set(full_key, None)
return None
res.raise_for_status()
+ except (Timeout, ConnectionError, HTTPError):
+ cache.set(CACHE_KEY_GRAVATAR_AVAILABLE, False, timeout=GRAVATAR_STATUS_TTL_SECONDS)
except RequestException:
return gravatar_url
cache.set(full_key, gravatar_url)
|
{"golden_diff": "diff --git a/authentik/lib/avatars.py b/authentik/lib/avatars.py\n--- a/authentik/lib/avatars.py\n+++ b/authentik/lib/avatars.py\n@@ -11,7 +11,7 @@\n from django.templatetags.static import static\n from lxml import etree # nosec\n from lxml.etree import Element, SubElement # nosec\n-from requests.exceptions import RequestException\n+from requests.exceptions import ConnectionError, HTTPError, RequestException, Timeout\n \n from authentik.lib.config import get_path_from_dict\n from authentik.lib.utils.http import get_http_session\n@@ -23,6 +23,8 @@\n GRAVATAR_URL = \"https://secure.gravatar.com\"\n DEFAULT_AVATAR = static(\"dist/assets/images/user_default.png\")\n CACHE_KEY_GRAVATAR = \"goauthentik.io/lib/avatars/\"\n+CACHE_KEY_GRAVATAR_AVAILABLE = \"goauthentik.io/lib/avatars/gravatar_available\"\n+GRAVATAR_STATUS_TTL_SECONDS = 60 * 60 * 8 # 8 Hours\n \n SVG_XML_NS = \"http://www.w3.org/2000/svg\"\n SVG_NS_MAP = {None: SVG_XML_NS}\n@@ -50,6 +52,9 @@\n \n def avatar_mode_gravatar(user: \"User\", mode: str) -> str | None:\n \"\"\"Gravatar avatars\"\"\"\n+ if not cache.get(CACHE_KEY_GRAVATAR_AVAILABLE, True):\n+ return None\n+\n # gravatar uses md5 for their URLs, so md5 can't be avoided\n mail_hash = md5(user.email.lower().encode(\"utf-8\")).hexdigest() # nosec\n parameters = [(\"size\", \"158\"), (\"rating\", \"g\"), (\"default\", \"404\")]\n@@ -69,6 +74,8 @@\n cache.set(full_key, None)\n return None\n res.raise_for_status()\n+ except (Timeout, ConnectionError, HTTPError):\n+ cache.set(CACHE_KEY_GRAVATAR_AVAILABLE, False, timeout=GRAVATAR_STATUS_TTL_SECONDS)\n except RequestException:\n return gravatar_url\n cache.set(full_key, gravatar_url)\n", "issue": "gravatar,initials doesn't fallback when gravatar.com is unreachable\n**Describe the bug**\r\nOn System -> Settings -> Avatar section, there is this blurb of text:\r\n`Multiple values can be set, comma-separated, and authentik will fallback to the next mode when no avatar could be found. For example, setting this to gravatar,initials will attempt to get an avatar from Gravatar, and if the user has not configured on there, it will fallback to a generated avatar.`\r\n\r\nHowever, if authentik is deployed in an environment that doesn't allow a network connection to gravatar.com, i.e. firewall or similar, even the setting \"gravatar,initials\" leaves each user's avatar as a broken thumbnail, and initials are never retrieved. Further, each GET of a user ends up being really slow. i.e. retrieving 150k users by paginating through /core/users/ takes 12 hours. \r\n\r\nOnce changing the setting to \"initials\" only, retrieving 150k users takes about 2 minutes, and users have proper initials avatars.\r\n\r\n**To Reproduce**\r\nSet global Avatar setting to gravatar,initials and block outgoing connections to gravatar.com\r\n\r\n**Expected behavior**\r\nFailback to using initials setting\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: [e.g. 2024.2.2]\r\n- Deployment: [e.g. custom helm]\r\n\n", "before_files": [{"content": "\"\"\"Avatar utils\"\"\"\n\nfrom base64 import b64encode\nfrom functools import cache as funccache\nfrom hashlib import md5\nfrom typing import TYPE_CHECKING\nfrom urllib.parse import urlencode\n\nfrom django.core.cache import cache\nfrom django.http import HttpRequest, HttpResponseNotFound\nfrom django.templatetags.static import static\nfrom lxml import etree # nosec\nfrom lxml.etree import Element, SubElement # nosec\nfrom requests.exceptions import RequestException\n\nfrom authentik.lib.config import get_path_from_dict\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.tenants.utils import get_current_tenant\n\nif TYPE_CHECKING:\n from authentik.core.models import User\n\nGRAVATAR_URL = \"https://secure.gravatar.com\"\nDEFAULT_AVATAR = static(\"dist/assets/images/user_default.png\")\nCACHE_KEY_GRAVATAR = \"goauthentik.io/lib/avatars/\"\n\nSVG_XML_NS = \"http://www.w3.org/2000/svg\"\nSVG_NS_MAP = {None: SVG_XML_NS}\n# Match fonts used in web UI\nSVG_FONTS = [\n \"'RedHatText'\",\n \"'Overpass'\",\n \"overpass\",\n \"helvetica\",\n \"arial\",\n \"sans-serif\",\n]\n\n\ndef avatar_mode_none(user: \"User\", mode: str) -> str | None:\n \"\"\"No avatar\"\"\"\n return DEFAULT_AVATAR\n\n\ndef avatar_mode_attribute(user: \"User\", mode: str) -> str | None:\n \"\"\"Avatars based on a user attribute\"\"\"\n avatar = get_path_from_dict(user.attributes, mode[11:], default=None)\n return avatar\n\n\ndef avatar_mode_gravatar(user: \"User\", mode: str) -> str | None:\n \"\"\"Gravatar avatars\"\"\"\n # gravatar uses md5 for their URLs, so md5 can't be avoided\n mail_hash = md5(user.email.lower().encode(\"utf-8\")).hexdigest() # nosec\n parameters = [(\"size\", \"158\"), (\"rating\", \"g\"), (\"default\", \"404\")]\n gravatar_url = f\"{GRAVATAR_URL}/avatar/{mail_hash}?{urlencode(parameters, doseq=True)}\"\n\n full_key = CACHE_KEY_GRAVATAR + mail_hash\n if cache.has_key(full_key):\n cache.touch(full_key)\n return cache.get(full_key)\n\n try:\n # Since we specify a default of 404, do a HEAD request\n # (HEAD since we don't need the body)\n # so if that returns a 404, move onto the next mode\n res = get_http_session().head(gravatar_url, timeout=5)\n if res.status_code == HttpResponseNotFound.status_code:\n cache.set(full_key, None)\n return None\n res.raise_for_status()\n except RequestException:\n return gravatar_url\n cache.set(full_key, gravatar_url)\n return gravatar_url\n\n\ndef generate_colors(text: str) -> tuple[str, str]:\n \"\"\"Generate colours based on `text`\"\"\"\n color = int(md5(text.lower().encode(\"utf-8\")).hexdigest(), 16) % 0xFFFFFF # nosec\n\n # Get a (somewhat arbitrarily) reduced scope of colors\n # to avoid too dark or light backgrounds\n blue = min(max((color) & 0xFF, 55), 200)\n green = min(max((color >> 8) & 0xFF, 55), 200)\n red = min(max((color >> 16) & 0xFF, 55), 200)\n bg_hex = f\"{red:02x}{green:02x}{blue:02x}\"\n # Contrasting text color (https://stackoverflow.com/a/3943023)\n text_hex = (\n \"000\" if (red * 0.299 + green * 0.587 + blue * 0.114) > 186 else \"fff\" # noqa: PLR2004\n )\n return bg_hex, text_hex\n\n\n@funccache\ndef generate_avatar_from_name(\n name: str,\n length: int = 2,\n size: int = 64,\n rounded: bool = False,\n font_size: float = 0.4375,\n bold: bool = False,\n uppercase: bool = True,\n) -> str:\n \"\"\" \"Generate an avatar with initials in SVG format.\n\n Inspired from: https://github.com/LasseRafn/ui-avatars\n \"\"\"\n name_parts = name.split()\n # Only abbreviate first and last name\n if len(name_parts) > 2: # noqa: PLR2004\n name_parts = [name_parts[0], name_parts[-1]]\n\n if len(name_parts) == 1:\n initials = name_parts[0][:length]\n else:\n initials = \"\".join([part[0] for part in name_parts[:-1]])\n initials += name_parts[-1]\n initials = initials[:length]\n\n bg_hex, text_hex = generate_colors(name)\n\n half_size = size // 2\n shape = \"circle\" if rounded else \"rect\"\n font_weight = \"600\" if bold else \"400\"\n\n root_element: Element = Element(f\"{{{SVG_XML_NS}}}svg\", nsmap=SVG_NS_MAP)\n root_element.attrib[\"width\"] = f\"{size}px\"\n root_element.attrib[\"height\"] = f\"{size}px\"\n root_element.attrib[\"viewBox\"] = f\"0 0 {size} {size}\"\n root_element.attrib[\"version\"] = \"1.1\"\n\n shape = SubElement(root_element, f\"{{{SVG_XML_NS}}}{shape}\", nsmap=SVG_NS_MAP)\n shape.attrib[\"fill\"] = f\"#{bg_hex}\"\n shape.attrib[\"cx\"] = f\"{half_size}\"\n shape.attrib[\"cy\"] = f\"{half_size}\"\n shape.attrib[\"width\"] = f\"{size}\"\n shape.attrib[\"height\"] = f\"{size}\"\n shape.attrib[\"r\"] = f\"{half_size}\"\n\n text = SubElement(root_element, f\"{{{SVG_XML_NS}}}text\", nsmap=SVG_NS_MAP)\n text.attrib[\"x\"] = \"50%\"\n text.attrib[\"y\"] = \"50%\"\n text.attrib[\"style\"] = (\n f\"color: #{text_hex}; \" \"line-height: 1; \" f\"font-family: {','.join(SVG_FONTS)}; \"\n )\n text.attrib[\"fill\"] = f\"#{text_hex}\"\n text.attrib[\"alignment-baseline\"] = \"middle\"\n text.attrib[\"dominant-baseline\"] = \"middle\"\n text.attrib[\"text-anchor\"] = \"middle\"\n text.attrib[\"font-size\"] = f\"{round(size * font_size)}\"\n text.attrib[\"font-weight\"] = f\"{font_weight}\"\n text.attrib[\"dy\"] = \".1em\"\n text.text = initials if not uppercase else initials.upper()\n\n return etree.tostring(root_element).decode()\n\n\ndef avatar_mode_generated(user: \"User\", mode: str) -> str | None:\n \"\"\"Wrapper that converts generated avatar to base64 svg\"\"\"\n # By default generate based off of user's display name\n name = user.name.strip()\n if name == \"\":\n # Fallback to username\n name = user.username.strip()\n # If we still don't have anything, fallback to `a k`\n if name == \"\":\n name = \"a k\"\n svg = generate_avatar_from_name(name)\n return f\"data:image/svg+xml;base64,{b64encode(svg.encode('utf-8')).decode('utf-8')}\"\n\n\ndef avatar_mode_url(user: \"User\", mode: str) -> str | None:\n \"\"\"Format url\"\"\"\n mail_hash = md5(user.email.lower().encode(\"utf-8\")).hexdigest() # nosec\n return mode % {\n \"username\": user.username,\n \"mail_hash\": mail_hash,\n \"upn\": user.attributes.get(\"upn\", \"\"),\n }\n\n\ndef get_avatar(user: \"User\", request: HttpRequest | None = None) -> str:\n \"\"\"Get avatar with configured mode\"\"\"\n mode_map = {\n \"none\": avatar_mode_none,\n \"initials\": avatar_mode_generated,\n \"gravatar\": avatar_mode_gravatar,\n }\n tenant = None\n if request:\n tenant = request.tenant\n else:\n tenant = get_current_tenant()\n modes: str = tenant.avatars\n for mode in modes.split(\",\"):\n avatar = None\n if mode in mode_map:\n avatar = mode_map[mode](user, mode)\n elif mode.startswith(\"attributes.\"):\n avatar = avatar_mode_attribute(user, mode)\n elif \"://\" in mode:\n avatar = avatar_mode_url(user, mode)\n if avatar:\n return avatar\n return avatar_mode_none(user, modes)\n", "path": "authentik/lib/avatars.py"}], "after_files": [{"content": "\"\"\"Avatar utils\"\"\"\n\nfrom base64 import b64encode\nfrom functools import cache as funccache\nfrom hashlib import md5\nfrom typing import TYPE_CHECKING\nfrom urllib.parse import urlencode\n\nfrom django.core.cache import cache\nfrom django.http import HttpRequest, HttpResponseNotFound\nfrom django.templatetags.static import static\nfrom lxml import etree # nosec\nfrom lxml.etree import Element, SubElement # nosec\nfrom requests.exceptions import ConnectionError, HTTPError, RequestException, Timeout\n\nfrom authentik.lib.config import get_path_from_dict\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.tenants.utils import get_current_tenant\n\nif TYPE_CHECKING:\n from authentik.core.models import User\n\nGRAVATAR_URL = \"https://secure.gravatar.com\"\nDEFAULT_AVATAR = static(\"dist/assets/images/user_default.png\")\nCACHE_KEY_GRAVATAR = \"goauthentik.io/lib/avatars/\"\nCACHE_KEY_GRAVATAR_AVAILABLE = \"goauthentik.io/lib/avatars/gravatar_available\"\nGRAVATAR_STATUS_TTL_SECONDS = 60 * 60 * 8 # 8 Hours\n\nSVG_XML_NS = \"http://www.w3.org/2000/svg\"\nSVG_NS_MAP = {None: SVG_XML_NS}\n# Match fonts used in web UI\nSVG_FONTS = [\n \"'RedHatText'\",\n \"'Overpass'\",\n \"overpass\",\n \"helvetica\",\n \"arial\",\n \"sans-serif\",\n]\n\n\ndef avatar_mode_none(user: \"User\", mode: str) -> str | None:\n \"\"\"No avatar\"\"\"\n return DEFAULT_AVATAR\n\n\ndef avatar_mode_attribute(user: \"User\", mode: str) -> str | None:\n \"\"\"Avatars based on a user attribute\"\"\"\n avatar = get_path_from_dict(user.attributes, mode[11:], default=None)\n return avatar\n\n\ndef avatar_mode_gravatar(user: \"User\", mode: str) -> str | None:\n \"\"\"Gravatar avatars\"\"\"\n if not cache.get(CACHE_KEY_GRAVATAR_AVAILABLE, True):\n return None\n\n # gravatar uses md5 for their URLs, so md5 can't be avoided\n mail_hash = md5(user.email.lower().encode(\"utf-8\")).hexdigest() # nosec\n parameters = [(\"size\", \"158\"), (\"rating\", \"g\"), (\"default\", \"404\")]\n gravatar_url = f\"{GRAVATAR_URL}/avatar/{mail_hash}?{urlencode(parameters, doseq=True)}\"\n\n full_key = CACHE_KEY_GRAVATAR + mail_hash\n if cache.has_key(full_key):\n cache.touch(full_key)\n return cache.get(full_key)\n\n try:\n # Since we specify a default of 404, do a HEAD request\n # (HEAD since we don't need the body)\n # so if that returns a 404, move onto the next mode\n res = get_http_session().head(gravatar_url, timeout=5)\n if res.status_code == HttpResponseNotFound.status_code:\n cache.set(full_key, None)\n return None\n res.raise_for_status()\n except (Timeout, ConnectionError, HTTPError):\n cache.set(CACHE_KEY_GRAVATAR_AVAILABLE, False, timeout=GRAVATAR_STATUS_TTL_SECONDS)\n except RequestException:\n return gravatar_url\n cache.set(full_key, gravatar_url)\n return gravatar_url\n\n\ndef generate_colors(text: str) -> tuple[str, str]:\n \"\"\"Generate colours based on `text`\"\"\"\n color = int(md5(text.lower().encode(\"utf-8\")).hexdigest(), 16) % 0xFFFFFF # nosec\n\n # Get a (somewhat arbitrarily) reduced scope of colors\n # to avoid too dark or light backgrounds\n blue = min(max((color) & 0xFF, 55), 200)\n green = min(max((color >> 8) & 0xFF, 55), 200)\n red = min(max((color >> 16) & 0xFF, 55), 200)\n bg_hex = f\"{red:02x}{green:02x}{blue:02x}\"\n # Contrasting text color (https://stackoverflow.com/a/3943023)\n text_hex = (\n \"000\" if (red * 0.299 + green * 0.587 + blue * 0.114) > 186 else \"fff\" # noqa: PLR2004\n )\n return bg_hex, text_hex\n\n\n@funccache\ndef generate_avatar_from_name(\n name: str,\n length: int = 2,\n size: int = 64,\n rounded: bool = False,\n font_size: float = 0.4375,\n bold: bool = False,\n uppercase: bool = True,\n) -> str:\n \"\"\" \"Generate an avatar with initials in SVG format.\n\n Inspired from: https://github.com/LasseRafn/ui-avatars\n \"\"\"\n name_parts = name.split()\n # Only abbreviate first and last name\n if len(name_parts) > 2: # noqa: PLR2004\n name_parts = [name_parts[0], name_parts[-1]]\n\n if len(name_parts) == 1:\n initials = name_parts[0][:length]\n else:\n initials = \"\".join([part[0] for part in name_parts[:-1]])\n initials += name_parts[-1]\n initials = initials[:length]\n\n bg_hex, text_hex = generate_colors(name)\n\n half_size = size // 2\n shape = \"circle\" if rounded else \"rect\"\n font_weight = \"600\" if bold else \"400\"\n\n root_element: Element = Element(f\"{{{SVG_XML_NS}}}svg\", nsmap=SVG_NS_MAP)\n root_element.attrib[\"width\"] = f\"{size}px\"\n root_element.attrib[\"height\"] = f\"{size}px\"\n root_element.attrib[\"viewBox\"] = f\"0 0 {size} {size}\"\n root_element.attrib[\"version\"] = \"1.1\"\n\n shape = SubElement(root_element, f\"{{{SVG_XML_NS}}}{shape}\", nsmap=SVG_NS_MAP)\n shape.attrib[\"fill\"] = f\"#{bg_hex}\"\n shape.attrib[\"cx\"] = f\"{half_size}\"\n shape.attrib[\"cy\"] = f\"{half_size}\"\n shape.attrib[\"width\"] = f\"{size}\"\n shape.attrib[\"height\"] = f\"{size}\"\n shape.attrib[\"r\"] = f\"{half_size}\"\n\n text = SubElement(root_element, f\"{{{SVG_XML_NS}}}text\", nsmap=SVG_NS_MAP)\n text.attrib[\"x\"] = \"50%\"\n text.attrib[\"y\"] = \"50%\"\n text.attrib[\"style\"] = (\n f\"color: #{text_hex}; \" \"line-height: 1; \" f\"font-family: {','.join(SVG_FONTS)}; \"\n )\n text.attrib[\"fill\"] = f\"#{text_hex}\"\n text.attrib[\"alignment-baseline\"] = \"middle\"\n text.attrib[\"dominant-baseline\"] = \"middle\"\n text.attrib[\"text-anchor\"] = \"middle\"\n text.attrib[\"font-size\"] = f\"{round(size * font_size)}\"\n text.attrib[\"font-weight\"] = f\"{font_weight}\"\n text.attrib[\"dy\"] = \".1em\"\n text.text = initials if not uppercase else initials.upper()\n\n return etree.tostring(root_element).decode()\n\n\ndef avatar_mode_generated(user: \"User\", mode: str) -> str | None:\n \"\"\"Wrapper that converts generated avatar to base64 svg\"\"\"\n # By default generate based off of user's display name\n name = user.name.strip()\n if name == \"\":\n # Fallback to username\n name = user.username.strip()\n # If we still don't have anything, fallback to `a k`\n if name == \"\":\n name = \"a k\"\n svg = generate_avatar_from_name(name)\n return f\"data:image/svg+xml;base64,{b64encode(svg.encode('utf-8')).decode('utf-8')}\"\n\n\ndef avatar_mode_url(user: \"User\", mode: str) -> str | None:\n \"\"\"Format url\"\"\"\n mail_hash = md5(user.email.lower().encode(\"utf-8\")).hexdigest() # nosec\n return mode % {\n \"username\": user.username,\n \"mail_hash\": mail_hash,\n \"upn\": user.attributes.get(\"upn\", \"\"),\n }\n\n\ndef get_avatar(user: \"User\", request: HttpRequest | None = None) -> str:\n \"\"\"Get avatar with configured mode\"\"\"\n mode_map = {\n \"none\": avatar_mode_none,\n \"initials\": avatar_mode_generated,\n \"gravatar\": avatar_mode_gravatar,\n }\n tenant = None\n if request:\n tenant = request.tenant\n else:\n tenant = get_current_tenant()\n modes: str = tenant.avatars\n for mode in modes.split(\",\"):\n avatar = None\n if mode in mode_map:\n avatar = mode_map[mode](user, mode)\n elif mode.startswith(\"attributes.\"):\n avatar = avatar_mode_attribute(user, mode)\n elif \"://\" in mode:\n avatar = avatar_mode_url(user, mode)\n if avatar:\n return avatar\n return avatar_mode_none(user, modes)\n", "path": "authentik/lib/avatars.py"}]}
| 3,043 | 477 |
gh_patches_debug_41396
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-6063
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multiple-compressed responses support
HTTP response body can be compressed multiple times. In such cases `Content-Encoding` header contains a list of comma-separated encodings. Some servers instead can send multiple `Content-Encoding` headers.
This fix allows HttpCompressionMiddleware handle that.
See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
Fix #5143
Fixes #5143
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/downloadermiddlewares/httpcompression.py`
Content:
```
1 from __future__ import annotations
2
3 import warnings
4 from logging import getLogger
5 from typing import TYPE_CHECKING, List, Optional, Union
6
7 from scrapy import Request, Spider, signals
8 from scrapy.crawler import Crawler
9 from scrapy.exceptions import IgnoreRequest, NotConfigured
10 from scrapy.http import Response, TextResponse
11 from scrapy.responsetypes import responsetypes
12 from scrapy.statscollectors import StatsCollector
13 from scrapy.utils._compression import (
14 _DecompressionMaxSizeExceeded,
15 _inflate,
16 _unbrotli,
17 _unzstd,
18 )
19 from scrapy.utils.deprecate import ScrapyDeprecationWarning
20 from scrapy.utils.gz import gunzip
21
22 if TYPE_CHECKING:
23 # typing.Self requires Python 3.11
24 from typing_extensions import Self
25
26 logger = getLogger(__name__)
27
28 ACCEPTED_ENCODINGS: List[bytes] = [b"gzip", b"deflate"]
29
30 try:
31 import brotli # noqa: F401
32 except ImportError:
33 pass
34 else:
35 ACCEPTED_ENCODINGS.append(b"br")
36
37 try:
38 import zstandard # noqa: F401
39 except ImportError:
40 pass
41 else:
42 ACCEPTED_ENCODINGS.append(b"zstd")
43
44
45 class HttpCompressionMiddleware:
46 """This middleware allows compressed (gzip, deflate) traffic to be
47 sent/received from web sites"""
48
49 def __init__(
50 self,
51 stats: Optional[StatsCollector] = None,
52 *,
53 crawler: Optional[Crawler] = None,
54 ):
55 if not crawler:
56 self.stats = stats
57 self._max_size = 1073741824
58 self._warn_size = 33554432
59 return
60 self.stats = crawler.stats
61 self._max_size = crawler.settings.getint("DOWNLOAD_MAXSIZE")
62 self._warn_size = crawler.settings.getint("DOWNLOAD_WARNSIZE")
63 crawler.signals.connect(self.open_spider, signals.spider_opened)
64
65 @classmethod
66 def from_crawler(cls, crawler: Crawler) -> Self:
67 if not crawler.settings.getbool("COMPRESSION_ENABLED"):
68 raise NotConfigured
69 try:
70 return cls(crawler=crawler)
71 except TypeError:
72 warnings.warn(
73 "HttpCompressionMiddleware subclasses must either modify "
74 "their '__init__' method to support a 'crawler' parameter or "
75 "reimplement their 'from_crawler' method.",
76 ScrapyDeprecationWarning,
77 )
78 mw = cls()
79 mw.stats = crawler.stats
80 mw._max_size = crawler.settings.getint("DOWNLOAD_MAXSIZE")
81 mw._warn_size = crawler.settings.getint("DOWNLOAD_WARNSIZE")
82 crawler.signals.connect(mw.open_spider, signals.spider_opened)
83 return mw
84
85 def open_spider(self, spider):
86 if hasattr(spider, "download_maxsize"):
87 self._max_size = spider.download_maxsize
88 if hasattr(spider, "download_warnsize"):
89 self._warn_size = spider.download_warnsize
90
91 def process_request(
92 self, request: Request, spider: Spider
93 ) -> Union[Request, Response, None]:
94 request.headers.setdefault("Accept-Encoding", b", ".join(ACCEPTED_ENCODINGS))
95 return None
96
97 def process_response(
98 self, request: Request, response: Response, spider: Spider
99 ) -> Union[Request, Response]:
100 if request.method == "HEAD":
101 return response
102 if isinstance(response, Response):
103 content_encoding = response.headers.getlist("Content-Encoding")
104 if content_encoding:
105 encoding = content_encoding.pop()
106 max_size = request.meta.get("download_maxsize", self._max_size)
107 warn_size = request.meta.get("download_warnsize", self._warn_size)
108 try:
109 decoded_body = self._decode(
110 response.body, encoding.lower(), max_size
111 )
112 except _DecompressionMaxSizeExceeded:
113 raise IgnoreRequest(
114 f"Ignored response {response} because its body "
115 f"({len(response.body)} B) exceeded DOWNLOAD_MAXSIZE "
116 f"({max_size} B) during decompression."
117 )
118 if len(response.body) < warn_size <= len(decoded_body):
119 logger.warning(
120 f"{response} body size after decompression "
121 f"({len(decoded_body)} B) is larger than the "
122 f"download warning size ({warn_size} B)."
123 )
124 if self.stats:
125 self.stats.inc_value(
126 "httpcompression/response_bytes",
127 len(decoded_body),
128 spider=spider,
129 )
130 self.stats.inc_value(
131 "httpcompression/response_count", spider=spider
132 )
133 respcls = responsetypes.from_args(
134 headers=response.headers, url=response.url, body=decoded_body
135 )
136 kwargs = dict(cls=respcls, body=decoded_body)
137 if issubclass(respcls, TextResponse):
138 # force recalculating the encoding until we make sure the
139 # responsetypes guessing is reliable
140 kwargs["encoding"] = None
141 response = response.replace(**kwargs)
142 if not content_encoding:
143 del response.headers["Content-Encoding"]
144
145 return response
146
147 def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:
148 if encoding == b"gzip" or encoding == b"x-gzip":
149 return gunzip(body, max_size=max_size)
150 if encoding == b"deflate":
151 return _inflate(body, max_size=max_size)
152 if encoding == b"br" and b"br" in ACCEPTED_ENCODINGS:
153 return _unbrotli(body, max_size=max_size)
154 if encoding == b"zstd" and b"zstd" in ACCEPTED_ENCODINGS:
155 return _unzstd(body, max_size=max_size)
156 return body
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py
--- a/scrapy/downloadermiddlewares/httpcompression.py
+++ b/scrapy/downloadermiddlewares/httpcompression.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import warnings
+from itertools import chain
from logging import getLogger
from typing import TYPE_CHECKING, List, Optional, Union
@@ -102,18 +103,18 @@
if isinstance(response, Response):
content_encoding = response.headers.getlist("Content-Encoding")
if content_encoding:
- encoding = content_encoding.pop()
max_size = request.meta.get("download_maxsize", self._max_size)
warn_size = request.meta.get("download_warnsize", self._warn_size)
try:
- decoded_body = self._decode(
- response.body, encoding.lower(), max_size
+ decoded_body, content_encoding = self._handle_encoding(
+ response.body, content_encoding, max_size
)
except _DecompressionMaxSizeExceeded:
raise IgnoreRequest(
f"Ignored response {response} because its body "
- f"({len(response.body)} B) exceeded DOWNLOAD_MAXSIZE "
- f"({max_size} B) during decompression."
+ f"({len(response.body)} B compressed) exceeded "
+ f"DOWNLOAD_MAXSIZE ({max_size} B) during "
+ f"decompression."
)
if len(response.body) < warn_size <= len(decoded_body):
logger.warning(
@@ -121,6 +122,7 @@
f"({len(decoded_body)} B) is larger than the "
f"download warning size ({warn_size} B)."
)
+ response.headers["Content-Encoding"] = content_encoding
if self.stats:
self.stats.inc_value(
"httpcompression/response_bytes",
@@ -144,6 +146,28 @@
return response
+ def _handle_encoding(self, body, content_encoding, max_size):
+ to_decode, to_keep = self._split_encodings(content_encoding)
+ for encoding in to_decode:
+ body = self._decode(body, encoding, max_size)
+ return body, to_keep
+
+ def _split_encodings(self, content_encoding):
+ to_keep = [
+ encoding.strip().lower()
+ for encoding in chain.from_iterable(
+ encodings.split(b",") for encodings in content_encoding
+ )
+ ]
+ to_decode = []
+ while to_keep:
+ encoding = to_keep.pop()
+ if encoding not in ACCEPTED_ENCODINGS:
+ to_keep.append(encoding)
+ return to_decode, to_keep
+ to_decode.append(encoding)
+ return to_decode, to_keep
+
def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:
if encoding == b"gzip" or encoding == b"x-gzip":
return gunzip(body, max_size=max_size)
|
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py\n--- a/scrapy/downloadermiddlewares/httpcompression.py\n+++ b/scrapy/downloadermiddlewares/httpcompression.py\n@@ -1,6 +1,7 @@\n from __future__ import annotations\n \n import warnings\n+from itertools import chain\n from logging import getLogger\n from typing import TYPE_CHECKING, List, Optional, Union\n \n@@ -102,18 +103,18 @@\n if isinstance(response, Response):\n content_encoding = response.headers.getlist(\"Content-Encoding\")\n if content_encoding:\n- encoding = content_encoding.pop()\n max_size = request.meta.get(\"download_maxsize\", self._max_size)\n warn_size = request.meta.get(\"download_warnsize\", self._warn_size)\n try:\n- decoded_body = self._decode(\n- response.body, encoding.lower(), max_size\n+ decoded_body, content_encoding = self._handle_encoding(\n+ response.body, content_encoding, max_size\n )\n except _DecompressionMaxSizeExceeded:\n raise IgnoreRequest(\n f\"Ignored response {response} because its body \"\n- f\"({len(response.body)} B) exceeded DOWNLOAD_MAXSIZE \"\n- f\"({max_size} B) during decompression.\"\n+ f\"({len(response.body)} B compressed) exceeded \"\n+ f\"DOWNLOAD_MAXSIZE ({max_size} B) during \"\n+ f\"decompression.\"\n )\n if len(response.body) < warn_size <= len(decoded_body):\n logger.warning(\n@@ -121,6 +122,7 @@\n f\"({len(decoded_body)} B) is larger than the \"\n f\"download warning size ({warn_size} B).\"\n )\n+ response.headers[\"Content-Encoding\"] = content_encoding\n if self.stats:\n self.stats.inc_value(\n \"httpcompression/response_bytes\",\n@@ -144,6 +146,28 @@\n \n return response\n \n+ def _handle_encoding(self, body, content_encoding, max_size):\n+ to_decode, to_keep = self._split_encodings(content_encoding)\n+ for encoding in to_decode:\n+ body = self._decode(body, encoding, max_size)\n+ return body, to_keep\n+\n+ def _split_encodings(self, content_encoding):\n+ to_keep = [\n+ encoding.strip().lower()\n+ for encoding in chain.from_iterable(\n+ encodings.split(b\",\") for encodings in content_encoding\n+ )\n+ ]\n+ to_decode = []\n+ while to_keep:\n+ encoding = to_keep.pop()\n+ if encoding not in ACCEPTED_ENCODINGS:\n+ to_keep.append(encoding)\n+ return to_decode, to_keep\n+ to_decode.append(encoding)\n+ return to_decode, to_keep\n+\n def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:\n if encoding == b\"gzip\" or encoding == b\"x-gzip\":\n return gunzip(body, max_size=max_size)\n", "issue": "Multiple-compressed responses support\nHTTP response body can be compressed multiple times. In such cases `Content-Encoding` header contains a list of comma-separated encodings. Some servers instead can send multiple `Content-Encoding` headers.\r\nThis fix allows HttpCompressionMiddleware handle that.\r\nSee also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding\nFix #5143\nFixes #5143 \n", "before_files": [{"content": "from __future__ import annotations\n\nimport warnings\nfrom logging import getLogger\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nfrom scrapy import Request, Spider, signals\nfrom scrapy.crawler import Crawler\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\nfrom scrapy.http import Response, TextResponse\nfrom scrapy.responsetypes import responsetypes\nfrom scrapy.statscollectors import StatsCollector\nfrom scrapy.utils._compression import (\n _DecompressionMaxSizeExceeded,\n _inflate,\n _unbrotli,\n _unzstd,\n)\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\nfrom scrapy.utils.gz import gunzip\n\nif TYPE_CHECKING:\n # typing.Self requires Python 3.11\n from typing_extensions import Self\n\nlogger = getLogger(__name__)\n\nACCEPTED_ENCODINGS: List[bytes] = [b\"gzip\", b\"deflate\"]\n\ntry:\n import brotli # noqa: F401\nexcept ImportError:\n pass\nelse:\n ACCEPTED_ENCODINGS.append(b\"br\")\n\ntry:\n import zstandard # noqa: F401\nexcept ImportError:\n pass\nelse:\n ACCEPTED_ENCODINGS.append(b\"zstd\")\n\n\nclass HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n\n def __init__(\n self,\n stats: Optional[StatsCollector] = None,\n *,\n crawler: Optional[Crawler] = None,\n ):\n if not crawler:\n self.stats = stats\n self._max_size = 1073741824\n self._warn_size = 33554432\n return\n self.stats = crawler.stats\n self._max_size = crawler.settings.getint(\"DOWNLOAD_MAXSIZE\")\n self._warn_size = crawler.settings.getint(\"DOWNLOAD_WARNSIZE\")\n crawler.signals.connect(self.open_spider, signals.spider_opened)\n\n @classmethod\n def from_crawler(cls, crawler: Crawler) -> Self:\n if not crawler.settings.getbool(\"COMPRESSION_ENABLED\"):\n raise NotConfigured\n try:\n return cls(crawler=crawler)\n except TypeError:\n warnings.warn(\n \"HttpCompressionMiddleware subclasses must either modify \"\n \"their '__init__' method to support a 'crawler' parameter or \"\n \"reimplement their 'from_crawler' method.\",\n ScrapyDeprecationWarning,\n )\n mw = cls()\n mw.stats = crawler.stats\n mw._max_size = crawler.settings.getint(\"DOWNLOAD_MAXSIZE\")\n mw._warn_size = crawler.settings.getint(\"DOWNLOAD_WARNSIZE\")\n crawler.signals.connect(mw.open_spider, signals.spider_opened)\n return mw\n\n def open_spider(self, spider):\n if hasattr(spider, \"download_maxsize\"):\n self._max_size = spider.download_maxsize\n if hasattr(spider, \"download_warnsize\"):\n self._warn_size = spider.download_warnsize\n\n def process_request(\n self, request: Request, spider: Spider\n ) -> Union[Request, Response, None]:\n request.headers.setdefault(\"Accept-Encoding\", b\", \".join(ACCEPTED_ENCODINGS))\n return None\n\n def process_response(\n self, request: Request, response: Response, spider: Spider\n ) -> Union[Request, Response]:\n if request.method == \"HEAD\":\n return response\n if isinstance(response, Response):\n content_encoding = response.headers.getlist(\"Content-Encoding\")\n if content_encoding:\n encoding = content_encoding.pop()\n max_size = request.meta.get(\"download_maxsize\", self._max_size)\n warn_size = request.meta.get(\"download_warnsize\", self._warn_size)\n try:\n decoded_body = self._decode(\n response.body, encoding.lower(), max_size\n )\n except _DecompressionMaxSizeExceeded:\n raise IgnoreRequest(\n f\"Ignored response {response} because its body \"\n f\"({len(response.body)} B) exceeded DOWNLOAD_MAXSIZE \"\n f\"({max_size} B) during decompression.\"\n )\n if len(response.body) < warn_size <= len(decoded_body):\n logger.warning(\n f\"{response} body size after decompression \"\n f\"({len(decoded_body)} B) is larger than the \"\n f\"download warning size ({warn_size} B).\"\n )\n if self.stats:\n self.stats.inc_value(\n \"httpcompression/response_bytes\",\n len(decoded_body),\n spider=spider,\n )\n self.stats.inc_value(\n \"httpcompression/response_count\", spider=spider\n )\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n kwargs = dict(cls=respcls, body=decoded_body)\n if issubclass(respcls, TextResponse):\n # force recalculating the encoding until we make sure the\n # responsetypes guessing is reliable\n kwargs[\"encoding\"] = None\n response = response.replace(**kwargs)\n if not content_encoding:\n del response.headers[\"Content-Encoding\"]\n\n return response\n\n def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:\n if encoding == b\"gzip\" or encoding == b\"x-gzip\":\n return gunzip(body, max_size=max_size)\n if encoding == b\"deflate\":\n return _inflate(body, max_size=max_size)\n if encoding == b\"br\" and b\"br\" in ACCEPTED_ENCODINGS:\n return _unbrotli(body, max_size=max_size)\n if encoding == b\"zstd\" and b\"zstd\" in ACCEPTED_ENCODINGS:\n return _unzstd(body, max_size=max_size)\n return body\n", "path": "scrapy/downloadermiddlewares/httpcompression.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport warnings\nfrom itertools import chain\nfrom logging import getLogger\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nfrom scrapy import Request, Spider, signals\nfrom scrapy.crawler import Crawler\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\nfrom scrapy.http import Response, TextResponse\nfrom scrapy.responsetypes import responsetypes\nfrom scrapy.statscollectors import StatsCollector\nfrom scrapy.utils._compression import (\n _DecompressionMaxSizeExceeded,\n _inflate,\n _unbrotli,\n _unzstd,\n)\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\nfrom scrapy.utils.gz import gunzip\n\nif TYPE_CHECKING:\n # typing.Self requires Python 3.11\n from typing_extensions import Self\n\nlogger = getLogger(__name__)\n\nACCEPTED_ENCODINGS: List[bytes] = [b\"gzip\", b\"deflate\"]\n\ntry:\n import brotli # noqa: F401\nexcept ImportError:\n pass\nelse:\n ACCEPTED_ENCODINGS.append(b\"br\")\n\ntry:\n import zstandard # noqa: F401\nexcept ImportError:\n pass\nelse:\n ACCEPTED_ENCODINGS.append(b\"zstd\")\n\n\nclass HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n\n def __init__(\n self,\n stats: Optional[StatsCollector] = None,\n *,\n crawler: Optional[Crawler] = None,\n ):\n if not crawler:\n self.stats = stats\n self._max_size = 1073741824\n self._warn_size = 33554432\n return\n self.stats = crawler.stats\n self._max_size = crawler.settings.getint(\"DOWNLOAD_MAXSIZE\")\n self._warn_size = crawler.settings.getint(\"DOWNLOAD_WARNSIZE\")\n crawler.signals.connect(self.open_spider, signals.spider_opened)\n\n @classmethod\n def from_crawler(cls, crawler: Crawler) -> Self:\n if not crawler.settings.getbool(\"COMPRESSION_ENABLED\"):\n raise NotConfigured\n try:\n return cls(crawler=crawler)\n except TypeError:\n warnings.warn(\n \"HttpCompressionMiddleware subclasses must either modify \"\n \"their '__init__' method to support a 'crawler' parameter or \"\n \"reimplement their 'from_crawler' method.\",\n ScrapyDeprecationWarning,\n )\n mw = cls()\n mw.stats = crawler.stats\n mw._max_size = crawler.settings.getint(\"DOWNLOAD_MAXSIZE\")\n mw._warn_size = crawler.settings.getint(\"DOWNLOAD_WARNSIZE\")\n crawler.signals.connect(mw.open_spider, signals.spider_opened)\n return mw\n\n def open_spider(self, spider):\n if hasattr(spider, \"download_maxsize\"):\n self._max_size = spider.download_maxsize\n if hasattr(spider, \"download_warnsize\"):\n self._warn_size = spider.download_warnsize\n\n def process_request(\n self, request: Request, spider: Spider\n ) -> Union[Request, Response, None]:\n request.headers.setdefault(\"Accept-Encoding\", b\", \".join(ACCEPTED_ENCODINGS))\n return None\n\n def process_response(\n self, request: Request, response: Response, spider: Spider\n ) -> Union[Request, Response]:\n if request.method == \"HEAD\":\n return response\n if isinstance(response, Response):\n content_encoding = response.headers.getlist(\"Content-Encoding\")\n if content_encoding:\n max_size = request.meta.get(\"download_maxsize\", self._max_size)\n warn_size = request.meta.get(\"download_warnsize\", self._warn_size)\n try:\n decoded_body, content_encoding = self._handle_encoding(\n response.body, content_encoding, max_size\n )\n except _DecompressionMaxSizeExceeded:\n raise IgnoreRequest(\n f\"Ignored response {response} because its body \"\n f\"({len(response.body)} B compressed) exceeded \"\n f\"DOWNLOAD_MAXSIZE ({max_size} B) during \"\n f\"decompression.\"\n )\n if len(response.body) < warn_size <= len(decoded_body):\n logger.warning(\n f\"{response} body size after decompression \"\n f\"({len(decoded_body)} B) is larger than the \"\n f\"download warning size ({warn_size} B).\"\n )\n response.headers[\"Content-Encoding\"] = content_encoding\n if self.stats:\n self.stats.inc_value(\n \"httpcompression/response_bytes\",\n len(decoded_body),\n spider=spider,\n )\n self.stats.inc_value(\n \"httpcompression/response_count\", spider=spider\n )\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n kwargs = dict(cls=respcls, body=decoded_body)\n if issubclass(respcls, TextResponse):\n # force recalculating the encoding until we make sure the\n # responsetypes guessing is reliable\n kwargs[\"encoding\"] = None\n response = response.replace(**kwargs)\n if not content_encoding:\n del response.headers[\"Content-Encoding\"]\n\n return response\n\n def _handle_encoding(self, body, content_encoding, max_size):\n to_decode, to_keep = self._split_encodings(content_encoding)\n for encoding in to_decode:\n body = self._decode(body, encoding, max_size)\n return body, to_keep\n\n def _split_encodings(self, content_encoding):\n to_keep = [\n encoding.strip().lower()\n for encoding in chain.from_iterable(\n encodings.split(b\",\") for encodings in content_encoding\n )\n ]\n to_decode = []\n while to_keep:\n encoding = to_keep.pop()\n if encoding not in ACCEPTED_ENCODINGS:\n to_keep.append(encoding)\n return to_decode, to_keep\n to_decode.append(encoding)\n return to_decode, to_keep\n\n def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:\n if encoding == b\"gzip\" or encoding == b\"x-gzip\":\n return gunzip(body, max_size=max_size)\n if encoding == b\"deflate\":\n return _inflate(body, max_size=max_size)\n if encoding == b\"br\" and b\"br\" in ACCEPTED_ENCODINGS:\n return _unbrotli(body, max_size=max_size)\n if encoding == b\"zstd\" and b\"zstd\" in ACCEPTED_ENCODINGS:\n return _unzstd(body, max_size=max_size)\n return body\n", "path": "scrapy/downloadermiddlewares/httpcompression.py"}]}
| 1,972 | 660 |
gh_patches_debug_31051
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-441
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add build completion to buildroot override add
From: https://fedorahosted.org/bodhi/ticket/788
"Small regression in Bodhi 2.0: builds completion fails to work in the override page.
I tried to submit an override for qpid-cpp-0.32-8.fc23 (due to boost rebuild induced ABI break), and it failed in both discovering the name of the source package and the nvr."
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/views/generic.py`
Content:
```
1 # This program is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU General Public License
3 # as published by the Free Software Foundation; either version 2
4 # of the License, or (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # GNU General Public License for more details.
10 #
11 # You should have received a copy of the GNU General Public License
12 # along with this program; if not, write to the Free Software
13 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14
15 import datetime
16 import sqlalchemy as sa
17
18 from pyramid.security import authenticated_userid
19 from pyramid.view import view_config, notfound_view_config
20 from pyramid.exceptions import HTTPNotFound, HTTPForbidden
21
22 from bodhi import log
23 import bodhi.models
24 import bodhi.util
25
26
27 @notfound_view_config(append_slash=True)
28 def notfound_view(context, request):
29 """ Automatically redirects to slash-appended routes.
30
31 http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/urldispatch.html#redirecting-to-slash-appended-rou
32 """
33 return HTTPNotFound()
34
35
36 def get_top_testers(request):
37 db = request.db
38 blacklist = request.registry.settings.get('stats_blacklist').split()
39 days = int(request.registry.settings.get('top_testers_timeframe', 7))
40 start_time = datetime.datetime.utcnow() - datetime.timedelta(days=days)
41
42 query = db.query(
43 bodhi.models.User,
44 sa.func.count(bodhi.models.User.comments).label('count_1')
45 ).join(bodhi.models.Comment)
46 query = query\
47 .order_by('count_1 desc')\
48 .filter(bodhi.models.Comment.timestamp > start_time)
49
50 for user in blacklist:
51 query = query.filter(bodhi.models.User.name != user)
52
53 return query\
54 .group_by(bodhi.models.User)\
55 .limit(5)\
56 .all()
57
58
59 def get_latest_updates(request, critpath, security):
60 db = request.db
61 query = db.query(bodhi.models.Update)
62
63 if critpath:
64 query = query.filter(
65 bodhi.models.Update.critpath==True)
66 if security:
67 query = query.filter(
68 bodhi.models.Update.type==bodhi.models.UpdateType.security)
69
70 query = query.filter(
71 bodhi.models.Update.status==bodhi.models.UpdateStatus.testing)
72
73 query = query.order_by(bodhi.models.Update.date_submitted.desc())
74 return query.limit(5).all()
75
76
77 @view_config(route_name='home', renderer='home.html')
78 def home(request):
79 """ Returns data for the frontpage """
80 r = request
81
82 @request.cache.cache_on_arguments()
83 def work():
84 top_testers = get_top_testers(request)
85 critpath_updates = get_latest_updates(request, True, False)
86 security_updates = get_latest_updates(request, False, True)
87
88 return {
89 "top_testers": [(obj.__json__(r), n) for obj, n in top_testers],
90 "critpath_updates": [obj.__json__(r) for obj in critpath_updates],
91 "security_updates": [obj.__json__(r) for obj in security_updates],
92 }
93
94 return work()
95
96
97 @view_config(route_name='new_update', renderer='new_update.html')
98 def new_update(request):
99 """ Returns the new update form """
100 user = authenticated_userid(request)
101 if not user:
102 raise HTTPForbidden("You must be logged in.")
103 return dict(
104 update=None,
105 types=reversed(bodhi.models.UpdateType.values()),
106 severities=reversed(bodhi.models.UpdateSeverity.values()),
107 suggestions=reversed(bodhi.models.UpdateSuggestion.values()),
108 )
109
110
111 @view_config(route_name='latest_candidates', renderer='json')
112 def latest_candidates(request):
113 """
114 For a given `package`, this method returns the most recent builds tagged
115 into the Release.candidate_tag for all Releases.
116 """
117 koji = request.koji
118 db = request.db
119
120 @request.cache.cache_on_arguments()
121 def work(pkg):
122 result = []
123 koji.multicall = True
124
125 releases = db.query(bodhi.models.Release) \
126 .filter(
127 bodhi.models.Release.state.in_(
128 (bodhi.models.ReleaseState.pending,
129 bodhi.models.ReleaseState.current)))
130
131 for release in releases:
132 koji.listTagged(release.candidate_tag, package=pkg, latest=True)
133
134 builds = koji.multiCall() or [] # Protect against None
135
136 for build in builds:
137 if isinstance(build, dict):
138 continue
139 if build and build[0] and build[0][0]:
140 result.append({
141 'nvr': build[0][0]['nvr'],
142 'id': build[0][0]['id'],
143 })
144 return result
145
146
147 pkg = request.params.get('package')
148 log.debug('latest_candidate(%r)' % pkg)
149
150 if not pkg:
151 return []
152
153 result = work(pkg)
154
155 log.debug(result)
156 return result
157
158
159 @view_config(route_name='latest_builds', renderer='json')
160 def latest_builds(request):
161 """ Get a list of the latest builds for a given package.
162
163 Returns a dictionary of the release dist tag to the latest build.
164 """
165 builds = {}
166 koji = request.koji
167 package = request.params.get('package')
168 for tag_type, tags in bodhi.models.Release.get_tags()[0].iteritems():
169 for tag in tags:
170 try:
171 for build in koji.getLatestBuilds(tag, package=package):
172 builds[tag] = build['nvr']
173 except: # Things like EPEL don't have pending tags
174 pass
175 return builds
176
177
178 @view_config(route_name='new_override', renderer='override.html')
179 def new_override(request):
180 """ Returns the new buildroot override form """
181 user = authenticated_userid(request)
182 if not user:
183 raise HTTPForbidden("You must be logged in.")
184 return dict()
185
186
187 @view_config(route_name='api_version', renderer='json')
188 def api_version(request):
189 """ Returns the Bodhi API version """
190 return dict(version=bodhi.util.version())
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bodhi/views/generic.py b/bodhi/views/generic.py
--- a/bodhi/views/generic.py
+++ b/bodhi/views/generic.py
@@ -16,6 +16,7 @@
import sqlalchemy as sa
from pyramid.security import authenticated_userid
+from pyramid.settings import asbool
from pyramid.view import view_config, notfound_view_config
from pyramid.exceptions import HTTPNotFound, HTTPForbidden
@@ -118,7 +119,7 @@
db = request.db
@request.cache.cache_on_arguments()
- def work(pkg):
+ def work(pkg, testing):
result = []
koji.multicall = True
@@ -128,8 +129,12 @@
(bodhi.models.ReleaseState.pending,
bodhi.models.ReleaseState.current)))
+ kwargs = dict(package=pkg, latest=True)
for release in releases:
- koji.listTagged(release.candidate_tag, package=pkg, latest=True)
+ koji.listTagged(release.candidate_tag, **kwargs)
+ if testing:
+ koji.listTagged(release.testing_tag, **kwargs)
+ koji.listTagged(release.pending_testing_tag, **kwargs)
builds = koji.multiCall() or [] # Protect against None
@@ -145,12 +150,13 @@
pkg = request.params.get('package')
- log.debug('latest_candidate(%r)' % pkg)
+ testing = asbool(request.params.get('testing'))
+ log.debug('latest_candidate(%r, %r)' % (pkg, testing))
if not pkg:
return []
- result = work(pkg)
+ result = work(pkg, testing)
log.debug(result)
return result
|
{"golden_diff": "diff --git a/bodhi/views/generic.py b/bodhi/views/generic.py\n--- a/bodhi/views/generic.py\n+++ b/bodhi/views/generic.py\n@@ -16,6 +16,7 @@\n import sqlalchemy as sa\n \n from pyramid.security import authenticated_userid\n+from pyramid.settings import asbool\n from pyramid.view import view_config, notfound_view_config\n from pyramid.exceptions import HTTPNotFound, HTTPForbidden\n \n@@ -118,7 +119,7 @@\n db = request.db\n \n @request.cache.cache_on_arguments()\n- def work(pkg):\n+ def work(pkg, testing):\n result = []\n koji.multicall = True\n \n@@ -128,8 +129,12 @@\n (bodhi.models.ReleaseState.pending,\n bodhi.models.ReleaseState.current)))\n \n+ kwargs = dict(package=pkg, latest=True)\n for release in releases:\n- koji.listTagged(release.candidate_tag, package=pkg, latest=True)\n+ koji.listTagged(release.candidate_tag, **kwargs)\n+ if testing:\n+ koji.listTagged(release.testing_tag, **kwargs)\n+ koji.listTagged(release.pending_testing_tag, **kwargs)\n \n builds = koji.multiCall() or [] # Protect against None\n \n@@ -145,12 +150,13 @@\n \n \n pkg = request.params.get('package')\n- log.debug('latest_candidate(%r)' % pkg)\n+ testing = asbool(request.params.get('testing'))\n+ log.debug('latest_candidate(%r, %r)' % (pkg, testing))\n \n if not pkg:\n return []\n \n- result = work(pkg)\n+ result = work(pkg, testing)\n \n log.debug(result)\n return result\n", "issue": "add build completion to buildroot override add\nFrom: https://fedorahosted.org/bodhi/ticket/788\n\n\"Small regression in Bodhi 2.0: builds completion fails to work in the override page.\n\nI tried to submit an override for qpid-cpp-0.32-8.fc23 (due to boost rebuild induced ABI break), and it failed in both discovering the name of the source package and the nvr.\"\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nimport datetime\nimport sqlalchemy as sa\n\nfrom pyramid.security import authenticated_userid\nfrom pyramid.view import view_config, notfound_view_config\nfrom pyramid.exceptions import HTTPNotFound, HTTPForbidden\n\nfrom bodhi import log\nimport bodhi.models\nimport bodhi.util\n\n\n@notfound_view_config(append_slash=True)\ndef notfound_view(context, request):\n \"\"\" Automatically redirects to slash-appended routes.\n\n http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/urldispatch.html#redirecting-to-slash-appended-rou\n \"\"\"\n return HTTPNotFound()\n\n\ndef get_top_testers(request):\n db = request.db\n blacklist = request.registry.settings.get('stats_blacklist').split()\n days = int(request.registry.settings.get('top_testers_timeframe', 7))\n start_time = datetime.datetime.utcnow() - datetime.timedelta(days=days)\n\n query = db.query(\n bodhi.models.User,\n sa.func.count(bodhi.models.User.comments).label('count_1')\n ).join(bodhi.models.Comment)\n query = query\\\n .order_by('count_1 desc')\\\n .filter(bodhi.models.Comment.timestamp > start_time)\n\n for user in blacklist:\n query = query.filter(bodhi.models.User.name != user)\n\n return query\\\n .group_by(bodhi.models.User)\\\n .limit(5)\\\n .all()\n\n\ndef get_latest_updates(request, critpath, security):\n db = request.db\n query = db.query(bodhi.models.Update)\n\n if critpath:\n query = query.filter(\n bodhi.models.Update.critpath==True)\n if security:\n query = query.filter(\n bodhi.models.Update.type==bodhi.models.UpdateType.security)\n\n query = query.filter(\n bodhi.models.Update.status==bodhi.models.UpdateStatus.testing)\n\n query = query.order_by(bodhi.models.Update.date_submitted.desc())\n return query.limit(5).all()\n\n\n@view_config(route_name='home', renderer='home.html')\ndef home(request):\n \"\"\" Returns data for the frontpage \"\"\"\n r = request\n\n @request.cache.cache_on_arguments()\n def work():\n top_testers = get_top_testers(request)\n critpath_updates = get_latest_updates(request, True, False)\n security_updates = get_latest_updates(request, False, True)\n\n return {\n \"top_testers\": [(obj.__json__(r), n) for obj, n in top_testers],\n \"critpath_updates\": [obj.__json__(r) for obj in critpath_updates],\n \"security_updates\": [obj.__json__(r) for obj in security_updates],\n }\n\n return work()\n\n\n@view_config(route_name='new_update', renderer='new_update.html')\ndef new_update(request):\n \"\"\" Returns the new update form \"\"\"\n user = authenticated_userid(request)\n if not user:\n raise HTTPForbidden(\"You must be logged in.\")\n return dict(\n update=None,\n types=reversed(bodhi.models.UpdateType.values()),\n severities=reversed(bodhi.models.UpdateSeverity.values()),\n suggestions=reversed(bodhi.models.UpdateSuggestion.values()),\n )\n\n\n@view_config(route_name='latest_candidates', renderer='json')\ndef latest_candidates(request):\n \"\"\"\n For a given `package`, this method returns the most recent builds tagged\n into the Release.candidate_tag for all Releases.\n \"\"\"\n koji = request.koji\n db = request.db\n\n @request.cache.cache_on_arguments()\n def work(pkg):\n result = []\n koji.multicall = True\n\n releases = db.query(bodhi.models.Release) \\\n .filter(\n bodhi.models.Release.state.in_(\n (bodhi.models.ReleaseState.pending,\n bodhi.models.ReleaseState.current)))\n\n for release in releases:\n koji.listTagged(release.candidate_tag, package=pkg, latest=True)\n\n builds = koji.multiCall() or [] # Protect against None\n\n for build in builds:\n if isinstance(build, dict):\n continue\n if build and build[0] and build[0][0]:\n result.append({\n 'nvr': build[0][0]['nvr'],\n 'id': build[0][0]['id'],\n })\n return result\n\n\n pkg = request.params.get('package')\n log.debug('latest_candidate(%r)' % pkg)\n\n if not pkg:\n return []\n\n result = work(pkg)\n\n log.debug(result)\n return result\n\n\n@view_config(route_name='latest_builds', renderer='json')\ndef latest_builds(request):\n \"\"\" Get a list of the latest builds for a given package.\n\n Returns a dictionary of the release dist tag to the latest build.\n \"\"\"\n builds = {}\n koji = request.koji\n package = request.params.get('package')\n for tag_type, tags in bodhi.models.Release.get_tags()[0].iteritems():\n for tag in tags:\n try:\n for build in koji.getLatestBuilds(tag, package=package):\n builds[tag] = build['nvr']\n except: # Things like EPEL don't have pending tags\n pass\n return builds\n\n\n@view_config(route_name='new_override', renderer='override.html')\ndef new_override(request):\n \"\"\" Returns the new buildroot override form \"\"\"\n user = authenticated_userid(request)\n if not user:\n raise HTTPForbidden(\"You must be logged in.\")\n return dict()\n\n\n@view_config(route_name='api_version', renderer='json')\ndef api_version(request):\n \"\"\" Returns the Bodhi API version \"\"\"\n return dict(version=bodhi.util.version())\n", "path": "bodhi/views/generic.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nimport datetime\nimport sqlalchemy as sa\n\nfrom pyramid.security import authenticated_userid\nfrom pyramid.settings import asbool\nfrom pyramid.view import view_config, notfound_view_config\nfrom pyramid.exceptions import HTTPNotFound, HTTPForbidden\n\nfrom bodhi import log\nimport bodhi.models\nimport bodhi.util\n\n\n@notfound_view_config(append_slash=True)\ndef notfound_view(context, request):\n \"\"\" Automatically redirects to slash-appended routes.\n\n http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/urldispatch.html#redirecting-to-slash-appended-rou\n \"\"\"\n return HTTPNotFound()\n\n\ndef get_top_testers(request):\n db = request.db\n blacklist = request.registry.settings.get('stats_blacklist').split()\n days = int(request.registry.settings.get('top_testers_timeframe', 7))\n start_time = datetime.datetime.utcnow() - datetime.timedelta(days=days)\n\n query = db.query(\n bodhi.models.User,\n sa.func.count(bodhi.models.User.comments).label('count_1')\n ).join(bodhi.models.Comment)\n query = query\\\n .order_by('count_1 desc')\\\n .filter(bodhi.models.Comment.timestamp > start_time)\n\n for user in blacklist:\n query = query.filter(bodhi.models.User.name != user)\n\n return query\\\n .group_by(bodhi.models.User)\\\n .limit(5)\\\n .all()\n\n\ndef get_latest_updates(request, critpath, security):\n db = request.db\n query = db.query(bodhi.models.Update)\n\n if critpath:\n query = query.filter(\n bodhi.models.Update.critpath==True)\n if security:\n query = query.filter(\n bodhi.models.Update.type==bodhi.models.UpdateType.security)\n\n query = query.filter(\n bodhi.models.Update.status==bodhi.models.UpdateStatus.testing)\n\n query = query.order_by(bodhi.models.Update.date_submitted.desc())\n return query.limit(5).all()\n\n\n@view_config(route_name='home', renderer='home.html')\ndef home(request):\n \"\"\" Returns data for the frontpage \"\"\"\n r = request\n\n @request.cache.cache_on_arguments()\n def work():\n top_testers = get_top_testers(request)\n critpath_updates = get_latest_updates(request, True, False)\n security_updates = get_latest_updates(request, False, True)\n\n return {\n \"top_testers\": [(obj.__json__(r), n) for obj, n in top_testers],\n \"critpath_updates\": [obj.__json__(r) for obj in critpath_updates],\n \"security_updates\": [obj.__json__(r) for obj in security_updates],\n }\n\n return work()\n\n\n@view_config(route_name='new_update', renderer='new_update.html')\ndef new_update(request):\n \"\"\" Returns the new update form \"\"\"\n user = authenticated_userid(request)\n if not user:\n raise HTTPForbidden(\"You must be logged in.\")\n return dict(\n update=None,\n types=reversed(bodhi.models.UpdateType.values()),\n severities=reversed(bodhi.models.UpdateSeverity.values()),\n suggestions=reversed(bodhi.models.UpdateSuggestion.values()),\n )\n\n\n@view_config(route_name='latest_candidates', renderer='json')\ndef latest_candidates(request):\n \"\"\"\n For a given `package`, this method returns the most recent builds tagged\n into the Release.candidate_tag for all Releases.\n \"\"\"\n koji = request.koji\n db = request.db\n\n @request.cache.cache_on_arguments()\n def work(pkg, testing):\n result = []\n koji.multicall = True\n\n releases = db.query(bodhi.models.Release) \\\n .filter(\n bodhi.models.Release.state.in_(\n (bodhi.models.ReleaseState.pending,\n bodhi.models.ReleaseState.current)))\n\n kwargs = dict(package=pkg, latest=True)\n for release in releases:\n koji.listTagged(release.candidate_tag, **kwargs)\n if testing:\n koji.listTagged(release.testing_tag, **kwargs)\n koji.listTagged(release.pending_testing_tag, **kwargs)\n\n builds = koji.multiCall() or [] # Protect against None\n\n for build in builds:\n if isinstance(build, dict):\n continue\n if build and build[0] and build[0][0]:\n result.append({\n 'nvr': build[0][0]['nvr'],\n 'id': build[0][0]['id'],\n })\n return result\n\n\n pkg = request.params.get('package')\n testing = asbool(request.params.get('testing'))\n log.debug('latest_candidate(%r, %r)' % (pkg, testing))\n\n if not pkg:\n return []\n\n result = work(pkg, testing)\n\n log.debug(result)\n return result\n\n\n@view_config(route_name='latest_builds', renderer='json')\ndef latest_builds(request):\n \"\"\" Get a list of the latest builds for a given package.\n\n Returns a dictionary of the release dist tag to the latest build.\n \"\"\"\n builds = {}\n koji = request.koji\n package = request.params.get('package')\n for tag_type, tags in bodhi.models.Release.get_tags()[0].iteritems():\n for tag in tags:\n try:\n for build in koji.getLatestBuilds(tag, package=package):\n builds[tag] = build['nvr']\n except: # Things like EPEL don't have pending tags\n pass\n return builds\n\n\n@view_config(route_name='new_override', renderer='override.html')\ndef new_override(request):\n \"\"\" Returns the new buildroot override form \"\"\"\n user = authenticated_userid(request)\n if not user:\n raise HTTPForbidden(\"You must be logged in.\")\n return dict()\n\n\n@view_config(route_name='api_version', renderer='json')\ndef api_version(request):\n \"\"\" Returns the Bodhi API version \"\"\"\n return dict(version=bodhi.util.version())\n", "path": "bodhi/views/generic.py"}]}
| 2,217 | 390 |
gh_patches_debug_8807
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-1819
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PCC gives error with AMP
## 🐛 Bug
On turning on Automatic Mixed Precision, there is an error in Pearson Correlation Coefficient computation at this [line](https://github.com/Lightning-AI/torchmetrics/blob/962f82db7b0ab7ca32fe050aba45f1d3f9fe64c9/src/torchmetrics/functional/regression/pearson.py#L95). The trace is as follows,
```
File "/home/nikhil/miniconda3/envs/mrl/lib/python3.8/site-packages/torchmetrics/functional/regression/pearson.py", line 80, in _pearson_corrcoef_compute
corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
RuntimeError: "sqrt_vml_cpu" not implemented for 'Half'
```
Turning AMP off, it fixes it.
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
Steps to reproduce the behavior...
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<details>
<summary>Code sample</summary>
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
</details>
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- TorchMetrics version : `0.11.4` installed via `pip`
- Python & PyTorch Version (e.g., 1.0): `Python 3.8`, `Torch 2.0.1+cu118`
- Any other relevant information such as OS (e.g., Linux): `Linux`
### Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/functional/regression/pearson.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs
20 from torchmetrics.utilities.checks import _check_same_shape
21
22
23 def _pearson_corrcoef_update(
24 preds: Tensor,
25 target: Tensor,
26 mean_x: Tensor,
27 mean_y: Tensor,
28 var_x: Tensor,
29 var_y: Tensor,
30 corr_xy: Tensor,
31 n_prior: Tensor,
32 num_outputs: int,
33 ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
34 """Update and returns variables required to compute Pearson Correlation Coefficient.
35
36 Check for same shape of input tensors.
37
38 Args:
39 preds: estimated scores
40 target: ground truth scores
41 mean_x: current mean estimate of x tensor
42 mean_y: current mean estimate of y tensor
43 var_x: current variance estimate of x tensor
44 var_y: current variance estimate of y tensor
45 corr_xy: current covariance estimate between x and y tensor
46 n_prior: current number of observed observations
47 num_outputs: Number of outputs in multioutput setting
48 """
49 # Data checking
50 _check_same_shape(preds, target)
51 _check_data_shape_to_num_outputs(preds, target, num_outputs)
52 cond = n_prior.mean() > 0
53
54 n_obs = preds.shape[0]
55 if cond:
56 mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)
57 my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)
58 else:
59 mx_new = preds.mean(0)
60 my_new = target.mean(0)
61
62 n_prior += n_obs
63
64 if cond:
65 var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)
66 var_y += ((target - my_new) * (target - mean_y)).sum(0)
67
68 else:
69 var_x += preds.var(0) * (n_obs - 1)
70 var_y += target.var(0) * (n_obs - 1)
71 corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)
72 mean_x = mx_new
73 mean_y = my_new
74
75 return mean_x, mean_y, var_x, var_y, corr_xy, n_prior
76
77
78 def _pearson_corrcoef_compute(
79 var_x: Tensor,
80 var_y: Tensor,
81 corr_xy: Tensor,
82 nb: Tensor,
83 ) -> Tensor:
84 """Compute the final pearson correlation based on accumulated statistics.
85
86 Args:
87 var_x: variance estimate of x tensor
88 var_y: variance estimate of y tensor
89 corr_xy: covariance estimate between x and y tensor
90 nb: number of observations
91 """
92 var_x /= nb - 1
93 var_y /= nb - 1
94 corr_xy /= nb - 1
95 corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
96 return torch.clamp(corrcoef, -1.0, 1.0)
97
98
99 def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
100 """Compute pearson correlation coefficient.
101
102 Args:
103 preds: estimated scores
104 target: ground truth scores
105
106 Example (single output regression):
107 >>> from torchmetrics.functional.regression import pearson_corrcoef
108 >>> target = torch.tensor([3, -0.5, 2, 7])
109 >>> preds = torch.tensor([2.5, 0.0, 2, 8])
110 >>> pearson_corrcoef(preds, target)
111 tensor(0.9849)
112
113 Example (multi output regression):
114 >>> from torchmetrics.functional.regression import pearson_corrcoef
115 >>> target = torch.tensor([[3, -0.5], [2, 7]])
116 >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
117 >>> pearson_corrcoef(preds, target)
118 tensor([1., 1.])
119 """
120 d = preds.shape[1] if preds.ndim == 2 else 1
121 _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)
122 mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()
123 var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()
124 _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(
125 preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]
126 )
127 return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py
--- a/src/torchmetrics/functional/regression/pearson.py
+++ b/src/torchmetrics/functional/regression/pearson.py
@@ -92,6 +92,12 @@
var_x /= nb - 1
var_y /= nb - 1
corr_xy /= nb - 1
+ # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16
+ # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed
+ if var_x.dtype == torch.float16 and var_x.device == torch.device("cpu"):
+ var_x = var_x.bfloat16()
+ var_y = var_y.bfloat16()
+
corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
return torch.clamp(corrcoef, -1.0, 1.0)
|
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py\n--- a/src/torchmetrics/functional/regression/pearson.py\n+++ b/src/torchmetrics/functional/regression/pearson.py\n@@ -92,6 +92,12 @@\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n+ # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16\n+ # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed\n+ if var_x.dtype == torch.float16 and var_x.device == torch.device(\"cpu\"):\n+ var_x = var_x.bfloat16()\n+ var_y = var_y.bfloat16()\n+\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n", "issue": "PCC gives error with AMP\n## \ud83d\udc1b Bug\r\n\r\nOn turning on Automatic Mixed Precision, there is an error in Pearson Correlation Coefficient computation at this [line](https://github.com/Lightning-AI/torchmetrics/blob/962f82db7b0ab7ca32fe050aba45f1d3f9fe64c9/src/torchmetrics/functional/regression/pearson.py#L95). The trace is as follows,\r\n```\r\n File \"/home/nikhil/miniconda3/envs/mrl/lib/python3.8/site-packages/torchmetrics/functional/regression/pearson.py\", line 80, in _pearson_corrcoef_compute \r\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze() \r\nRuntimeError: \"sqrt_vml_cpu\" not implemented for 'Half'\r\n```\r\n\r\nTurning AMP off, it fixes it. \r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>Code sample</summary>\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- TorchMetrics version : `0.11.4` installed via `pip`\r\n- Python & PyTorch Version (e.g., 1.0): `Python 3.8`, `Torch 2.0.1+cu118`\r\n- Any other relevant information such as OS (e.g., Linux): `Linux`\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _pearson_corrcoef_update(\n preds: Tensor,\n target: Tensor,\n mean_x: Tensor,\n mean_y: Tensor,\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n n_prior: Tensor,\n num_outputs: int,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Pearson Correlation Coefficient.\n\n Check for same shape of input tensors.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n mean_x: current mean estimate of x tensor\n mean_y: current mean estimate of y tensor\n var_x: current variance estimate of x tensor\n var_y: current variance estimate of y tensor\n corr_xy: current covariance estimate between x and y tensor\n n_prior: current number of observed observations\n num_outputs: Number of outputs in multioutput setting\n \"\"\"\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n cond = n_prior.mean() > 0\n\n n_obs = preds.shape[0]\n if cond:\n mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n else:\n mx_new = preds.mean(0)\n my_new = target.mean(0)\n\n n_prior += n_obs\n\n if cond:\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n\n else:\n var_x += preds.var(0) * (n_obs - 1)\n var_y += target.var(0) * (n_obs - 1)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior\n\n\ndef _pearson_corrcoef_compute(\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n nb: Tensor,\n) -> Tensor:\n \"\"\"Compute the final pearson correlation based on accumulated statistics.\n\n Args:\n var_x: variance estimate of x tensor\n var_y: variance estimate of y tensor\n corr_xy: covariance estimate between x and y tensor\n nb: number of observations\n \"\"\"\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n\n\ndef pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute pearson correlation coefficient.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n\n Example (single output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson_corrcoef(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson_corrcoef(preds, target)\n tensor([1., 1.])\n \"\"\"\n d = preds.shape[1] if preds.ndim == 2 else 1\n _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)\n mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()\n var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()\n _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(\n preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]\n )\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)\n", "path": "src/torchmetrics/functional/regression/pearson.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _pearson_corrcoef_update(\n preds: Tensor,\n target: Tensor,\n mean_x: Tensor,\n mean_y: Tensor,\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n n_prior: Tensor,\n num_outputs: int,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Pearson Correlation Coefficient.\n\n Check for same shape of input tensors.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n mean_x: current mean estimate of x tensor\n mean_y: current mean estimate of y tensor\n var_x: current variance estimate of x tensor\n var_y: current variance estimate of y tensor\n corr_xy: current covariance estimate between x and y tensor\n n_prior: current number of observed observations\n num_outputs: Number of outputs in multioutput setting\n \"\"\"\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n cond = n_prior.mean() > 0\n\n n_obs = preds.shape[0]\n if cond:\n mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n else:\n mx_new = preds.mean(0)\n my_new = target.mean(0)\n\n n_prior += n_obs\n\n if cond:\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n\n else:\n var_x += preds.var(0) * (n_obs - 1)\n var_y += target.var(0) * (n_obs - 1)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior\n\n\ndef _pearson_corrcoef_compute(\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n nb: Tensor,\n) -> Tensor:\n \"\"\"Compute the final pearson correlation based on accumulated statistics.\n\n Args:\n var_x: variance estimate of x tensor\n var_y: variance estimate of y tensor\n corr_xy: covariance estimate between x and y tensor\n nb: number of observations\n \"\"\"\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16\n # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed\n if var_x.dtype == torch.float16 and var_x.device == torch.device(\"cpu\"):\n var_x = var_x.bfloat16()\n var_y = var_y.bfloat16()\n\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n\n\ndef pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute pearson correlation coefficient.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n\n Example (single output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson_corrcoef(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson_corrcoef(preds, target)\n tensor([1., 1.])\n \"\"\"\n d = preds.shape[1] if preds.ndim == 2 else 1\n _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)\n mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()\n var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()\n _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(\n preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]\n )\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)\n", "path": "src/torchmetrics/functional/regression/pearson.py"}]}
| 2,128 | 244 |
gh_patches_debug_590
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1838
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.96
On the docket:
+ [x] PEX_EXTRA_SYS_PATH propagation can break subprocesses run against other venvs. #1836
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.95"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.95"
+__version__ = "2.1.96"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.95\"\n+__version__ = \"2.1.96\"\n", "issue": "Release 2.1.96\nOn the docket:\r\n+ [x] PEX_EXTRA_SYS_PATH propagation can break subprocesses run against other venvs. #1836 \r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.95\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.96\"\n", "path": "pex/version.py"}]}
| 350 | 96 |
gh_patches_debug_18853
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-6693
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improve gzipped-FITS files detection
### Describe the feature
Currently the file detection for gzipped-FITS files relies on checking if they are named in the `filename.fits.gz` format. However since this condition isn't a necessity for gzipped files, this approach misses those gzipped-FITS files which do not end with the `.gz` extension.
### Proposed solution
Use magic bytes / file signatures to check whether the file is gzipped or not. If it is, decompress and read its first few bytes to determine whether it's a FITS file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/io/file_tools.py`
Content:
```
1 """
2 This module provides a generic file reader.
3 """
4 import os
5 import re
6 import pathlib
7
8 try:
9 from . import _fits as fits
10 except ImportError:
11 fits = None
12
13 try:
14 from . import jp2
15 except ImportError:
16 jp2 = None
17
18 try:
19 from . import ana
20 except ImportError:
21 ana = None
22
23
24 __all__ = ['read_file', 'read_file_header', 'write_file', 'detect_filetype']
25
26 # File formats supported by SunPy
27 _known_extensions = {
28 ('fts', 'fits'): 'fits',
29 ('jp2', 'j2k', 'jpc', 'jpt'): 'jp2',
30 ('fz', 'f0'): 'ana'
31 }
32
33
34 # Define a dict which raises a custom error message if the value is None
35 class Readers(dict):
36 def __init__(self, *args):
37 dict.__init__(self, *args)
38
39 def __getitem__(self, key):
40 val = dict.__getitem__(self, key)
41 if val is None:
42 raise ReaderError(f"The Reader sunpy.io.{key} is not available, "
43 "please check that you have the required dependencies "
44 "installed.")
45 return val
46
47
48 # Map the readers
49 _readers = Readers({
50 'fits': fits,
51 'jp2': jp2,
52 'ana': ana
53 })
54
55
56 def read_file(filepath, filetype=None, **kwargs):
57 """
58 Automatically determine the filetype and read the file.
59
60 Parameters
61 ----------
62 filepath : `str`, path-like
63 The file to be read.
64 filetype : `str`, optional
65 Supported reader or extension to manually specify the filetype.
66 Supported readers are ('jp2', 'fits', 'ana')
67 memmap : `bool`, optional
68 Should memory mapping be used, i.e. keep data on disk rather than in RAM.
69 This is currently only supported by the FITS reader.
70 **kwargs : `dict`
71 All extra keyword arguments are passed to ``.read`` for the file specific reader.
72
73 Returns
74 -------
75 pairs : `list`
76 A list of (data, header) tuples.
77 """
78 # Convert Path objects to strings as the filepath can also be a URL
79 filepath = str(filepath)
80 # Use the explicitly passed filetype
81 if filetype is not None:
82 return _readers[filetype].read(filepath, **kwargs)
83
84 # Go through the known extensions
85 for extension, readername in _known_extensions.items():
86 if filepath.endswith(extension) or filetype in extension:
87 return _readers[readername].read(filepath, **kwargs)
88
89 # If filetype is not apparent from the extension, attempt to detect it
90 readername = _detect_filetype(filepath)
91 return _readers[readername].read(filepath, **kwargs)
92
93
94 def read_file_header(filepath, filetype=None, **kwargs):
95 """
96 Reads the header from a given file.
97
98 This should always return a instance of `~sunpy.io.header.FileHeader`.
99
100 Parameters
101 ----------
102 filepath : `str`
103 The file from which the header is to be read.
104 filetype : `str`
105 Supported reader or extension to manually specify the filetype.
106 Supported readers are ('jp2', 'fits').
107 **kwargs : `dict`
108 All extra keyword arguments are passed to ``.get_header`` for the file specific reader.
109
110 Returns
111 -------
112 headers : `list`
113 A list of headers.
114 """
115 # Use the explicitly passed filetype
116 if filetype is not None:
117 return _readers[filetype].get_header(filepath, **kwargs)
118
119 # Go through the known extensions
120 for extension, readername in _known_extensions.items():
121 if filepath.endswith(extension) or filetype in extension:
122 return _readers[readername].get_header(filepath, **kwargs)
123
124 # If filetype is not apparent from the extension, attempt to detect it
125 readername = _detect_filetype(filepath)
126 return _readers[readername].get_header(filepath, **kwargs)
127
128
129 def write_file(fname, data, header, filetype='auto', **kwargs):
130 """
131 Write a file from a data & header pair using one of the defined file types.
132
133 Parameters
134 ----------
135 fname : `str`
136 Filename of file to save.
137 data : `numpy.ndarray`
138 Data to save to a fits file.
139 header : `collections.OrderedDict`
140 Meta data to save with the data.
141 filetype : `str`, {'auto', 'fits', 'jp2'}, optional
142 Filetype to save if ``auto`` the filename extension will
143 be detected, else specify a supported file extension.
144 **kwargs : `dict`
145 All extra keyword arguments are passed to ``.write`` for the file specific reader.
146
147 Notes
148 -----
149 * This routine currently only supports saving a single HDU.
150 """
151 if filetype == 'auto':
152 # Get the extension without the leading dot
153 filetype = pathlib.Path(fname).suffix[1:]
154
155 for extension, readername in _known_extensions.items():
156 if filetype in extension:
157 return _readers[readername].write(fname, data, header, **kwargs)
158
159 # Nothing has matched, report an error
160 raise ValueError(f"The filetype provided ({filetype}) is not supported")
161
162
163 def _detect_filetype(filepath):
164 """
165 Attempts to determine the type of data contained in a file and returns
166 the filetype if the available readers exist within sunpy.io
167
168 Parameters
169 ----------
170 filepath : `str`
171 Where the file is.
172
173 Returns
174 -------
175 filetype : `str`
176 The type of file.
177 """
178
179 if detect_filetype(filepath) in _readers.keys():
180 return detect_filetype(filepath)
181
182 # Raise an error if an unsupported filetype is encountered
183 raise UnrecognizedFileTypeError("The requested filetype is not currently "
184 "supported by SunPy.")
185
186
187 def detect_filetype(filepath):
188 """
189 Attempts to determine the type of file a given filepath is.
190
191 Parameters
192 ----------
193 filepath : `str`
194 Where the file is.
195
196 Returns
197 -------
198 filetype : `str`
199 The type of file.
200 """
201
202 # Open file and read in first two lines
203 with open(filepath, 'rb') as fp:
204 line1 = fp.readline()
205 line2 = fp.readline()
206 # Some FITS files do not have line breaks at the end of header cards.
207 fp.seek(0)
208 first80 = fp.read(80)
209 # first 8 bytes of netcdf4/hdf5 to determine filetype as have same sequence
210 fp.seek(0)
211 first_8bytes = fp.read(8)
212 # first 4 bytes of CDF
213 fp.seek(0)
214 cdf_magic_number = fp.read(4).hex()
215
216 # FITS
217 # Check the extensions to see if it is a gzipped FITS file
218 filepath_rest_ext1, ext1 = os.path.splitext(filepath)
219 _, ext2 = os.path.splitext(filepath_rest_ext1)
220
221 gzip_extensions = [".gz"]
222 fits_extensions = [".fts", ".fit", ".fits"]
223 if (ext1 in gzip_extensions and ext2 in fits_extensions):
224 return 'fits'
225
226 # Check for "KEY_WORD =" at beginning of file
227 match = re.match(br"[A-Z0-9_]{0,8} *=", first80)
228 if match is not None:
229 return 'fits'
230
231 # JPEG 2000
232 # Checks for one of two signatures found at beginning of all JP2 files.
233 # Adapted from ExifTool
234 # [1] https://www.sno.phy.queensu.ca/~phil/exiftool/
235 # [2] http://www.hlevkin.com/Standards/fcd15444-2.pdf
236 # [3] http://www.hlevkin.com/Standards/fcd15444-1.pdf
237 jp2_signatures = [b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a",
238 b"\x00\x00\x00\x0cjP\x1a\x1a\x0d\x0a\x87\x0a"]
239 for sig in jp2_signatures:
240 if line1 + line2 == sig:
241 return 'jp2'
242
243 # netcdf4 and hdf5 files
244 if first_8bytes == b'\x89HDF\r\n\x1a\n':
245 return 'hdf5'
246
247 if cdf_magic_number in ['cdf30001', 'cdf26002', '0000ffff']:
248 return 'cdf'
249
250 # Raise an error if an unsupported filetype is encountered
251 raise UnrecognizedFileTypeError("The requested filetype is not currently "
252 "supported by SunPy.")
253
254
255 class UnrecognizedFileTypeError(OSError):
256 """
257 Exception to raise when an unknown file type is encountered.
258 """
259
260
261 class ReaderError(ImportError):
262 """
263 Exception to raise when a reader errors.
264 """
265
266
267 class InvalidJPEG2000FileExtension(OSError):
268 """
269 Exception to raise when an invalid JPEG2000 file type is encountered.
270 """
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/io/file_tools.py b/sunpy/io/file_tools.py
--- a/sunpy/io/file_tools.py
+++ b/sunpy/io/file_tools.py
@@ -1,8 +1,8 @@
"""
This module provides a generic file reader.
"""
-import os
import re
+import gzip
import pathlib
try:
@@ -214,14 +214,11 @@
cdf_magic_number = fp.read(4).hex()
# FITS
- # Check the extensions to see if it is a gzipped FITS file
- filepath_rest_ext1, ext1 = os.path.splitext(filepath)
- _, ext2 = os.path.splitext(filepath_rest_ext1)
-
- gzip_extensions = [".gz"]
- fits_extensions = [".fts", ".fit", ".fits"]
- if (ext1 in gzip_extensions and ext2 in fits_extensions):
- return 'fits'
+ # Checks for gzip signature.
+ # If found, decompresses first few bytes and checks for FITS
+ if first80[:3] == b"\x1f\x8b\x08":
+ with gzip.open(filepath, 'rb') as fp:
+ first80 = fp.read(80)
# Check for "KEY_WORD =" at beginning of file
match = re.match(br"[A-Z0-9_]{0,8} *=", first80)
|
{"golden_diff": "diff --git a/sunpy/io/file_tools.py b/sunpy/io/file_tools.py\n--- a/sunpy/io/file_tools.py\n+++ b/sunpy/io/file_tools.py\n@@ -1,8 +1,8 @@\n \"\"\"\n This module provides a generic file reader.\n \"\"\"\n-import os\n import re\n+import gzip\n import pathlib\n \n try:\n@@ -214,14 +214,11 @@\n cdf_magic_number = fp.read(4).hex()\n \n # FITS\n- # Check the extensions to see if it is a gzipped FITS file\n- filepath_rest_ext1, ext1 = os.path.splitext(filepath)\n- _, ext2 = os.path.splitext(filepath_rest_ext1)\n-\n- gzip_extensions = [\".gz\"]\n- fits_extensions = [\".fts\", \".fit\", \".fits\"]\n- if (ext1 in gzip_extensions and ext2 in fits_extensions):\n- return 'fits'\n+ # Checks for gzip signature.\n+ # If found, decompresses first few bytes and checks for FITS\n+ if first80[:3] == b\"\\x1f\\x8b\\x08\":\n+ with gzip.open(filepath, 'rb') as fp:\n+ first80 = fp.read(80)\n \n # Check for \"KEY_WORD =\" at beginning of file\n match = re.match(br\"[A-Z0-9_]{0,8} *=\", first80)\n", "issue": "improve gzipped-FITS files detection\n### Describe the feature\n\nCurrently the file detection for gzipped-FITS files relies on checking if they are named in the `filename.fits.gz` format. However since this condition isn't a necessity for gzipped files, this approach misses those gzipped-FITS files which do not end with the `.gz` extension.\n\n### Proposed solution\n\nUse magic bytes / file signatures to check whether the file is gzipped or not. If it is, decompress and read its first few bytes to determine whether it's a FITS file.\n", "before_files": [{"content": "\"\"\"\nThis module provides a generic file reader.\n\"\"\"\nimport os\nimport re\nimport pathlib\n\ntry:\n from . import _fits as fits\nexcept ImportError:\n fits = None\n\ntry:\n from . import jp2\nexcept ImportError:\n jp2 = None\n\ntry:\n from . import ana\nexcept ImportError:\n ana = None\n\n\n__all__ = ['read_file', 'read_file_header', 'write_file', 'detect_filetype']\n\n# File formats supported by SunPy\n_known_extensions = {\n ('fts', 'fits'): 'fits',\n ('jp2', 'j2k', 'jpc', 'jpt'): 'jp2',\n ('fz', 'f0'): 'ana'\n}\n\n\n# Define a dict which raises a custom error message if the value is None\nclass Readers(dict):\n def __init__(self, *args):\n dict.__init__(self, *args)\n\n def __getitem__(self, key):\n val = dict.__getitem__(self, key)\n if val is None:\n raise ReaderError(f\"The Reader sunpy.io.{key} is not available, \"\n \"please check that you have the required dependencies \"\n \"installed.\")\n return val\n\n\n# Map the readers\n_readers = Readers({\n 'fits': fits,\n 'jp2': jp2,\n 'ana': ana\n})\n\n\ndef read_file(filepath, filetype=None, **kwargs):\n \"\"\"\n Automatically determine the filetype and read the file.\n\n Parameters\n ----------\n filepath : `str`, path-like\n The file to be read.\n filetype : `str`, optional\n Supported reader or extension to manually specify the filetype.\n Supported readers are ('jp2', 'fits', 'ana')\n memmap : `bool`, optional\n Should memory mapping be used, i.e. keep data on disk rather than in RAM.\n This is currently only supported by the FITS reader.\n **kwargs : `dict`\n All extra keyword arguments are passed to ``.read`` for the file specific reader.\n\n Returns\n -------\n pairs : `list`\n A list of (data, header) tuples.\n \"\"\"\n # Convert Path objects to strings as the filepath can also be a URL\n filepath = str(filepath)\n # Use the explicitly passed filetype\n if filetype is not None:\n return _readers[filetype].read(filepath, **kwargs)\n\n # Go through the known extensions\n for extension, readername in _known_extensions.items():\n if filepath.endswith(extension) or filetype in extension:\n return _readers[readername].read(filepath, **kwargs)\n\n # If filetype is not apparent from the extension, attempt to detect it\n readername = _detect_filetype(filepath)\n return _readers[readername].read(filepath, **kwargs)\n\n\ndef read_file_header(filepath, filetype=None, **kwargs):\n \"\"\"\n Reads the header from a given file.\n\n This should always return a instance of `~sunpy.io.header.FileHeader`.\n\n Parameters\n ----------\n filepath : `str`\n The file from which the header is to be read.\n filetype : `str`\n Supported reader or extension to manually specify the filetype.\n Supported readers are ('jp2', 'fits').\n **kwargs : `dict`\n All extra keyword arguments are passed to ``.get_header`` for the file specific reader.\n\n Returns\n -------\n headers : `list`\n A list of headers.\n \"\"\"\n # Use the explicitly passed filetype\n if filetype is not None:\n return _readers[filetype].get_header(filepath, **kwargs)\n\n # Go through the known extensions\n for extension, readername in _known_extensions.items():\n if filepath.endswith(extension) or filetype in extension:\n return _readers[readername].get_header(filepath, **kwargs)\n\n # If filetype is not apparent from the extension, attempt to detect it\n readername = _detect_filetype(filepath)\n return _readers[readername].get_header(filepath, **kwargs)\n\n\ndef write_file(fname, data, header, filetype='auto', **kwargs):\n \"\"\"\n Write a file from a data & header pair using one of the defined file types.\n\n Parameters\n ----------\n fname : `str`\n Filename of file to save.\n data : `numpy.ndarray`\n Data to save to a fits file.\n header : `collections.OrderedDict`\n Meta data to save with the data.\n filetype : `str`, {'auto', 'fits', 'jp2'}, optional\n Filetype to save if ``auto`` the filename extension will\n be detected, else specify a supported file extension.\n **kwargs : `dict`\n All extra keyword arguments are passed to ``.write`` for the file specific reader.\n\n Notes\n -----\n * This routine currently only supports saving a single HDU.\n \"\"\"\n if filetype == 'auto':\n # Get the extension without the leading dot\n filetype = pathlib.Path(fname).suffix[1:]\n\n for extension, readername in _known_extensions.items():\n if filetype in extension:\n return _readers[readername].write(fname, data, header, **kwargs)\n\n # Nothing has matched, report an error\n raise ValueError(f\"The filetype provided ({filetype}) is not supported\")\n\n\ndef _detect_filetype(filepath):\n \"\"\"\n Attempts to determine the type of data contained in a file and returns\n the filetype if the available readers exist within sunpy.io\n\n Parameters\n ----------\n filepath : `str`\n Where the file is.\n\n Returns\n -------\n filetype : `str`\n The type of file.\n \"\"\"\n\n if detect_filetype(filepath) in _readers.keys():\n return detect_filetype(filepath)\n\n # Raise an error if an unsupported filetype is encountered\n raise UnrecognizedFileTypeError(\"The requested filetype is not currently \"\n \"supported by SunPy.\")\n\n\ndef detect_filetype(filepath):\n \"\"\"\n Attempts to determine the type of file a given filepath is.\n\n Parameters\n ----------\n filepath : `str`\n Where the file is.\n\n Returns\n -------\n filetype : `str`\n The type of file.\n \"\"\"\n\n # Open file and read in first two lines\n with open(filepath, 'rb') as fp:\n line1 = fp.readline()\n line2 = fp.readline()\n # Some FITS files do not have line breaks at the end of header cards.\n fp.seek(0)\n first80 = fp.read(80)\n # first 8 bytes of netcdf4/hdf5 to determine filetype as have same sequence\n fp.seek(0)\n first_8bytes = fp.read(8)\n # first 4 bytes of CDF\n fp.seek(0)\n cdf_magic_number = fp.read(4).hex()\n\n # FITS\n # Check the extensions to see if it is a gzipped FITS file\n filepath_rest_ext1, ext1 = os.path.splitext(filepath)\n _, ext2 = os.path.splitext(filepath_rest_ext1)\n\n gzip_extensions = [\".gz\"]\n fits_extensions = [\".fts\", \".fit\", \".fits\"]\n if (ext1 in gzip_extensions and ext2 in fits_extensions):\n return 'fits'\n\n # Check for \"KEY_WORD =\" at beginning of file\n match = re.match(br\"[A-Z0-9_]{0,8} *=\", first80)\n if match is not None:\n return 'fits'\n\n # JPEG 2000\n # Checks for one of two signatures found at beginning of all JP2 files.\n # Adapted from ExifTool\n # [1] https://www.sno.phy.queensu.ca/~phil/exiftool/\n # [2] http://www.hlevkin.com/Standards/fcd15444-2.pdf\n # [3] http://www.hlevkin.com/Standards/fcd15444-1.pdf\n jp2_signatures = [b\"\\x00\\x00\\x00\\x0cjP \\x0d\\x0a\\x87\\x0a\",\n b\"\\x00\\x00\\x00\\x0cjP\\x1a\\x1a\\x0d\\x0a\\x87\\x0a\"]\n for sig in jp2_signatures:\n if line1 + line2 == sig:\n return 'jp2'\n\n # netcdf4 and hdf5 files\n if first_8bytes == b'\\x89HDF\\r\\n\\x1a\\n':\n return 'hdf5'\n\n if cdf_magic_number in ['cdf30001', 'cdf26002', '0000ffff']:\n return 'cdf'\n\n # Raise an error if an unsupported filetype is encountered\n raise UnrecognizedFileTypeError(\"The requested filetype is not currently \"\n \"supported by SunPy.\")\n\n\nclass UnrecognizedFileTypeError(OSError):\n \"\"\"\n Exception to raise when an unknown file type is encountered.\n \"\"\"\n\n\nclass ReaderError(ImportError):\n \"\"\"\n Exception to raise when a reader errors.\n \"\"\"\n\n\nclass InvalidJPEG2000FileExtension(OSError):\n \"\"\"\n Exception to raise when an invalid JPEG2000 file type is encountered.\n \"\"\"\n", "path": "sunpy/io/file_tools.py"}], "after_files": [{"content": "\"\"\"\nThis module provides a generic file reader.\n\"\"\"\nimport re\nimport gzip\nimport pathlib\n\ntry:\n from . import _fits as fits\nexcept ImportError:\n fits = None\n\ntry:\n from . import jp2\nexcept ImportError:\n jp2 = None\n\ntry:\n from . import ana\nexcept ImportError:\n ana = None\n\n\n__all__ = ['read_file', 'read_file_header', 'write_file', 'detect_filetype']\n\n# File formats supported by SunPy\n_known_extensions = {\n ('fts', 'fits'): 'fits',\n ('jp2', 'j2k', 'jpc', 'jpt'): 'jp2',\n ('fz', 'f0'): 'ana'\n}\n\n\n# Define a dict which raises a custom error message if the value is None\nclass Readers(dict):\n def __init__(self, *args):\n dict.__init__(self, *args)\n\n def __getitem__(self, key):\n val = dict.__getitem__(self, key)\n if val is None:\n raise ReaderError(f\"The Reader sunpy.io.{key} is not available, \"\n \"please check that you have the required dependencies \"\n \"installed.\")\n return val\n\n\n# Map the readers\n_readers = Readers({\n 'fits': fits,\n 'jp2': jp2,\n 'ana': ana\n})\n\n\ndef read_file(filepath, filetype=None, **kwargs):\n \"\"\"\n Automatically determine the filetype and read the file.\n\n Parameters\n ----------\n filepath : `str`, path-like\n The file to be read.\n filetype : `str`, optional\n Supported reader or extension to manually specify the filetype.\n Supported readers are ('jp2', 'fits', 'ana')\n memmap : `bool`, optional\n Should memory mapping be used, i.e. keep data on disk rather than in RAM.\n This is currently only supported by the FITS reader.\n **kwargs : `dict`\n All extra keyword arguments are passed to ``.read`` for the file specific reader.\n\n Returns\n -------\n pairs : `list`\n A list of (data, header) tuples.\n \"\"\"\n # Convert Path objects to strings as the filepath can also be a URL\n filepath = str(filepath)\n # Use the explicitly passed filetype\n if filetype is not None:\n return _readers[filetype].read(filepath, **kwargs)\n\n # Go through the known extensions\n for extension, readername in _known_extensions.items():\n if filepath.endswith(extension) or filetype in extension:\n return _readers[readername].read(filepath, **kwargs)\n\n # If filetype is not apparent from the extension, attempt to detect it\n readername = _detect_filetype(filepath)\n return _readers[readername].read(filepath, **kwargs)\n\n\ndef read_file_header(filepath, filetype=None, **kwargs):\n \"\"\"\n Reads the header from a given file.\n\n This should always return a instance of `~sunpy.io.header.FileHeader`.\n\n Parameters\n ----------\n filepath : `str`\n The file from which the header is to be read.\n filetype : `str`\n Supported reader or extension to manually specify the filetype.\n Supported readers are ('jp2', 'fits').\n **kwargs : `dict`\n All extra keyword arguments are passed to ``.get_header`` for the file specific reader.\n\n Returns\n -------\n headers : `list`\n A list of headers.\n \"\"\"\n # Use the explicitly passed filetype\n if filetype is not None:\n return _readers[filetype].get_header(filepath, **kwargs)\n\n # Go through the known extensions\n for extension, readername in _known_extensions.items():\n if filepath.endswith(extension) or filetype in extension:\n return _readers[readername].get_header(filepath, **kwargs)\n\n # If filetype is not apparent from the extension, attempt to detect it\n readername = _detect_filetype(filepath)\n return _readers[readername].get_header(filepath, **kwargs)\n\n\ndef write_file(fname, data, header, filetype='auto', **kwargs):\n \"\"\"\n Write a file from a data & header pair using one of the defined file types.\n\n Parameters\n ----------\n fname : `str`\n Filename of file to save.\n data : `numpy.ndarray`\n Data to save to a fits file.\n header : `collections.OrderedDict`\n Meta data to save with the data.\n filetype : `str`, {'auto', 'fits', 'jp2'}, optional\n Filetype to save if ``auto`` the filename extension will\n be detected, else specify a supported file extension.\n **kwargs : `dict`\n All extra keyword arguments are passed to ``.write`` for the file specific reader.\n\n Notes\n -----\n * This routine currently only supports saving a single HDU.\n \"\"\"\n if filetype == 'auto':\n # Get the extension without the leading dot\n filetype = pathlib.Path(fname).suffix[1:]\n\n for extension, readername in _known_extensions.items():\n if filetype in extension:\n return _readers[readername].write(fname, data, header, **kwargs)\n\n # Nothing has matched, report an error\n raise ValueError(f\"The filetype provided ({filetype}) is not supported\")\n\n\ndef _detect_filetype(filepath):\n \"\"\"\n Attempts to determine the type of data contained in a file and returns\n the filetype if the available readers exist within sunpy.io\n\n Parameters\n ----------\n filepath : `str`\n Where the file is.\n\n Returns\n -------\n filetype : `str`\n The type of file.\n \"\"\"\n\n if detect_filetype(filepath) in _readers.keys():\n return detect_filetype(filepath)\n\n # Raise an error if an unsupported filetype is encountered\n raise UnrecognizedFileTypeError(\"The requested filetype is not currently \"\n \"supported by SunPy.\")\n\n\ndef detect_filetype(filepath):\n \"\"\"\n Attempts to determine the type of file a given filepath is.\n\n Parameters\n ----------\n filepath : `str`\n Where the file is.\n\n Returns\n -------\n filetype : `str`\n The type of file.\n \"\"\"\n\n # Open file and read in first two lines\n with open(filepath, 'rb') as fp:\n line1 = fp.readline()\n line2 = fp.readline()\n # Some FITS files do not have line breaks at the end of header cards.\n fp.seek(0)\n first80 = fp.read(80)\n # first 8 bytes of netcdf4/hdf5 to determine filetype as have same sequence\n fp.seek(0)\n first_8bytes = fp.read(8)\n # first 4 bytes of CDF\n fp.seek(0)\n cdf_magic_number = fp.read(4).hex()\n\n # FITS\n # Checks for gzip signature.\n # If found, decompresses first few bytes and checks for FITS\n if first80[:3] == b\"\\x1f\\x8b\\x08\":\n with gzip.open(filepath, 'rb') as fp:\n first80 = fp.read(80)\n\n # Check for \"KEY_WORD =\" at beginning of file\n match = re.match(br\"[A-Z0-9_]{0,8} *=\", first80)\n if match is not None:\n return 'fits'\n\n # JPEG 2000\n # Checks for one of two signatures found at beginning of all JP2 files.\n # Adapted from ExifTool\n # [1] https://www.sno.phy.queensu.ca/~phil/exiftool/\n # [2] http://www.hlevkin.com/Standards/fcd15444-2.pdf\n # [3] http://www.hlevkin.com/Standards/fcd15444-1.pdf\n jp2_signatures = [b\"\\x00\\x00\\x00\\x0cjP \\x0d\\x0a\\x87\\x0a\",\n b\"\\x00\\x00\\x00\\x0cjP\\x1a\\x1a\\x0d\\x0a\\x87\\x0a\"]\n for sig in jp2_signatures:\n if line1 + line2 == sig:\n return 'jp2'\n\n # netcdf4 and hdf5 files\n if first_8bytes == b'\\x89HDF\\r\\n\\x1a\\n':\n return 'hdf5'\n\n if cdf_magic_number in ['cdf30001', 'cdf26002', '0000ffff']:\n return 'cdf'\n\n # Raise an error if an unsupported filetype is encountered\n raise UnrecognizedFileTypeError(\"The requested filetype is not currently \"\n \"supported by SunPy.\")\n\n\nclass UnrecognizedFileTypeError(OSError):\n \"\"\"\n Exception to raise when an unknown file type is encountered.\n \"\"\"\n\n\nclass ReaderError(ImportError):\n \"\"\"\n Exception to raise when a reader errors.\n \"\"\"\n\n\nclass InvalidJPEG2000FileExtension(OSError):\n \"\"\"\n Exception to raise when an invalid JPEG2000 file type is encountered.\n \"\"\"\n", "path": "sunpy/io/file_tools.py"}]}
| 3,100 | 316 |
gh_patches_debug_27395
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1060
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stdin for templates is broken
As reported here https://github.com/awslabs/aws-cfn-lint-visual-studio-code/issues/47 cfn-lint has broken the ability to use stdin to provide templates.
The result is the help documentation is provided
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/core.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import logging
18 import os
19 from jsonschema.exceptions import ValidationError
20 from cfnlint import RulesCollection
21 import cfnlint.config
22 import cfnlint.formatters
23 import cfnlint.decode
24 import cfnlint.maintenance
25 from cfnlint.helpers import REGIONS
26
27
28 LOGGER = logging.getLogger('cfnlint')
29 DEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')
30
31
32 class CfnLintExitException(Exception):
33 """Generic exception used when the cli should exit"""
34 def __init__(self, msg=None, exit_code=1):
35 if msg is None:
36 msg = 'process failed with exit code %s' % exit_code
37 super(CfnLintExitException, self).__init__(msg)
38 self.exit_code = exit_code
39
40
41 class InvalidRegionException(CfnLintExitException):
42 """When an unsupported/invalid region is supplied"""
43
44
45 class UnexpectedRuleException(CfnLintExitException):
46 """When processing a rule fails in an unexpected way"""
47
48
49 def run_cli(filename, template, rules, regions, override_spec):
50 """Process args and run"""
51
52 if override_spec:
53 cfnlint.helpers.override_specs(override_spec)
54
55 return run_checks(filename, template, rules, regions)
56
57
58 def get_exit_code(matches):
59 """ Determine exit code """
60 exit_code = 0
61 for match in matches:
62 if match.rule.id[0] == 'I':
63 exit_code = exit_code | 8
64 elif match.rule.id[0] == 'W':
65 exit_code = exit_code | 4
66 elif match.rule.id[0] == 'E':
67 exit_code = exit_code | 2
68
69 return exit_code
70
71
72 def get_formatter(fmt):
73 """ Get Formatter"""
74 formatter = {}
75 if fmt:
76 if fmt == 'quiet':
77 formatter = cfnlint.formatters.QuietFormatter()
78 elif fmt == 'parseable':
79 # pylint: disable=bad-option-value
80 formatter = cfnlint.formatters.ParseableFormatter()
81 elif fmt == 'json':
82 formatter = cfnlint.formatters.JsonFormatter()
83 else:
84 formatter = cfnlint.formatters.Formatter()
85
86 return formatter
87
88
89 def get_rules(rulesdir, ignore_rules, include_rules, configure_rules=None, include_experimental=False):
90 """Get rules"""
91 rules = RulesCollection(ignore_rules, include_rules, configure_rules, include_experimental)
92 rules_dirs = [DEFAULT_RULESDIR] + rulesdir
93 try:
94 for rules_dir in rules_dirs:
95 rules.create_from_directory(rules_dir)
96 except OSError as e:
97 raise UnexpectedRuleException('Tried to append rules but got an error: %s' % str(e), 1)
98 return rules
99
100
101 def configure_logging(debug_logging):
102 """ Backwards compatibility for integrators """
103 LOGGER.info('Update your integrations to use "cfnlint.config.configure_logging" instead')
104 cfnlint.config.configure_logging(debug_logging, False)
105
106
107 def get_args_filenames(cli_args):
108 """ Get Template Configuration items and set them as default values"""
109 try:
110 config = cfnlint.config.ConfigMixIn(cli_args)
111 except ValidationError as e:
112 LOGGER.error('Error parsing config file: %s', str(e))
113 exit(1)
114
115 fmt = config.format
116 formatter = get_formatter(fmt)
117
118 if config.update_specs:
119 cfnlint.maintenance.update_resource_specs()
120 exit(0)
121
122 if config.update_documentation:
123 # Get ALL rules (ignore the CLI settings))
124 documentation_rules = cfnlint.core.get_rules([], [], ['I', 'E', 'W'], {}, True)
125 cfnlint.maintenance.update_documentation(documentation_rules)
126 exit(0)
127
128 if config.update_iam_policies:
129 cfnlint.maintenance.update_iam_policies()
130 exit(0)
131
132 if config.listrules:
133 rules = cfnlint.core.get_rules(
134 config.append_rules,
135 config.ignore_checks,
136 config.include_checks,
137 config.configure_rules
138 )
139 print(rules)
140 exit(0)
141
142 if not config.templates:
143 # Not specified, print the help
144 config.parser.print_help()
145 exit(1)
146
147 return(config, config.templates, formatter)
148
149
150 def get_template_rules(filename, args):
151 """ Get Template Configuration items and set them as default values"""
152
153 (template, matches) = cfnlint.decode.decode(filename, args.ignore_bad_template)
154
155 if matches:
156 return(template, [], matches)
157
158 args.template_args = template
159
160 rules = cfnlint.core.get_rules(
161 args.append_rules,
162 args.ignore_checks,
163 args.include_checks,
164 args.configure_rules,
165 args.include_experimental,
166 )
167
168 return(template, rules, [])
169
170
171 def run_checks(filename, template, rules, regions):
172 """Run Checks against the template"""
173 if regions:
174 if not set(regions).issubset(set(REGIONS)):
175 unsupported_regions = list(set(regions).difference(set(REGIONS)))
176 msg = 'Regions %s are unsupported. Supported regions are %s' % (unsupported_regions, REGIONS)
177 raise InvalidRegionException(msg, 32)
178
179 matches = []
180
181 runner = cfnlint.Runner(rules, filename, template, regions)
182 matches.extend(runner.transform())
183 # Only do rule analysis if Transform was successful
184 if not matches:
185 try:
186 matches.extend(runner.run())
187 except Exception as err: # pylint: disable=W0703
188 msg = 'Tried to process rules on file %s but got an error: %s' % (filename, str(err))
189 UnexpectedRuleException(msg, 1)
190 matches.sort(key=lambda x: (x.filename, x.linenumber, x.rule.id))
191
192 return(matches)
193
```
Path: `src/cfnlint/rules/templates/LimitSize.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import os
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20 from cfnlint.helpers import LIMITS
21 try: # pragma: no cover
22 from pathlib import Path
23 except ImportError: # pragma: no cover
24 from pathlib2 import Path
25
26
27
28 class LimitSize(CloudFormationLintRule):
29 """Check Template Size"""
30 id = 'E1002'
31 shortdesc = 'Template size limit'
32 description = 'Check the size of the template is less than the upper limit'
33 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'
34 tags = ['limits']
35
36 def match(self, cfn):
37 """Basic Matching"""
38 matches = []
39
40 # Check number of resources against the defined limit
41 filename = cfn.filename
42
43 # Only check if the file exists. The template could be passed in using stdIn
44 if Path(filename).is_file():
45 statinfo = os.stat(filename)
46 if statinfo.st_size > LIMITS['template']['body']:
47 message = 'The template file size ({0} bytes) exceeds the limit ({1} bytes)'
48 matches.append(RuleMatch(['Template'], message.format(statinfo.st_size, LIMITS['template']['body'])))
49
50 return matches
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/core.py b/src/cfnlint/core.py
--- a/src/cfnlint/core.py
+++ b/src/cfnlint/core.py
@@ -16,6 +16,7 @@
"""
import logging
import os
+import sys
from jsonschema.exceptions import ValidationError
from cfnlint import RulesCollection
import cfnlint.config
@@ -139,6 +140,9 @@
print(rules)
exit(0)
+ if not sys.stdin.isatty():
+ return(config, [None], formatter)
+
if not config.templates:
# Not specified, print the help
config.parser.print_help()
diff --git a/src/cfnlint/rules/templates/LimitSize.py b/src/cfnlint/rules/templates/LimitSize.py
--- a/src/cfnlint/rules/templates/LimitSize.py
+++ b/src/cfnlint/rules/templates/LimitSize.py
@@ -41,10 +41,11 @@
filename = cfn.filename
# Only check if the file exists. The template could be passed in using stdIn
- if Path(filename).is_file():
- statinfo = os.stat(filename)
- if statinfo.st_size > LIMITS['template']['body']:
- message = 'The template file size ({0} bytes) exceeds the limit ({1} bytes)'
- matches.append(RuleMatch(['Template'], message.format(statinfo.st_size, LIMITS['template']['body'])))
+ if filename:
+ if Path(filename).is_file():
+ statinfo = os.stat(filename)
+ if statinfo.st_size > LIMITS['template']['body']:
+ message = 'The template file size ({0} bytes) exceeds the limit ({1} bytes)'
+ matches.append(RuleMatch(['Template'], message.format(statinfo.st_size, LIMITS['template']['body'])))
return matches
|
{"golden_diff": "diff --git a/src/cfnlint/core.py b/src/cfnlint/core.py\n--- a/src/cfnlint/core.py\n+++ b/src/cfnlint/core.py\n@@ -16,6 +16,7 @@\n \"\"\"\n import logging\n import os\n+import sys\n from jsonschema.exceptions import ValidationError\n from cfnlint import RulesCollection\n import cfnlint.config\n@@ -139,6 +140,9 @@\n print(rules)\n exit(0)\n \n+ if not sys.stdin.isatty():\n+ return(config, [None], formatter)\n+\n if not config.templates:\n # Not specified, print the help\n config.parser.print_help()\ndiff --git a/src/cfnlint/rules/templates/LimitSize.py b/src/cfnlint/rules/templates/LimitSize.py\n--- a/src/cfnlint/rules/templates/LimitSize.py\n+++ b/src/cfnlint/rules/templates/LimitSize.py\n@@ -41,10 +41,11 @@\n filename = cfn.filename\n \n # Only check if the file exists. The template could be passed in using stdIn\n- if Path(filename).is_file():\n- statinfo = os.stat(filename)\n- if statinfo.st_size > LIMITS['template']['body']:\n- message = 'The template file size ({0} bytes) exceeds the limit ({1} bytes)'\n- matches.append(RuleMatch(['Template'], message.format(statinfo.st_size, LIMITS['template']['body'])))\n+ if filename:\n+ if Path(filename).is_file():\n+ statinfo = os.stat(filename)\n+ if statinfo.st_size > LIMITS['template']['body']:\n+ message = 'The template file size ({0} bytes) exceeds the limit ({1} bytes)'\n+ matches.append(RuleMatch(['Template'], message.format(statinfo.st_size, LIMITS['template']['body'])))\n \n return matches\n", "issue": "Stdin for templates is broken\nAs reported here https://github.com/awslabs/aws-cfn-lint-visual-studio-code/issues/47 cfn-lint has broken the ability to use stdin to provide templates. \r\n\r\nThe result is the help documentation is provided\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport logging\nimport os\nfrom jsonschema.exceptions import ValidationError\nfrom cfnlint import RulesCollection\nimport cfnlint.config\nimport cfnlint.formatters\nimport cfnlint.decode\nimport cfnlint.maintenance\nfrom cfnlint.helpers import REGIONS\n\n\nLOGGER = logging.getLogger('cfnlint')\nDEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')\n\n\nclass CfnLintExitException(Exception):\n \"\"\"Generic exception used when the cli should exit\"\"\"\n def __init__(self, msg=None, exit_code=1):\n if msg is None:\n msg = 'process failed with exit code %s' % exit_code\n super(CfnLintExitException, self).__init__(msg)\n self.exit_code = exit_code\n\n\nclass InvalidRegionException(CfnLintExitException):\n \"\"\"When an unsupported/invalid region is supplied\"\"\"\n\n\nclass UnexpectedRuleException(CfnLintExitException):\n \"\"\"When processing a rule fails in an unexpected way\"\"\"\n\n\ndef run_cli(filename, template, rules, regions, override_spec):\n \"\"\"Process args and run\"\"\"\n\n if override_spec:\n cfnlint.helpers.override_specs(override_spec)\n\n return run_checks(filename, template, rules, regions)\n\n\ndef get_exit_code(matches):\n \"\"\" Determine exit code \"\"\"\n exit_code = 0\n for match in matches:\n if match.rule.id[0] == 'I':\n exit_code = exit_code | 8\n elif match.rule.id[0] == 'W':\n exit_code = exit_code | 4\n elif match.rule.id[0] == 'E':\n exit_code = exit_code | 2\n\n return exit_code\n\n\ndef get_formatter(fmt):\n \"\"\" Get Formatter\"\"\"\n formatter = {}\n if fmt:\n if fmt == 'quiet':\n formatter = cfnlint.formatters.QuietFormatter()\n elif fmt == 'parseable':\n # pylint: disable=bad-option-value\n formatter = cfnlint.formatters.ParseableFormatter()\n elif fmt == 'json':\n formatter = cfnlint.formatters.JsonFormatter()\n else:\n formatter = cfnlint.formatters.Formatter()\n\n return formatter\n\n\ndef get_rules(rulesdir, ignore_rules, include_rules, configure_rules=None, include_experimental=False):\n \"\"\"Get rules\"\"\"\n rules = RulesCollection(ignore_rules, include_rules, configure_rules, include_experimental)\n rules_dirs = [DEFAULT_RULESDIR] + rulesdir\n try:\n for rules_dir in rules_dirs:\n rules.create_from_directory(rules_dir)\n except OSError as e:\n raise UnexpectedRuleException('Tried to append rules but got an error: %s' % str(e), 1)\n return rules\n\n\ndef configure_logging(debug_logging):\n \"\"\" Backwards compatibility for integrators \"\"\"\n LOGGER.info('Update your integrations to use \"cfnlint.config.configure_logging\" instead')\n cfnlint.config.configure_logging(debug_logging, False)\n\n\ndef get_args_filenames(cli_args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n try:\n config = cfnlint.config.ConfigMixIn(cli_args)\n except ValidationError as e:\n LOGGER.error('Error parsing config file: %s', str(e))\n exit(1)\n\n fmt = config.format\n formatter = get_formatter(fmt)\n\n if config.update_specs:\n cfnlint.maintenance.update_resource_specs()\n exit(0)\n\n if config.update_documentation:\n # Get ALL rules (ignore the CLI settings))\n documentation_rules = cfnlint.core.get_rules([], [], ['I', 'E', 'W'], {}, True)\n cfnlint.maintenance.update_documentation(documentation_rules)\n exit(0)\n\n if config.update_iam_policies:\n cfnlint.maintenance.update_iam_policies()\n exit(0)\n\n if config.listrules:\n rules = cfnlint.core.get_rules(\n config.append_rules,\n config.ignore_checks,\n config.include_checks,\n config.configure_rules\n )\n print(rules)\n exit(0)\n\n if not config.templates:\n # Not specified, print the help\n config.parser.print_help()\n exit(1)\n\n return(config, config.templates, formatter)\n\n\ndef get_template_rules(filename, args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n\n (template, matches) = cfnlint.decode.decode(filename, args.ignore_bad_template)\n\n if matches:\n return(template, [], matches)\n\n args.template_args = template\n\n rules = cfnlint.core.get_rules(\n args.append_rules,\n args.ignore_checks,\n args.include_checks,\n args.configure_rules,\n args.include_experimental,\n )\n\n return(template, rules, [])\n\n\ndef run_checks(filename, template, rules, regions):\n \"\"\"Run Checks against the template\"\"\"\n if regions:\n if not set(regions).issubset(set(REGIONS)):\n unsupported_regions = list(set(regions).difference(set(REGIONS)))\n msg = 'Regions %s are unsupported. Supported regions are %s' % (unsupported_regions, REGIONS)\n raise InvalidRegionException(msg, 32)\n\n matches = []\n\n runner = cfnlint.Runner(rules, filename, template, regions)\n matches.extend(runner.transform())\n # Only do rule analysis if Transform was successful\n if not matches:\n try:\n matches.extend(runner.run())\n except Exception as err: # pylint: disable=W0703\n msg = 'Tried to process rules on file %s but got an error: %s' % (filename, str(err))\n UnexpectedRuleException(msg, 1)\n matches.sort(key=lambda x: (x.filename, x.linenumber, x.rule.id))\n\n return(matches)\n", "path": "src/cfnlint/core.py"}, {"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport os\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nfrom cfnlint.helpers import LIMITS\ntry: # pragma: no cover\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n from pathlib2 import Path\n\n\n\nclass LimitSize(CloudFormationLintRule):\n \"\"\"Check Template Size\"\"\"\n id = 'E1002'\n shortdesc = 'Template size limit'\n description = 'Check the size of the template is less than the upper limit'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'\n tags = ['limits']\n\n def match(self, cfn):\n \"\"\"Basic Matching\"\"\"\n matches = []\n\n # Check number of resources against the defined limit\n filename = cfn.filename\n\n # Only check if the file exists. The template could be passed in using stdIn\n if Path(filename).is_file():\n statinfo = os.stat(filename)\n if statinfo.st_size > LIMITS['template']['body']:\n message = 'The template file size ({0} bytes) exceeds the limit ({1} bytes)'\n matches.append(RuleMatch(['Template'], message.format(statinfo.st_size, LIMITS['template']['body'])))\n\n return matches\n", "path": "src/cfnlint/rules/templates/LimitSize.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport logging\nimport os\nimport sys\nfrom jsonschema.exceptions import ValidationError\nfrom cfnlint import RulesCollection\nimport cfnlint.config\nimport cfnlint.formatters\nimport cfnlint.decode\nimport cfnlint.maintenance\nfrom cfnlint.helpers import REGIONS\n\n\nLOGGER = logging.getLogger('cfnlint')\nDEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')\n\n\nclass CfnLintExitException(Exception):\n \"\"\"Generic exception used when the cli should exit\"\"\"\n def __init__(self, msg=None, exit_code=1):\n if msg is None:\n msg = 'process failed with exit code %s' % exit_code\n super(CfnLintExitException, self).__init__(msg)\n self.exit_code = exit_code\n\n\nclass InvalidRegionException(CfnLintExitException):\n \"\"\"When an unsupported/invalid region is supplied\"\"\"\n\n\nclass UnexpectedRuleException(CfnLintExitException):\n \"\"\"When processing a rule fails in an unexpected way\"\"\"\n\n\ndef run_cli(filename, template, rules, regions, override_spec):\n \"\"\"Process args and run\"\"\"\n\n if override_spec:\n cfnlint.helpers.override_specs(override_spec)\n\n return run_checks(filename, template, rules, regions)\n\n\ndef get_exit_code(matches):\n \"\"\" Determine exit code \"\"\"\n exit_code = 0\n for match in matches:\n if match.rule.id[0] == 'I':\n exit_code = exit_code | 8\n elif match.rule.id[0] == 'W':\n exit_code = exit_code | 4\n elif match.rule.id[0] == 'E':\n exit_code = exit_code | 2\n\n return exit_code\n\n\ndef get_formatter(fmt):\n \"\"\" Get Formatter\"\"\"\n formatter = {}\n if fmt:\n if fmt == 'quiet':\n formatter = cfnlint.formatters.QuietFormatter()\n elif fmt == 'parseable':\n # pylint: disable=bad-option-value\n formatter = cfnlint.formatters.ParseableFormatter()\n elif fmt == 'json':\n formatter = cfnlint.formatters.JsonFormatter()\n else:\n formatter = cfnlint.formatters.Formatter()\n\n return formatter\n\n\ndef get_rules(rulesdir, ignore_rules, include_rules, configure_rules=None, include_experimental=False):\n \"\"\"Get rules\"\"\"\n rules = RulesCollection(ignore_rules, include_rules, configure_rules, include_experimental)\n rules_dirs = [DEFAULT_RULESDIR] + rulesdir\n try:\n for rules_dir in rules_dirs:\n rules.create_from_directory(rules_dir)\n except OSError as e:\n raise UnexpectedRuleException('Tried to append rules but got an error: %s' % str(e), 1)\n return rules\n\n\ndef configure_logging(debug_logging):\n \"\"\" Backwards compatibility for integrators \"\"\"\n LOGGER.info('Update your integrations to use \"cfnlint.config.configure_logging\" instead')\n cfnlint.config.configure_logging(debug_logging, False)\n\n\ndef get_args_filenames(cli_args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n try:\n config = cfnlint.config.ConfigMixIn(cli_args)\n except ValidationError as e:\n LOGGER.error('Error parsing config file: %s', str(e))\n exit(1)\n\n fmt = config.format\n formatter = get_formatter(fmt)\n\n if config.update_specs:\n cfnlint.maintenance.update_resource_specs()\n exit(0)\n\n if config.update_documentation:\n # Get ALL rules (ignore the CLI settings))\n documentation_rules = cfnlint.core.get_rules([], [], ['I', 'E', 'W'], {}, True)\n cfnlint.maintenance.update_documentation(documentation_rules)\n exit(0)\n\n if config.update_iam_policies:\n cfnlint.maintenance.update_iam_policies()\n exit(0)\n\n if config.listrules:\n rules = cfnlint.core.get_rules(\n config.append_rules,\n config.ignore_checks,\n config.include_checks,\n config.configure_rules\n )\n print(rules)\n exit(0)\n\n if not sys.stdin.isatty():\n return(config, [None], formatter)\n\n if not config.templates:\n # Not specified, print the help\n config.parser.print_help()\n exit(1)\n\n return(config, config.templates, formatter)\n\n\ndef get_template_rules(filename, args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n\n (template, matches) = cfnlint.decode.decode(filename, args.ignore_bad_template)\n\n if matches:\n return(template, [], matches)\n\n args.template_args = template\n\n rules = cfnlint.core.get_rules(\n args.append_rules,\n args.ignore_checks,\n args.include_checks,\n args.configure_rules,\n args.include_experimental,\n )\n\n return(template, rules, [])\n\n\ndef run_checks(filename, template, rules, regions):\n \"\"\"Run Checks against the template\"\"\"\n if regions:\n if not set(regions).issubset(set(REGIONS)):\n unsupported_regions = list(set(regions).difference(set(REGIONS)))\n msg = 'Regions %s are unsupported. Supported regions are %s' % (unsupported_regions, REGIONS)\n raise InvalidRegionException(msg, 32)\n\n matches = []\n\n runner = cfnlint.Runner(rules, filename, template, regions)\n matches.extend(runner.transform())\n # Only do rule analysis if Transform was successful\n if not matches:\n try:\n matches.extend(runner.run())\n except Exception as err: # pylint: disable=W0703\n msg = 'Tried to process rules on file %s but got an error: %s' % (filename, str(err))\n UnexpectedRuleException(msg, 1)\n matches.sort(key=lambda x: (x.filename, x.linenumber, x.rule.id))\n\n return(matches)\n", "path": "src/cfnlint/core.py"}, {"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport os\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nfrom cfnlint.helpers import LIMITS\ntry: # pragma: no cover\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n from pathlib2 import Path\n\n\n\nclass LimitSize(CloudFormationLintRule):\n \"\"\"Check Template Size\"\"\"\n id = 'E1002'\n shortdesc = 'Template size limit'\n description = 'Check the size of the template is less than the upper limit'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'\n tags = ['limits']\n\n def match(self, cfn):\n \"\"\"Basic Matching\"\"\"\n matches = []\n\n # Check number of resources against the defined limit\n filename = cfn.filename\n\n # Only check if the file exists. The template could be passed in using stdIn\n if filename:\n if Path(filename).is_file():\n statinfo = os.stat(filename)\n if statinfo.st_size > LIMITS['template']['body']:\n message = 'The template file size ({0} bytes) exceeds the limit ({1} bytes)'\n matches.append(RuleMatch(['Template'], message.format(statinfo.st_size, LIMITS['template']['body'])))\n\n return matches\n", "path": "src/cfnlint/rules/templates/LimitSize.py"}]}
| 2,839 | 407 |
gh_patches_debug_29330
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-4987
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
STS calls to use regional endpoints instead of global endpoint
Change STS api calls to use regional STS endpoints to gain the benefits of "reduce latency, build in redundancy, and increase session token validity" as documented at https://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region
This will also enable leveraging a VPC Endpoint for STS, if provisioned.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/credentials.py`
Content:
```
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Authentication utilities
16 """
17 from __future__ import absolute_import, division, print_function, unicode_literals
18
19 import os
20
21 from botocore.credentials import RefreshableCredentials
22 from botocore.session import get_session
23 from boto3 import Session
24
25 from c7n.version import version
26 from c7n.utils import get_retry
27
28
29 class SessionFactory(object):
30
31 def __init__(self, region, profile=None, assume_role=None, external_id=None):
32 self.region = region
33 self.profile = profile
34 self.assume_role = assume_role
35 self.external_id = external_id
36 self.user_agent_name = "CloudCustodian"
37 self.session_name = "CloudCustodian"
38 if 'C7N_SESSION_SUFFIX' in os.environ:
39 self.session_name = "%s@%s" % (
40 self.session_name, os.environ['C7N_SESSION_SUFFIX'])
41 self._subscribers = []
42
43 def _set_policy_name(self, name):
44 self.user_agent_name = ("CloudCustodian(%s)" % name).strip()
45
46 policy_name = property(None, _set_policy_name)
47
48 def __call__(self, assume=True, region=None):
49 if self.assume_role and assume:
50 session = Session(profile_name=self.profile)
51 session = assumed_session(
52 self.assume_role, self.session_name, session,
53 region or self.region, self.external_id)
54 else:
55 session = Session(
56 region_name=region or self.region, profile_name=self.profile)
57
58 return self.update(session)
59
60 def update(self, session):
61 session._session.user_agent_name = self.user_agent_name
62 session._session.user_agent_version = version
63
64 for s in self._subscribers:
65 s(session)
66
67 return session
68
69 def set_subscribers(self, subscribers):
70 self._subscribers = subscribers
71
72
73 def assumed_session(role_arn, session_name, session=None, region=None, external_id=None):
74 """STS Role assume a boto3.Session
75
76 With automatic credential renewal.
77
78 Args:
79 role_arn: iam role arn to assume
80 session_name: client session identifier
81 session: an optional extant session, note session is captured
82 in a function closure for renewing the sts assumed role.
83
84 :return: a boto3 session using the sts assumed role credentials
85
86 Notes: We have to poke at botocore internals a few times
87 """
88 if session is None:
89 session = Session()
90
91 retry = get_retry(('Throttling',))
92
93 def refresh():
94
95 parameters = {"RoleArn": role_arn, "RoleSessionName": session_name}
96
97 if external_id is not None:
98 parameters['ExternalId'] = external_id
99
100 credentials = retry(
101 session.client('sts').assume_role, **parameters)['Credentials']
102 return dict(
103 access_key=credentials['AccessKeyId'],
104 secret_key=credentials['SecretAccessKey'],
105 token=credentials['SessionToken'],
106 # Silly that we basically stringify so it can be parsed again
107 expiry_time=credentials['Expiration'].isoformat())
108
109 session_credentials = RefreshableCredentials.create_from_metadata(
110 metadata=refresh(),
111 refresh_using=refresh,
112 method='sts-assume-role')
113
114 # so dirty.. it hurts, no clean way to set this outside of the
115 # internals poke. There's some work upstream on making this nicer
116 # but its pretty baroque as well with upstream support.
117 # https://github.com/boto/boto3/issues/443
118 # https://github.com/boto/botocore/issues/761
119
120 s = get_session()
121 s._credentials = session_credentials
122 if region is None:
123 region = s.get_config_variable('region') or 'us-east-1'
124 s.set_config_variable('region', region)
125 return Session(botocore_session=s)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/c7n/credentials.py b/c7n/credentials.py
--- a/c7n/credentials.py
+++ b/c7n/credentials.py
@@ -26,6 +26,10 @@
from c7n.utils import get_retry
+# 0.8.45.1 compatibility with global only sts endpoints, out of caution, remove in 0.8.46.1
+USE_STS_GLOBAL = os.environ.get('C7N_USE_STS_GLOBAL', '').lower() in ('yes', 'true')
+
+
class SessionFactory(object):
def __init__(self, region, profile=None, assume_role=None, external_id=None):
@@ -98,7 +102,8 @@
parameters['ExternalId'] = external_id
credentials = retry(
- session.client('sts').assume_role, **parameters)['Credentials']
+ get_sts_client(
+ session, region).assume_role, **parameters)['Credentials']
return dict(
access_key=credentials['AccessKeyId'],
secret_key=credentials['SecretAccessKey'],
@@ -123,3 +128,20 @@
region = s.get_config_variable('region') or 'us-east-1'
s.set_config_variable('region', region)
return Session(botocore_session=s)
+
+
+def get_sts_client(session, region):
+ """Get the AWS STS endpoint specific for the given region.
+
+ Returns the global endpoint if region is not specified.
+
+ For the list of regional endpoints, see https://amzn.to/2ohJgtR
+ """
+ if region and not USE_STS_GLOBAL:
+ endpoint_url = "https://sts.{}.amazonaws.com".format(region)
+ region_name = region
+ else:
+ endpoint_url = "https://sts.amazonaws.com"
+ region_name = None
+ return session.client(
+ 'sts', endpoint_url=endpoint_url, region_name=region_name)
|
{"golden_diff": "diff --git a/c7n/credentials.py b/c7n/credentials.py\n--- a/c7n/credentials.py\n+++ b/c7n/credentials.py\n@@ -26,6 +26,10 @@\n from c7n.utils import get_retry\n \n \n+# 0.8.45.1 compatibility with global only sts endpoints, out of caution, remove in 0.8.46.1\n+USE_STS_GLOBAL = os.environ.get('C7N_USE_STS_GLOBAL', '').lower() in ('yes', 'true')\n+\n+\n class SessionFactory(object):\n \n def __init__(self, region, profile=None, assume_role=None, external_id=None):\n@@ -98,7 +102,8 @@\n parameters['ExternalId'] = external_id\n \n credentials = retry(\n- session.client('sts').assume_role, **parameters)['Credentials']\n+ get_sts_client(\n+ session, region).assume_role, **parameters)['Credentials']\n return dict(\n access_key=credentials['AccessKeyId'],\n secret_key=credentials['SecretAccessKey'],\n@@ -123,3 +128,20 @@\n region = s.get_config_variable('region') or 'us-east-1'\n s.set_config_variable('region', region)\n return Session(botocore_session=s)\n+\n+\n+def get_sts_client(session, region):\n+ \"\"\"Get the AWS STS endpoint specific for the given region.\n+\n+ Returns the global endpoint if region is not specified.\n+\n+ For the list of regional endpoints, see https://amzn.to/2ohJgtR\n+ \"\"\"\n+ if region and not USE_STS_GLOBAL:\n+ endpoint_url = \"https://sts.{}.amazonaws.com\".format(region)\n+ region_name = region\n+ else:\n+ endpoint_url = \"https://sts.amazonaws.com\"\n+ region_name = None\n+ return session.client(\n+ 'sts', endpoint_url=endpoint_url, region_name=region_name)\n", "issue": "STS calls to use regional endpoints instead of global endpoint\nChange STS api calls to use regional STS endpoints to gain the benefits of \"reduce latency, build in redundancy, and increase session token validity\" as documented at https://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region\r\n\r\nThis will also enable leveraging a VPC Endpoint for STS, if provisioned. \n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAuthentication utilities\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\nfrom botocore.credentials import RefreshableCredentials\nfrom botocore.session import get_session\nfrom boto3 import Session\n\nfrom c7n.version import version\nfrom c7n.utils import get_retry\n\n\nclass SessionFactory(object):\n\n def __init__(self, region, profile=None, assume_role=None, external_id=None):\n self.region = region\n self.profile = profile\n self.assume_role = assume_role\n self.external_id = external_id\n self.user_agent_name = \"CloudCustodian\"\n self.session_name = \"CloudCustodian\"\n if 'C7N_SESSION_SUFFIX' in os.environ:\n self.session_name = \"%s@%s\" % (\n self.session_name, os.environ['C7N_SESSION_SUFFIX'])\n self._subscribers = []\n\n def _set_policy_name(self, name):\n self.user_agent_name = (\"CloudCustodian(%s)\" % name).strip()\n\n policy_name = property(None, _set_policy_name)\n\n def __call__(self, assume=True, region=None):\n if self.assume_role and assume:\n session = Session(profile_name=self.profile)\n session = assumed_session(\n self.assume_role, self.session_name, session,\n region or self.region, self.external_id)\n else:\n session = Session(\n region_name=region or self.region, profile_name=self.profile)\n\n return self.update(session)\n\n def update(self, session):\n session._session.user_agent_name = self.user_agent_name\n session._session.user_agent_version = version\n\n for s in self._subscribers:\n s(session)\n\n return session\n\n def set_subscribers(self, subscribers):\n self._subscribers = subscribers\n\n\ndef assumed_session(role_arn, session_name, session=None, region=None, external_id=None):\n \"\"\"STS Role assume a boto3.Session\n\n With automatic credential renewal.\n\n Args:\n role_arn: iam role arn to assume\n session_name: client session identifier\n session: an optional extant session, note session is captured\n in a function closure for renewing the sts assumed role.\n\n :return: a boto3 session using the sts assumed role credentials\n\n Notes: We have to poke at botocore internals a few times\n \"\"\"\n if session is None:\n session = Session()\n\n retry = get_retry(('Throttling',))\n\n def refresh():\n\n parameters = {\"RoleArn\": role_arn, \"RoleSessionName\": session_name}\n\n if external_id is not None:\n parameters['ExternalId'] = external_id\n\n credentials = retry(\n session.client('sts').assume_role, **parameters)['Credentials']\n return dict(\n access_key=credentials['AccessKeyId'],\n secret_key=credentials['SecretAccessKey'],\n token=credentials['SessionToken'],\n # Silly that we basically stringify so it can be parsed again\n expiry_time=credentials['Expiration'].isoformat())\n\n session_credentials = RefreshableCredentials.create_from_metadata(\n metadata=refresh(),\n refresh_using=refresh,\n method='sts-assume-role')\n\n # so dirty.. it hurts, no clean way to set this outside of the\n # internals poke. There's some work upstream on making this nicer\n # but its pretty baroque as well with upstream support.\n # https://github.com/boto/boto3/issues/443\n # https://github.com/boto/botocore/issues/761\n\n s = get_session()\n s._credentials = session_credentials\n if region is None:\n region = s.get_config_variable('region') or 'us-east-1'\n s.set_config_variable('region', region)\n return Session(botocore_session=s)\n", "path": "c7n/credentials.py"}], "after_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAuthentication utilities\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\nfrom botocore.credentials import RefreshableCredentials\nfrom botocore.session import get_session\nfrom boto3 import Session\n\nfrom c7n.version import version\nfrom c7n.utils import get_retry\n\n\n# 0.8.45.1 compatibility with global only sts endpoints, out of caution, remove in 0.8.46.1\nUSE_STS_GLOBAL = os.environ.get('C7N_USE_STS_GLOBAL', '').lower() in ('yes', 'true')\n\n\nclass SessionFactory(object):\n\n def __init__(self, region, profile=None, assume_role=None, external_id=None):\n self.region = region\n self.profile = profile\n self.assume_role = assume_role\n self.external_id = external_id\n self.user_agent_name = \"CloudCustodian\"\n self.session_name = \"CloudCustodian\"\n if 'C7N_SESSION_SUFFIX' in os.environ:\n self.session_name = \"%s@%s\" % (\n self.session_name, os.environ['C7N_SESSION_SUFFIX'])\n self._subscribers = []\n\n def _set_policy_name(self, name):\n self.user_agent_name = (\"CloudCustodian(%s)\" % name).strip()\n\n policy_name = property(None, _set_policy_name)\n\n def __call__(self, assume=True, region=None):\n if self.assume_role and assume:\n session = Session(profile_name=self.profile)\n session = assumed_session(\n self.assume_role, self.session_name, session,\n region or self.region, self.external_id)\n else:\n session = Session(\n region_name=region or self.region, profile_name=self.profile)\n\n return self.update(session)\n\n def update(self, session):\n session._session.user_agent_name = self.user_agent_name\n session._session.user_agent_version = version\n\n for s in self._subscribers:\n s(session)\n\n return session\n\n def set_subscribers(self, subscribers):\n self._subscribers = subscribers\n\n\ndef assumed_session(role_arn, session_name, session=None, region=None, external_id=None):\n \"\"\"STS Role assume a boto3.Session\n\n With automatic credential renewal.\n\n Args:\n role_arn: iam role arn to assume\n session_name: client session identifier\n session: an optional extant session, note session is captured\n in a function closure for renewing the sts assumed role.\n\n :return: a boto3 session using the sts assumed role credentials\n\n Notes: We have to poke at botocore internals a few times\n \"\"\"\n if session is None:\n session = Session()\n\n retry = get_retry(('Throttling',))\n\n def refresh():\n\n parameters = {\"RoleArn\": role_arn, \"RoleSessionName\": session_name}\n\n if external_id is not None:\n parameters['ExternalId'] = external_id\n\n credentials = retry(\n get_sts_client(\n session, region).assume_role, **parameters)['Credentials']\n return dict(\n access_key=credentials['AccessKeyId'],\n secret_key=credentials['SecretAccessKey'],\n token=credentials['SessionToken'],\n # Silly that we basically stringify so it can be parsed again\n expiry_time=credentials['Expiration'].isoformat())\n\n session_credentials = RefreshableCredentials.create_from_metadata(\n metadata=refresh(),\n refresh_using=refresh,\n method='sts-assume-role')\n\n # so dirty.. it hurts, no clean way to set this outside of the\n # internals poke. There's some work upstream on making this nicer\n # but its pretty baroque as well with upstream support.\n # https://github.com/boto/boto3/issues/443\n # https://github.com/boto/botocore/issues/761\n\n s = get_session()\n s._credentials = session_credentials\n if region is None:\n region = s.get_config_variable('region') or 'us-east-1'\n s.set_config_variable('region', region)\n return Session(botocore_session=s)\n\n\ndef get_sts_client(session, region):\n \"\"\"Get the AWS STS endpoint specific for the given region.\n\n Returns the global endpoint if region is not specified.\n\n For the list of regional endpoints, see https://amzn.to/2ohJgtR\n \"\"\"\n if region and not USE_STS_GLOBAL:\n endpoint_url = \"https://sts.{}.amazonaws.com\".format(region)\n region_name = region\n else:\n endpoint_url = \"https://sts.amazonaws.com\"\n region_name = None\n return session.client(\n 'sts', endpoint_url=endpoint_url, region_name=region_name)\n", "path": "c7n/credentials.py"}]}
| 1,580 | 431 |
gh_patches_debug_555
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-753
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 1.6.9
On the docket:
+ [x] Fix pkg resource early import #750
+ [x] Fix `sys.path` scrubbing of pex extras modules. #752
Maybe on the docket:
+ [ ] Problem running PEX files with restricted permissions #746
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.6.8'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.6.8'
+__version__ = '1.6.9'
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.6.8'\n+__version__ = '1.6.9'\n", "issue": "Release 1.6.9\nOn the docket:\r\n+ [x] Fix pkg resource early import #750\r\n+ [x] Fix `sys.path` scrubbing of pex extras modules. #752\r\n\r\nMaybe on the docket:\r\n+ [ ] Problem running PEX files with restricted permissions #746\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.8'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.9'\n", "path": "pex/version.py"}]}
| 377 | 94 |
gh_patches_debug_39132
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-7357
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
provisiondevice creates a superuser to the wrong facility
<!--
Instructions:
* Fill out the sections below, replace …'s with information about your issue
* Use the 'preview' function above this text box to verify formatting before submitting
-->
### Observed behavior
<!--
Description of the behavior that was observed, including screenshots or other references when applicable
-->
When there is an existing facility on Kolibri instance, running `provisiondevice` command would set up a superuser on the existing facility instead of the new facility it is supposed to created in.
I believe this is because we didn't pass in `facility` when running `create_superuser`: https://github.com/learningequality/kolibri/blob/release-v0.14.x/kolibri/core/device/management/commands/provisiondevice.py#L87
### Expected behavior
<!--
Description of what behavior was expected but did not occur
-->
superuser should be created in the newly created facility
### User-facing consequences
<!--
Implications and real-world consequences for learners, coaches, admins, and other users of the application
-->
users are unable to log in to the facility that is created through `provisiondevice` command
### Errors and logs
<!--
Relevant logs from:
* the command line
* ~/.kolibri/logs/kolibri.txt
* the browser console
Please wrap errors in triple backticks for clean formatting like this:
```
01:10 info: something happened
01:12 error: something bad happened
```
-->
…
### Steps to reproduce
<!--
Precise steps that someone else can follow in order to see this behavior
-->
1. set up kolibri with one facility
2. run `provisiondevice` to set up another facility
3. notice that the facility is created successfully, but the superuser is in the old facility
### Context
<!--
Tell us about your environment, including:
* Kolibri version
* Operating system
* Browser
-->
Kolibri version: release-0.14.x
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/device/management/commands/provisiondevice.py`
Content:
```
1 import json
2 import logging
3 import os
4
5 from django.conf import settings
6 from django.core.management.base import BaseCommand
7 from django.core.management.base import CommandError
8 from django.db import transaction
9 from django.utils import six
10
11 from kolibri.core.auth.constants.facility_presets import mappings
12 from kolibri.core.auth.constants.facility_presets import presets
13 from kolibri.core.auth.models import Facility
14 from kolibri.core.auth.models import FacilityUser
15 from kolibri.core.device.utils import provision_device
16
17 logger = logging.getLogger(__name__)
18
19
20 def _check_setting(name, available, msg):
21 if name not in available:
22 raise CommandError(msg.format(name))
23
24
25 def check_facility_setting(name):
26 AVAILABLE_SETTINGS = [
27 "learner_can_edit_username",
28 "learner_can_edit_name",
29 "learner_can_edit_password",
30 "learner_can_sign_up",
31 "learner_can_delete_account",
32 "learner_can_login_with_no_password",
33 "show_download_button_in_learn",
34 ]
35 _check_setting(
36 name,
37 AVAILABLE_SETTINGS,
38 "'{}' is not a facility setting that can be changed by this command",
39 )
40
41
42 def check_device_setting(name):
43 AVAILABLE_SETTINGS = [
44 "language_id",
45 "landing_page",
46 "allow_guest_access",
47 "allow_peer_unlisted_channel_import",
48 "allow_learner_unassigned_resource_access",
49 "name",
50 "allow_other_browsers_to_connect",
51 ]
52 _check_setting(
53 name,
54 AVAILABLE_SETTINGS,
55 "'{}' is not a device setting that can be changed by this command",
56 )
57
58
59 def get_user_response(prompt, valid_answers=None):
60 answer = None
61 while not answer or (
62 valid_answers is not None and answer.lower() not in valid_answers
63 ):
64 answer = six.moves.input(prompt)
65 return answer.lower()
66
67
68 languages = dict(settings.LANGUAGES)
69
70
71 def create_facility(facility_name=None, preset=None, interactive=False):
72 if facility_name is None and interactive:
73 answer = get_user_response(
74 "Do you wish to create a facility? [yn] ", ["y", "n"]
75 )
76 if answer == "y":
77 facility_name = get_user_response(
78 "What do you wish to name your facility? "
79 )
80
81 if facility_name:
82 facility, created = Facility.objects.get_or_create(name=facility_name)
83
84 if not created:
85 logger.warn(
86 "Facility with name {name} already exists, not modifying preset.".format(
87 name=facility_name
88 )
89 )
90 return facility
91
92 logger.info("Facility with name {name} created.".format(name=facility_name))
93
94 if preset is None and interactive:
95 preset = get_user_response(
96 "Which preset do you wish to use? [{presets}]: ".format(
97 presets=",".join(presets.keys())
98 ),
99 valid_answers=presets,
100 )
101
102 # Only set preset data if we have created the facility, otherwise leave previous data intact
103 if preset:
104 dataset_data = mappings[preset]
105 for key, value in dataset_data.items():
106 check_facility_setting(key)
107 setattr(facility.dataset, key, value)
108 facility.dataset.save()
109 logger.info("Facility preset changed to {preset}.".format(preset=preset))
110 else:
111 facility = Facility.get_default_facility() or Facility.objects.first()
112 if not facility:
113 raise CommandError("No facility exists")
114 return facility
115
116
117 def update_facility_settings(facility, new_settings):
118 # Override any settings passed in
119 for key, value in new_settings.items():
120 check_facility_setting(key)
121 setattr(facility.dataset, key, value)
122 facility.dataset.save()
123 logger.info("Facility settings updated with {}".format(new_settings))
124
125
126 def create_superuser(username=None, password=None, interactive=False):
127 if username is None and interactive:
128 username = get_user_response("Enter a username for the super user: ")
129
130 if password is None and interactive:
131 confirm = ""
132 while password != confirm:
133 password = get_user_response("Enter a password for the super user: ")
134 confirm = get_user_response("Confirm password for the super user: ")
135
136 if username and password:
137 if not FacilityUser.objects.filter(username__icontains=username).exists():
138 FacilityUser.objects.create_superuser(username, password)
139 logger.info(
140 "Superuser created with username {username}.".format(username=username)
141 )
142 else:
143 logger.warn(
144 "An account with username {username} already exists, not creating user account.".format(
145 username=username
146 )
147 )
148
149
150 def create_device_settings(
151 language_id=None, facility=None, interactive=False, new_settings={}
152 ):
153 if language_id is None and interactive:
154 language_id = get_user_response(
155 "Enter a default language code [{langs}]: ".format(
156 langs=",".join(languages.keys())
157 ),
158 valid_answers=languages,
159 )
160 # Override any settings passed in
161 for key in new_settings:
162 check_device_setting(key)
163
164 settings_to_set = dict(new_settings)
165 settings_to_set["language_id"] = language_id
166 settings_to_set["default_facility"] = facility
167
168 provision_device(**settings_to_set)
169 logger.info("Device settings updated with {}".format(settings_to_set))
170
171
172 def json_file_contents(parser, arg):
173 if not os.path.exists(arg) or not os.path.isfile(arg):
174 return parser.error("The file '{}' does not exist".format(arg))
175 with open(arg, "r") as f:
176 try:
177 output = json.load(f)
178 except json.decoder.JSONDecodeError as e:
179 return parser.error("The file '{}' is not valid JSON:\n{}".format(arg, e))
180 return output
181
182
183 class Command(BaseCommand):
184 help = "Provision a device for use"
185
186 def add_arguments(self, parser):
187 parser.add_argument(
188 "--facility", action="store", type=str, help="Facility name to create"
189 )
190 parser.add_argument(
191 "--superusername",
192 action="store",
193 type=str,
194 help="Superuser username to create",
195 )
196 parser.add_argument(
197 "--superuserpassword",
198 action="store",
199 type=str,
200 help="Superuser password to create",
201 )
202 parser.add_argument(
203 "--preset",
204 action="store",
205 type=str,
206 help="Facility preset to use",
207 choices=presets,
208 )
209 parser.add_argument(
210 "--language_id",
211 action="store",
212 type=str,
213 help="Language id for default language",
214 choices=languages,
215 )
216 parser.add_argument(
217 "--noinput",
218 "--no-input",
219 action="store_false",
220 dest="interactive",
221 default=True,
222 help="Tells Django to NOT prompt the user for input of any kind.",
223 )
224 parser.add_argument(
225 "--facility_settings",
226 action="store",
227 help="JSON file containing facility settings",
228 type=lambda arg: json_file_contents(parser, arg),
229 default={},
230 )
231 parser.add_argument(
232 "--device_settings",
233 action="store",
234 help="JSON file containing device settings",
235 type=lambda arg: json_file_contents(parser, arg),
236 default={},
237 )
238
239 def handle(self, *args, **options):
240
241 logger.warn(
242 "The 'provisiondevice' command is experimental, and the API and behavior will change in a future release"
243 )
244
245 with transaction.atomic():
246 facility = create_facility(
247 facility_name=options["facility"],
248 preset=options["preset"],
249 interactive=options["interactive"],
250 )
251
252 update_facility_settings(facility, options["facility_settings"])
253
254 create_device_settings(
255 language_id=options["language_id"],
256 facility=facility,
257 interactive=options["interactive"],
258 new_settings=options["device_settings"],
259 )
260
261 create_superuser(
262 username=options["superusername"],
263 password=options["superuserpassword"],
264 interactive=options["interactive"],
265 )
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/core/device/management/commands/provisiondevice.py b/kolibri/core/device/management/commands/provisiondevice.py
--- a/kolibri/core/device/management/commands/provisiondevice.py
+++ b/kolibri/core/device/management/commands/provisiondevice.py
@@ -1,8 +1,10 @@
import json
import logging
import os
+import sys
from django.conf import settings
+from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.db import transaction
@@ -77,6 +79,8 @@
facility_name = get_user_response(
"What do you wish to name your facility? "
)
+ else:
+ sys.exit(1)
if facility_name:
facility, created = Facility.objects.get_or_create(name=facility_name)
@@ -123,7 +127,7 @@
logger.info("Facility settings updated with {}".format(new_settings))
-def create_superuser(username=None, password=None, interactive=False):
+def create_superuser(username=None, password=None, interactive=False, facility=None):
if username is None and interactive:
username = get_user_response("Enter a username for the super user: ")
@@ -134,15 +138,17 @@
confirm = get_user_response("Confirm password for the super user: ")
if username and password:
- if not FacilityUser.objects.filter(username__icontains=username).exists():
- FacilityUser.objects.create_superuser(username, password)
+ try:
+ FacilityUser.objects.create_superuser(username, password, facility=facility)
logger.info(
- "Superuser created with username {username}.".format(username=username)
+ "Superuser created with username {username} in facility {facility}.".format(
+ username=username, facility=facility
+ )
)
- else:
+ except ValidationError:
logger.warn(
- "An account with username {username} already exists, not creating user account.".format(
- username=username
+ "An account with username {username} already exists in facility {facility}, not creating user account.".format(
+ username=username, facility=facility
)
)
@@ -262,4 +268,5 @@
username=options["superusername"],
password=options["superuserpassword"],
interactive=options["interactive"],
+ facility=facility,
)
|
{"golden_diff": "diff --git a/kolibri/core/device/management/commands/provisiondevice.py b/kolibri/core/device/management/commands/provisiondevice.py\n--- a/kolibri/core/device/management/commands/provisiondevice.py\n+++ b/kolibri/core/device/management/commands/provisiondevice.py\n@@ -1,8 +1,10 @@\n import json\n import logging\n import os\n+import sys\n \n from django.conf import settings\n+from django.core.exceptions import ValidationError\n from django.core.management.base import BaseCommand\n from django.core.management.base import CommandError\n from django.db import transaction\n@@ -77,6 +79,8 @@\n facility_name = get_user_response(\n \"What do you wish to name your facility? \"\n )\n+ else:\n+ sys.exit(1)\n \n if facility_name:\n facility, created = Facility.objects.get_or_create(name=facility_name)\n@@ -123,7 +127,7 @@\n logger.info(\"Facility settings updated with {}\".format(new_settings))\n \n \n-def create_superuser(username=None, password=None, interactive=False):\n+def create_superuser(username=None, password=None, interactive=False, facility=None):\n if username is None and interactive:\n username = get_user_response(\"Enter a username for the super user: \")\n \n@@ -134,15 +138,17 @@\n confirm = get_user_response(\"Confirm password for the super user: \")\n \n if username and password:\n- if not FacilityUser.objects.filter(username__icontains=username).exists():\n- FacilityUser.objects.create_superuser(username, password)\n+ try:\n+ FacilityUser.objects.create_superuser(username, password, facility=facility)\n logger.info(\n- \"Superuser created with username {username}.\".format(username=username)\n+ \"Superuser created with username {username} in facility {facility}.\".format(\n+ username=username, facility=facility\n+ )\n )\n- else:\n+ except ValidationError:\n logger.warn(\n- \"An account with username {username} already exists, not creating user account.\".format(\n- username=username\n+ \"An account with username {username} already exists in facility {facility}, not creating user account.\".format(\n+ username=username, facility=facility\n )\n )\n \n@@ -262,4 +268,5 @@\n username=options[\"superusername\"],\n password=options[\"superuserpassword\"],\n interactive=options[\"interactive\"],\n+ facility=facility,\n )\n", "issue": "provisiondevice creates a superuser to the wrong facility\n<!--\r\nInstructions:\r\n * Fill out the sections below, replace \u2026's with information about your issue\r\n * Use the 'preview' function above this text box to verify formatting before submitting\r\n-->\r\n\r\n### Observed behavior\r\n<!--\r\nDescription of the behavior that was observed, including screenshots or other references when applicable\r\n-->\r\n\r\nWhen there is an existing facility on Kolibri instance, running `provisiondevice` command would set up a superuser on the existing facility instead of the new facility it is supposed to created in.\r\nI believe this is because we didn't pass in `facility` when running `create_superuser`: https://github.com/learningequality/kolibri/blob/release-v0.14.x/kolibri/core/device/management/commands/provisiondevice.py#L87\r\n\r\n### Expected behavior\r\n<!--\r\nDescription of what behavior was expected but did not occur\r\n-->\r\n\r\nsuperuser should be created in the newly created facility\r\n\r\n### User-facing consequences\r\n<!--\r\nImplications and real-world consequences for learners, coaches, admins, and other users of the application\r\n-->\r\n\r\nusers are unable to log in to the facility that is created through `provisiondevice` command\r\n\r\n### Errors and logs\r\n<!--\r\nRelevant logs from:\r\n * the command line\r\n * ~/.kolibri/logs/kolibri.txt\r\n * the browser console\r\n\r\nPlease wrap errors in triple backticks for clean formatting like this:\r\n```\r\n01:10 info: something happened\r\n01:12 error: something bad happened\r\n```\r\n-->\r\n\r\n\u2026\r\n\r\n### Steps to reproduce\r\n<!--\r\nPrecise steps that someone else can follow in order to see this behavior\r\n-->\r\n\r\n1. set up kolibri with one facility\r\n2. run `provisiondevice` to set up another facility\r\n3. notice that the facility is created successfully, but the superuser is in the old facility\r\n\r\n### Context\r\n<!--\r\nTell us about your environment, including:\r\n * Kolibri version\r\n * Operating system\r\n * Browser\r\n-->\r\n\r\nKolibri version: release-0.14.x\r\n\n", "before_files": [{"content": "import json\nimport logging\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.core.management.base import CommandError\nfrom django.db import transaction\nfrom django.utils import six\n\nfrom kolibri.core.auth.constants.facility_presets import mappings\nfrom kolibri.core.auth.constants.facility_presets import presets\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.device.utils import provision_device\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_setting(name, available, msg):\n if name not in available:\n raise CommandError(msg.format(name))\n\n\ndef check_facility_setting(name):\n AVAILABLE_SETTINGS = [\n \"learner_can_edit_username\",\n \"learner_can_edit_name\",\n \"learner_can_edit_password\",\n \"learner_can_sign_up\",\n \"learner_can_delete_account\",\n \"learner_can_login_with_no_password\",\n \"show_download_button_in_learn\",\n ]\n _check_setting(\n name,\n AVAILABLE_SETTINGS,\n \"'{}' is not a facility setting that can be changed by this command\",\n )\n\n\ndef check_device_setting(name):\n AVAILABLE_SETTINGS = [\n \"language_id\",\n \"landing_page\",\n \"allow_guest_access\",\n \"allow_peer_unlisted_channel_import\",\n \"allow_learner_unassigned_resource_access\",\n \"name\",\n \"allow_other_browsers_to_connect\",\n ]\n _check_setting(\n name,\n AVAILABLE_SETTINGS,\n \"'{}' is not a device setting that can be changed by this command\",\n )\n\n\ndef get_user_response(prompt, valid_answers=None):\n answer = None\n while not answer or (\n valid_answers is not None and answer.lower() not in valid_answers\n ):\n answer = six.moves.input(prompt)\n return answer.lower()\n\n\nlanguages = dict(settings.LANGUAGES)\n\n\ndef create_facility(facility_name=None, preset=None, interactive=False):\n if facility_name is None and interactive:\n answer = get_user_response(\n \"Do you wish to create a facility? [yn] \", [\"y\", \"n\"]\n )\n if answer == \"y\":\n facility_name = get_user_response(\n \"What do you wish to name your facility? \"\n )\n\n if facility_name:\n facility, created = Facility.objects.get_or_create(name=facility_name)\n\n if not created:\n logger.warn(\n \"Facility with name {name} already exists, not modifying preset.\".format(\n name=facility_name\n )\n )\n return facility\n\n logger.info(\"Facility with name {name} created.\".format(name=facility_name))\n\n if preset is None and interactive:\n preset = get_user_response(\n \"Which preset do you wish to use? [{presets}]: \".format(\n presets=\",\".join(presets.keys())\n ),\n valid_answers=presets,\n )\n\n # Only set preset data if we have created the facility, otherwise leave previous data intact\n if preset:\n dataset_data = mappings[preset]\n for key, value in dataset_data.items():\n check_facility_setting(key)\n setattr(facility.dataset, key, value)\n facility.dataset.save()\n logger.info(\"Facility preset changed to {preset}.\".format(preset=preset))\n else:\n facility = Facility.get_default_facility() or Facility.objects.first()\n if not facility:\n raise CommandError(\"No facility exists\")\n return facility\n\n\ndef update_facility_settings(facility, new_settings):\n # Override any settings passed in\n for key, value in new_settings.items():\n check_facility_setting(key)\n setattr(facility.dataset, key, value)\n facility.dataset.save()\n logger.info(\"Facility settings updated with {}\".format(new_settings))\n\n\ndef create_superuser(username=None, password=None, interactive=False):\n if username is None and interactive:\n username = get_user_response(\"Enter a username for the super user: \")\n\n if password is None and interactive:\n confirm = \"\"\n while password != confirm:\n password = get_user_response(\"Enter a password for the super user: \")\n confirm = get_user_response(\"Confirm password for the super user: \")\n\n if username and password:\n if not FacilityUser.objects.filter(username__icontains=username).exists():\n FacilityUser.objects.create_superuser(username, password)\n logger.info(\n \"Superuser created with username {username}.\".format(username=username)\n )\n else:\n logger.warn(\n \"An account with username {username} already exists, not creating user account.\".format(\n username=username\n )\n )\n\n\ndef create_device_settings(\n language_id=None, facility=None, interactive=False, new_settings={}\n):\n if language_id is None and interactive:\n language_id = get_user_response(\n \"Enter a default language code [{langs}]: \".format(\n langs=\",\".join(languages.keys())\n ),\n valid_answers=languages,\n )\n # Override any settings passed in\n for key in new_settings:\n check_device_setting(key)\n\n settings_to_set = dict(new_settings)\n settings_to_set[\"language_id\"] = language_id\n settings_to_set[\"default_facility\"] = facility\n\n provision_device(**settings_to_set)\n logger.info(\"Device settings updated with {}\".format(settings_to_set))\n\n\ndef json_file_contents(parser, arg):\n if not os.path.exists(arg) or not os.path.isfile(arg):\n return parser.error(\"The file '{}' does not exist\".format(arg))\n with open(arg, \"r\") as f:\n try:\n output = json.load(f)\n except json.decoder.JSONDecodeError as e:\n return parser.error(\"The file '{}' is not valid JSON:\\n{}\".format(arg, e))\n return output\n\n\nclass Command(BaseCommand):\n help = \"Provision a device for use\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--facility\", action=\"store\", type=str, help=\"Facility name to create\"\n )\n parser.add_argument(\n \"--superusername\",\n action=\"store\",\n type=str,\n help=\"Superuser username to create\",\n )\n parser.add_argument(\n \"--superuserpassword\",\n action=\"store\",\n type=str,\n help=\"Superuser password to create\",\n )\n parser.add_argument(\n \"--preset\",\n action=\"store\",\n type=str,\n help=\"Facility preset to use\",\n choices=presets,\n )\n parser.add_argument(\n \"--language_id\",\n action=\"store\",\n type=str,\n help=\"Language id for default language\",\n choices=languages,\n )\n parser.add_argument(\n \"--noinput\",\n \"--no-input\",\n action=\"store_false\",\n dest=\"interactive\",\n default=True,\n help=\"Tells Django to NOT prompt the user for input of any kind.\",\n )\n parser.add_argument(\n \"--facility_settings\",\n action=\"store\",\n help=\"JSON file containing facility settings\",\n type=lambda arg: json_file_contents(parser, arg),\n default={},\n )\n parser.add_argument(\n \"--device_settings\",\n action=\"store\",\n help=\"JSON file containing device settings\",\n type=lambda arg: json_file_contents(parser, arg),\n default={},\n )\n\n def handle(self, *args, **options):\n\n logger.warn(\n \"The 'provisiondevice' command is experimental, and the API and behavior will change in a future release\"\n )\n\n with transaction.atomic():\n facility = create_facility(\n facility_name=options[\"facility\"],\n preset=options[\"preset\"],\n interactive=options[\"interactive\"],\n )\n\n update_facility_settings(facility, options[\"facility_settings\"])\n\n create_device_settings(\n language_id=options[\"language_id\"],\n facility=facility,\n interactive=options[\"interactive\"],\n new_settings=options[\"device_settings\"],\n )\n\n create_superuser(\n username=options[\"superusername\"],\n password=options[\"superuserpassword\"],\n interactive=options[\"interactive\"],\n )\n", "path": "kolibri/core/device/management/commands/provisiondevice.py"}], "after_files": [{"content": "import json\nimport logging\nimport os\nimport sys\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.management.base import BaseCommand\nfrom django.core.management.base import CommandError\nfrom django.db import transaction\nfrom django.utils import six\n\nfrom kolibri.core.auth.constants.facility_presets import mappings\nfrom kolibri.core.auth.constants.facility_presets import presets\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.device.utils import provision_device\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_setting(name, available, msg):\n if name not in available:\n raise CommandError(msg.format(name))\n\n\ndef check_facility_setting(name):\n AVAILABLE_SETTINGS = [\n \"learner_can_edit_username\",\n \"learner_can_edit_name\",\n \"learner_can_edit_password\",\n \"learner_can_sign_up\",\n \"learner_can_delete_account\",\n \"learner_can_login_with_no_password\",\n \"show_download_button_in_learn\",\n ]\n _check_setting(\n name,\n AVAILABLE_SETTINGS,\n \"'{}' is not a facility setting that can be changed by this command\",\n )\n\n\ndef check_device_setting(name):\n AVAILABLE_SETTINGS = [\n \"language_id\",\n \"landing_page\",\n \"allow_guest_access\",\n \"allow_peer_unlisted_channel_import\",\n \"allow_learner_unassigned_resource_access\",\n \"name\",\n \"allow_other_browsers_to_connect\",\n ]\n _check_setting(\n name,\n AVAILABLE_SETTINGS,\n \"'{}' is not a device setting that can be changed by this command\",\n )\n\n\ndef get_user_response(prompt, valid_answers=None):\n answer = None\n while not answer or (\n valid_answers is not None and answer.lower() not in valid_answers\n ):\n answer = six.moves.input(prompt)\n return answer.lower()\n\n\nlanguages = dict(settings.LANGUAGES)\n\n\ndef create_facility(facility_name=None, preset=None, interactive=False):\n if facility_name is None and interactive:\n answer = get_user_response(\n \"Do you wish to create a facility? [yn] \", [\"y\", \"n\"]\n )\n if answer == \"y\":\n facility_name = get_user_response(\n \"What do you wish to name your facility? \"\n )\n else:\n sys.exit(1)\n\n if facility_name:\n facility, created = Facility.objects.get_or_create(name=facility_name)\n\n if not created:\n logger.warn(\n \"Facility with name {name} already exists, not modifying preset.\".format(\n name=facility_name\n )\n )\n return facility\n\n logger.info(\"Facility with name {name} created.\".format(name=facility_name))\n\n if preset is None and interactive:\n preset = get_user_response(\n \"Which preset do you wish to use? [{presets}]: \".format(\n presets=\",\".join(presets.keys())\n ),\n valid_answers=presets,\n )\n\n # Only set preset data if we have created the facility, otherwise leave previous data intact\n if preset:\n dataset_data = mappings[preset]\n for key, value in dataset_data.items():\n check_facility_setting(key)\n setattr(facility.dataset, key, value)\n facility.dataset.save()\n logger.info(\"Facility preset changed to {preset}.\".format(preset=preset))\n else:\n facility = Facility.get_default_facility() or Facility.objects.first()\n if not facility:\n raise CommandError(\"No facility exists\")\n return facility\n\n\ndef update_facility_settings(facility, new_settings):\n # Override any settings passed in\n for key, value in new_settings.items():\n check_facility_setting(key)\n setattr(facility.dataset, key, value)\n facility.dataset.save()\n logger.info(\"Facility settings updated with {}\".format(new_settings))\n\n\ndef create_superuser(username=None, password=None, interactive=False, facility=None):\n if username is None and interactive:\n username = get_user_response(\"Enter a username for the super user: \")\n\n if password is None and interactive:\n confirm = \"\"\n while password != confirm:\n password = get_user_response(\"Enter a password for the super user: \")\n confirm = get_user_response(\"Confirm password for the super user: \")\n\n if username and password:\n try:\n FacilityUser.objects.create_superuser(username, password, facility=facility)\n logger.info(\n \"Superuser created with username {username} in facility {facility}.\".format(\n username=username, facility=facility\n )\n )\n except ValidationError:\n logger.warn(\n \"An account with username {username} already exists in facility {facility}, not creating user account.\".format(\n username=username, facility=facility\n )\n )\n\n\ndef create_device_settings(\n language_id=None, facility=None, interactive=False, new_settings={}\n):\n if language_id is None and interactive:\n language_id = get_user_response(\n \"Enter a default language code [{langs}]: \".format(\n langs=\",\".join(languages.keys())\n ),\n valid_answers=languages,\n )\n # Override any settings passed in\n for key in new_settings:\n check_device_setting(key)\n\n settings_to_set = dict(new_settings)\n settings_to_set[\"language_id\"] = language_id\n settings_to_set[\"default_facility\"] = facility\n\n provision_device(**settings_to_set)\n logger.info(\"Device settings updated with {}\".format(settings_to_set))\n\n\ndef json_file_contents(parser, arg):\n if not os.path.exists(arg) or not os.path.isfile(arg):\n return parser.error(\"The file '{}' does not exist\".format(arg))\n with open(arg, \"r\") as f:\n try:\n output = json.load(f)\n except json.decoder.JSONDecodeError as e:\n return parser.error(\"The file '{}' is not valid JSON:\\n{}\".format(arg, e))\n return output\n\n\nclass Command(BaseCommand):\n help = \"Provision a device for use\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--facility\", action=\"store\", type=str, help=\"Facility name to create\"\n )\n parser.add_argument(\n \"--superusername\",\n action=\"store\",\n type=str,\n help=\"Superuser username to create\",\n )\n parser.add_argument(\n \"--superuserpassword\",\n action=\"store\",\n type=str,\n help=\"Superuser password to create\",\n )\n parser.add_argument(\n \"--preset\",\n action=\"store\",\n type=str,\n help=\"Facility preset to use\",\n choices=presets,\n )\n parser.add_argument(\n \"--language_id\",\n action=\"store\",\n type=str,\n help=\"Language id for default language\",\n choices=languages,\n )\n parser.add_argument(\n \"--noinput\",\n \"--no-input\",\n action=\"store_false\",\n dest=\"interactive\",\n default=True,\n help=\"Tells Django to NOT prompt the user for input of any kind.\",\n )\n parser.add_argument(\n \"--facility_settings\",\n action=\"store\",\n help=\"JSON file containing facility settings\",\n type=lambda arg: json_file_contents(parser, arg),\n default={},\n )\n parser.add_argument(\n \"--device_settings\",\n action=\"store\",\n help=\"JSON file containing device settings\",\n type=lambda arg: json_file_contents(parser, arg),\n default={},\n )\n\n def handle(self, *args, **options):\n\n logger.warn(\n \"The 'provisiondevice' command is experimental, and the API and behavior will change in a future release\"\n )\n\n with transaction.atomic():\n facility = create_facility(\n facility_name=options[\"facility\"],\n preset=options[\"preset\"],\n interactive=options[\"interactive\"],\n )\n\n update_facility_settings(facility, options[\"facility_settings\"])\n\n create_device_settings(\n language_id=options[\"language_id\"],\n facility=facility,\n interactive=options[\"interactive\"],\n new_settings=options[\"device_settings\"],\n )\n\n create_superuser(\n username=options[\"superusername\"],\n password=options[\"superuserpassword\"],\n interactive=options[\"interactive\"],\n facility=facility,\n )\n", "path": "kolibri/core/device/management/commands/provisiondevice.py"}]}
| 3,092 | 534 |
gh_patches_debug_27420
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-679
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a backport of `SynchronousTestCasa`
This actually just makes `TestCase` pretend to be `SynchronousTestCase` currently, since the difference is mostly just that `SynchronousTestCase` does less. (There is also some stuff that was add to `SynchronousTestCase` that makes synchronous testing easier, but on versions with that code `TestCase` inherits from it).
This incidentally bumps the minimum version of twisted required for the worker to 10.2.0, which is 5y 12mo old, and is the old version that we tested against that supports deferred cancellation, which was used in #2464. (It was introduced in 10.1.0, bu that that wasn't in our test matrix, and 10.2.0 is plenty old enough).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/util/maildir.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16
17 # This is a class which watches a maildir for new messages. It uses the
18 # linux dirwatcher API (if available) to look for new files. The
19 # .messageReceived method is invoked with the filename of the new message,
20 # relative to the top of the maildir (so it will look like "new/blahblah").
21
22 import os
23 from twisted.python import log, runtime
24 from twisted.application import service, internet
25 from twisted.internet import reactor, defer
26 dnotify = None
27 try:
28 import dnotify
29 except:
30 log.msg("unable to import dnotify, so Maildir will use polling instead")
31
32 class NoSuchMaildir(Exception):
33 pass
34
35 class MaildirService(service.MultiService):
36 pollinterval = 10 # only used if we don't have DNotify
37
38 def __init__(self, basedir=None):
39 service.MultiService.__init__(self)
40 if basedir:
41 self.setBasedir(basedir)
42 self.files = []
43 self.dnotify = None
44
45 def setBasedir(self, basedir):
46 # some users of MaildirService (scheduler.Try_Jobdir, in particular)
47 # don't know their basedir until setServiceParent, since it is
48 # relative to the buildmaster's basedir. So let them set it late. We
49 # don't actually need it until our own startService.
50 self.basedir = basedir
51 self.newdir = os.path.join(self.basedir, "new")
52 self.curdir = os.path.join(self.basedir, "cur")
53
54 def startService(self):
55 service.MultiService.startService(self)
56 if not os.path.isdir(self.newdir) or not os.path.isdir(self.curdir):
57 raise NoSuchMaildir("invalid maildir '%s'" % self.basedir)
58 try:
59 if dnotify:
60 # we must hold an fd open on the directory, so we can get
61 # notified when it changes.
62 self.dnotify = dnotify.DNotify(self.newdir,
63 self.dnotify_callback,
64 [dnotify.DNotify.DN_CREATE])
65 except (IOError, OverflowError):
66 # IOError is probably linux<2.4.19, which doesn't support
67 # dnotify. OverflowError will occur on some 64-bit machines
68 # because of a python bug
69 log.msg("DNotify failed, falling back to polling")
70 if not self.dnotify:
71 t = internet.TimerService(self.pollinterval, self.poll)
72 t.setServiceParent(self)
73 self.poll()
74
75 def dnotify_callback(self):
76 log.msg("dnotify noticed something, now polling")
77
78 # give it a moment. I found that qmail had problems when the message
79 # was removed from the maildir instantly. It shouldn't, that's what
80 # maildirs are made for. I wasn't able to eyeball any reason for the
81 # problem, and safecat didn't behave the same way, but qmail reports
82 # "Temporary_error_on_maildir_delivery" (qmail-local.c:165,
83 # maildir_child() process exited with rc not in 0,2,3,4). Not sure
84 # why, and I'd have to hack qmail to investigate further, so it's
85 # easier to just wait a second before yanking the message out of new/
86
87 reactor.callLater(0.1, self.poll)
88
89
90 def stopService(self):
91 if self.dnotify:
92 self.dnotify.remove()
93 self.dnotify = None
94 return service.MultiService.stopService(self)
95
96 @defer.inlineCallbacks
97 def poll(self):
98 try:
99 assert self.basedir
100 # see what's new
101 for f in self.files:
102 if not os.path.isfile(os.path.join(self.newdir, f)):
103 self.files.remove(f)
104 newfiles = []
105 for f in os.listdir(self.newdir):
106 if not f in self.files:
107 newfiles.append(f)
108 self.files.extend(newfiles)
109 for n in newfiles:
110 try:
111 yield self.messageReceived(n)
112 except:
113 log.err(None, "while reading '%s' from maildir '%s':" % (n, self.basedir))
114 except Exception:
115 log.err(None, "while polling maildir '%s':" % (self.basedir,))
116
117 def moveToCurDir(self, filename):
118 if runtime.platformType == "posix":
119 # open the file before moving it, because I'm afraid that once
120 # it's in cur/, someone might delete it at any moment
121 path = os.path.join(self.newdir, filename)
122 f = open(path, "r")
123 os.rename(os.path.join(self.newdir, filename),
124 os.path.join(self.curdir, filename))
125 elif runtime.platformType == "win32":
126 # do this backwards under windows, because you can't move a file
127 # that somebody is holding open. This was causing a Permission
128 # Denied error on bear's win32-twisted1.3 buildslave.
129 os.rename(os.path.join(self.newdir, filename),
130 os.path.join(self.curdir, filename))
131 path = os.path.join(self.curdir, filename)
132 f = open(path, "r")
133
134 return f
135
136 def messageReceived(self, filename):
137 raise NotImplementedError
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/master/buildbot/util/maildir.py b/master/buildbot/util/maildir.py
--- a/master/buildbot/util/maildir.py
+++ b/master/buildbot/util/maildir.py
@@ -41,6 +41,7 @@
self.setBasedir(basedir)
self.files = []
self.dnotify = None
+ self.timerService = None
def setBasedir(self, basedir):
# some users of MaildirService (scheduler.Try_Jobdir, in particular)
@@ -68,10 +69,11 @@
# because of a python bug
log.msg("DNotify failed, falling back to polling")
if not self.dnotify:
- t = internet.TimerService(self.pollinterval, self.poll)
- t.setServiceParent(self)
- self.poll()
-
+ self.timerService = internet.TimerService(self.pollinterval, self.poll)
+ self.timerService.setServiceParent(self)
+ self.poll()
+
+
def dnotify_callback(self):
log.msg("dnotify noticed something, now polling")
@@ -91,6 +93,9 @@
if self.dnotify:
self.dnotify.remove()
self.dnotify = None
+ if self.timerService is not None:
+ self.timerService.disownServiceParent()
+ self.timerService = None
return service.MultiService.stopService(self)
@defer.inlineCallbacks
|
{"golden_diff": "diff --git a/master/buildbot/util/maildir.py b/master/buildbot/util/maildir.py\n--- a/master/buildbot/util/maildir.py\n+++ b/master/buildbot/util/maildir.py\n@@ -41,6 +41,7 @@\n self.setBasedir(basedir)\n self.files = []\n self.dnotify = None\n+ self.timerService = None\n \n def setBasedir(self, basedir):\n # some users of MaildirService (scheduler.Try_Jobdir, in particular)\n@@ -68,10 +69,11 @@\n # because of a python bug\n log.msg(\"DNotify failed, falling back to polling\")\n if not self.dnotify:\n- t = internet.TimerService(self.pollinterval, self.poll)\n- t.setServiceParent(self)\n- self.poll()\n-\n+ self.timerService = internet.TimerService(self.pollinterval, self.poll)\n+ self.timerService.setServiceParent(self)\n+ self.poll() \n+ \n+ \n def dnotify_callback(self):\n log.msg(\"dnotify noticed something, now polling\")\n \n@@ -91,6 +93,9 @@\n if self.dnotify:\n self.dnotify.remove()\n self.dnotify = None\n+ if self.timerService is not None:\n+ self.timerService.disownServiceParent() \n+ self.timerService = None\n return service.MultiService.stopService(self)\n \n @defer.inlineCallbacks\n", "issue": "Add a backport of `SynchronousTestCasa`\nThis actually just makes `TestCase` pretend to be `SynchronousTestCase` currently, since the difference is mostly just that `SynchronousTestCase` does less. (There is also some stuff that was add to `SynchronousTestCase` that makes synchronous testing easier, but on versions with that code `TestCase` inherits from it).\n\nThis incidentally bumps the minimum version of twisted required for the worker to 10.2.0, which is 5y 12mo old, and is the old version that we tested against that supports deferred cancellation, which was used in #2464. (It was introduced in 10.1.0, bu that that wasn't in our test matrix, and 10.2.0 is plenty old enough).\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\n# This is a class which watches a maildir for new messages. It uses the\n# linux dirwatcher API (if available) to look for new files. The\n# .messageReceived method is invoked with the filename of the new message,\n# relative to the top of the maildir (so it will look like \"new/blahblah\").\n\nimport os\nfrom twisted.python import log, runtime\nfrom twisted.application import service, internet\nfrom twisted.internet import reactor, defer\ndnotify = None\ntry:\n import dnotify\nexcept:\n log.msg(\"unable to import dnotify, so Maildir will use polling instead\")\n\nclass NoSuchMaildir(Exception):\n pass\n\nclass MaildirService(service.MultiService):\n pollinterval = 10 # only used if we don't have DNotify\n\n def __init__(self, basedir=None):\n service.MultiService.__init__(self)\n if basedir:\n self.setBasedir(basedir)\n self.files = []\n self.dnotify = None\n\n def setBasedir(self, basedir):\n # some users of MaildirService (scheduler.Try_Jobdir, in particular)\n # don't know their basedir until setServiceParent, since it is\n # relative to the buildmaster's basedir. So let them set it late. We\n # don't actually need it until our own startService.\n self.basedir = basedir\n self.newdir = os.path.join(self.basedir, \"new\")\n self.curdir = os.path.join(self.basedir, \"cur\")\n\n def startService(self):\n service.MultiService.startService(self)\n if not os.path.isdir(self.newdir) or not os.path.isdir(self.curdir):\n raise NoSuchMaildir(\"invalid maildir '%s'\" % self.basedir)\n try:\n if dnotify:\n # we must hold an fd open on the directory, so we can get\n # notified when it changes.\n self.dnotify = dnotify.DNotify(self.newdir,\n self.dnotify_callback,\n [dnotify.DNotify.DN_CREATE])\n except (IOError, OverflowError):\n # IOError is probably linux<2.4.19, which doesn't support\n # dnotify. OverflowError will occur on some 64-bit machines\n # because of a python bug\n log.msg(\"DNotify failed, falling back to polling\")\n if not self.dnotify:\n t = internet.TimerService(self.pollinterval, self.poll)\n t.setServiceParent(self)\n self.poll()\n\n def dnotify_callback(self):\n log.msg(\"dnotify noticed something, now polling\")\n\n # give it a moment. I found that qmail had problems when the message\n # was removed from the maildir instantly. It shouldn't, that's what\n # maildirs are made for. I wasn't able to eyeball any reason for the\n # problem, and safecat didn't behave the same way, but qmail reports\n # \"Temporary_error_on_maildir_delivery\" (qmail-local.c:165,\n # maildir_child() process exited with rc not in 0,2,3,4). Not sure\n # why, and I'd have to hack qmail to investigate further, so it's\n # easier to just wait a second before yanking the message out of new/\n\n reactor.callLater(0.1, self.poll)\n\n\n def stopService(self):\n if self.dnotify:\n self.dnotify.remove()\n self.dnotify = None\n return service.MultiService.stopService(self)\n\n @defer.inlineCallbacks\n def poll(self):\n try:\n assert self.basedir\n # see what's new\n for f in self.files:\n if not os.path.isfile(os.path.join(self.newdir, f)):\n self.files.remove(f)\n newfiles = []\n for f in os.listdir(self.newdir):\n if not f in self.files:\n newfiles.append(f)\n self.files.extend(newfiles)\n for n in newfiles:\n try:\n yield self.messageReceived(n)\n except:\n log.err(None, \"while reading '%s' from maildir '%s':\" % (n, self.basedir))\n except Exception:\n log.err(None, \"while polling maildir '%s':\" % (self.basedir,))\n\n def moveToCurDir(self, filename):\n if runtime.platformType == \"posix\":\n # open the file before moving it, because I'm afraid that once\n # it's in cur/, someone might delete it at any moment\n path = os.path.join(self.newdir, filename)\n f = open(path, \"r\")\n os.rename(os.path.join(self.newdir, filename),\n os.path.join(self.curdir, filename))\n elif runtime.platformType == \"win32\":\n # do this backwards under windows, because you can't move a file\n # that somebody is holding open. This was causing a Permission\n # Denied error on bear's win32-twisted1.3 buildslave.\n os.rename(os.path.join(self.newdir, filename),\n os.path.join(self.curdir, filename))\n path = os.path.join(self.curdir, filename)\n f = open(path, \"r\")\n\n return f\n\n def messageReceived(self, filename):\n raise NotImplementedError\n", "path": "master/buildbot/util/maildir.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\n# This is a class which watches a maildir for new messages. It uses the\n# linux dirwatcher API (if available) to look for new files. The\n# .messageReceived method is invoked with the filename of the new message,\n# relative to the top of the maildir (so it will look like \"new/blahblah\").\n\nimport os\nfrom twisted.python import log, runtime\nfrom twisted.application import service, internet\nfrom twisted.internet import reactor, defer\ndnotify = None\ntry:\n import dnotify\nexcept:\n log.msg(\"unable to import dnotify, so Maildir will use polling instead\")\n\nclass NoSuchMaildir(Exception):\n pass\n\nclass MaildirService(service.MultiService):\n pollinterval = 10 # only used if we don't have DNotify\n\n def __init__(self, basedir=None):\n service.MultiService.__init__(self)\n if basedir:\n self.setBasedir(basedir)\n self.files = []\n self.dnotify = None\n self.timerService = None\n\n def setBasedir(self, basedir):\n # some users of MaildirService (scheduler.Try_Jobdir, in particular)\n # don't know their basedir until setServiceParent, since it is\n # relative to the buildmaster's basedir. So let them set it late. We\n # don't actually need it until our own startService.\n self.basedir = basedir\n self.newdir = os.path.join(self.basedir, \"new\")\n self.curdir = os.path.join(self.basedir, \"cur\")\n\n def startService(self):\n service.MultiService.startService(self)\n if not os.path.isdir(self.newdir) or not os.path.isdir(self.curdir):\n raise NoSuchMaildir(\"invalid maildir '%s'\" % self.basedir)\n try:\n if dnotify:\n # we must hold an fd open on the directory, so we can get\n # notified when it changes.\n self.dnotify = dnotify.DNotify(self.newdir,\n self.dnotify_callback,\n [dnotify.DNotify.DN_CREATE])\n except (IOError, OverflowError):\n # IOError is probably linux<2.4.19, which doesn't support\n # dnotify. OverflowError will occur on some 64-bit machines\n # because of a python bug\n log.msg(\"DNotify failed, falling back to polling\")\n if not self.dnotify:\n self.timerService = internet.TimerService(self.pollinterval, self.poll)\n self.timerService.setServiceParent(self)\n self.poll() \n \n \n def dnotify_callback(self):\n log.msg(\"dnotify noticed something, now polling\")\n\n # give it a moment. I found that qmail had problems when the message\n # was removed from the maildir instantly. It shouldn't, that's what\n # maildirs are made for. I wasn't able to eyeball any reason for the\n # problem, and safecat didn't behave the same way, but qmail reports\n # \"Temporary_error_on_maildir_delivery\" (qmail-local.c:165,\n # maildir_child() process exited with rc not in 0,2,3,4). Not sure\n # why, and I'd have to hack qmail to investigate further, so it's\n # easier to just wait a second before yanking the message out of new/\n\n reactor.callLater(0.1, self.poll)\n\n\n def stopService(self):\n if self.dnotify:\n self.dnotify.remove()\n self.dnotify = None\n if self.timerService is not None:\n self.timerService.disownServiceParent() \n self.timerService = None\n return service.MultiService.stopService(self)\n\n @defer.inlineCallbacks\n def poll(self):\n try:\n assert self.basedir\n # see what's new\n for f in self.files:\n if not os.path.isfile(os.path.join(self.newdir, f)):\n self.files.remove(f)\n newfiles = []\n for f in os.listdir(self.newdir):\n if not f in self.files:\n newfiles.append(f)\n self.files.extend(newfiles)\n for n in newfiles:\n try:\n yield self.messageReceived(n)\n except:\n log.err(None, \"while reading '%s' from maildir '%s':\" % (n, self.basedir))\n except Exception:\n log.err(None, \"while polling maildir '%s':\" % (self.basedir,))\n\n def moveToCurDir(self, filename):\n if runtime.platformType == \"posix\":\n # open the file before moving it, because I'm afraid that once\n # it's in cur/, someone might delete it at any moment\n path = os.path.join(self.newdir, filename)\n f = open(path, \"r\")\n os.rename(os.path.join(self.newdir, filename),\n os.path.join(self.curdir, filename))\n elif runtime.platformType == \"win32\":\n # do this backwards under windows, because you can't move a file\n # that somebody is holding open. This was causing a Permission\n # Denied error on bear's win32-twisted1.3 buildslave.\n os.rename(os.path.join(self.newdir, filename),\n os.path.join(self.curdir, filename))\n path = os.path.join(self.curdir, filename)\n f = open(path, \"r\")\n\n return f\n\n def messageReceived(self, filename):\n raise NotImplementedError\n", "path": "master/buildbot/util/maildir.py"}]}
| 2,058 | 311 |
gh_patches_debug_24742
|
rasdani/github-patches
|
git_diff
|
wright-group__WrightTools-669
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add simple SVD tool to kit
I find myself doing simple SVDs a lot. We should add a tool to `kit` and then plan to add multidimensional/data support later.
example of what this method might look like:
```
def SVD_doer(arr, i=None):
u, s, v = np.linalg.svd(arr , full_matrices=False, compute_uv=True)
u = u.T
if i == None:
return u, v, s
else:
return u[i], v[i], s[i]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/kit/_array.py`
Content:
```
1 """Array interaction tools."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 from .. import exceptions as wt_exceptions
10
11
12 # --- define --------------------------------------------------------------------------------------
13
14
15 __all__ = [
16 "closest_pair",
17 "diff",
18 "fft",
19 "joint_shape",
20 "orthogonal",
21 "remove_nans_1D",
22 "share_nans",
23 "smooth_1D",
24 "unique",
25 "valid_index",
26 ]
27
28
29 # --- functions -----------------------------------------------------------------------------------
30
31
32 def closest_pair(arr, give="indicies"):
33 """Find the pair of indices corresponding to the closest elements in an array.
34
35 If multiple pairs are equally close, both pairs of indicies are returned.
36 Optionally returns the closest distance itself.
37
38 I am sure that this could be written as a cheaper operation. I
39 wrote this as a quick and dirty method because I need it now to use on some
40 relatively small arrays. Feel free to refactor if you need this operation
41 done as fast as possible. - Blaise 2016-02-07
42
43 Parameters
44 ----------
45 arr : numpy.ndarray
46 The array to search.
47 give : {'indicies', 'distance'} (optional)
48 Toggle return behavior. If 'distance', returns a single float - the
49 closest distance itself. Default is indicies.
50
51 Returns
52 -------
53 list of lists of two tuples
54 List containing lists of two tuples: indicies the nearest pair in the
55 array.
56
57 >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])
58 >>> closest_pair(arr)
59 [[(1,), (8,)], [(3,), (4,)]]
60
61 """
62 idxs = [idx for idx in np.ndindex(arr.shape)]
63 outs = []
64 min_dist = arr.max() - arr.min()
65 for idxa in idxs:
66 for idxb in idxs:
67 if idxa == idxb:
68 continue
69 dist = abs(arr[idxa] - arr[idxb])
70 if dist == min_dist:
71 if not [idxb, idxa] in outs:
72 outs.append([idxa, idxb])
73 elif dist < min_dist:
74 min_dist = dist
75 outs = [[idxa, idxb]]
76 if give == "indicies":
77 return outs
78 elif give == "distance":
79 return min_dist
80 else:
81 raise KeyError("give not recognized in closest_pair")
82
83
84 def diff(xi, yi, order=1):
85 """Take the numerical derivative of a 1D array.
86
87 Output is mapped onto the original coordinates using linear interpolation.
88 Expects monotonic xi values.
89
90 Parameters
91 ----------
92 xi : 1D array-like
93 Coordinates.
94 yi : 1D array-like
95 Values.
96 order : positive integer (optional)
97 Order of differentiation.
98
99 Returns
100 -------
101 1D numpy array
102 Numerical derivative. Has the same shape as the input arrays.
103 """
104 yi = np.array(yi).copy()
105 flip = False
106 if xi[-1] < xi[0]:
107 xi = np.flipud(xi.copy())
108 yi = np.flipud(yi)
109 flip = True
110 midpoints = (xi[1:] + xi[:-1]) / 2
111 for _ in range(order):
112 d = np.diff(yi)
113 d /= np.diff(xi)
114 yi = np.interp(xi, midpoints, d)
115 if flip:
116 yi = np.flipud(yi)
117 return yi
118
119
120 def fft(xi, yi, axis=0):
121 """Take the 1D FFT of an N-dimensional array and return "sensible" properly shifted arrays.
122
123 Parameters
124 ----------
125 xi : numpy.ndarray
126 1D array over which the points to be FFT'ed are defined
127 yi : numpy.ndarray
128 ND array with values to FFT
129 axis : int
130 axis of yi to perform FFT over
131
132 Returns
133 -------
134 xi : 1D numpy.ndarray
135 1D array. Conjugate to input xi. Example: if input xi is in the time
136 domain, output xi is in frequency domain.
137 yi : ND numpy.ndarray
138 FFT. Has the same shape as the input array (yi).
139 """
140 # xi must be 1D
141 if xi.ndim != 1:
142 raise wt_exceptions.DimensionalityError(1, xi.ndim)
143 # xi must be evenly spaced
144 spacing = np.diff(xi)
145 if not np.allclose(spacing, spacing.mean()):
146 raise RuntimeError("WrightTools.kit.fft: argument xi must be evenly spaced")
147 # fft
148 yi = np.fft.fft(yi, axis=axis)
149 d = (xi.max() - xi.min()) / (xi.size - 1)
150 xi = np.fft.fftfreq(xi.size, d=d)
151 # shift
152 xi = np.fft.fftshift(xi)
153 yi = np.fft.fftshift(yi, axes=axis)
154 return xi, yi
155
156
157 def joint_shape(*args):
158 """Given a set of arrays, return the joint shape.
159
160 Parameters
161 ----------
162 args : array-likes
163
164 Returns
165 -------
166 tuple of int
167 Joint shape.
168 """
169 if len(args) == 0:
170 return ()
171 shape = []
172 shapes = [a.shape for a in args]
173 ndim = args[0].ndim
174 for i in range(ndim):
175 shape.append(max([s[i] for s in shapes]))
176 return tuple(shape)
177
178
179 def orthogonal(*args):
180 """Determine if a set of arrays are orthogonal.
181
182 Parameters
183 ----------
184 args : array-likes or array shapes
185
186 Returns
187 -------
188 bool
189 Array orthogonality condition.
190 """
191 for i, arg in enumerate(args):
192 if hasattr(arg, "shape"):
193 args[i] = arg.shape
194 for s in zip(*args):
195 if np.product(s) != max(s):
196 return False
197 return True
198
199
200 def remove_nans_1D(*args):
201 """Remove nans in a set of 1D arrays.
202
203 Removes indicies in all arrays if any array is nan at that index.
204 All input arrays must have the same size.
205
206 Parameters
207 ----------
208 args : 1D arrays
209
210 Returns
211 -------
212 tuple
213 Tuple of 1D arrays in same order as given, with nan indicies removed.
214 """
215 vals = np.isnan(args[0])
216 for a in args:
217 vals |= np.isnan(a)
218 return tuple(np.array(a)[vals == False] for a in args)
219
220
221 def share_nans(*arrs):
222 """Take a list of nD arrays and return a new list of nD arrays.
223
224 The new list is in the same order as the old list.
225 If one indexed element in an old array is nan then every element for that
226 index in all new arrays in the list is then nan.
227
228 Parameters
229 ----------
230 *arrs : nD arrays.
231
232 Returns
233 -------
234 list
235 List of nD arrays in same order as given, with nan indicies syncronized.
236 """
237 nans = np.zeros(joint_shape(*arrs))
238 for arr in arrs:
239 nans *= arr
240 return tuple([a + nans for a in arrs])
241
242
243 def smooth_1D(arr, n=10):
244 """Smooth 1D data by 'running average'.
245
246 Parameters
247 ----------
248 n : int
249 number of points to average
250 """
251 for i in range(n, len(arr) - n):
252 window = arr[i - n : i + n].copy()
253 arr[i] = window.mean()
254 return arr
255
256
257 def unique(arr, tolerance=1e-6):
258 """Return unique elements in 1D array, within tolerance.
259
260 Parameters
261 ----------
262 arr : array_like
263 Input array. This will be flattened if it is not already 1D.
264 tolerance : number (optional)
265 The tolerance for uniqueness.
266
267 Returns
268 -------
269 array
270 The sorted unique values.
271 """
272 arr = sorted(arr.flatten())
273 unique = []
274 while len(arr) > 0:
275 current = arr[0]
276 lis = [xi for xi in arr if np.abs(current - xi) < tolerance]
277 arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]
278 xi_lis_average = sum(lis) / len(lis)
279 unique.append(xi_lis_average)
280 return np.array(unique)
281
282
283 def valid_index(index, shape):
284 """Get a valid index for a broadcastable shape.
285
286 Parameters
287 ----------
288 index : tuple
289 Given index.
290 shape : tuple of int
291 Shape.
292
293 Returns
294 -------
295 tuple
296 Valid index.
297 """
298 # append slices to index
299 index = list(index)
300 while len(index) < len(shape):
301 index.append(slice(None))
302 # fill out, in reverse
303 out = []
304 for i, s in zip(index[::-1], shape[::-1]):
305 if s == 1:
306 if isinstance(i, slice):
307 out.append(slice(None))
308 else:
309 out.append(0)
310 else:
311 out.append(i)
312 return tuple(out[::-1])
313
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py
--- a/WrightTools/kit/_array.py
+++ b/WrightTools/kit/_array.py
@@ -21,6 +21,7 @@
"remove_nans_1D",
"share_nans",
"smooth_1D",
+ "svd",
"unique",
"valid_index",
]
@@ -215,7 +216,7 @@
vals = np.isnan(args[0])
for a in args:
vals |= np.isnan(a)
- return tuple(np.array(a)[vals == False] for a in args)
+ return tuple(np.array(a)[~vals] for a in args)
def share_nans(*arrs):
@@ -254,6 +255,33 @@
return arr
+def svd(a, i=None):
+ """Singular Value Decomposition.
+
+ Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
+ are unitary and `s` is a 1D array of `a`'s singular values.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ i : int or slice (optional)
+ What singular value "slice" to return.
+ Default is None which returns unitary 2D arrays.
+
+ Returns
+ -------
+ tuple
+ Decomposed arrays in order `u`, `v`, `s`
+ """
+ u, s, v = np.linalg.svd(a, full_matrices=False, compute_uv=True)
+ u = u.T
+ if i is None:
+ return u, v, s
+ else:
+ return u[i], v[i], s[i]
+
+
def unique(arr, tolerance=1e-6):
"""Return unique elements in 1D array, within tolerance.
|
{"golden_diff": "diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py\n--- a/WrightTools/kit/_array.py\n+++ b/WrightTools/kit/_array.py\n@@ -21,6 +21,7 @@\n \"remove_nans_1D\",\n \"share_nans\",\n \"smooth_1D\",\n+ \"svd\",\n \"unique\",\n \"valid_index\",\n ]\n@@ -215,7 +216,7 @@\n vals = np.isnan(args[0])\n for a in args:\n vals |= np.isnan(a)\n- return tuple(np.array(a)[vals == False] for a in args)\n+ return tuple(np.array(a)[~vals] for a in args)\n \n \n def share_nans(*arrs):\n@@ -254,6 +255,33 @@\n return arr\n \n \n+def svd(a, i=None):\n+ \"\"\"Singular Value Decomposition.\n+\n+ Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`\n+ are unitary and `s` is a 1D array of `a`'s singular values.\n+\n+ Parameters\n+ ----------\n+ a : array_like\n+ Input array.\n+ i : int or slice (optional)\n+ What singular value \"slice\" to return.\n+ Default is None which returns unitary 2D arrays.\n+\n+ Returns\n+ -------\n+ tuple\n+ Decomposed arrays in order `u`, `v`, `s`\n+ \"\"\"\n+ u, s, v = np.linalg.svd(a, full_matrices=False, compute_uv=True)\n+ u = u.T\n+ if i is None:\n+ return u, v, s\n+ else:\n+ return u[i], v[i], s[i]\n+\n+\n def unique(arr, tolerance=1e-6):\n \"\"\"Return unique elements in 1D array, within tolerance.\n", "issue": "Add simple SVD tool to kit\nI find myself doing simple SVDs a lot. We should add a tool to `kit` and then plan to add multidimensional/data support later.\r\n\r\nexample of what this method might look like:\r\n```\r\ndef SVD_doer(arr, i=None):\r\n u, s, v = np.linalg.svd(arr , full_matrices=False, compute_uv=True)\r\n u = u.T\r\n if i == None:\r\n return u, v, s\r\n else:\r\n return u[i], v[i], s[i]\r\n```\n", "before_files": [{"content": "\"\"\"Array interaction tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\n \"closest_pair\",\n \"diff\",\n \"fft\",\n \"joint_shape\",\n \"orthogonal\",\n \"remove_nans_1D\",\n \"share_nans\",\n \"smooth_1D\",\n \"unique\",\n \"valid_index\",\n]\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef closest_pair(arr, give=\"indicies\"):\n \"\"\"Find the pair of indices corresponding to the closest elements in an array.\n\n If multiple pairs are equally close, both pairs of indicies are returned.\n Optionally returns the closest distance itself.\n\n I am sure that this could be written as a cheaper operation. I\n wrote this as a quick and dirty method because I need it now to use on some\n relatively small arrays. Feel free to refactor if you need this operation\n done as fast as possible. - Blaise 2016-02-07\n\n Parameters\n ----------\n arr : numpy.ndarray\n The array to search.\n give : {'indicies', 'distance'} (optional)\n Toggle return behavior. If 'distance', returns a single float - the\n closest distance itself. Default is indicies.\n\n Returns\n -------\n list of lists of two tuples\n List containing lists of two tuples: indicies the nearest pair in the\n array.\n\n >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])\n >>> closest_pair(arr)\n [[(1,), (8,)], [(3,), (4,)]]\n\n \"\"\"\n idxs = [idx for idx in np.ndindex(arr.shape)]\n outs = []\n min_dist = arr.max() - arr.min()\n for idxa in idxs:\n for idxb in idxs:\n if idxa == idxb:\n continue\n dist = abs(arr[idxa] - arr[idxb])\n if dist == min_dist:\n if not [idxb, idxa] in outs:\n outs.append([idxa, idxb])\n elif dist < min_dist:\n min_dist = dist\n outs = [[idxa, idxb]]\n if give == \"indicies\":\n return outs\n elif give == \"distance\":\n return min_dist\n else:\n raise KeyError(\"give not recognized in closest_pair\")\n\n\ndef diff(xi, yi, order=1):\n \"\"\"Take the numerical derivative of a 1D array.\n\n Output is mapped onto the original coordinates using linear interpolation.\n Expects monotonic xi values.\n\n Parameters\n ----------\n xi : 1D array-like\n Coordinates.\n yi : 1D array-like\n Values.\n order : positive integer (optional)\n Order of differentiation.\n\n Returns\n -------\n 1D numpy array\n Numerical derivative. Has the same shape as the input arrays.\n \"\"\"\n yi = np.array(yi).copy()\n flip = False\n if xi[-1] < xi[0]:\n xi = np.flipud(xi.copy())\n yi = np.flipud(yi)\n flip = True\n midpoints = (xi[1:] + xi[:-1]) / 2\n for _ in range(order):\n d = np.diff(yi)\n d /= np.diff(xi)\n yi = np.interp(xi, midpoints, d)\n if flip:\n yi = np.flipud(yi)\n return yi\n\n\ndef fft(xi, yi, axis=0):\n \"\"\"Take the 1D FFT of an N-dimensional array and return \"sensible\" properly shifted arrays.\n\n Parameters\n ----------\n xi : numpy.ndarray\n 1D array over which the points to be FFT'ed are defined\n yi : numpy.ndarray\n ND array with values to FFT\n axis : int\n axis of yi to perform FFT over\n\n Returns\n -------\n xi : 1D numpy.ndarray\n 1D array. Conjugate to input xi. Example: if input xi is in the time\n domain, output xi is in frequency domain.\n yi : ND numpy.ndarray\n FFT. Has the same shape as the input array (yi).\n \"\"\"\n # xi must be 1D\n if xi.ndim != 1:\n raise wt_exceptions.DimensionalityError(1, xi.ndim)\n # xi must be evenly spaced\n spacing = np.diff(xi)\n if not np.allclose(spacing, spacing.mean()):\n raise RuntimeError(\"WrightTools.kit.fft: argument xi must be evenly spaced\")\n # fft\n yi = np.fft.fft(yi, axis=axis)\n d = (xi.max() - xi.min()) / (xi.size - 1)\n xi = np.fft.fftfreq(xi.size, d=d)\n # shift\n xi = np.fft.fftshift(xi)\n yi = np.fft.fftshift(yi, axes=axis)\n return xi, yi\n\n\ndef joint_shape(*args):\n \"\"\"Given a set of arrays, return the joint shape.\n\n Parameters\n ----------\n args : array-likes\n\n Returns\n -------\n tuple of int\n Joint shape.\n \"\"\"\n if len(args) == 0:\n return ()\n shape = []\n shapes = [a.shape for a in args]\n ndim = args[0].ndim\n for i in range(ndim):\n shape.append(max([s[i] for s in shapes]))\n return tuple(shape)\n\n\ndef orthogonal(*args):\n \"\"\"Determine if a set of arrays are orthogonal.\n\n Parameters\n ----------\n args : array-likes or array shapes\n\n Returns\n -------\n bool\n Array orthogonality condition.\n \"\"\"\n for i, arg in enumerate(args):\n if hasattr(arg, \"shape\"):\n args[i] = arg.shape\n for s in zip(*args):\n if np.product(s) != max(s):\n return False\n return True\n\n\ndef remove_nans_1D(*args):\n \"\"\"Remove nans in a set of 1D arrays.\n\n Removes indicies in all arrays if any array is nan at that index.\n All input arrays must have the same size.\n\n Parameters\n ----------\n args : 1D arrays\n\n Returns\n -------\n tuple\n Tuple of 1D arrays in same order as given, with nan indicies removed.\n \"\"\"\n vals = np.isnan(args[0])\n for a in args:\n vals |= np.isnan(a)\n return tuple(np.array(a)[vals == False] for a in args)\n\n\ndef share_nans(*arrs):\n \"\"\"Take a list of nD arrays and return a new list of nD arrays.\n\n The new list is in the same order as the old list.\n If one indexed element in an old array is nan then every element for that\n index in all new arrays in the list is then nan.\n\n Parameters\n ----------\n *arrs : nD arrays.\n\n Returns\n -------\n list\n List of nD arrays in same order as given, with nan indicies syncronized.\n \"\"\"\n nans = np.zeros(joint_shape(*arrs))\n for arr in arrs:\n nans *= arr\n return tuple([a + nans for a in arrs])\n\n\ndef smooth_1D(arr, n=10):\n \"\"\"Smooth 1D data by 'running average'.\n\n Parameters\n ----------\n n : int\n number of points to average\n \"\"\"\n for i in range(n, len(arr) - n):\n window = arr[i - n : i + n].copy()\n arr[i] = window.mean()\n return arr\n\n\ndef unique(arr, tolerance=1e-6):\n \"\"\"Return unique elements in 1D array, within tolerance.\n\n Parameters\n ----------\n arr : array_like\n Input array. This will be flattened if it is not already 1D.\n tolerance : number (optional)\n The tolerance for uniqueness.\n\n Returns\n -------\n array\n The sorted unique values.\n \"\"\"\n arr = sorted(arr.flatten())\n unique = []\n while len(arr) > 0:\n current = arr[0]\n lis = [xi for xi in arr if np.abs(current - xi) < tolerance]\n arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]\n xi_lis_average = sum(lis) / len(lis)\n unique.append(xi_lis_average)\n return np.array(unique)\n\n\ndef valid_index(index, shape):\n \"\"\"Get a valid index for a broadcastable shape.\n\n Parameters\n ----------\n index : tuple\n Given index.\n shape : tuple of int\n Shape.\n\n Returns\n -------\n tuple\n Valid index.\n \"\"\"\n # append slices to index\n index = list(index)\n while len(index) < len(shape):\n index.append(slice(None))\n # fill out, in reverse\n out = []\n for i, s in zip(index[::-1], shape[::-1]):\n if s == 1:\n if isinstance(i, slice):\n out.append(slice(None))\n else:\n out.append(0)\n else:\n out.append(i)\n return tuple(out[::-1])\n", "path": "WrightTools/kit/_array.py"}], "after_files": [{"content": "\"\"\"Array interaction tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\n \"closest_pair\",\n \"diff\",\n \"fft\",\n \"joint_shape\",\n \"orthogonal\",\n \"remove_nans_1D\",\n \"share_nans\",\n \"smooth_1D\",\n \"svd\",\n \"unique\",\n \"valid_index\",\n]\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef closest_pair(arr, give=\"indicies\"):\n \"\"\"Find the pair of indices corresponding to the closest elements in an array.\n\n If multiple pairs are equally close, both pairs of indicies are returned.\n Optionally returns the closest distance itself.\n\n I am sure that this could be written as a cheaper operation. I\n wrote this as a quick and dirty method because I need it now to use on some\n relatively small arrays. Feel free to refactor if you need this operation\n done as fast as possible. - Blaise 2016-02-07\n\n Parameters\n ----------\n arr : numpy.ndarray\n The array to search.\n give : {'indicies', 'distance'} (optional)\n Toggle return behavior. If 'distance', returns a single float - the\n closest distance itself. Default is indicies.\n\n Returns\n -------\n list of lists of two tuples\n List containing lists of two tuples: indicies the nearest pair in the\n array.\n\n >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])\n >>> closest_pair(arr)\n [[(1,), (8,)], [(3,), (4,)]]\n\n \"\"\"\n idxs = [idx for idx in np.ndindex(arr.shape)]\n outs = []\n min_dist = arr.max() - arr.min()\n for idxa in idxs:\n for idxb in idxs:\n if idxa == idxb:\n continue\n dist = abs(arr[idxa] - arr[idxb])\n if dist == min_dist:\n if not [idxb, idxa] in outs:\n outs.append([idxa, idxb])\n elif dist < min_dist:\n min_dist = dist\n outs = [[idxa, idxb]]\n if give == \"indicies\":\n return outs\n elif give == \"distance\":\n return min_dist\n else:\n raise KeyError(\"give not recognized in closest_pair\")\n\n\ndef diff(xi, yi, order=1):\n \"\"\"Take the numerical derivative of a 1D array.\n\n Output is mapped onto the original coordinates using linear interpolation.\n Expects monotonic xi values.\n\n Parameters\n ----------\n xi : 1D array-like\n Coordinates.\n yi : 1D array-like\n Values.\n order : positive integer (optional)\n Order of differentiation.\n\n Returns\n -------\n 1D numpy array\n Numerical derivative. Has the same shape as the input arrays.\n \"\"\"\n yi = np.array(yi).copy()\n flip = False\n if xi[-1] < xi[0]:\n xi = np.flipud(xi.copy())\n yi = np.flipud(yi)\n flip = True\n midpoints = (xi[1:] + xi[:-1]) / 2\n for _ in range(order):\n d = np.diff(yi)\n d /= np.diff(xi)\n yi = np.interp(xi, midpoints, d)\n if flip:\n yi = np.flipud(yi)\n return yi\n\n\ndef fft(xi, yi, axis=0):\n \"\"\"Take the 1D FFT of an N-dimensional array and return \"sensible\" properly shifted arrays.\n\n Parameters\n ----------\n xi : numpy.ndarray\n 1D array over which the points to be FFT'ed are defined\n yi : numpy.ndarray\n ND array with values to FFT\n axis : int\n axis of yi to perform FFT over\n\n Returns\n -------\n xi : 1D numpy.ndarray\n 1D array. Conjugate to input xi. Example: if input xi is in the time\n domain, output xi is in frequency domain.\n yi : ND numpy.ndarray\n FFT. Has the same shape as the input array (yi).\n \"\"\"\n # xi must be 1D\n if xi.ndim != 1:\n raise wt_exceptions.DimensionalityError(1, xi.ndim)\n # xi must be evenly spaced\n spacing = np.diff(xi)\n if not np.allclose(spacing, spacing.mean()):\n raise RuntimeError(\"WrightTools.kit.fft: argument xi must be evenly spaced\")\n # fft\n yi = np.fft.fft(yi, axis=axis)\n d = (xi.max() - xi.min()) / (xi.size - 1)\n xi = np.fft.fftfreq(xi.size, d=d)\n # shift\n xi = np.fft.fftshift(xi)\n yi = np.fft.fftshift(yi, axes=axis)\n return xi, yi\n\n\ndef joint_shape(*args):\n \"\"\"Given a set of arrays, return the joint shape.\n\n Parameters\n ----------\n args : array-likes\n\n Returns\n -------\n tuple of int\n Joint shape.\n \"\"\"\n if len(args) == 0:\n return ()\n shape = []\n shapes = [a.shape for a in args]\n ndim = args[0].ndim\n for i in range(ndim):\n shape.append(max([s[i] for s in shapes]))\n return tuple(shape)\n\n\ndef orthogonal(*args):\n \"\"\"Determine if a set of arrays are orthogonal.\n\n Parameters\n ----------\n args : array-likes or array shapes\n\n Returns\n -------\n bool\n Array orthogonality condition.\n \"\"\"\n for i, arg in enumerate(args):\n if hasattr(arg, \"shape\"):\n args[i] = arg.shape\n for s in zip(*args):\n if np.product(s) != max(s):\n return False\n return True\n\n\ndef remove_nans_1D(*args):\n \"\"\"Remove nans in a set of 1D arrays.\n\n Removes indicies in all arrays if any array is nan at that index.\n All input arrays must have the same size.\n\n Parameters\n ----------\n args : 1D arrays\n\n Returns\n -------\n tuple\n Tuple of 1D arrays in same order as given, with nan indicies removed.\n \"\"\"\n vals = np.isnan(args[0])\n for a in args:\n vals |= np.isnan(a)\n return tuple(np.array(a)[~vals] for a in args)\n\n\ndef share_nans(*arrs):\n \"\"\"Take a list of nD arrays and return a new list of nD arrays.\n\n The new list is in the same order as the old list.\n If one indexed element in an old array is nan then every element for that\n index in all new arrays in the list is then nan.\n\n Parameters\n ----------\n *arrs : nD arrays.\n\n Returns\n -------\n list\n List of nD arrays in same order as given, with nan indicies syncronized.\n \"\"\"\n nans = np.zeros(joint_shape(*arrs))\n for arr in arrs:\n nans *= arr\n return tuple([a + nans for a in arrs])\n\n\ndef smooth_1D(arr, n=10):\n \"\"\"Smooth 1D data by 'running average'.\n\n Parameters\n ----------\n n : int\n number of points to average\n \"\"\"\n for i in range(n, len(arr) - n):\n window = arr[i - n : i + n].copy()\n arr[i] = window.mean()\n return arr\n\n\ndef svd(a, i=None):\n \"\"\"Singular Value Decomposition.\n\n Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`\n are unitary and `s` is a 1D array of `a`'s singular values.\n\n Parameters\n ----------\n a : array_like\n Input array.\n i : int or slice (optional)\n What singular value \"slice\" to return.\n Default is None which returns unitary 2D arrays.\n\n Returns\n -------\n tuple\n Decomposed arrays in order `u`, `v`, `s`\n \"\"\"\n u, s, v = np.linalg.svd(a, full_matrices=False, compute_uv=True)\n u = u.T\n if i is None:\n return u, v, s\n else:\n return u[i], v[i], s[i]\n\n\ndef unique(arr, tolerance=1e-6):\n \"\"\"Return unique elements in 1D array, within tolerance.\n\n Parameters\n ----------\n arr : array_like\n Input array. This will be flattened if it is not already 1D.\n tolerance : number (optional)\n The tolerance for uniqueness.\n\n Returns\n -------\n array\n The sorted unique values.\n \"\"\"\n arr = sorted(arr.flatten())\n unique = []\n while len(arr) > 0:\n current = arr[0]\n lis = [xi for xi in arr if np.abs(current - xi) < tolerance]\n arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]\n xi_lis_average = sum(lis) / len(lis)\n unique.append(xi_lis_average)\n return np.array(unique)\n\n\ndef valid_index(index, shape):\n \"\"\"Get a valid index for a broadcastable shape.\n\n Parameters\n ----------\n index : tuple\n Given index.\n shape : tuple of int\n Shape.\n\n Returns\n -------\n tuple\n Valid index.\n \"\"\"\n # append slices to index\n index = list(index)\n while len(index) < len(shape):\n index.append(slice(None))\n # fill out, in reverse\n out = []\n for i, s in zip(index[::-1], shape[::-1]):\n if s == 1:\n if isinstance(i, slice):\n out.append(slice(None))\n else:\n out.append(0)\n else:\n out.append(i)\n return tuple(out[::-1])\n", "path": "WrightTools/kit/_array.py"}]}
| 3,255 | 436 |
gh_patches_debug_785
|
rasdani/github-patches
|
git_diff
|
translate__pootle-5666
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Silence dev checks in the admin UI as well
There's the following section in the Pootle dev config:
``` Python
# Silence the DEBUG check on dev servers
SILENCED_SYSTEM_CHECKS = [
'pootle.W004', # python-levenstein not installed
'pootle.W005', # DEBUG = True
'pootle.W010', # DEFAULT_FROM_EMAIL has default setting
'pootle.W011', # POOTLE_CONTACT_EMAIL has default setting
]
```
When running Pootle, I still get these notifications in the admin UI among others:
> `/!\` DEBUG mode is on. Do not do this in production! Set DEBUG = False in Pootle settings
>
> `/!\` POOTLE_CONTACT_EMAIL is using the following default setting 'info@YOUR_DOMAIN.com'. POOTLE_CONTACT_EMAIL is the address that will receive messages sent by the contact form.
>
> `/!\` DEFAULT_FROM_EMAIL is using the following default setting 'webmaster@localhost'. DEFAULT_FROM_EMAIL is used in all outgoing Pootle email. Don't forget to review your mail server settings.
I think it might make sense to silence them consistently not only in the console, but in the admin UI as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_app/views/admin/dashboard.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import locale
10 import os
11
12 from redis.exceptions import ConnectionError
13
14 from django.contrib.auth import get_user_model
15 from django.core.cache import cache
16 from django.shortcuts import render
17
18 from django_rq.queues import get_failed_queue, get_queue
19 from django_rq.workers import Worker
20
21 from pootle.core.decorators import admin_required
22 from pootle.i18n.gettext import ugettext as _, ungettext
23 from pootle_statistics.models import Submission
24 from pootle_store.models import Suggestion
25
26
27 def _format_numbers(numbers):
28 for k in numbers.keys():
29 formatted_number = locale.format("%d", numbers[k], grouping=True)
30 # Under Windows, formatted number must be converted to Unicode
31 if os.name == 'nt':
32 formatted_number = formatted_number.decode(
33 locale.getpreferredencoding()
34 )
35 numbers[k] = formatted_number
36
37
38 def server_stats():
39 User = get_user_model()
40 result = cache.get("server_stats")
41 if result is None:
42 result = {}
43 result['user_count'] = max(User.objects.filter(
44 is_active=True).count()-2, 0)
45 # 'default' and 'nobody' might be counted
46 # FIXME: the special users should not be retuned with is_active
47 result['submission_count'] = Submission.objects.count()
48 result['pending_count'] = Suggestion.objects.pending().count()
49 cache.set("server_stats", result, 86400)
50 _format_numbers(result)
51 return result
52
53
54 def rq_stats():
55 queue = get_queue()
56 failed_queue = get_failed_queue()
57 try:
58 workers = Worker.all(queue.connection)
59 except ConnectionError:
60 return None
61
62 num_workers = len(workers)
63 is_running = len(queue.connection.smembers(Worker.redis_workers_keys)) > 0
64 if is_running:
65 # Translators: this refers to the status of the background job worker
66 status_msg = ungettext('Running (%d worker)', 'Running (%d workers)',
67 num_workers) % num_workers
68 else:
69 # Translators: this refers to the status of the background job worker
70 status_msg = _('Stopped')
71
72 result = {
73 'job_count': queue.count,
74 'failed_job_count': failed_queue.count,
75 'is_running': is_running,
76 'status_msg': status_msg,
77 }
78
79 return result
80
81
82 def checks():
83 from django.core.checks.registry import registry
84
85 return registry.run_checks()
86
87
88 @admin_required
89 def view(request):
90 ctx = {
91 'page': 'admin-dashboard',
92 'server_stats': server_stats(),
93 'rq_stats': rq_stats(),
94 'checks': checks(),
95 }
96 return render(request, "admin/dashboard.html", ctx)
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pootle/apps/pootle_app/views/admin/dashboard.py b/pootle/apps/pootle_app/views/admin/dashboard.py
--- a/pootle/apps/pootle_app/views/admin/dashboard.py
+++ b/pootle/apps/pootle_app/views/admin/dashboard.py
@@ -82,7 +82,7 @@
def checks():
from django.core.checks.registry import registry
- return registry.run_checks()
+ return [e for e in registry.run_checks() if not e.is_silenced()]
@admin_required
|
{"golden_diff": "diff --git a/pootle/apps/pootle_app/views/admin/dashboard.py b/pootle/apps/pootle_app/views/admin/dashboard.py\n--- a/pootle/apps/pootle_app/views/admin/dashboard.py\n+++ b/pootle/apps/pootle_app/views/admin/dashboard.py\n@@ -82,7 +82,7 @@\n def checks():\n from django.core.checks.registry import registry\n \n- return registry.run_checks()\n+ return [e for e in registry.run_checks() if not e.is_silenced()]\n \n \n @admin_required\n", "issue": "Silence dev checks in the admin UI as well\nThere's the following section in the Pootle dev config:\n\n``` Python\n# Silence the DEBUG check on dev servers\nSILENCED_SYSTEM_CHECKS = [\n 'pootle.W004', # python-levenstein not installed\n 'pootle.W005', # DEBUG = True\n 'pootle.W010', # DEFAULT_FROM_EMAIL has default setting\n 'pootle.W011', # POOTLE_CONTACT_EMAIL has default setting\n]\n```\n\nWhen running Pootle, I still get these notifications in the admin UI among others:\n\n> `/!\\` DEBUG mode is on. Do not do this in production! Set DEBUG = False in Pootle settings\n> \n> `/!\\` POOTLE_CONTACT_EMAIL is using the following default setting 'info@YOUR_DOMAIN.com'. POOTLE_CONTACT_EMAIL is the address that will receive messages sent by the contact form.\n> \n> `/!\\` DEFAULT_FROM_EMAIL is using the following default setting 'webmaster@localhost'. DEFAULT_FROM_EMAIL is used in all outgoing Pootle email. Don't forget to review your mail server settings.\n\nI think it might make sense to silence them consistently not only in the console, but in the admin UI as well.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale\nimport os\n\nfrom redis.exceptions import ConnectionError\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.shortcuts import render\n\nfrom django_rq.queues import get_failed_queue, get_queue\nfrom django_rq.workers import Worker\n\nfrom pootle.core.decorators import admin_required\nfrom pootle.i18n.gettext import ugettext as _, ungettext\nfrom pootle_statistics.models import Submission\nfrom pootle_store.models import Suggestion\n\n\ndef _format_numbers(numbers):\n for k in numbers.keys():\n formatted_number = locale.format(\"%d\", numbers[k], grouping=True)\n # Under Windows, formatted number must be converted to Unicode\n if os.name == 'nt':\n formatted_number = formatted_number.decode(\n locale.getpreferredencoding()\n )\n numbers[k] = formatted_number\n\n\ndef server_stats():\n User = get_user_model()\n result = cache.get(\"server_stats\")\n if result is None:\n result = {}\n result['user_count'] = max(User.objects.filter(\n is_active=True).count()-2, 0)\n # 'default' and 'nobody' might be counted\n # FIXME: the special users should not be retuned with is_active\n result['submission_count'] = Submission.objects.count()\n result['pending_count'] = Suggestion.objects.pending().count()\n cache.set(\"server_stats\", result, 86400)\n _format_numbers(result)\n return result\n\n\ndef rq_stats():\n queue = get_queue()\n failed_queue = get_failed_queue()\n try:\n workers = Worker.all(queue.connection)\n except ConnectionError:\n return None\n\n num_workers = len(workers)\n is_running = len(queue.connection.smembers(Worker.redis_workers_keys)) > 0\n if is_running:\n # Translators: this refers to the status of the background job worker\n status_msg = ungettext('Running (%d worker)', 'Running (%d workers)',\n num_workers) % num_workers\n else:\n # Translators: this refers to the status of the background job worker\n status_msg = _('Stopped')\n\n result = {\n 'job_count': queue.count,\n 'failed_job_count': failed_queue.count,\n 'is_running': is_running,\n 'status_msg': status_msg,\n }\n\n return result\n\n\ndef checks():\n from django.core.checks.registry import registry\n\n return registry.run_checks()\n\n\n@admin_required\ndef view(request):\n ctx = {\n 'page': 'admin-dashboard',\n 'server_stats': server_stats(),\n 'rq_stats': rq_stats(),\n 'checks': checks(),\n }\n return render(request, \"admin/dashboard.html\", ctx)\n", "path": "pootle/apps/pootle_app/views/admin/dashboard.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale\nimport os\n\nfrom redis.exceptions import ConnectionError\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.shortcuts import render\n\nfrom django_rq.queues import get_failed_queue, get_queue\nfrom django_rq.workers import Worker\n\nfrom pootle.core.decorators import admin_required\nfrom pootle.i18n.gettext import ugettext as _, ungettext\nfrom pootle_statistics.models import Submission\nfrom pootle_store.models import Suggestion\n\n\ndef _format_numbers(numbers):\n for k in numbers.keys():\n formatted_number = locale.format(\"%d\", numbers[k], grouping=True)\n # Under Windows, formatted number must be converted to Unicode\n if os.name == 'nt':\n formatted_number = formatted_number.decode(\n locale.getpreferredencoding()\n )\n numbers[k] = formatted_number\n\n\ndef server_stats():\n User = get_user_model()\n result = cache.get(\"server_stats\")\n if result is None:\n result = {}\n result['user_count'] = max(User.objects.filter(\n is_active=True).count()-2, 0)\n # 'default' and 'nobody' might be counted\n # FIXME: the special users should not be retuned with is_active\n result['submission_count'] = Submission.objects.count()\n result['pending_count'] = Suggestion.objects.pending().count()\n cache.set(\"server_stats\", result, 86400)\n _format_numbers(result)\n return result\n\n\ndef rq_stats():\n queue = get_queue()\n failed_queue = get_failed_queue()\n try:\n workers = Worker.all(queue.connection)\n except ConnectionError:\n return None\n\n num_workers = len(workers)\n is_running = len(queue.connection.smembers(Worker.redis_workers_keys)) > 0\n if is_running:\n # Translators: this refers to the status of the background job worker\n status_msg = ungettext('Running (%d worker)', 'Running (%d workers)',\n num_workers) % num_workers\n else:\n # Translators: this refers to the status of the background job worker\n status_msg = _('Stopped')\n\n result = {\n 'job_count': queue.count,\n 'failed_job_count': failed_queue.count,\n 'is_running': is_running,\n 'status_msg': status_msg,\n }\n\n return result\n\n\ndef checks():\n from django.core.checks.registry import registry\n\n return [e for e in registry.run_checks() if not e.is_silenced()]\n\n\n@admin_required\ndef view(request):\n ctx = {\n 'page': 'admin-dashboard',\n 'server_stats': server_stats(),\n 'rq_stats': rq_stats(),\n 'checks': checks(),\n }\n return render(request, \"admin/dashboard.html\", ctx)\n", "path": "pootle/apps/pootle_app/views/admin/dashboard.py"}]}
| 1,387 | 117 |
gh_patches_debug_3666
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-245
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Bower for JS, CSS dependencies
We need something to manage our JavaScript (and perhaps CSS) dependencies and this tool is called [Bower](http://bower.io).
@wking [wrote](https://github.com/swcarpentry/amy/pull/239#issuecomment-89585714) some handy instructions on getting Bower up and running.
Bower packages we'd use for now:
- `bootstrap`
- `bootstrap-datepicker`
(This should land after #239)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/settings.py`
Content:
```
1 """
2 Django settings for amy project.
3
4 For more information on this file, see
5 https://docs.djangoproject.com/en/1.7/topics/settings/
6
7 For the full list of settings and their values, see
8 https://docs.djangoproject.com/en/1.7/ref/settings/
9 """
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 import os
13 import json
14
15 from django.conf import global_settings
16
17 BASE_DIR = os.path.dirname(os.path.dirname(__file__))
18
19
20 # Quick-start development settings - unsuitable for production
21 # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
22
23
24 # SECURITY WARNING: don't run with DEBUG turned on in production!
25 DEBUG = json.loads(os.environ.get('AMY_DEBUG', 'true'))
26 # For deployment in production:
27 # AMY_DEBUG=false AMY_SECRET_KEY="..." ./manage.py runserver ...
28
29 if DEBUG:
30 SECRET_KEY = '3l$35+@a%g!(^y^98oi%ei+%+yvtl3y0k^_7-fmx2oj09-ac5@'
31 else:
32 SECRET_KEY = None
33 SECRET_KEY = os.environ.get('AMY_SECRET_KEY', SECRET_KEY)
34
35
36 TEMPLATE_DEBUG = True
37
38 ALLOWED_HOSTS = [
39 'software-carpentry.org',
40 'software-carpentry.org.'
41 ]
42
43
44 # Application definition
45
46 INSTALLED_APPS = (
47 'django.contrib.auth',
48 'django.contrib.contenttypes',
49 'django.contrib.sessions',
50 'django.contrib.messages',
51 'django.contrib.staticfiles',
52 'workshops',
53 # this should be after 'workshops' because templates in
54 # 'templates/registration/' clash
55 'django.contrib.admin',
56 'crispy_forms',
57 )
58
59 CRISPY_TEMPLATE_PACK = 'bootstrap3'
60
61 MIDDLEWARE_CLASSES = (
62 'django.contrib.sessions.middleware.SessionMiddleware',
63 'django.middleware.common.CommonMiddleware',
64 'django.middleware.csrf.CsrfViewMiddleware',
65 'django.contrib.auth.middleware.AuthenticationMiddleware',
66 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
67 'django.contrib.messages.middleware.MessageMiddleware',
68 'django.middleware.clickjacking.XFrameOptionsMiddleware',
69 )
70
71 # WARNING: in Django 1.8 processors get moved from ``django.core`` to
72 # ``django.template``.
73 TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
74 "django.core.context_processors.request",
75 )
76
77 ROOT_URLCONF = 'amy.urls'
78
79 WSGI_APPLICATION = 'amy.wsgi.application'
80
81 from django.contrib.messages import constants as message_constants
82 MESSAGE_TAGS = {
83 message_constants.INFO: 'alert-info',
84 message_constants.SUCCESS: 'alert-success',
85 message_constants.WARNING: 'alert-warning',
86 message_constants.ERROR: 'alert-danger',
87 }
88
89
90 # Database
91 # https://docs.djangoproject.com/en/1.7/ref/settings/#databases
92
93 DATABASES = {
94 'default': {
95 'ENGINE': 'django.db.backends.sqlite3',
96 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
97 }
98 }
99
100 # Authentication
101
102 AUTH_USER_MODEL = 'workshops.Person'
103
104 # Internationalization
105 # https://docs.djangoproject.com/en/1.7/topics/i18n/
106
107 LANGUAGE_CODE = 'en-us'
108
109 TIME_ZONE = 'EST'
110
111 USE_I18N = True
112
113 USE_L10N = True
114
115 USE_TZ = True
116
117
118 # Static files (CSS, JavaScript, Images)
119 # https://docs.djangoproject.com/en/1.7/howto/static-files/
120
121 STATIC_URL = '/static/'
122
123 # Warn viewers of invalid template strings
124 TEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'
125
126 # if "next" (or "?next") variable is not set when logging in, redirect to
127 # workshops
128 LOGIN_REDIRECT_URL = '/workshops/'
129
130 # here's where @login_required redirects to:
131 LOGIN_URL = '/account/login/'
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/amy/settings.py b/amy/settings.py
--- a/amy/settings.py
+++ b/amy/settings.py
@@ -119,6 +119,10 @@
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
+STATIC_ROOT = os.path.join(BASE_DIR, 'static')
+STATICFILES_DIRS = (
+ os.path.join(BASE_DIR, 'bower_components'),
+)
# Warn viewers of invalid template strings
TEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'
|
{"golden_diff": "diff --git a/amy/settings.py b/amy/settings.py\n--- a/amy/settings.py\n+++ b/amy/settings.py\n@@ -119,6 +119,10 @@\n # https://docs.djangoproject.com/en/1.7/howto/static-files/\n \n STATIC_URL = '/static/'\n+STATIC_ROOT = os.path.join(BASE_DIR, 'static')\n+STATICFILES_DIRS = (\n+ os.path.join(BASE_DIR, 'bower_components'),\n+)\n \n # Warn viewers of invalid template strings\n TEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'\n", "issue": "Use Bower for JS, CSS dependencies\nWe need something to manage our JavaScript (and perhaps CSS) dependencies and this tool is called [Bower](http://bower.io).\n\n@wking [wrote](https://github.com/swcarpentry/amy/pull/239#issuecomment-89585714) some handy instructions on getting Bower up and running.\n\nBower packages we'd use for now:\n- `bootstrap`\n- `bootstrap-datepicker`\n\n(This should land after #239)\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for amy project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport json\n\nfrom django.conf import global_settings\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n\n# SECURITY WARNING: don't run with DEBUG turned on in production!\nDEBUG = json.loads(os.environ.get('AMY_DEBUG', 'true'))\n# For deployment in production:\n# AMY_DEBUG=false AMY_SECRET_KEY=\"...\" ./manage.py runserver ...\n\nif DEBUG:\n SECRET_KEY = '3l$35+@a%g!(^y^98oi%ei+%+yvtl3y0k^_7-fmx2oj09-ac5@'\nelse:\n SECRET_KEY = None\nSECRET_KEY = os.environ.get('AMY_SECRET_KEY', SECRET_KEY)\n\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = [\n 'software-carpentry.org',\n 'software-carpentry.org.'\n]\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'workshops',\n # this should be after 'workshops' because templates in\n # 'templates/registration/' clash\n 'django.contrib.admin',\n 'crispy_forms',\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\n# WARNING: in Django 1.8 processors get moved from ``django.core`` to\n# ``django.template``.\nTEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (\n \"django.core.context_processors.request\",\n)\n\nROOT_URLCONF = 'amy.urls'\n\nWSGI_APPLICATION = 'amy.wsgi.application'\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.INFO: 'alert-info',\n message_constants.SUCCESS: 'alert-success',\n message_constants.WARNING: 'alert-warning',\n message_constants.ERROR: 'alert-danger',\n}\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Authentication\n\nAUTH_USER_MODEL = 'workshops.Person'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'EST'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# Warn viewers of invalid template strings\nTEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'\n\n# if \"next\" (or \"?next\") variable is not set when logging in, redirect to\n# workshops\nLOGIN_REDIRECT_URL = '/workshops/'\n\n# here's where @login_required redirects to:\nLOGIN_URL = '/account/login/'\n", "path": "amy/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for amy project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport json\n\nfrom django.conf import global_settings\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n\n# SECURITY WARNING: don't run with DEBUG turned on in production!\nDEBUG = json.loads(os.environ.get('AMY_DEBUG', 'true'))\n# For deployment in production:\n# AMY_DEBUG=false AMY_SECRET_KEY=\"...\" ./manage.py runserver ...\n\nif DEBUG:\n SECRET_KEY = '3l$35+@a%g!(^y^98oi%ei+%+yvtl3y0k^_7-fmx2oj09-ac5@'\nelse:\n SECRET_KEY = None\nSECRET_KEY = os.environ.get('AMY_SECRET_KEY', SECRET_KEY)\n\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = [\n 'software-carpentry.org',\n 'software-carpentry.org.'\n]\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'workshops',\n # this should be after 'workshops' because templates in\n # 'templates/registration/' clash\n 'django.contrib.admin',\n 'crispy_forms',\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\n# WARNING: in Django 1.8 processors get moved from ``django.core`` to\n# ``django.template``.\nTEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (\n \"django.core.context_processors.request\",\n)\n\nROOT_URLCONF = 'amy.urls'\n\nWSGI_APPLICATION = 'amy.wsgi.application'\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.INFO: 'alert-info',\n message_constants.SUCCESS: 'alert-success',\n message_constants.WARNING: 'alert-warning',\n message_constants.ERROR: 'alert-danger',\n}\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Authentication\n\nAUTH_USER_MODEL = 'workshops.Person'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'EST'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'bower_components'),\n)\n\n# Warn viewers of invalid template strings\nTEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'\n\n# if \"next\" (or \"?next\") variable is not set when logging in, redirect to\n# workshops\nLOGIN_REDIRECT_URL = '/workshops/'\n\n# here's where @login_required redirects to:\nLOGIN_URL = '/account/login/'\n", "path": "amy/settings.py"}]}
| 1,485 | 121 |
gh_patches_debug_4062
|
rasdani/github-patches
|
git_diff
|
fail2ban__fail2ban-2651
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
python 3.9 threading changes
Copied from https://bugzilla.redhat.com/show_bug.cgi?id=1808347
Bug ID: 1808347
Summary: fail2ban fails to build with Python 3.9: uses removed
threading.Thread.isAlive()
Product: Fedora
Version: rawhide
Status: NEW
Component: fail2ban
Assignee: orion@nwra.com
Reporter: mhroncok@redhat.com
QA Contact: extras-qa@fedoraproject.org
CC: anon.amish@gmail.com, axel.thimm@atrpms.net,
cstratak@redhat.com, mhroncok@redhat.com,
mplch@redhat.com, orion@nwra.com, vonsch@gmail.com
Blocks: 1785415 (PYTHON39)
Target Milestone: ---
Classification: Fedora
fail2ban fails to build with Python 3.9.0a4.
```
ERROR: test_del_file
(fail2ban.tests.filtertestcase.MonitorFailures<FilterPoll>(/tmp/monitorfailures_FilterPoll__nshi9c2fail2ban))
----------------------------------------------------------------------
Traceback (most recent call last):
File "./fail2ban/tests/filtertestcase.py", line 987, in setUp
self._sleep_4_poll()
File "./fail2ban/tests/filtertestcase.py", line 1008, in _sleep_4_poll
Utils.wait_for(self.filter.isAlive, _maxWaitTime(5))
AttributeError: 'FilterPoll' object has no attribute 'isAlive'
```
And more. This looks like:
See https://docs.python.org/3.9/whatsnew/3.9.html#removed
"The isAlive() method of threading.Thread has been removed. It was deprecated
since Python 3.8. Use is_alive() instead."
For the build logs, see:
https://copr-be.cloud.fedoraproject.org/results/@python/python3.9/fedora-rawhide-x86_64/01255081-fail2ban/
For all our attempts to build fail2ban with Python 3.9, see:
https://copr.fedorainfracloud.org/coprs/g/python/python3.9/package/fail2ban/
Testing and mass rebuild of packages is happening in copr. You can follow these
instructions to test locally in mock if your package builds with Python 3.9:
https://copr.fedorainfracloud.org/coprs/g/python/python3.9/
Let us know here if you have any questions.
Python 3.9 will be included in Fedora 33. To make that update smoother, we're
building Fedora packages with early pre-releases of Python 3.9.
A build failure prevents us from testing all dependent packages (transitive
[Build]Requires), so if this package is required a lot, it's important for us
to get it fixed soon.
We'd appreciate help from the people who know this package best, but if you
don't want to work on this now, let us know so we can try to work around it on
our side.
Referenced Bugs:
https://bugzilla.redhat.com/show_bug.cgi?id=1785415
[Bug 1785415] Python 3.9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fail2ban/server/jailthread.py`
Content:
```
1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
2 # vi: set ft=python sts=4 ts=4 sw=4 noet :
3
4 # This file is part of Fail2Ban.
5 #
6 # Fail2Ban is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # Fail2Ban is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Fail2Ban; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19
20 # Author: Cyril Jaquier
21 #
22
23 __author__ = "Cyril Jaquier"
24 __copyright__ = "Copyright (c) 2004 Cyril Jaquier"
25 __license__ = "GPL"
26
27 import sys
28 from threading import Thread
29 from abc import abstractmethod
30
31 from .utils import Utils
32 from ..helpers import excepthook, prctl_set_th_name
33
34
35 class JailThread(Thread):
36 """Abstract class for threading elements in Fail2Ban.
37
38 Attributes
39 ----------
40 daemon
41 ident
42 name
43 status
44 active : bool
45 Control the state of the thread.
46 idle : bool
47 Control the idle state of the thread.
48 sleeptime : int
49 The time the thread sleeps for in the loop.
50 """
51
52 def __init__(self, name=None):
53 super(JailThread, self).__init__(name=name)
54 ## Should going with main thread also:
55 self.daemon = True
56 ## Control the state of the thread (None - was not started, True - active, False - stopped).
57 self.active = None
58 ## Control the idle state of the thread.
59 self.idle = False
60 ## The time the thread sleeps in the loop.
61 self.sleeptime = Utils.DEFAULT_SLEEP_TIME
62
63 # excepthook workaround for threads, derived from:
64 # http://bugs.python.org/issue1230540#msg91244
65 run = self.run
66
67 def run_with_except_hook(*args, **kwargs):
68 try:
69 run(*args, **kwargs)
70 except Exception as e:
71 # avoid very sporadic error "'NoneType' object has no attribute 'exc_info'" (https://bugs.python.org/issue7336)
72 # only extremely fast systems are affected ATM (2.7 / 3.x), if thread ends nothing is available here.
73 if sys is not None:
74 excepthook(*sys.exc_info())
75 else:
76 print(e)
77 self.run = run_with_except_hook
78
79 if sys.version_info >= (3,): # pragma: 2.x no cover
80 def _bootstrap(self):
81 prctl_set_th_name(self.name)
82 return super(JailThread, self)._bootstrap();
83 else: # pragma: 3.x no cover
84 def __bootstrap(self):
85 prctl_set_th_name(self.name)
86 return Thread._Thread__bootstrap(self)
87
88 @abstractmethod
89 def status(self, flavor="basic"): # pragma: no cover - abstract
90 """Abstract - Should provide status information.
91 """
92 pass
93
94 def start(self):
95 """Sets active flag and starts thread.
96 """
97 self.active = True
98 super(JailThread, self).start()
99
100 def stop(self):
101 """Sets `active` property to False, to flag run method to return.
102 """
103 self.active = False
104
105 @abstractmethod
106 def run(self): # pragma: no cover - absract
107 """Abstract - Called when thread starts, thread stops when returns.
108 """
109 pass
110
111 def join(self):
112 """ Safer join, that could be called also for not started (or ended) threads (used for cleanup).
113 """
114 ## if cleanup needed - create derivate and call it before join...
115
116 ## if was really started - should call join:
117 if self.active is not None:
118 super(JailThread, self).join()
119
120 ## python 2.x replace binding of private __bootstrap method:
121 if sys.version_info < (3,): # pragma: 3.x no cover
122 JailThread._Thread__bootstrap = JailThread._JailThread__bootstrap
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/fail2ban/server/jailthread.py b/fail2ban/server/jailthread.py
--- a/fail2ban/server/jailthread.py
+++ b/fail2ban/server/jailthread.py
@@ -120,3 +120,6 @@
## python 2.x replace binding of private __bootstrap method:
if sys.version_info < (3,): # pragma: 3.x no cover
JailThread._Thread__bootstrap = JailThread._JailThread__bootstrap
+## python 3.9, restore isAlive method:
+elif not hasattr(JailThread, 'isAlive'): # pragma: 2.x no cover
+ JailThread.isAlive = JailThread.is_alive
|
{"golden_diff": "diff --git a/fail2ban/server/jailthread.py b/fail2ban/server/jailthread.py\n--- a/fail2ban/server/jailthread.py\n+++ b/fail2ban/server/jailthread.py\n@@ -120,3 +120,6 @@\n ## python 2.x replace binding of private __bootstrap method:\n if sys.version_info < (3,): # pragma: 3.x no cover\n \tJailThread._Thread__bootstrap = JailThread._JailThread__bootstrap\n+## python 3.9, restore isAlive method:\n+elif not hasattr(JailThread, 'isAlive'): # pragma: 2.x no cover\n+\t JailThread.isAlive = JailThread.is_alive\n", "issue": "python 3.9 threading changes\nCopied from https://bugzilla.redhat.com/show_bug.cgi?id=1808347\r\n\r\n Bug ID: 1808347\r\n Summary: fail2ban fails to build with Python 3.9: uses removed\r\n threading.Thread.isAlive()\r\n Product: Fedora\r\n Version: rawhide\r\n Status: NEW\r\n Component: fail2ban\r\n Assignee: orion@nwra.com\r\n Reporter: mhroncok@redhat.com\r\n QA Contact: extras-qa@fedoraproject.org\r\n CC: anon.amish@gmail.com, axel.thimm@atrpms.net,\r\n cstratak@redhat.com, mhroncok@redhat.com,\r\n mplch@redhat.com, orion@nwra.com, vonsch@gmail.com\r\n Blocks: 1785415 (PYTHON39)\r\n Target Milestone: ---\r\n Classification: Fedora\r\n\r\nfail2ban fails to build with Python 3.9.0a4.\r\n```\r\nERROR: test_del_file\r\n(fail2ban.tests.filtertestcase.MonitorFailures<FilterPoll>(/tmp/monitorfailures_FilterPoll__nshi9c2fail2ban))\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"./fail2ban/tests/filtertestcase.py\", line 987, in setUp\r\n self._sleep_4_poll()\r\n File \"./fail2ban/tests/filtertestcase.py\", line 1008, in _sleep_4_poll\r\n Utils.wait_for(self.filter.isAlive, _maxWaitTime(5))\r\nAttributeError: 'FilterPoll' object has no attribute 'isAlive'\r\n```\r\n\r\nAnd more. This looks like:\r\n\r\nSee https://docs.python.org/3.9/whatsnew/3.9.html#removed\r\n\r\n\"The isAlive() method of threading.Thread has been removed. It was deprecated\r\nsince Python 3.8. Use is_alive() instead.\"\r\n\r\nFor the build logs, see:\r\nhttps://copr-be.cloud.fedoraproject.org/results/@python/python3.9/fedora-rawhide-x86_64/01255081-fail2ban/\r\n\r\nFor all our attempts to build fail2ban with Python 3.9, see:\r\nhttps://copr.fedorainfracloud.org/coprs/g/python/python3.9/package/fail2ban/\r\n\r\nTesting and mass rebuild of packages is happening in copr. You can follow these\r\ninstructions to test locally in mock if your package builds with Python 3.9:\r\nhttps://copr.fedorainfracloud.org/coprs/g/python/python3.9/\r\n\r\nLet us know here if you have any questions.\r\n\r\nPython 3.9 will be included in Fedora 33. To make that update smoother, we're\r\nbuilding Fedora packages with early pre-releases of Python 3.9.\r\nA build failure prevents us from testing all dependent packages (transitive\r\n[Build]Requires), so if this package is required a lot, it's important for us\r\nto get it fixed soon.\r\nWe'd appreciate help from the people who know this package best, but if you\r\ndon't want to work on this now, let us know so we can try to work around it on\r\nour side.\r\n\r\n\r\nReferenced Bugs:\r\n\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=1785415\r\n[Bug 1785415] Python 3.9\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n# Author: Cyril Jaquier\n# \n\n__author__ = \"Cyril Jaquier\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier\"\n__license__ = \"GPL\"\n\nimport sys\nfrom threading import Thread\nfrom abc import abstractmethod\n\nfrom .utils import Utils\nfrom ..helpers import excepthook, prctl_set_th_name\n\n\nclass JailThread(Thread):\n\t\"\"\"Abstract class for threading elements in Fail2Ban.\n\n\tAttributes\n\t----------\n\tdaemon\n\tident\n\tname\n\tstatus\n\tactive : bool\n\t\tControl the state of the thread.\n\tidle : bool\n\t\tControl the idle state of the thread.\n\tsleeptime : int\n\t\tThe time the thread sleeps for in the loop.\n\t\"\"\"\n\n\tdef __init__(self, name=None):\n\t\tsuper(JailThread, self).__init__(name=name)\n\t\t## Should going with main thread also:\n\t\tself.daemon = True\n\t\t## Control the state of the thread (None - was not started, True - active, False - stopped).\n\t\tself.active = None\n\t\t## Control the idle state of the thread.\n\t\tself.idle = False\n\t\t## The time the thread sleeps in the loop.\n\t\tself.sleeptime = Utils.DEFAULT_SLEEP_TIME\n\n\t\t# excepthook workaround for threads, derived from:\n\t\t# http://bugs.python.org/issue1230540#msg91244\n\t\trun = self.run\n\n\t\tdef run_with_except_hook(*args, **kwargs):\n\t\t\ttry:\n\t\t\t\trun(*args, **kwargs)\n\t\t\texcept Exception as e:\n\t\t\t\t# avoid very sporadic error \"'NoneType' object has no attribute 'exc_info'\" (https://bugs.python.org/issue7336)\n\t\t\t\t# only extremely fast systems are affected ATM (2.7 / 3.x), if thread ends nothing is available here.\n\t\t\t\tif sys is not None:\n\t\t\t\t\texcepthook(*sys.exc_info())\n\t\t\t\telse:\n\t\t\t\t\tprint(e)\n\t\tself.run = run_with_except_hook\n\n\tif sys.version_info >= (3,): # pragma: 2.x no cover\n\t\tdef _bootstrap(self):\n\t\t\tprctl_set_th_name(self.name)\n\t\t\treturn super(JailThread, self)._bootstrap();\n\telse: # pragma: 3.x no cover\n\t\tdef __bootstrap(self):\n\t\t\tprctl_set_th_name(self.name)\n\t\t\treturn Thread._Thread__bootstrap(self)\n\n\t@abstractmethod\n\tdef status(self, flavor=\"basic\"): # pragma: no cover - abstract\n\t\t\"\"\"Abstract - Should provide status information.\n\t\t\"\"\"\n\t\tpass\n\n\tdef start(self):\n\t\t\"\"\"Sets active flag and starts thread.\n\t\t\"\"\"\n\t\tself.active = True\n\t\tsuper(JailThread, self).start()\n\n\tdef stop(self):\n\t\t\"\"\"Sets `active` property to False, to flag run method to return.\n\t\t\"\"\"\n\t\tself.active = False\n\n\t@abstractmethod\n\tdef run(self): # pragma: no cover - absract\n\t\t\"\"\"Abstract - Called when thread starts, thread stops when returns.\n\t\t\"\"\"\n\t\tpass\n\n\tdef join(self):\n\t\t\"\"\" Safer join, that could be called also for not started (or ended) threads (used for cleanup).\n\t\t\"\"\"\n\t\t## if cleanup needed - create derivate and call it before join...\n\n\t\t## if was really started - should call join:\n\t\tif self.active is not None:\n\t\t\tsuper(JailThread, self).join()\n\n## python 2.x replace binding of private __bootstrap method:\nif sys.version_info < (3,): # pragma: 3.x no cover\n\tJailThread._Thread__bootstrap = JailThread._JailThread__bootstrap\n", "path": "fail2ban/server/jailthread.py"}], "after_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n# Author: Cyril Jaquier\n# \n\n__author__ = \"Cyril Jaquier\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier\"\n__license__ = \"GPL\"\n\nimport sys\nfrom threading import Thread\nfrom abc import abstractmethod\n\nfrom .utils import Utils\nfrom ..helpers import excepthook, prctl_set_th_name\n\n\nclass JailThread(Thread):\n\t\"\"\"Abstract class for threading elements in Fail2Ban.\n\n\tAttributes\n\t----------\n\tdaemon\n\tident\n\tname\n\tstatus\n\tactive : bool\n\t\tControl the state of the thread.\n\tidle : bool\n\t\tControl the idle state of the thread.\n\tsleeptime : int\n\t\tThe time the thread sleeps for in the loop.\n\t\"\"\"\n\n\tdef __init__(self, name=None):\n\t\tsuper(JailThread, self).__init__(name=name)\n\t\t## Should going with main thread also:\n\t\tself.daemon = True\n\t\t## Control the state of the thread (None - was not started, True - active, False - stopped).\n\t\tself.active = None\n\t\t## Control the idle state of the thread.\n\t\tself.idle = False\n\t\t## The time the thread sleeps in the loop.\n\t\tself.sleeptime = Utils.DEFAULT_SLEEP_TIME\n\n\t\t# excepthook workaround for threads, derived from:\n\t\t# http://bugs.python.org/issue1230540#msg91244\n\t\trun = self.run\n\n\t\tdef run_with_except_hook(*args, **kwargs):\n\t\t\ttry:\n\t\t\t\trun(*args, **kwargs)\n\t\t\texcept Exception as e:\n\t\t\t\t# avoid very sporadic error \"'NoneType' object has no attribute 'exc_info'\" (https://bugs.python.org/issue7336)\n\t\t\t\t# only extremely fast systems are affected ATM (2.7 / 3.x), if thread ends nothing is available here.\n\t\t\t\tif sys is not None:\n\t\t\t\t\texcepthook(*sys.exc_info())\n\t\t\t\telse:\n\t\t\t\t\tprint(e)\n\t\tself.run = run_with_except_hook\n\n\tif sys.version_info >= (3,): # pragma: 2.x no cover\n\t\tdef _bootstrap(self):\n\t\t\tprctl_set_th_name(self.name)\n\t\t\treturn super(JailThread, self)._bootstrap();\n\telse: # pragma: 3.x no cover\n\t\tdef __bootstrap(self):\n\t\t\tprctl_set_th_name(self.name)\n\t\t\treturn Thread._Thread__bootstrap(self)\n\n\t@abstractmethod\n\tdef status(self, flavor=\"basic\"): # pragma: no cover - abstract\n\t\t\"\"\"Abstract - Should provide status information.\n\t\t\"\"\"\n\t\tpass\n\n\tdef start(self):\n\t\t\"\"\"Sets active flag and starts thread.\n\t\t\"\"\"\n\t\tself.active = True\n\t\tsuper(JailThread, self).start()\n\n\tdef stop(self):\n\t\t\"\"\"Sets `active` property to False, to flag run method to return.\n\t\t\"\"\"\n\t\tself.active = False\n\n\t@abstractmethod\n\tdef run(self): # pragma: no cover - absract\n\t\t\"\"\"Abstract - Called when thread starts, thread stops when returns.\n\t\t\"\"\"\n\t\tpass\n\n\tdef join(self):\n\t\t\"\"\" Safer join, that could be called also for not started (or ended) threads (used for cleanup).\n\t\t\"\"\"\n\t\t## if cleanup needed - create derivate and call it before join...\n\n\t\t## if was really started - should call join:\n\t\tif self.active is not None:\n\t\t\tsuper(JailThread, self).join()\n\n## python 2.x replace binding of private __bootstrap method:\nif sys.version_info < (3,): # pragma: 3.x no cover\n\tJailThread._Thread__bootstrap = JailThread._JailThread__bootstrap\n## python 3.9, restore isAlive method:\nelif not hasattr(JailThread, 'isAlive'): # pragma: 2.x no cover\n\t JailThread.isAlive = JailThread.is_alive\n", "path": "fail2ban/server/jailthread.py"}]}
| 2,314 | 157 |
gh_patches_debug_36040
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1088
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: update the contextily example in the gallery
The example about adding a background map to your plot using contextily at https://geopandas.readthedocs.io/en/latest/gallery/plotting_basemap_background.html#sphx-glr-gallery-plotting-basemap-background-py can be updated with the latest contextily (https://github.com/darribas/contextily/releases/tag/v1.0rc2).
For example, the `add_basemap` function is now available in contextily itself.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/plotting_basemap_background.py`
Content:
```
1 """
2 Adding a background map to plots
3 --------------------------------
4
5 This example shows how you can add a background basemap to plots created
6 with the geopandas ``.plot()`` method. This makes use of the
7 `contextily <https://github.com/darribas/contextily>`__ package to retrieve
8 web map tiles from several sources (OpenStreetMap, Stamen).
9
10 """
11 # sphinx_gallery_thumbnail_number = 3
12 import geopandas
13
14 ###############################################################################
15 # Let's use the NYC borough boundary data that is available in geopandas
16 # datasets. Plotting this gives the following result:
17
18 df = geopandas.read_file(geopandas.datasets.get_path('nybb'))
19 ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
20
21 ###############################################################################
22 # Convert the data to Web Mercator
23 # ================================
24 #
25 # Web map tiles are typically provided in
26 # `Web Mercator <https://en.wikipedia.org/wiki/Web_Mercator>`__
27 # (`EPSG 3857 <https://epsg.io/3857>`__), so we need to make sure to convert
28 # our data first to the same CRS to combine our polygons and background tiles
29 # in the same map:
30
31 df = df.to_crs(epsg=3857)
32
33 ###############################################################################
34 # Contextily helper function
35 # ==========================
36 #
37 # We define a small helper function that uses
38 # `contextily <https://github.com/darribas/contextily>`__ to add a map
39 # as background to an existing plot:
40
41 import contextily as ctx
42
43 def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):
44 xmin, xmax, ymin, ymax = ax.axis()
45 basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)
46 ax.imshow(basemap, extent=extent, interpolation='bilinear')
47 # restore original x/y limits
48 ax.axis((xmin, xmax, ymin, ymax))
49
50 ###############################################################################
51 # Add background tiles to plot
52 # ============================
53 #
54 # Now we can use the above function to easily add a background map to our
55 # plot. The `zoom` keyword is required and let's you specify the detail of the
56 # map tiles (be careful to not specify a too high `zoom` level, as this can
57 # result in a large download):
58
59 ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
60 add_basemap(ax, zoom=10)
61
62 ###############################################################################
63 # By default, contextily uses the Stamen Terrain style. We can specify a
64 # different style using ``ctx.sources``:
65
66 ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
67 add_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)
68 ax.set_axis_off()
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/plotting_basemap_background.py b/examples/plotting_basemap_background.py
--- a/examples/plotting_basemap_background.py
+++ b/examples/plotting_basemap_background.py
@@ -1,5 +1,5 @@
"""
-Adding a background map to plots
+Adding a background map to plots
--------------------------------
This example shows how you can add a background basemap to plots created
@@ -31,38 +31,31 @@
df = df.to_crs(epsg=3857)
###############################################################################
-# Contextily helper function
-# ==========================
-#
-# We define a small helper function that uses
-# `contextily <https://github.com/darribas/contextily>`__ to add a map
-# as background to an existing plot:
import contextily as ctx
-def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):
- xmin, xmax, ymin, ymax = ax.axis()
- basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)
- ax.imshow(basemap, extent=extent, interpolation='bilinear')
- # restore original x/y limits
- ax.axis((xmin, xmax, ymin, ymax))
-
###############################################################################
# Add background tiles to plot
# ============================
#
-# Now we can use the above function to easily add a background map to our
-# plot. The `zoom` keyword is required and let's you specify the detail of the
-# map tiles (be careful to not specify a too high `zoom` level, as this can
-# result in a large download):
+# We can use `add_basemap` function of contextily to easily add a background
+# map to our plot. :
+
+ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
+ctx.add_basemap(ax)
+
+###############################################################################
+# We can control the detail of the map tiles using the optional `zoom` keyword
+# (be careful to not specify a too high `zoom` level,
+# as this can result in a large download).:
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
-add_basemap(ax, zoom=10)
+ctx.add_basemap(ax, zoom=12)
###############################################################################
# By default, contextily uses the Stamen Terrain style. We can specify a
-# different style using ``ctx.sources``:
+# different style using ``ctx.providers``:
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
-add_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)
+ctx.add_basemap(ax, url=ctx.providers.Stamen.TonerLite)
ax.set_axis_off()
|
{"golden_diff": "diff --git a/examples/plotting_basemap_background.py b/examples/plotting_basemap_background.py\n--- a/examples/plotting_basemap_background.py\n+++ b/examples/plotting_basemap_background.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-Adding a background map to plots \n+Adding a background map to plots\n --------------------------------\n \n This example shows how you can add a background basemap to plots created\n@@ -31,38 +31,31 @@\n df = df.to_crs(epsg=3857)\n \n ###############################################################################\n-# Contextily helper function\n-# ==========================\n-#\n-# We define a small helper function that uses\n-# `contextily <https://github.com/darribas/contextily>`__ to add a map\n-# as background to an existing plot: \n \n import contextily as ctx\n \n-def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):\n- xmin, xmax, ymin, ymax = ax.axis()\n- basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)\n- ax.imshow(basemap, extent=extent, interpolation='bilinear')\n- # restore original x/y limits\n- ax.axis((xmin, xmax, ymin, ymax))\n-\n ###############################################################################\n # Add background tiles to plot\n # ============================\n #\n-# Now we can use the above function to easily add a background map to our\n-# plot. The `zoom` keyword is required and let's you specify the detail of the\n-# map tiles (be careful to not specify a too high `zoom` level, as this can\n-# result in a large download):\n+# We can use `add_basemap` function of contextily to easily add a background\n+# map to our plot. :\n+\n+ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n+ctx.add_basemap(ax)\n+\n+###############################################################################\n+# We can control the detail of the map tiles using the optional `zoom` keyword\n+# (be careful to not specify a too high `zoom` level,\n+# as this can result in a large download).:\n \n ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n-add_basemap(ax, zoom=10)\n+ctx.add_basemap(ax, zoom=12)\n \n ###############################################################################\n # By default, contextily uses the Stamen Terrain style. We can specify a\n-# different style using ``ctx.sources``:\n+# different style using ``ctx.providers``:\n \n ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n-add_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)\n+ctx.add_basemap(ax, url=ctx.providers.Stamen.TonerLite)\n ax.set_axis_off()\n", "issue": "DOC: update the contextily example in the gallery\nThe example about adding a background map to your plot using contextily at https://geopandas.readthedocs.io/en/latest/gallery/plotting_basemap_background.html#sphx-glr-gallery-plotting-basemap-background-py can be updated with the latest contextily (https://github.com/darribas/contextily/releases/tag/v1.0rc2).\r\n\r\nFor example, the `add_basemap` function is now available in contextily itself.\n", "before_files": [{"content": "\"\"\"\nAdding a background map to plots \n--------------------------------\n\nThis example shows how you can add a background basemap to plots created\nwith the geopandas ``.plot()`` method. This makes use of the\n`contextily <https://github.com/darribas/contextily>`__ package to retrieve\nweb map tiles from several sources (OpenStreetMap, Stamen).\n\n\"\"\"\n# sphinx_gallery_thumbnail_number = 3\nimport geopandas\n\n###############################################################################\n# Let's use the NYC borough boundary data that is available in geopandas\n# datasets. Plotting this gives the following result:\n\ndf = geopandas.read_file(geopandas.datasets.get_path('nybb'))\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n\n###############################################################################\n# Convert the data to Web Mercator\n# ================================\n#\n# Web map tiles are typically provided in\n# `Web Mercator <https://en.wikipedia.org/wiki/Web_Mercator>`__\n# (`EPSG 3857 <https://epsg.io/3857>`__), so we need to make sure to convert\n# our data first to the same CRS to combine our polygons and background tiles\n# in the same map:\n\ndf = df.to_crs(epsg=3857)\n\n###############################################################################\n# Contextily helper function\n# ==========================\n#\n# We define a small helper function that uses\n# `contextily <https://github.com/darribas/contextily>`__ to add a map\n# as background to an existing plot: \n\nimport contextily as ctx\n\ndef add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):\n xmin, xmax, ymin, ymax = ax.axis()\n basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)\n ax.imshow(basemap, extent=extent, interpolation='bilinear')\n # restore original x/y limits\n ax.axis((xmin, xmax, ymin, ymax))\n\n###############################################################################\n# Add background tiles to plot\n# ============================\n#\n# Now we can use the above function to easily add a background map to our\n# plot. The `zoom` keyword is required and let's you specify the detail of the\n# map tiles (be careful to not specify a too high `zoom` level, as this can\n# result in a large download):\n\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\nadd_basemap(ax, zoom=10)\n\n###############################################################################\n# By default, contextily uses the Stamen Terrain style. We can specify a\n# different style using ``ctx.sources``:\n\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\nadd_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)\nax.set_axis_off()\n", "path": "examples/plotting_basemap_background.py"}], "after_files": [{"content": "\"\"\"\nAdding a background map to plots\n--------------------------------\n\nThis example shows how you can add a background basemap to plots created\nwith the geopandas ``.plot()`` method. This makes use of the\n`contextily <https://github.com/darribas/contextily>`__ package to retrieve\nweb map tiles from several sources (OpenStreetMap, Stamen).\n\n\"\"\"\n# sphinx_gallery_thumbnail_number = 3\nimport geopandas\n\n###############################################################################\n# Let's use the NYC borough boundary data that is available in geopandas\n# datasets. Plotting this gives the following result:\n\ndf = geopandas.read_file(geopandas.datasets.get_path('nybb'))\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n\n###############################################################################\n# Convert the data to Web Mercator\n# ================================\n#\n# Web map tiles are typically provided in\n# `Web Mercator <https://en.wikipedia.org/wiki/Web_Mercator>`__\n# (`EPSG 3857 <https://epsg.io/3857>`__), so we need to make sure to convert\n# our data first to the same CRS to combine our polygons and background tiles\n# in the same map:\n\ndf = df.to_crs(epsg=3857)\n\n###############################################################################\n\nimport contextily as ctx\n\n###############################################################################\n# Add background tiles to plot\n# ============================\n#\n# We can use `add_basemap` function of contextily to easily add a background\n# map to our plot. :\n\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\nctx.add_basemap(ax)\n\n###############################################################################\n# We can control the detail of the map tiles using the optional `zoom` keyword\n# (be careful to not specify a too high `zoom` level,\n# as this can result in a large download).:\n\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\nctx.add_basemap(ax, zoom=12)\n\n###############################################################################\n# By default, contextily uses the Stamen Terrain style. We can specify a\n# different style using ``ctx.providers``:\n\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\nctx.add_basemap(ax, url=ctx.providers.Stamen.TonerLite)\nax.set_axis_off()\n", "path": "examples/plotting_basemap_background.py"}]}
| 1,144 | 646 |
gh_patches_debug_49866
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-4284
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set attendees as required relationship to Orders API
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/orders.py`
Content:
```
1 from datetime import datetime
2
3 from flask import request
4 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
5 from marshmallow_jsonapi.flask import Schema, Relationship
6 from marshmallow_jsonapi import fields
7 from marshmallow import post_dump, validates_schema, validate
8 from flask_jwt import current_identity as current_user
9
10 from app.api.bootstrap import api
11 from app.api.data_layers.ChargesLayer import ChargesLayer
12 from app.api.helpers.db import save_to_db, safe_query
13 from app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity
14 from app.api.helpers.payment import PayPalPaymentsManager
15 from app.api.helpers.ticketing import TicketingManager
16 from app.api.helpers.permission_manager import has_access
17 from app.api.helpers.permissions import jwt_required
18 from app.api.helpers.utilities import dasherize, require_relationship
19 from app.models import db
20 from app.models.discount_code import DiscountCode, TICKET
21 from app.models.order import Order, OrderTicket
22
23
24 class OrderSchema(Schema):
25 class Meta:
26 type_ = 'order'
27 self_view = 'v1.order_detail'
28 self_view_kwargs = {'id': '<id>'}
29 inflect = dasherize
30
31 @post_dump
32 def generate_payment_url(self, data):
33 if 'POST' in request.method or ('GET' in request.method and 'regenerate' in request.args) and 'completed' != \
34 data["status"]:
35 if data['payment_mode'] == 'stripe':
36 data['payment_url'] = 'stripe://payment'
37 elif data['payment_mode'] == 'paypal':
38 order = Order.query.filter_by(id=data['id']).first()
39 data['payment_url'] = PayPalPaymentsManager.get_checkout_url(order)
40 return data
41
42 @validates_schema
43 def initial_values(self, data):
44 if data.get('payment_mode') is None and 'POST' in request.method:
45 data['payment_mode'] = 'free'
46 return data
47
48 id = fields.Str(dump_only=True)
49 identifier = fields.Str(dump_only=True)
50 amount = fields.Float(validate=lambda n: n > 0)
51 address = fields.Str()
52 city = fields.Str()
53 state = fields.Str(db.String)
54 country = fields.Str(required=True)
55 zipcode = fields.Str()
56 completed_at = fields.DateTime(dump_only=True)
57 transaction_id = fields.Str(dump_only=True)
58 payment_mode = fields.Str()
59 paid_via = fields.Str(dump_only=True)
60 brand = fields.Str(dump_only=True)
61 exp_month = fields.Str(dump_only=True)
62 exp_year = fields.Str(dump_only=True)
63 last4 = fields.Str(dump_only=True)
64 status = fields.Str(validate=validate.OneOf(choices=["pending", "cancelled", "confirmed", "deleted"]))
65 discount_code_id = fields.Str()
66 payment_url = fields.Str(dump_only=True)
67
68 attendees = Relationship(attribute='ticket_holders',
69 self_view='v1.order_attendee',
70 self_view_kwargs={'identifier': '<identifier>'},
71 related_view='v1.attendee_list',
72 related_view_kwargs={'order_id': '<id>'},
73 schema='AttendeeSchema',
74 many=True,
75 type_='attendee')
76
77 tickets = Relationship(self_view='v1.order_ticket',
78 self_view_kwargs={'identifier': '<identifier>'},
79 related_view='v1.ticket_list',
80 related_view_kwargs={'order_id': '<id>'},
81 schema='TicketSchema',
82 many=True,
83 type_="ticket")
84
85 user = Relationship(self_view='v1.order_user',
86 self_view_kwargs={'identifier': '<identifier>'},
87 related_view='v1.user_detail',
88 related_view_kwargs={'id': '<user_id>'},
89 schema='UserSchema',
90 type_="user")
91
92 event = Relationship(self_view='v1.order_event',
93 self_view_kwargs={'identifier': '<identifier>'},
94 related_view='v1.event_detail',
95 related_view_kwargs={'id': '<event_id>'},
96 schema='EventSchema',
97 type_="event")
98
99 marketer = Relationship(self_view='v1.order_marketer',
100 self_view_kwargs={'identifier': '<identifier>'},
101 related_view='v1.user_detail',
102 related_view_kwargs={'id': '<marketer_id>'},
103 schema='UserSchema',
104 type_="user")
105
106 discount_code = Relationship(self_view='v1.order_discount',
107 self_view_kwargs={'identifier': '<identifier>'},
108 related_view='v1.discount_code_detail',
109 related_view_kwargs={'id': '<discount_code_id>'},
110 schema='DiscountCodeSchema',
111 type_="discount-code")
112
113
114 class OrdersListPost(ResourceList):
115 def before_post(self, args, kwargs, data=None):
116 require_relationship(['event'], data)
117 if not has_access('is_coorganizer', event_id=data['event']):
118 data['status'] = 'pending'
119
120 def before_create_object(self, data, view_kwargs):
121 # Apply discount only if the user is not event admin
122 if data.get('discount') and not has_access('is_coorganizer', event_id=data['event']):
123 discount_code = safe_query(self, DiscountCode, 'id', data['discount'], 'discount_code_id')
124 if not discount_code.is_active:
125 raise UnprocessableEntity({'source': 'discount_code_id'}, "Inactive Discount Code")
126 else:
127 now = datetime.utcnow()
128 valid_from = datetime.strptime(discount_code.valid_from, '%Y-%m-%d %H:%M:%S')
129 valid_till = datetime.strptime(discount_code.valid_till, '%Y-%m-%d %H:%M:%S')
130 if not (valid_from <= now <= valid_till):
131 raise UnprocessableEntity({'source': 'discount_code_id'}, "Inactive Discount Code")
132 if not TicketingManager.match_discount_quantity(discount_code, data['ticket_holders']):
133 raise UnprocessableEntity({'source': 'discount_code_id'}, 'Discount Usage Exceeded')
134
135 if discount_code.event.id != data['event'] and discount_code.user_for == TICKET:
136 raise UnprocessableEntity({'source': 'discount_code_id'}, "Invalid Discount Code")
137
138 def after_create_object(self, order, data, view_kwargs):
139 order_tickets = {}
140 for holder in order.ticket_holders:
141 if order_tickets.get(holder.ticket_id) is None:
142 order_tickets[holder.ticket_id] = 1
143 else:
144 order_tickets[holder.ticket_id] += 1
145 for ticket in order_tickets:
146 od = OrderTicket(order_id=order.id, ticket_id=ticket, quantity=order_tickets[ticket])
147 save_to_db(od)
148 order.quantity = order.get_tickets_count()
149 save_to_db(order)
150 if not has_access('is_coorganizer', **view_kwargs):
151 TicketingManager.calculate_update_amount(order)
152
153 data['user_id'] = current_user.id
154
155 methods = ['POST', ]
156 decorators = (jwt_required,)
157 schema = OrderSchema
158 data_layer = {'session': db.session,
159 'model': Order,
160 'methods': {'before_create_object': before_create_object,
161 'after_create_object': after_create_object
162 }}
163
164
165 class OrdersList(ResourceList):
166 def before_get(self, args, kwargs):
167 if kwargs.get('event_id') is None:
168 if 'GET' in request.method and has_access('is_admin'):
169 pass
170 else:
171 raise ForbiddenException({'source': ''}, "Admin Access Required")
172 elif not has_access('is_coorganizer', event_id=kwargs['event_id']):
173 raise ForbiddenException({'source': ''}, "Co-Organizer Access Required")
174
175 decorators = (jwt_required,)
176 schema = OrderSchema
177 data_layer = {'session': db.session,
178 'model': Order}
179
180
181 class OrderDetail(ResourceDetail):
182 def before_get_object(self, view_kwargs):
183 if view_kwargs.get('identifier'):
184 order = safe_query(self, Order, 'identifier', view_kwargs['identifier'], 'order_identifier')
185 view_kwargs['id'] = order.id
186
187 def before_update_object(self, order, data, view_kwargs):
188 if data.get('status'):
189 if has_access('is_coorganizer', event_id=order.event.id):
190 pass
191 else:
192 raise ForbiddenException({'pointer': 'data/status'},
193 "To update status minimum Co-organizer access required")
194
195 decorators = (api.has_permission('is_coorganizer', fetch="event_id", fetch_as="event_id", model=Order),)
196
197 schema = OrderSchema
198 data_layer = {'session': db.session,
199 'model': Order,
200 'methods': {'before_update_object': before_update_object}}
201
202
203 class OrderRelationship(ResourceRelationship):
204 decorators = (jwt_required,)
205 schema = OrderSchema
206 data_layer = {'session': db.session,
207 'model': Order}
208
209
210 class ChargeSchema(Schema):
211 class Meta:
212 type_ = 'charge'
213 inflect = dasherize
214 self_view = 'v1.charge_list'
215 self_view_kwargs = {'id': '<id>'}
216
217 id = fields.Str(dump_only=True)
218 stripe = fields.Str(allow_none=True)
219
220
221 class ChargeList(ResourceList):
222 methods = ['POST', ]
223 schema = ChargeSchema
224
225 data_layer = {
226 'class': ChargesLayer,
227 'session': db.session
228 }
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/orders.py b/app/api/orders.py
--- a/app/api/orders.py
+++ b/app/api/orders.py
@@ -113,7 +113,7 @@
class OrdersListPost(ResourceList):
def before_post(self, args, kwargs, data=None):
- require_relationship(['event'], data)
+ require_relationship(['event', 'attendees'], data)
if not has_access('is_coorganizer', event_id=data['event']):
data['status'] = 'pending'
|
{"golden_diff": "diff --git a/app/api/orders.py b/app/api/orders.py\n--- a/app/api/orders.py\n+++ b/app/api/orders.py\n@@ -113,7 +113,7 @@\n \n class OrdersListPost(ResourceList):\n def before_post(self, args, kwargs, data=None):\n- require_relationship(['event'], data)\n+ require_relationship(['event', 'attendees'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n data['status'] = 'pending'\n", "issue": "Set attendees as required relationship to Orders API\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom flask import request\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nfrom marshmallow import post_dump, validates_schema, validate\nfrom flask_jwt import current_identity as current_user\n\nfrom app.api.bootstrap import api\nfrom app.api.data_layers.ChargesLayer import ChargesLayer\nfrom app.api.helpers.db import save_to_db, safe_query\nfrom app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity\nfrom app.api.helpers.payment import PayPalPaymentsManager\nfrom app.api.helpers.ticketing import TicketingManager\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.utilities import dasherize, require_relationship\nfrom app.models import db\nfrom app.models.discount_code import DiscountCode, TICKET\nfrom app.models.order import Order, OrderTicket\n\n\nclass OrderSchema(Schema):\n class Meta:\n type_ = 'order'\n self_view = 'v1.order_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @post_dump\n def generate_payment_url(self, data):\n if 'POST' in request.method or ('GET' in request.method and 'regenerate' in request.args) and 'completed' != \\\n data[\"status\"]:\n if data['payment_mode'] == 'stripe':\n data['payment_url'] = 'stripe://payment'\n elif data['payment_mode'] == 'paypal':\n order = Order.query.filter_by(id=data['id']).first()\n data['payment_url'] = PayPalPaymentsManager.get_checkout_url(order)\n return data\n\n @validates_schema\n def initial_values(self, data):\n if data.get('payment_mode') is None and 'POST' in request.method:\n data['payment_mode'] = 'free'\n return data\n\n id = fields.Str(dump_only=True)\n identifier = fields.Str(dump_only=True)\n amount = fields.Float(validate=lambda n: n > 0)\n address = fields.Str()\n city = fields.Str()\n state = fields.Str(db.String)\n country = fields.Str(required=True)\n zipcode = fields.Str()\n completed_at = fields.DateTime(dump_only=True)\n transaction_id = fields.Str(dump_only=True)\n payment_mode = fields.Str()\n paid_via = fields.Str(dump_only=True)\n brand = fields.Str(dump_only=True)\n exp_month = fields.Str(dump_only=True)\n exp_year = fields.Str(dump_only=True)\n last4 = fields.Str(dump_only=True)\n status = fields.Str(validate=validate.OneOf(choices=[\"pending\", \"cancelled\", \"confirmed\", \"deleted\"]))\n discount_code_id = fields.Str()\n payment_url = fields.Str(dump_only=True)\n\n attendees = Relationship(attribute='ticket_holders',\n self_view='v1.order_attendee',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.attendee_list',\n related_view_kwargs={'order_id': '<id>'},\n schema='AttendeeSchema',\n many=True,\n type_='attendee')\n\n tickets = Relationship(self_view='v1.order_ticket',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'order_id': '<id>'},\n schema='TicketSchema',\n many=True,\n type_=\"ticket\")\n\n user = Relationship(self_view='v1.order_user',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<user_id>'},\n schema='UserSchema',\n type_=\"user\")\n\n event = Relationship(self_view='v1.order_event',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.event_detail',\n related_view_kwargs={'id': '<event_id>'},\n schema='EventSchema',\n type_=\"event\")\n\n marketer = Relationship(self_view='v1.order_marketer',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<marketer_id>'},\n schema='UserSchema',\n type_=\"user\")\n\n discount_code = Relationship(self_view='v1.order_discount',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.discount_code_detail',\n related_view_kwargs={'id': '<discount_code_id>'},\n schema='DiscountCodeSchema',\n type_=\"discount-code\")\n\n\nclass OrdersListPost(ResourceList):\n def before_post(self, args, kwargs, data=None):\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n data['status'] = 'pending'\n\n def before_create_object(self, data, view_kwargs):\n # Apply discount only if the user is not event admin\n if data.get('discount') and not has_access('is_coorganizer', event_id=data['event']):\n discount_code = safe_query(self, DiscountCode, 'id', data['discount'], 'discount_code_id')\n if not discount_code.is_active:\n raise UnprocessableEntity({'source': 'discount_code_id'}, \"Inactive Discount Code\")\n else:\n now = datetime.utcnow()\n valid_from = datetime.strptime(discount_code.valid_from, '%Y-%m-%d %H:%M:%S')\n valid_till = datetime.strptime(discount_code.valid_till, '%Y-%m-%d %H:%M:%S')\n if not (valid_from <= now <= valid_till):\n raise UnprocessableEntity({'source': 'discount_code_id'}, \"Inactive Discount Code\")\n if not TicketingManager.match_discount_quantity(discount_code, data['ticket_holders']):\n raise UnprocessableEntity({'source': 'discount_code_id'}, 'Discount Usage Exceeded')\n\n if discount_code.event.id != data['event'] and discount_code.user_for == TICKET:\n raise UnprocessableEntity({'source': 'discount_code_id'}, \"Invalid Discount Code\")\n\n def after_create_object(self, order, data, view_kwargs):\n order_tickets = {}\n for holder in order.ticket_holders:\n if order_tickets.get(holder.ticket_id) is None:\n order_tickets[holder.ticket_id] = 1\n else:\n order_tickets[holder.ticket_id] += 1\n for ticket in order_tickets:\n od = OrderTicket(order_id=order.id, ticket_id=ticket, quantity=order_tickets[ticket])\n save_to_db(od)\n order.quantity = order.get_tickets_count()\n save_to_db(order)\n if not has_access('is_coorganizer', **view_kwargs):\n TicketingManager.calculate_update_amount(order)\n\n data['user_id'] = current_user.id\n\n methods = ['POST', ]\n decorators = (jwt_required,)\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order,\n 'methods': {'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass OrdersList(ResourceList):\n def before_get(self, args, kwargs):\n if kwargs.get('event_id') is None:\n if 'GET' in request.method and has_access('is_admin'):\n pass\n else:\n raise ForbiddenException({'source': ''}, \"Admin Access Required\")\n elif not has_access('is_coorganizer', event_id=kwargs['event_id']):\n raise ForbiddenException({'source': ''}, \"Co-Organizer Access Required\")\n\n decorators = (jwt_required,)\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order}\n\n\nclass OrderDetail(ResourceDetail):\n def before_get_object(self, view_kwargs):\n if view_kwargs.get('identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['identifier'], 'order_identifier')\n view_kwargs['id'] = order.id\n\n def before_update_object(self, order, data, view_kwargs):\n if data.get('status'):\n if has_access('is_coorganizer', event_id=order.event.id):\n pass\n else:\n raise ForbiddenException({'pointer': 'data/status'},\n \"To update status minimum Co-organizer access required\")\n\n decorators = (api.has_permission('is_coorganizer', fetch=\"event_id\", fetch_as=\"event_id\", model=Order),)\n\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order,\n 'methods': {'before_update_object': before_update_object}}\n\n\nclass OrderRelationship(ResourceRelationship):\n decorators = (jwt_required,)\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order}\n\n\nclass ChargeSchema(Schema):\n class Meta:\n type_ = 'charge'\n inflect = dasherize\n self_view = 'v1.charge_list'\n self_view_kwargs = {'id': '<id>'}\n\n id = fields.Str(dump_only=True)\n stripe = fields.Str(allow_none=True)\n\n\nclass ChargeList(ResourceList):\n methods = ['POST', ]\n schema = ChargeSchema\n\n data_layer = {\n 'class': ChargesLayer,\n 'session': db.session\n }\n", "path": "app/api/orders.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom flask import request\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nfrom marshmallow import post_dump, validates_schema, validate\nfrom flask_jwt import current_identity as current_user\n\nfrom app.api.bootstrap import api\nfrom app.api.data_layers.ChargesLayer import ChargesLayer\nfrom app.api.helpers.db import save_to_db, safe_query\nfrom app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity\nfrom app.api.helpers.payment import PayPalPaymentsManager\nfrom app.api.helpers.ticketing import TicketingManager\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.utilities import dasherize, require_relationship\nfrom app.models import db\nfrom app.models.discount_code import DiscountCode, TICKET\nfrom app.models.order import Order, OrderTicket\n\n\nclass OrderSchema(Schema):\n class Meta:\n type_ = 'order'\n self_view = 'v1.order_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @post_dump\n def generate_payment_url(self, data):\n if 'POST' in request.method or ('GET' in request.method and 'regenerate' in request.args) and 'completed' != \\\n data[\"status\"]:\n if data['payment_mode'] == 'stripe':\n data['payment_url'] = 'stripe://payment'\n elif data['payment_mode'] == 'paypal':\n order = Order.query.filter_by(id=data['id']).first()\n data['payment_url'] = PayPalPaymentsManager.get_checkout_url(order)\n return data\n\n @validates_schema\n def initial_values(self, data):\n if data.get('payment_mode') is None and 'POST' in request.method:\n data['payment_mode'] = 'free'\n return data\n\n id = fields.Str(dump_only=True)\n identifier = fields.Str(dump_only=True)\n amount = fields.Float(validate=lambda n: n > 0)\n address = fields.Str()\n city = fields.Str()\n state = fields.Str(db.String)\n country = fields.Str(required=True)\n zipcode = fields.Str()\n completed_at = fields.DateTime(dump_only=True)\n transaction_id = fields.Str(dump_only=True)\n payment_mode = fields.Str()\n paid_via = fields.Str(dump_only=True)\n brand = fields.Str(dump_only=True)\n exp_month = fields.Str(dump_only=True)\n exp_year = fields.Str(dump_only=True)\n last4 = fields.Str(dump_only=True)\n status = fields.Str(validate=validate.OneOf(choices=[\"pending\", \"cancelled\", \"confirmed\", \"deleted\"]))\n discount_code_id = fields.Str()\n payment_url = fields.Str(dump_only=True)\n\n attendees = Relationship(attribute='ticket_holders',\n self_view='v1.order_attendee',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.attendee_list',\n related_view_kwargs={'order_id': '<id>'},\n schema='AttendeeSchema',\n many=True,\n type_='attendee')\n\n tickets = Relationship(self_view='v1.order_ticket',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'order_id': '<id>'},\n schema='TicketSchema',\n many=True,\n type_=\"ticket\")\n\n user = Relationship(self_view='v1.order_user',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<user_id>'},\n schema='UserSchema',\n type_=\"user\")\n\n event = Relationship(self_view='v1.order_event',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.event_detail',\n related_view_kwargs={'id': '<event_id>'},\n schema='EventSchema',\n type_=\"event\")\n\n marketer = Relationship(self_view='v1.order_marketer',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<marketer_id>'},\n schema='UserSchema',\n type_=\"user\")\n\n discount_code = Relationship(self_view='v1.order_discount',\n self_view_kwargs={'identifier': '<identifier>'},\n related_view='v1.discount_code_detail',\n related_view_kwargs={'id': '<discount_code_id>'},\n schema='DiscountCodeSchema',\n type_=\"discount-code\")\n\n\nclass OrdersListPost(ResourceList):\n def before_post(self, args, kwargs, data=None):\n require_relationship(['event', 'attendees'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n data['status'] = 'pending'\n\n def before_create_object(self, data, view_kwargs):\n # Apply discount only if the user is not event admin\n if data.get('discount') and not has_access('is_coorganizer', event_id=data['event']):\n discount_code = safe_query(self, DiscountCode, 'id', data['discount'], 'discount_code_id')\n if not discount_code.is_active:\n raise UnprocessableEntity({'source': 'discount_code_id'}, \"Inactive Discount Code\")\n else:\n now = datetime.utcnow()\n valid_from = datetime.strptime(discount_code.valid_from, '%Y-%m-%d %H:%M:%S')\n valid_till = datetime.strptime(discount_code.valid_till, '%Y-%m-%d %H:%M:%S')\n if not (valid_from <= now <= valid_till):\n raise UnprocessableEntity({'source': 'discount_code_id'}, \"Inactive Discount Code\")\n if not TicketingManager.match_discount_quantity(discount_code, data['ticket_holders']):\n raise UnprocessableEntity({'source': 'discount_code_id'}, 'Discount Usage Exceeded')\n\n if discount_code.event.id != data['event'] and discount_code.user_for == TICKET:\n raise UnprocessableEntity({'source': 'discount_code_id'}, \"Invalid Discount Code\")\n\n def after_create_object(self, order, data, view_kwargs):\n order_tickets = {}\n for holder in order.ticket_holders:\n if order_tickets.get(holder.ticket_id) is None:\n order_tickets[holder.ticket_id] = 1\n else:\n order_tickets[holder.ticket_id] += 1\n for ticket in order_tickets:\n od = OrderTicket(order_id=order.id, ticket_id=ticket, quantity=order_tickets[ticket])\n save_to_db(od)\n order.quantity = order.get_tickets_count()\n save_to_db(order)\n if not has_access('is_coorganizer', **view_kwargs):\n TicketingManager.calculate_update_amount(order)\n\n data['user_id'] = current_user.id\n\n methods = ['POST', ]\n decorators = (jwt_required,)\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order,\n 'methods': {'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass OrdersList(ResourceList):\n def before_get(self, args, kwargs):\n if kwargs.get('event_id') is None:\n if 'GET' in request.method and has_access('is_admin'):\n pass\n else:\n raise ForbiddenException({'source': ''}, \"Admin Access Required\")\n elif not has_access('is_coorganizer', event_id=kwargs['event_id']):\n raise ForbiddenException({'source': ''}, \"Co-Organizer Access Required\")\n\n decorators = (jwt_required,)\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order}\n\n\nclass OrderDetail(ResourceDetail):\n def before_get_object(self, view_kwargs):\n if view_kwargs.get('identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['identifier'], 'order_identifier')\n view_kwargs['id'] = order.id\n\n def before_update_object(self, order, data, view_kwargs):\n if data.get('status'):\n if has_access('is_coorganizer', event_id=order.event.id):\n pass\n else:\n raise ForbiddenException({'pointer': 'data/status'},\n \"To update status minimum Co-organizer access required\")\n\n decorators = (api.has_permission('is_coorganizer', fetch=\"event_id\", fetch_as=\"event_id\", model=Order),)\n\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order,\n 'methods': {'before_update_object': before_update_object}}\n\n\nclass OrderRelationship(ResourceRelationship):\n decorators = (jwt_required,)\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order}\n\n\nclass ChargeSchema(Schema):\n class Meta:\n type_ = 'charge'\n inflect = dasherize\n self_view = 'v1.charge_list'\n self_view_kwargs = {'id': '<id>'}\n\n id = fields.Str(dump_only=True)\n stripe = fields.Str(allow_none=True)\n\n\nclass ChargeList(ResourceList):\n methods = ['POST', ]\n schema = ChargeSchema\n\n data_layer = {\n 'class': ChargesLayer,\n 'session': db.session\n }\n", "path": "app/api/orders.py"}]}
| 2,830 | 111 |
gh_patches_debug_36226
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-2205
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SDK feature request: support soft constraint for scheduling a component to a preemptible node pool
From a pipeline, it would be useful to have an option to schedule onto a pre-emptible node if such a node pool is available, otherwise fall back to using non-preemptible.
(Under the hood this can apparently be done with node affinity and `preferredDuringSchedulingIgnoredDuringExecution`).
Ning's suggestion: expose config in the `gcp.use_preemptible_nodepool()` call to specify whether the constraint should be hard or soft.
/assign @gaoning777 (as per discussion)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/gcp.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from kubernetes.client import V1Toleration
16
17 def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
18 """An operator that configures the container to use GCP service account.
19
20 The user-gcp-sa secret is created as part of the kubeflow deployment that
21 stores the access token for kubeflow user service account.
22
23 With this service account, the container has a range of GCP APIs to
24 access to. This service account is automatically created as part of the
25 kubeflow deployment.
26
27 For the list of the GCP APIs this service account can access to, check
28 https://github.com/kubeflow/kubeflow/blob/7b0db0d92d65c0746ac52b000cbc290dac7c62b1/deployment/gke/deployment_manager_configs/iam_bindings_template.yaml#L18
29
30 If you want to call the GCP APIs in a different project, grant the kf-user
31 service account access permission.
32 """
33
34 # permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']
35 if secret_file_path_in_volume is None:
36 secret_file_path_in_volume = '/' + secret_name + '.json'
37
38 if volume_name is None:
39 volume_name = 'gcp-credentials-' + secret_name
40
41 else:
42 import warnings
43 warnings.warn('The volume_name parameter is deprecated and will be removed in next release. The volume names are now generated automatically.', DeprecationWarning)
44
45 def _use_gcp_secret(task):
46 from kubernetes import client as k8s_client
47 return (
48 task
49 .add_volume(
50 k8s_client.V1Volume(
51 name=volume_name,
52 secret=k8s_client.V1SecretVolumeSource(
53 secret_name=secret_name,
54 )
55 )
56 )
57 .add_volume_mount(
58 k8s_client.V1VolumeMount(
59 name=volume_name,
60 mount_path=secret_volume_mount_path,
61 )
62 )
63 .add_env_variable(
64 k8s_client.V1EnvVar(
65 name='GOOGLE_APPLICATION_CREDENTIALS',
66 value=secret_volume_mount_path + secret_file_path_in_volume,
67 )
68 )
69 .add_env_variable(
70 k8s_client.V1EnvVar(
71 name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
72 value=secret_volume_mount_path + secret_file_path_in_volume,
73 )
74 ) # Set GCloud Credentials by using the env var override.
75 # TODO: Is there a better way for GCloud to pick up the credential?
76 )
77
78 return _use_gcp_secret
79
80 def use_tpu(tpu_cores: int, tpu_resource: str, tf_version: str):
81 """An operator that configures GCP TPU spec in a container op.
82
83 Args:
84 tpu_cores: Required. The number of cores of TPU resource.
85 For example, the value can be '8', '32', '128', etc.
86 Check more details at: https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.
87 tpu_resource: Required. The resource name of the TPU resource.
88 For example, the value can be 'v2', 'preemptible-v1', 'v3' or 'preemptible-v3'.
89 Check more details at: https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.
90 tf_version: Required. The TensorFlow version that the TPU nodes use.
91 For example, the value can be '1.12', '1.11', '1.9' or '1.8'.
92 Check more details at: https://cloud.google.com/tpu/docs/supported-versions.
93 """
94
95 def _set_tpu_spec(task):
96 task.add_pod_annotation('tf-version.cloud-tpus.google.com', tf_version)
97 task.add_resource_limit('cloud-tpus.google.com/{}'.format(tpu_resource), str(tpu_cores))
98 return task
99
100 return _set_tpu_spec
101
102 def use_preemptible_nodepool(toleration: V1Toleration = V1Toleration(effect='NoSchedule',
103 key='preemptible',
104 operator='Equal',
105 value='true')):
106 """An operator that configures the GKE preemptible in a container op.
107 """
108
109 def _set_preemptible(task):
110 task.add_toleration(toleration)
111 task.add_node_selector_constraint("cloud.google.com/gke-preemptible", "true")
112 return task
113
114 return _set_preemptible
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/kfp/gcp.py b/sdk/python/kfp/gcp.py
--- a/sdk/python/kfp/gcp.py
+++ b/sdk/python/kfp/gcp.py
@@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from kubernetes.client import V1Toleration
+from kubernetes.client import V1Toleration, V1Affinity, V1NodeAffinity, \
+ V1NodeSelector, V1NodeSelectorTerm, V1NodeSelectorRequirement, V1PreferredSchedulingTerm
def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
"""An operator that configures the container to use GCP service account.
@@ -102,13 +103,31 @@
def use_preemptible_nodepool(toleration: V1Toleration = V1Toleration(effect='NoSchedule',
key='preemptible',
operator='Equal',
- value='true')):
+ value='true'),
+ hard_constraint: bool = False):
"""An operator that configures the GKE preemptible in a container op.
+ Args:
+ toleration (V1Toleration): toleration to pods, default is the preemptible label.
+ hard_constraint (bool): the constraint of scheduling the pods on preemptible
+ nodepools is hard. (Default: False)
"""
def _set_preemptible(task):
task.add_toleration(toleration)
- task.add_node_selector_constraint("cloud.google.com/gke-preemptible", "true")
+ node_selector_term = V1NodeSelectorTerm(match_expressions=[
+ V1NodeSelectorRequirement(key='cloud.google.com/gke-preemptible',
+ operator='In',
+ values=['true'])]
+ )
+ if hard_constraint:
+ node_affinity = V1NodeAffinity(required_during_scheduling_ignored_during_execution=
+ V1NodeSelector(node_selector_terms=[node_selector_term]))
+ else:
+ node_affinity = V1NodeAffinity(preferred_during_scheduling_ignored_during_execution=
+ V1PreferredSchedulingTerm(preference=node_selector_term,
+ weight=50))
+ affinity = V1Affinity(node_affinity=node_affinity)
+ task.add_affinity(affinity=affinity)
return task
return _set_preemptible
|
{"golden_diff": "diff --git a/sdk/python/kfp/gcp.py b/sdk/python/kfp/gcp.py\n--- a/sdk/python/kfp/gcp.py\n+++ b/sdk/python/kfp/gcp.py\n@@ -12,7 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from kubernetes.client import V1Toleration\n+from kubernetes.client import V1Toleration, V1Affinity, V1NodeAffinity, \\\n+ V1NodeSelector, V1NodeSelectorTerm, V1NodeSelectorRequirement, V1PreferredSchedulingTerm\n \n def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):\n \"\"\"An operator that configures the container to use GCP service account.\n@@ -102,13 +103,31 @@\n def use_preemptible_nodepool(toleration: V1Toleration = V1Toleration(effect='NoSchedule',\n key='preemptible',\n operator='Equal',\n- value='true')):\n+ value='true'),\n+ hard_constraint: bool = False):\n \"\"\"An operator that configures the GKE preemptible in a container op.\n+ Args:\n+ toleration (V1Toleration): toleration to pods, default is the preemptible label.\n+ hard_constraint (bool): the constraint of scheduling the pods on preemptible\n+ nodepools is hard. (Default: False)\n \"\"\"\n \n def _set_preemptible(task):\n task.add_toleration(toleration)\n- task.add_node_selector_constraint(\"cloud.google.com/gke-preemptible\", \"true\")\n+ node_selector_term = V1NodeSelectorTerm(match_expressions=[\n+ V1NodeSelectorRequirement(key='cloud.google.com/gke-preemptible',\n+ operator='In',\n+ values=['true'])]\n+ )\n+ if hard_constraint:\n+ node_affinity = V1NodeAffinity(required_during_scheduling_ignored_during_execution=\n+ V1NodeSelector(node_selector_terms=[node_selector_term]))\n+ else:\n+ node_affinity = V1NodeAffinity(preferred_during_scheduling_ignored_during_execution=\n+ V1PreferredSchedulingTerm(preference=node_selector_term,\n+ weight=50))\n+ affinity = V1Affinity(node_affinity=node_affinity)\n+ task.add_affinity(affinity=affinity)\n return task\n \n return _set_preemptible\n", "issue": "SDK feature request: support soft constraint for scheduling a component to a preemptible node pool\nFrom a pipeline, it would be useful to have an option to schedule onto a pre-emptible node if such a node pool is available, otherwise fall back to using non-preemptible.\r\n(Under the hood this can apparently be done with node affinity and `preferredDuringSchedulingIgnoredDuringExecution`).\r\n\r\nNing's suggestion: expose config in the `gcp.use_preemptible_nodepool()` call to specify whether the constraint should be hard or soft.\r\n\r\n/assign @gaoning777 (as per discussion)\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom kubernetes.client import V1Toleration\n\ndef use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):\n \"\"\"An operator that configures the container to use GCP service account.\n\n The user-gcp-sa secret is created as part of the kubeflow deployment that\n stores the access token for kubeflow user service account.\n\n With this service account, the container has a range of GCP APIs to\n access to. This service account is automatically created as part of the\n kubeflow deployment.\n\n For the list of the GCP APIs this service account can access to, check\n https://github.com/kubeflow/kubeflow/blob/7b0db0d92d65c0746ac52b000cbc290dac7c62b1/deployment/gke/deployment_manager_configs/iam_bindings_template.yaml#L18\n\n If you want to call the GCP APIs in a different project, grant the kf-user\n service account access permission.\n \"\"\"\n\n # permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']\n if secret_file_path_in_volume is None:\n secret_file_path_in_volume = '/' + secret_name + '.json'\n\n if volume_name is None:\n volume_name = 'gcp-credentials-' + secret_name\n\n else:\n import warnings\n warnings.warn('The volume_name parameter is deprecated and will be removed in next release. The volume names are now generated automatically.', DeprecationWarning)\n \n def _use_gcp_secret(task):\n from kubernetes import client as k8s_client\n return (\n task\n .add_volume(\n k8s_client.V1Volume(\n name=volume_name,\n secret=k8s_client.V1SecretVolumeSource(\n secret_name=secret_name,\n )\n )\n )\n .add_volume_mount(\n k8s_client.V1VolumeMount(\n name=volume_name,\n mount_path=secret_volume_mount_path,\n )\n )\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='GOOGLE_APPLICATION_CREDENTIALS',\n value=secret_volume_mount_path + secret_file_path_in_volume,\n )\n )\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',\n value=secret_volume_mount_path + secret_file_path_in_volume,\n )\n ) # Set GCloud Credentials by using the env var override.\n # TODO: Is there a better way for GCloud to pick up the credential?\n )\n \n return _use_gcp_secret\n\ndef use_tpu(tpu_cores: int, tpu_resource: str, tf_version: str):\n \"\"\"An operator that configures GCP TPU spec in a container op.\n\n Args:\n tpu_cores: Required. The number of cores of TPU resource. \n For example, the value can be '8', '32', '128', etc.\n Check more details at: https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.\n tpu_resource: Required. The resource name of the TPU resource. \n For example, the value can be 'v2', 'preemptible-v1', 'v3' or 'preemptible-v3'.\n Check more details at: https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.\n tf_version: Required. The TensorFlow version that the TPU nodes use.\n For example, the value can be '1.12', '1.11', '1.9' or '1.8'.\n Check more details at: https://cloud.google.com/tpu/docs/supported-versions.\n \"\"\"\n\n def _set_tpu_spec(task):\n task.add_pod_annotation('tf-version.cloud-tpus.google.com', tf_version)\n task.add_resource_limit('cloud-tpus.google.com/{}'.format(tpu_resource), str(tpu_cores))\n return task\n\n return _set_tpu_spec\n\ndef use_preemptible_nodepool(toleration: V1Toleration = V1Toleration(effect='NoSchedule',\n key='preemptible',\n operator='Equal',\n value='true')):\n \"\"\"An operator that configures the GKE preemptible in a container op.\n \"\"\"\n\n def _set_preemptible(task):\n task.add_toleration(toleration)\n task.add_node_selector_constraint(\"cloud.google.com/gke-preemptible\", \"true\")\n return task\n\n return _set_preemptible\n", "path": "sdk/python/kfp/gcp.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom kubernetes.client import V1Toleration, V1Affinity, V1NodeAffinity, \\\n V1NodeSelector, V1NodeSelectorTerm, V1NodeSelectorRequirement, V1PreferredSchedulingTerm\n\ndef use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):\n \"\"\"An operator that configures the container to use GCP service account.\n\n The user-gcp-sa secret is created as part of the kubeflow deployment that\n stores the access token for kubeflow user service account.\n\n With this service account, the container has a range of GCP APIs to\n access to. This service account is automatically created as part of the\n kubeflow deployment.\n\n For the list of the GCP APIs this service account can access to, check\n https://github.com/kubeflow/kubeflow/blob/7b0db0d92d65c0746ac52b000cbc290dac7c62b1/deployment/gke/deployment_manager_configs/iam_bindings_template.yaml#L18\n\n If you want to call the GCP APIs in a different project, grant the kf-user\n service account access permission.\n \"\"\"\n\n # permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']\n if secret_file_path_in_volume is None:\n secret_file_path_in_volume = '/' + secret_name + '.json'\n\n if volume_name is None:\n volume_name = 'gcp-credentials-' + secret_name\n\n else:\n import warnings\n warnings.warn('The volume_name parameter is deprecated and will be removed in next release. The volume names are now generated automatically.', DeprecationWarning)\n \n def _use_gcp_secret(task):\n from kubernetes import client as k8s_client\n return (\n task\n .add_volume(\n k8s_client.V1Volume(\n name=volume_name,\n secret=k8s_client.V1SecretVolumeSource(\n secret_name=secret_name,\n )\n )\n )\n .add_volume_mount(\n k8s_client.V1VolumeMount(\n name=volume_name,\n mount_path=secret_volume_mount_path,\n )\n )\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='GOOGLE_APPLICATION_CREDENTIALS',\n value=secret_volume_mount_path + secret_file_path_in_volume,\n )\n )\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',\n value=secret_volume_mount_path + secret_file_path_in_volume,\n )\n ) # Set GCloud Credentials by using the env var override.\n # TODO: Is there a better way for GCloud to pick up the credential?\n )\n \n return _use_gcp_secret\n\ndef use_tpu(tpu_cores: int, tpu_resource: str, tf_version: str):\n \"\"\"An operator that configures GCP TPU spec in a container op.\n\n Args:\n tpu_cores: Required. The number of cores of TPU resource. \n For example, the value can be '8', '32', '128', etc.\n Check more details at: https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.\n tpu_resource: Required. The resource name of the TPU resource. \n For example, the value can be 'v2', 'preemptible-v1', 'v3' or 'preemptible-v3'.\n Check more details at: https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.\n tf_version: Required. The TensorFlow version that the TPU nodes use.\n For example, the value can be '1.12', '1.11', '1.9' or '1.8'.\n Check more details at: https://cloud.google.com/tpu/docs/supported-versions.\n \"\"\"\n\n def _set_tpu_spec(task):\n task.add_pod_annotation('tf-version.cloud-tpus.google.com', tf_version)\n task.add_resource_limit('cloud-tpus.google.com/{}'.format(tpu_resource), str(tpu_cores))\n return task\n\n return _set_tpu_spec\n\ndef use_preemptible_nodepool(toleration: V1Toleration = V1Toleration(effect='NoSchedule',\n key='preemptible',\n operator='Equal',\n value='true'),\n hard_constraint: bool = False):\n \"\"\"An operator that configures the GKE preemptible in a container op.\n Args:\n toleration (V1Toleration): toleration to pods, default is the preemptible label.\n hard_constraint (bool): the constraint of scheduling the pods on preemptible\n nodepools is hard. (Default: False)\n \"\"\"\n\n def _set_preemptible(task):\n task.add_toleration(toleration)\n node_selector_term = V1NodeSelectorTerm(match_expressions=[\n V1NodeSelectorRequirement(key='cloud.google.com/gke-preemptible',\n operator='In',\n values=['true'])]\n )\n if hard_constraint:\n node_affinity = V1NodeAffinity(required_during_scheduling_ignored_during_execution=\n V1NodeSelector(node_selector_terms=[node_selector_term]))\n else:\n node_affinity = V1NodeAffinity(preferred_during_scheduling_ignored_during_execution=\n V1PreferredSchedulingTerm(preference=node_selector_term,\n weight=50))\n affinity = V1Affinity(node_affinity=node_affinity)\n task.add_affinity(affinity=affinity)\n return task\n\n return _set_preemptible\n", "path": "sdk/python/kfp/gcp.py"}]}
| 1,778 | 552 |
gh_patches_debug_23404
|
rasdani/github-patches
|
git_diff
|
openshift__openshift-ansible-11070
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check for file paths outside of /etc/origin/master in master's config fails on auditConfig.policyConfiguratio
#### Description
The openshift_control_plane "Check for file paths outside of /etc/origin/master in master's config" fails on auditConfig policyConfiguration that includes nonResourceURLs specifications by interpreting these as file paths.
##### Version
```
ansible 2.7.5
config file = /home/ansible/openshift-provision-demo/ansible.cfg
configured module search path = [u'/home/ansible/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.5 (default, Oct 30 2018, 23:45:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]
$ git describe
openshift-ansible-3.11.69-1-4-g0964a5f
```
##### Steps To Reproduce
1. Start with a master with /etc/origin/master/master-config.yaml containing:
```
auditConfig:
auditFilePath: /var/log/origin/audit.log
enabled: true
logFormat: json
policyConfiguration:
apiVersion: audit.k8s.io/v1beta1
omitStages:
- RequestReceived
rules:
- level: Metadata
nonResourceURLs:
- /login*
- /oauth*
```
2. Run openshift-ansible deploy
##### Expected Results
This check should not choke on the auditConfig policyConfiguration.
##### Observed Results
```
TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] ************************************************************************************************************
fatal: [demo-sbx-okd-v3-11-master-0.c.openshift-provision.internal]: FAILED! => {"msg": "A string value that appears to be a file path located outside of\n/dev/null, /etc/origin/master/, /var/lib/origin, /etc/origin/cloudprovider, /etc/origin/kubelet-plugins, /usr/libexec/kubernetes/kubelet-plugins, /var/log/origin has been found in /etc/origin/master/master-config.yaml.\nIn 3.10 and newer, all files needed by the master must reside inside of\nthose directories or a subdirectory or it will not be readable by the\nmaster process. Please migrate all files needed by the master into\none of /dev/null, /etc/origin/master/, /var/lib/origin, /etc/origin/cloudprovider, /etc/origin/kubelet-plugins, /usr/libexec/kubernetes/kubelet-plugins, /var/log/origin or a subdirectory and update your master configs before\nproceeding. The string found was: /login*\n***********************\nNOTE: the following items do not need to be migrated, they will be migrated\nfor you: oauthConfig.identityProviders"}
NO MORE HOSTS LEFT ************************************************************************************************************************************************************************************************
to retry, use: --limit @/home/ansible/openshift-provision-demo/provision-cluster/bootstrap.retry
PLAY RECAP ********************************************************************************************************************************************************************************************************
demo-sbx-okd-v3-11-image.c.openshift-provision.internal : ok=25 changed=0 unreachable=0 failed=0
demo-sbx-okd-v3-11-master-0.c.openshift-provision.internal : ok=28 changed=0 unreachable=0 failed=1
localhost : ok=59 changed=0 unreachable=0 failed=0
INSTALLER STATUS **************************************************************************************************************************************************************************************************
Initialization : In Progress (0:00:07)
Failure summary:
1. Hosts: demo-sbx-okd-v3-11-master-0.c.openshift-provision.internal
Play: Retrieve existing master configs and validate
Task: Check for file paths outside of /etc/origin/master in master's config
Message: A string value that appears to be a file path located outside of
/dev/null, /etc/origin/master/, /var/lib/origin, /etc/origin/cloudprovider, /etc/origin/kubelet-plugins, /usr/libexec/kubernetes/kubelet-plugins, /var/log/origin has been found in /etc/origin/master/master-config.yaml.
In 3.10 and newer, all files needed by the master must reside inside of
those directories or a subdirectory or it will not be readable by the
master process. Please migrate all files needed by the master into
one of /dev/null, /etc/origin/master/, /var/lib/origin, /etc/origin/cloudprovider, /etc/origin/kubelet-plugins, /usr/libexec/kubernetes/kubelet-plugins, /var/log/origin or a subdirectory and update your master configs before
proceeding. The string found was: /login*
***********************
NOTE: the following items do not need to be migrated, they will be migrated
for you: oauthConfig.identityProviders
```
##### Additional Information
This has behavior has been observed with OKD 3.11 and OCP 3.11 deployments.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/lib_utils/action_plugins/master_check_paths_in_config.py`
Content:
```
1 """
2 Ansible action plugin to ensure inventory variables are set
3 appropriately and no conflicting options have been provided.
4 """
5 import collections
6 import six
7
8 from ansible.plugins.action import ActionBase
9 from ansible import errors
10
11
12 FAIL_MSG = """A string value that appears to be a file path located outside of
13 {} has been found in /etc/origin/master/master-config.yaml.
14 In 3.10 and newer, all files needed by the master must reside inside of
15 those directories or a subdirectory or it will not be readable by the
16 master process. Please migrate all files needed by the master into
17 one of {} or a subdirectory and update your master configs before
18 proceeding. The string found was: {}
19 ***********************
20 NOTE: the following items do not need to be migrated, they will be migrated
21 for you: {}"""
22
23
24 ITEMS_TO_POP = (
25 ('oauthConfig', 'identityProviders'),
26 )
27 # Create csv string of dot-separated dictionary keys:
28 # eg: 'oathConfig.identityProviders, something.else.here'
29 MIGRATED_ITEMS = ", ".join([".".join(x) for x in ITEMS_TO_POP])
30
31 ALLOWED_DIRS = (
32 '/etc/origin/master/',
33 '/var/lib/origin',
34 '/etc/origin/cloudprovider',
35 '/etc/origin/kubelet-plugins',
36 '/usr/libexec/kubernetes/kubelet-plugins',
37 '/var/log/origin',
38 )
39
40 ALLOWED_DIRS_STRING = ', '.join(ALLOWED_DIRS)
41
42
43 def pop_migrated_fields(mastercfg):
44 """Some fields do not need to be searched because they will be migrated
45 for users automatically"""
46 # Walk down the tree and pop the specific item we migrate / don't care about
47 for item in ITEMS_TO_POP:
48 field = mastercfg
49 for sub_field in item:
50 parent_field = field
51 field = field[sub_field]
52 parent_field.pop(item[len(item) - 1])
53
54
55 def do_item_check(val, strings_to_check):
56 """Check type of val, append to strings_to_check if string, otherwise if
57 it's a dictionary-like object call walk_mapping, if it's a list-like
58 object call walk_sequence, else ignore."""
59 if isinstance(val, six.string_types):
60 strings_to_check.append(val)
61 elif isinstance(val, collections.Sequence):
62 # A list-like object
63 walk_sequence(val, strings_to_check)
64 elif isinstance(val, collections.Mapping):
65 # A dictionary-like object
66 walk_mapping(val, strings_to_check)
67 # If it's not a string, list, or dictionary, we're not interested.
68
69
70 def walk_sequence(items, strings_to_check):
71 """Walk recursively through a list, items"""
72 for item in items:
73 do_item_check(item, strings_to_check)
74
75
76 def walk_mapping(map_to_walk, strings_to_check):
77 """Walk recursively through map_to_walk dictionary and add strings to
78 strings_to_check"""
79 for _, val in map_to_walk.items():
80 do_item_check(val, strings_to_check)
81
82
83 def check_strings(strings_to_check):
84 """Check the strings we found to see if they look like file paths and if
85 they are, fail if not start with /etc/origin/master"""
86 for item in strings_to_check:
87 if item.startswith('/') or item.startswith('../'):
88 matches = 0
89 for allowed in ALLOWED_DIRS:
90 if item.startswith(allowed):
91 matches += 1
92 if matches == 0:
93 raise errors.AnsibleModuleError(
94 FAIL_MSG.format(ALLOWED_DIRS_STRING,
95 ALLOWED_DIRS_STRING,
96 item, MIGRATED_ITEMS))
97
98
99 # pylint: disable=R0903
100 class ActionModule(ActionBase):
101 """Action plugin to validate no files are needed by master that reside
102 outside of /etc/origin/master as masters will now run as pods and cannot
103 utilize files outside of that path as they will not be mounted inside the
104 containers."""
105 def run(self, tmp=None, task_vars=None):
106 """Run this action module"""
107 result = super(ActionModule, self).run(tmp, task_vars)
108
109 # self.task_vars holds all in-scope variables.
110 # Ignore settting self.task_vars outside of init.
111 # pylint: disable=W0201
112 self.task_vars = task_vars or {}
113
114 # mastercfg should be a dictionary from scraping an existing master's
115 # config yaml file.
116 mastercfg = self._task.args.get('mastercfg')
117
118 # We migrate some paths for users automatically, so we pop those.
119 pop_migrated_fields(mastercfg)
120
121 # Create an empty list to append strings from our config file to to check
122 # later.
123 strings_to_check = []
124
125 walk_mapping(mastercfg, strings_to_check)
126
127 check_strings(strings_to_check)
128
129 result["changed"] = False
130 result["failed"] = False
131 result["msg"] = "Aight, configs looking good"
132 return result
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/roles/lib_utils/action_plugins/master_check_paths_in_config.py b/roles/lib_utils/action_plugins/master_check_paths_in_config.py
--- a/roles/lib_utils/action_plugins/master_check_paths_in_config.py
+++ b/roles/lib_utils/action_plugins/master_check_paths_in_config.py
@@ -22,6 +22,7 @@
ITEMS_TO_POP = (
+ ('auditConfig', 'policyConfiguration'),
('oauthConfig', 'identityProviders'),
)
# Create csv string of dot-separated dictionary keys:
@@ -44,12 +45,19 @@
"""Some fields do not need to be searched because they will be migrated
for users automatically"""
# Walk down the tree and pop the specific item we migrate / don't care about
- for item in ITEMS_TO_POP:
- field = mastercfg
- for sub_field in item:
- parent_field = field
- field = field[sub_field]
- parent_field.pop(item[len(item) - 1])
+ for field_path in ITEMS_TO_POP:
+ pop_migrated_field(mastercfg, field_path)
+
+
+def pop_migrated_field(mastercfg, field_path):
+ """Remove field at given path from config"""
+ field = mastercfg
+ for sub_field in field_path:
+ parent_field = field
+ if sub_field not in field:
+ return
+ field = field[sub_field]
+ parent_field.pop(field_path[-1])
def do_item_check(val, strings_to_check):
|
{"golden_diff": "diff --git a/roles/lib_utils/action_plugins/master_check_paths_in_config.py b/roles/lib_utils/action_plugins/master_check_paths_in_config.py\n--- a/roles/lib_utils/action_plugins/master_check_paths_in_config.py\n+++ b/roles/lib_utils/action_plugins/master_check_paths_in_config.py\n@@ -22,6 +22,7 @@\n \n \n ITEMS_TO_POP = (\n+ ('auditConfig', 'policyConfiguration'),\n ('oauthConfig', 'identityProviders'),\n )\n # Create csv string of dot-separated dictionary keys:\n@@ -44,12 +45,19 @@\n \"\"\"Some fields do not need to be searched because they will be migrated\n for users automatically\"\"\"\n # Walk down the tree and pop the specific item we migrate / don't care about\n- for item in ITEMS_TO_POP:\n- field = mastercfg\n- for sub_field in item:\n- parent_field = field\n- field = field[sub_field]\n- parent_field.pop(item[len(item) - 1])\n+ for field_path in ITEMS_TO_POP:\n+ pop_migrated_field(mastercfg, field_path)\n+\n+\n+def pop_migrated_field(mastercfg, field_path):\n+ \"\"\"Remove field at given path from config\"\"\"\n+ field = mastercfg\n+ for sub_field in field_path:\n+ parent_field = field\n+ if sub_field not in field:\n+ return\n+ field = field[sub_field]\n+ parent_field.pop(field_path[-1])\n \n \n def do_item_check(val, strings_to_check):\n", "issue": "Check for file paths outside of /etc/origin/master in master's config fails on auditConfig.policyConfiguratio\n#### Description\r\n\r\nThe openshift_control_plane \"Check for file paths outside of /etc/origin/master in master's config\" fails on auditConfig policyConfiguration that includes nonResourceURLs specifications by interpreting these as file paths.\r\n\r\n##### Version\r\n\r\n\r\n\r\n```\r\nansible 2.7.5\r\n config file = /home/ansible/openshift-provision-demo/ansible.cfg\r\n configured module search path = [u'/home/ansible/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.5 (default, Oct 30 2018, 23:45:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]\r\n$ git describe \r\nopenshift-ansible-3.11.69-1-4-g0964a5f\r\n```\r\n\r\n##### Steps To Reproduce\r\n\r\n1. Start with a master with /etc/origin/master/master-config.yaml containing:\r\n\r\n```\r\nauditConfig:\r\n auditFilePath: /var/log/origin/audit.log\r\n enabled: true\r\n logFormat: json\r\n policyConfiguration:\r\n apiVersion: audit.k8s.io/v1beta1\r\n omitStages:\r\n - RequestReceived\r\n rules:\r\n - level: Metadata\r\n nonResourceURLs:\r\n - /login*\r\n - /oauth*\r\n```\r\n\r\n2. Run openshift-ansible deploy\r\n\r\n##### Expected Results\r\n\r\nThis check should not choke on the auditConfig policyConfiguration.\r\n\r\n##### Observed Results\r\n\r\n```\r\nTASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] ************************************************************************************************************\r\nfatal: [demo-sbx-okd-v3-11-master-0.c.openshift-provision.internal]: FAILED! => {\"msg\": \"A string value that appears to be a file path located outside of\\n/dev/null, /etc/origin/master/, /var/lib/origin, /etc/origin/cloudprovider, /etc/origin/kubelet-plugins, /usr/libexec/kubernetes/kubelet-plugins, /var/log/origin has been found in /etc/origin/master/master-config.yaml.\\nIn 3.10 and newer, all files needed by the master must reside inside of\\nthose directories or a subdirectory or it will not be readable by the\\nmaster process. Please migrate all files needed by the master into\\none of /dev/null, /etc/origin/master/, /var/lib/origin, /etc/origin/cloudprovider, /etc/origin/kubelet-plugins, /usr/libexec/kubernetes/kubelet-plugins, /var/log/origin or a subdirectory and update your master configs before\\nproceeding. The string found was: /login*\\n***********************\\nNOTE: the following items do not need to be migrated, they will be migrated\\nfor you: oauthConfig.identityProviders\"}\r\n\r\nNO MORE HOSTS LEFT ************************************************************************************************************************************************************************************************\r\n\tto retry, use: --limit @/home/ansible/openshift-provision-demo/provision-cluster/bootstrap.retry\r\n\r\nPLAY RECAP ********************************************************************************************************************************************************************************************************\r\ndemo-sbx-okd-v3-11-image.c.openshift-provision.internal : ok=25 changed=0 unreachable=0 failed=0 \r\ndemo-sbx-okd-v3-11-master-0.c.openshift-provision.internal : ok=28 changed=0 unreachable=0 failed=1 \r\nlocalhost : ok=59 changed=0 unreachable=0 failed=0 \r\n\r\n\r\nINSTALLER STATUS **************************************************************************************************************************************************************************************************\r\nInitialization : In Progress (0:00:07)\r\n\r\n\r\nFailure summary:\r\n\r\n\r\n 1. Hosts: demo-sbx-okd-v3-11-master-0.c.openshift-provision.internal\r\n Play: Retrieve existing master configs and validate\r\n Task: Check for file paths outside of /etc/origin/master in master's config\r\n Message: A string value that appears to be a file path located outside of\r\n /dev/null, /etc/origin/master/, /var/lib/origin, /etc/origin/cloudprovider, /etc/origin/kubelet-plugins, /usr/libexec/kubernetes/kubelet-plugins, /var/log/origin has been found in /etc/origin/master/master-config.yaml.\r\n In 3.10 and newer, all files needed by the master must reside inside of\r\n those directories or a subdirectory or it will not be readable by the\r\n master process. Please migrate all files needed by the master into\r\n one of /dev/null, /etc/origin/master/, /var/lib/origin, /etc/origin/cloudprovider, /etc/origin/kubelet-plugins, /usr/libexec/kubernetes/kubelet-plugins, /var/log/origin or a subdirectory and update your master configs before\r\n proceeding. The string found was: /login*\r\n ***********************\r\n NOTE: the following items do not need to be migrated, they will be migrated\r\n for you: oauthConfig.identityProviders\r\n```\r\n\r\n##### Additional Information\r\n\r\nThis has behavior has been observed with OKD 3.11 and OCP 3.11 deployments.\n", "before_files": [{"content": "\"\"\"\nAnsible action plugin to ensure inventory variables are set\nappropriately and no conflicting options have been provided.\n\"\"\"\nimport collections\nimport six\n\nfrom ansible.plugins.action import ActionBase\nfrom ansible import errors\n\n\nFAIL_MSG = \"\"\"A string value that appears to be a file path located outside of\n{} has been found in /etc/origin/master/master-config.yaml.\nIn 3.10 and newer, all files needed by the master must reside inside of\nthose directories or a subdirectory or it will not be readable by the\nmaster process. Please migrate all files needed by the master into\none of {} or a subdirectory and update your master configs before\nproceeding. The string found was: {}\n***********************\nNOTE: the following items do not need to be migrated, they will be migrated\nfor you: {}\"\"\"\n\n\nITEMS_TO_POP = (\n ('oauthConfig', 'identityProviders'),\n)\n# Create csv string of dot-separated dictionary keys:\n# eg: 'oathConfig.identityProviders, something.else.here'\nMIGRATED_ITEMS = \", \".join([\".\".join(x) for x in ITEMS_TO_POP])\n\nALLOWED_DIRS = (\n '/etc/origin/master/',\n '/var/lib/origin',\n '/etc/origin/cloudprovider',\n '/etc/origin/kubelet-plugins',\n '/usr/libexec/kubernetes/kubelet-plugins',\n '/var/log/origin',\n)\n\nALLOWED_DIRS_STRING = ', '.join(ALLOWED_DIRS)\n\n\ndef pop_migrated_fields(mastercfg):\n \"\"\"Some fields do not need to be searched because they will be migrated\n for users automatically\"\"\"\n # Walk down the tree and pop the specific item we migrate / don't care about\n for item in ITEMS_TO_POP:\n field = mastercfg\n for sub_field in item:\n parent_field = field\n field = field[sub_field]\n parent_field.pop(item[len(item) - 1])\n\n\ndef do_item_check(val, strings_to_check):\n \"\"\"Check type of val, append to strings_to_check if string, otherwise if\n it's a dictionary-like object call walk_mapping, if it's a list-like\n object call walk_sequence, else ignore.\"\"\"\n if isinstance(val, six.string_types):\n strings_to_check.append(val)\n elif isinstance(val, collections.Sequence):\n # A list-like object\n walk_sequence(val, strings_to_check)\n elif isinstance(val, collections.Mapping):\n # A dictionary-like object\n walk_mapping(val, strings_to_check)\n # If it's not a string, list, or dictionary, we're not interested.\n\n\ndef walk_sequence(items, strings_to_check):\n \"\"\"Walk recursively through a list, items\"\"\"\n for item in items:\n do_item_check(item, strings_to_check)\n\n\ndef walk_mapping(map_to_walk, strings_to_check):\n \"\"\"Walk recursively through map_to_walk dictionary and add strings to\n strings_to_check\"\"\"\n for _, val in map_to_walk.items():\n do_item_check(val, strings_to_check)\n\n\ndef check_strings(strings_to_check):\n \"\"\"Check the strings we found to see if they look like file paths and if\n they are, fail if not start with /etc/origin/master\"\"\"\n for item in strings_to_check:\n if item.startswith('/') or item.startswith('../'):\n matches = 0\n for allowed in ALLOWED_DIRS:\n if item.startswith(allowed):\n matches += 1\n if matches == 0:\n raise errors.AnsibleModuleError(\n FAIL_MSG.format(ALLOWED_DIRS_STRING,\n ALLOWED_DIRS_STRING,\n item, MIGRATED_ITEMS))\n\n\n# pylint: disable=R0903\nclass ActionModule(ActionBase):\n \"\"\"Action plugin to validate no files are needed by master that reside\n outside of /etc/origin/master as masters will now run as pods and cannot\n utilize files outside of that path as they will not be mounted inside the\n containers.\"\"\"\n def run(self, tmp=None, task_vars=None):\n \"\"\"Run this action module\"\"\"\n result = super(ActionModule, self).run(tmp, task_vars)\n\n # self.task_vars holds all in-scope variables.\n # Ignore settting self.task_vars outside of init.\n # pylint: disable=W0201\n self.task_vars = task_vars or {}\n\n # mastercfg should be a dictionary from scraping an existing master's\n # config yaml file.\n mastercfg = self._task.args.get('mastercfg')\n\n # We migrate some paths for users automatically, so we pop those.\n pop_migrated_fields(mastercfg)\n\n # Create an empty list to append strings from our config file to to check\n # later.\n strings_to_check = []\n\n walk_mapping(mastercfg, strings_to_check)\n\n check_strings(strings_to_check)\n\n result[\"changed\"] = False\n result[\"failed\"] = False\n result[\"msg\"] = \"Aight, configs looking good\"\n return result\n", "path": "roles/lib_utils/action_plugins/master_check_paths_in_config.py"}], "after_files": [{"content": "\"\"\"\nAnsible action plugin to ensure inventory variables are set\nappropriately and no conflicting options have been provided.\n\"\"\"\nimport collections\nimport six\n\nfrom ansible.plugins.action import ActionBase\nfrom ansible import errors\n\n\nFAIL_MSG = \"\"\"A string value that appears to be a file path located outside of\n{} has been found in /etc/origin/master/master-config.yaml.\nIn 3.10 and newer, all files needed by the master must reside inside of\nthose directories or a subdirectory or it will not be readable by the\nmaster process. Please migrate all files needed by the master into\none of {} or a subdirectory and update your master configs before\nproceeding. The string found was: {}\n***********************\nNOTE: the following items do not need to be migrated, they will be migrated\nfor you: {}\"\"\"\n\n\nITEMS_TO_POP = (\n ('auditConfig', 'policyConfiguration'),\n ('oauthConfig', 'identityProviders'),\n)\n# Create csv string of dot-separated dictionary keys:\n# eg: 'oathConfig.identityProviders, something.else.here'\nMIGRATED_ITEMS = \", \".join([\".\".join(x) for x in ITEMS_TO_POP])\n\nALLOWED_DIRS = (\n '/etc/origin/master/',\n '/var/lib/origin',\n '/etc/origin/cloudprovider',\n '/etc/origin/kubelet-plugins',\n '/usr/libexec/kubernetes/kubelet-plugins',\n '/var/log/origin',\n)\n\nALLOWED_DIRS_STRING = ', '.join(ALLOWED_DIRS)\n\n\ndef pop_migrated_fields(mastercfg):\n \"\"\"Some fields do not need to be searched because they will be migrated\n for users automatically\"\"\"\n # Walk down the tree and pop the specific item we migrate / don't care about\n for field_path in ITEMS_TO_POP:\n pop_migrated_field(mastercfg, field_path)\n\n\ndef pop_migrated_field(mastercfg, field_path):\n \"\"\"Remove field at given path from config\"\"\"\n field = mastercfg\n for sub_field in field_path:\n parent_field = field\n if sub_field not in field:\n return\n field = field[sub_field]\n parent_field.pop(field_path[-1])\n\n\ndef do_item_check(val, strings_to_check):\n \"\"\"Check type of val, append to strings_to_check if string, otherwise if\n it's a dictionary-like object call walk_mapping, if it's a list-like\n object call walk_sequence, else ignore.\"\"\"\n if isinstance(val, six.string_types):\n strings_to_check.append(val)\n elif isinstance(val, collections.Sequence):\n # A list-like object\n walk_sequence(val, strings_to_check)\n elif isinstance(val, collections.Mapping):\n # A dictionary-like object\n walk_mapping(val, strings_to_check)\n # If it's not a string, list, or dictionary, we're not interested.\n\n\ndef walk_sequence(items, strings_to_check):\n \"\"\"Walk recursively through a list, items\"\"\"\n for item in items:\n do_item_check(item, strings_to_check)\n\n\ndef walk_mapping(map_to_walk, strings_to_check):\n \"\"\"Walk recursively through map_to_walk dictionary and add strings to\n strings_to_check\"\"\"\n for _, val in map_to_walk.items():\n do_item_check(val, strings_to_check)\n\n\ndef check_strings(strings_to_check):\n \"\"\"Check the strings we found to see if they look like file paths and if\n they are, fail if not start with /etc/origin/master\"\"\"\n for item in strings_to_check:\n if item.startswith('/') or item.startswith('../'):\n matches = 0\n for allowed in ALLOWED_DIRS:\n if item.startswith(allowed):\n matches += 1\n if matches == 0:\n raise errors.AnsibleModuleError(\n FAIL_MSG.format(ALLOWED_DIRS_STRING,\n ALLOWED_DIRS_STRING,\n item, MIGRATED_ITEMS))\n\n\n# pylint: disable=R0903\nclass ActionModule(ActionBase):\n \"\"\"Action plugin to validate no files are needed by master that reside\n outside of /etc/origin/master as masters will now run as pods and cannot\n utilize files outside of that path as they will not be mounted inside the\n containers.\"\"\"\n def run(self, tmp=None, task_vars=None):\n \"\"\"Run this action module\"\"\"\n result = super(ActionModule, self).run(tmp, task_vars)\n\n # self.task_vars holds all in-scope variables.\n # Ignore settting self.task_vars outside of init.\n # pylint: disable=W0201\n self.task_vars = task_vars or {}\n\n # mastercfg should be a dictionary from scraping an existing master's\n # config yaml file.\n mastercfg = self._task.args.get('mastercfg')\n\n # We migrate some paths for users automatically, so we pop those.\n pop_migrated_fields(mastercfg)\n\n # Create an empty list to append strings from our config file to to check\n # later.\n strings_to_check = []\n\n walk_mapping(mastercfg, strings_to_check)\n\n check_strings(strings_to_check)\n\n result[\"changed\"] = False\n result[\"failed\"] = False\n result[\"msg\"] = \"Aight, configs looking good\"\n return result\n", "path": "roles/lib_utils/action_plugins/master_check_paths_in_config.py"}]}
| 2,752 | 326 |
gh_patches_debug_14806
|
rasdani/github-patches
|
git_diff
|
koxudaxi__datamodel-code-generator-1826
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Importing code generator changes behavior of pyyaml
When importing ``datamodel_code_generator``, the behaviour of ``pyyaml`` is changed after the import, datetime objects suddenly are deserialized from yaml as `str` whereas before they were deserialized as `datetime` objects.
## Steps to reproduce:
### create clean venv
```bash
python3 -m venv blub
blub/bin/pip install datamodel-code-generator pyyaml
```
### Run this reproducer with the interpreter from the venv
```python
import yaml
import tempfile
import datetime
tmp=tempfile.NamedTemporaryFile(mode='w')
def write_read():
tmp=tempfile.NamedTemporaryFile(mode='w')
yaml.safe_dump({"datetime": datetime.datetime.now()}, tmp)
dumped_reloaded = yaml.safe_load(open(tmp.name,"r"))
print(dumped_reloaded)
print("before importing datamodel-code-generator:")
write_read()
import datamodel_code_generator
print("\nafter importing datamodel-code-generator:")
write_read()
```
**Actual output:** (time string + object may change depending on run time)
```
before importing datamodel-code-generator:
{'datetime': datetime.datetime(2024, 1, 30, 9, 51, 19, 612368)}
after importing datamodel-code-generator:
{'datetime': '2024-01-30 09:51:20.019991'}
```
**Expected output:** (time string + object may change depending on run time):
```
before importing datamodel-code-generator:
{'datetime': datetime.datetime(2024, 1, 30, 9, 51, 19, 612368)}
after importing datamodel-code-generator:
{'datetime': datetime.datetime(2024, 1, 30, 9, 51, 20, 019991)}
```
The reason seems to be that the SafeLoader of ``yaml`` is modified globally by ``datamodel-code-generator``
https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/datamodel_code_generator/util.py#L72
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datamodel_code_generator/util.py`
Content:
```
1 from __future__ import annotations
2
3 from pathlib import Path
4 from typing import TYPE_CHECKING, Any, Callable, Dict, TypeVar
5
6 import pydantic
7 from packaging import version
8 from pydantic import BaseModel as _BaseModel
9
10 PYDANTIC_VERSION = version.parse(
11 pydantic.VERSION if isinstance(pydantic.VERSION, str) else str(pydantic.VERSION)
12 )
13
14 PYDANTIC_V2: bool = PYDANTIC_VERSION >= version.parse('2.0b3')
15
16 if TYPE_CHECKING:
17 cached_property = property
18 from yaml import SafeLoader
19
20 Protocol = object
21 runtime_checkable: Callable[..., Any]
22
23 from typing_extensions import Literal
24
25 def load_toml(path: Path) -> Dict[str, Any]:
26 ...
27
28 else:
29 try:
30 from typing import Protocol
31 except ImportError:
32 from typing_extensions import Protocol # noqa
33 try:
34 from typing import runtime_checkable
35 except ImportError:
36 from typing_extensions import runtime_checkable # noqa
37 try:
38 from yaml import CSafeLoader as SafeLoader
39 except ImportError: # pragma: no cover
40 from yaml import SafeLoader
41
42 try:
43 from functools import cached_property
44 except ImportError:
45 _NOT_FOUND = object()
46
47 class cached_property:
48 def __init__(self, func: Callable) -> None:
49 self.func: Callable = func
50 self.__doc__: Any = func.__doc__
51
52 def __get__(self, instance: Any, owner: Any = None) -> Any:
53 value = instance.__dict__.get(self.func.__name__, _NOT_FOUND)
54 if value is _NOT_FOUND: # pragma: no cover
55 value = instance.__dict__[self.func.__name__] = self.func(instance)
56 return value
57
58 try:
59 import tomllib
60
61 def load_toml(path: Path) -> Dict[str, Any]:
62 with path.open('rb') as f:
63 return tomllib.load(f)
64
65 except ImportError:
66 import toml
67
68 def load_toml(path: Path) -> Dict[str, Any]:
69 return toml.load(path)
70
71
72 SafeLoader.yaml_constructors[
73 'tag:yaml.org,2002:timestamp'
74 ] = SafeLoader.yaml_constructors['tag:yaml.org,2002:str']
75
76 Model = TypeVar('Model', bound=_BaseModel)
77
78
79 def model_validator(
80 mode: Literal['before', 'after'] = 'after',
81 ) -> Callable[[Callable[[Model, Any], Any]], Callable[[Model, Any], Any]]:
82 def inner(method: Callable[[Model, Any], Any]) -> Callable[[Model, Any], Any]:
83 if PYDANTIC_V2:
84 from pydantic import model_validator as model_validator_v2
85
86 return model_validator_v2(mode=mode)(method) # type: ignore
87 else:
88 from pydantic import root_validator
89
90 return root_validator(method, pre=mode == 'before') # type: ignore
91
92 return inner
93
94
95 def field_validator(
96 field_name: str,
97 *fields: str,
98 mode: Literal['before', 'after'] = 'after',
99 ) -> Callable[[Any], Callable[[Model, Any], Any]]:
100 def inner(method: Callable[[Model, Any], Any]) -> Callable[[Model, Any], Any]:
101 if PYDANTIC_V2:
102 from pydantic import field_validator as field_validator_v2
103
104 return field_validator_v2(field_name, *fields, mode=mode)(method) # type: ignore
105 else:
106 from pydantic import validator
107
108 return validator(field_name, *fields, pre=mode == 'before')(method) # type: ignore
109
110 return inner
111
112
113 if PYDANTIC_V2:
114 from pydantic import ConfigDict as ConfigDict
115 else:
116 ConfigDict = dict # type: ignore
117
118
119 class BaseModel(_BaseModel):
120 if PYDANTIC_V2:
121 model_config = ConfigDict(strict=False)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/datamodel_code_generator/util.py b/datamodel_code_generator/util.py
--- a/datamodel_code_generator/util.py
+++ b/datamodel_code_generator/util.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import copy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, TypeVar
@@ -69,9 +70,13 @@
return toml.load(path)
-SafeLoader.yaml_constructors[
- 'tag:yaml.org,2002:timestamp'
-] = SafeLoader.yaml_constructors['tag:yaml.org,2002:str']
+SafeLoaderTemp = copy.deepcopy(SafeLoader)
+SafeLoaderTemp.yaml_constructors = copy.deepcopy(SafeLoader.yaml_constructors)
+SafeLoaderTemp.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeLoaderTemp.yaml_constructors['tag:yaml.org,2002:str'],
+)
+SafeLoader = SafeLoaderTemp
Model = TypeVar('Model', bound=_BaseModel)
|
{"golden_diff": "diff --git a/datamodel_code_generator/util.py b/datamodel_code_generator/util.py\n--- a/datamodel_code_generator/util.py\n+++ b/datamodel_code_generator/util.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+import copy\n from pathlib import Path\n from typing import TYPE_CHECKING, Any, Callable, Dict, TypeVar\n \n@@ -69,9 +70,13 @@\n return toml.load(path)\n \n \n-SafeLoader.yaml_constructors[\n- 'tag:yaml.org,2002:timestamp'\n-] = SafeLoader.yaml_constructors['tag:yaml.org,2002:str']\n+SafeLoaderTemp = copy.deepcopy(SafeLoader)\n+SafeLoaderTemp.yaml_constructors = copy.deepcopy(SafeLoader.yaml_constructors)\n+SafeLoaderTemp.add_constructor(\n+ 'tag:yaml.org,2002:timestamp',\n+ SafeLoaderTemp.yaml_constructors['tag:yaml.org,2002:str'],\n+)\n+SafeLoader = SafeLoaderTemp\n \n Model = TypeVar('Model', bound=_BaseModel)\n", "issue": "Importing code generator changes behavior of pyyaml\nWhen importing ``datamodel_code_generator``, the behaviour of ``pyyaml`` is changed after the import, datetime objects suddenly are deserialized from yaml as `str` whereas before they were deserialized as `datetime` objects.\r\n\r\n## Steps to reproduce:\r\n### create clean venv\r\n```bash\r\npython3 -m venv blub\r\nblub/bin/pip install datamodel-code-generator pyyaml\r\n```\r\n\r\n### Run this reproducer with the interpreter from the venv\r\n```python\r\nimport yaml\r\nimport tempfile\r\nimport datetime\r\n\r\ntmp=tempfile.NamedTemporaryFile(mode='w')\r\n\r\ndef write_read():\r\n tmp=tempfile.NamedTemporaryFile(mode='w')\r\n yaml.safe_dump({\"datetime\": datetime.datetime.now()}, tmp)\r\n dumped_reloaded = yaml.safe_load(open(tmp.name,\"r\"))\r\n print(dumped_reloaded)\r\n\r\nprint(\"before importing datamodel-code-generator:\")\r\nwrite_read()\r\n\r\nimport datamodel_code_generator\r\nprint(\"\\nafter importing datamodel-code-generator:\")\r\nwrite_read()\r\n```\r\n\r\n**Actual output:** (time string + object may change depending on run time)\r\n```\r\nbefore importing datamodel-code-generator:\r\n{'datetime': datetime.datetime(2024, 1, 30, 9, 51, 19, 612368)}\r\n\r\nafter importing datamodel-code-generator:\r\n{'datetime': '2024-01-30 09:51:20.019991'}\r\n```\r\n\r\n**Expected output:** (time string + object may change depending on run time):\r\n```\r\nbefore importing datamodel-code-generator:\r\n{'datetime': datetime.datetime(2024, 1, 30, 9, 51, 19, 612368)}\r\n\r\nafter importing datamodel-code-generator:\r\n{'datetime': datetime.datetime(2024, 1, 30, 9, 51, 20, 019991)}\r\n```\r\n\r\nThe reason seems to be that the SafeLoader of ``yaml`` is modified globally by ``datamodel-code-generator``\r\nhttps://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/datamodel_code_generator/util.py#L72\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, TypeVar\n\nimport pydantic\nfrom packaging import version\nfrom pydantic import BaseModel as _BaseModel\n\nPYDANTIC_VERSION = version.parse(\n pydantic.VERSION if isinstance(pydantic.VERSION, str) else str(pydantic.VERSION)\n)\n\nPYDANTIC_V2: bool = PYDANTIC_VERSION >= version.parse('2.0b3')\n\nif TYPE_CHECKING:\n cached_property = property\n from yaml import SafeLoader\n\n Protocol = object\n runtime_checkable: Callable[..., Any]\n\n from typing_extensions import Literal\n\n def load_toml(path: Path) -> Dict[str, Any]:\n ...\n\nelse:\n try:\n from typing import Protocol\n except ImportError:\n from typing_extensions import Protocol # noqa\n try:\n from typing import runtime_checkable\n except ImportError:\n from typing_extensions import runtime_checkable # noqa\n try:\n from yaml import CSafeLoader as SafeLoader\n except ImportError: # pragma: no cover\n from yaml import SafeLoader\n\n try:\n from functools import cached_property\n except ImportError:\n _NOT_FOUND = object()\n\n class cached_property:\n def __init__(self, func: Callable) -> None:\n self.func: Callable = func\n self.__doc__: Any = func.__doc__\n\n def __get__(self, instance: Any, owner: Any = None) -> Any:\n value = instance.__dict__.get(self.func.__name__, _NOT_FOUND)\n if value is _NOT_FOUND: # pragma: no cover\n value = instance.__dict__[self.func.__name__] = self.func(instance)\n return value\n\n try:\n import tomllib\n\n def load_toml(path: Path) -> Dict[str, Any]:\n with path.open('rb') as f:\n return tomllib.load(f)\n\n except ImportError:\n import toml\n\n def load_toml(path: Path) -> Dict[str, Any]:\n return toml.load(path)\n\n\nSafeLoader.yaml_constructors[\n 'tag:yaml.org,2002:timestamp'\n] = SafeLoader.yaml_constructors['tag:yaml.org,2002:str']\n\nModel = TypeVar('Model', bound=_BaseModel)\n\n\ndef model_validator(\n mode: Literal['before', 'after'] = 'after',\n) -> Callable[[Callable[[Model, Any], Any]], Callable[[Model, Any], Any]]:\n def inner(method: Callable[[Model, Any], Any]) -> Callable[[Model, Any], Any]:\n if PYDANTIC_V2:\n from pydantic import model_validator as model_validator_v2\n\n return model_validator_v2(mode=mode)(method) # type: ignore\n else:\n from pydantic import root_validator\n\n return root_validator(method, pre=mode == 'before') # type: ignore\n\n return inner\n\n\ndef field_validator(\n field_name: str,\n *fields: str,\n mode: Literal['before', 'after'] = 'after',\n) -> Callable[[Any], Callable[[Model, Any], Any]]:\n def inner(method: Callable[[Model, Any], Any]) -> Callable[[Model, Any], Any]:\n if PYDANTIC_V2:\n from pydantic import field_validator as field_validator_v2\n\n return field_validator_v2(field_name, *fields, mode=mode)(method) # type: ignore\n else:\n from pydantic import validator\n\n return validator(field_name, *fields, pre=mode == 'before')(method) # type: ignore\n\n return inner\n\n\nif PYDANTIC_V2:\n from pydantic import ConfigDict as ConfigDict\nelse:\n ConfigDict = dict # type: ignore\n\n\nclass BaseModel(_BaseModel):\n if PYDANTIC_V2:\n model_config = ConfigDict(strict=False)\n", "path": "datamodel_code_generator/util.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport copy\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, TypeVar\n\nimport pydantic\nfrom packaging import version\nfrom pydantic import BaseModel as _BaseModel\n\nPYDANTIC_VERSION = version.parse(\n pydantic.VERSION if isinstance(pydantic.VERSION, str) else str(pydantic.VERSION)\n)\n\nPYDANTIC_V2: bool = PYDANTIC_VERSION >= version.parse('2.0b3')\n\nif TYPE_CHECKING:\n cached_property = property\n from yaml import SafeLoader\n\n Protocol = object\n runtime_checkable: Callable[..., Any]\n\n from typing_extensions import Literal\n\n def load_toml(path: Path) -> Dict[str, Any]:\n ...\n\nelse:\n try:\n from typing import Protocol\n except ImportError:\n from typing_extensions import Protocol # noqa\n try:\n from typing import runtime_checkable\n except ImportError:\n from typing_extensions import runtime_checkable # noqa\n try:\n from yaml import CSafeLoader as SafeLoader\n except ImportError: # pragma: no cover\n from yaml import SafeLoader\n\n try:\n from functools import cached_property\n except ImportError:\n _NOT_FOUND = object()\n\n class cached_property:\n def __init__(self, func: Callable) -> None:\n self.func: Callable = func\n self.__doc__: Any = func.__doc__\n\n def __get__(self, instance: Any, owner: Any = None) -> Any:\n value = instance.__dict__.get(self.func.__name__, _NOT_FOUND)\n if value is _NOT_FOUND: # pragma: no cover\n value = instance.__dict__[self.func.__name__] = self.func(instance)\n return value\n\n try:\n import tomllib\n\n def load_toml(path: Path) -> Dict[str, Any]:\n with path.open('rb') as f:\n return tomllib.load(f)\n\n except ImportError:\n import toml\n\n def load_toml(path: Path) -> Dict[str, Any]:\n return toml.load(path)\n\n\nSafeLoaderTemp = copy.deepcopy(SafeLoader)\nSafeLoaderTemp.yaml_constructors = copy.deepcopy(SafeLoader.yaml_constructors)\nSafeLoaderTemp.add_constructor(\n 'tag:yaml.org,2002:timestamp',\n SafeLoaderTemp.yaml_constructors['tag:yaml.org,2002:str'],\n)\nSafeLoader = SafeLoaderTemp\n\nModel = TypeVar('Model', bound=_BaseModel)\n\n\ndef model_validator(\n mode: Literal['before', 'after'] = 'after',\n) -> Callable[[Callable[[Model, Any], Any]], Callable[[Model, Any], Any]]:\n def inner(method: Callable[[Model, Any], Any]) -> Callable[[Model, Any], Any]:\n if PYDANTIC_V2:\n from pydantic import model_validator as model_validator_v2\n\n return model_validator_v2(mode=mode)(method) # type: ignore\n else:\n from pydantic import root_validator\n\n return root_validator(method, pre=mode == 'before') # type: ignore\n\n return inner\n\n\ndef field_validator(\n field_name: str,\n *fields: str,\n mode: Literal['before', 'after'] = 'after',\n) -> Callable[[Any], Callable[[Model, Any], Any]]:\n def inner(method: Callable[[Model, Any], Any]) -> Callable[[Model, Any], Any]:\n if PYDANTIC_V2:\n from pydantic import field_validator as field_validator_v2\n\n return field_validator_v2(field_name, *fields, mode=mode)(method) # type: ignore\n else:\n from pydantic import validator\n\n return validator(field_name, *fields, pre=mode == 'before')(method) # type: ignore\n\n return inner\n\n\nif PYDANTIC_V2:\n from pydantic import ConfigDict as ConfigDict\nelse:\n ConfigDict = dict # type: ignore\n\n\nclass BaseModel(_BaseModel):\n if PYDANTIC_V2:\n model_config = ConfigDict(strict=False)\n", "path": "datamodel_code_generator/util.py"}]}
| 1,910 | 233 |
gh_patches_debug_8614
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-1436
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Correction of cfg-www.rst documentation
change StopBuildEndpointMatcher(role="owner") to
util.StopBuildEndpointMatcher(role="owner")
issue ref: #3043
Signed-off-by: desurd <david.desurmont@gmail.com>
## Remove this paragraph
Please have a look at our developer documentation before submitting your Pull Request.
http://trac.buildbot.net/wiki/Development
And especially:
http://trac.buildbot.net/wiki/SubmittingPatches
## Contributor Checklist:
* [ ] I have updated the unit tests
* [ ] I have created a file in the master/buildbot/newsfragment directory (and read the README.txt in that directory)
* [ ] I have updated the appropriate documentation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/db/logs.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import sqlalchemy as sa
17
18 from buildbot.db import base
19 from twisted.internet import defer
20 from twisted.python import log
21
22
23 class LogsConnectorComponent(base.DBConnectorComponent):
24
25 # Postgres and MySQL will both allow bigger sizes than this. The limit
26 # for MySQL appears to be max_packet_size (default 1M).
27 MAX_CHUNK_SIZE = 65536
28
29 def _getLog(self, whereclause):
30 def thd(conn):
31 q = self.db.model.logs.select(whereclause=whereclause)
32 res = conn.execute(q)
33 row = res.fetchone()
34
35 rv = None
36 if row:
37 rv = self._logdictFromRow(row)
38 res.close()
39 return rv
40 return self.db.pool.do(thd)
41
42 def getLog(self, logid):
43 return self._getLog(self.db.model.logs.c.id == logid)
44
45 def getLogBySlug(self, stepid, slug):
46 tbl = self.db.model.logs
47 return self._getLog((tbl.c.slug == slug) & (tbl.c.stepid == stepid))
48
49 def getLogs(self, stepid):
50 def thd(conn):
51 tbl = self.db.model.logs
52 q = tbl.select()
53 q = q.where(tbl.c.stepid == stepid)
54 q = q.order_by(tbl.c.id)
55 res = conn.execute(q)
56 return [self._logdictFromRow(row) for row in res.fetchall()]
57 return self.db.pool.do(thd)
58
59 def getLogLines(self, logid, first_line, last_line):
60 def thd(conn):
61 # get a set of chunks that completely cover the requested range
62 tbl = self.db.model.logchunks
63 q = sa.select([tbl.c.first_line, tbl.c.last_line,
64 tbl.c.content, tbl.c.compressed])
65 q = q.where(tbl.c.logid == logid)
66 q = q.where(tbl.c.first_line <= last_line)
67 q = q.where(tbl.c.last_line >= first_line)
68 q = q.order_by(tbl.c.first_line)
69 rv = []
70 for row in conn.execute(q):
71 assert not row.compressed, "compressed rows not supported yet"
72 content = row.content.decode('utf-8')
73 if row.first_line < first_line:
74 idx = -1
75 count = first_line - row.first_line
76 for _ in xrange(count):
77 idx = content.index('\n', idx + 1)
78 content = content[idx + 1:]
79 if row.last_line > last_line:
80 idx = len(content) + 1
81 count = row.last_line - last_line
82 for _ in xrange(count):
83 idx = content.rindex('\n', 0, idx - 1)
84 content = content[:idx]
85 rv.append(content)
86 return u'\n'.join(rv) + u'\n' if rv else u''
87 return self.db.pool.do(thd)
88
89 def addLog(self, stepid, name, slug, type):
90 assert type in 'tsh', "Log type must be one of t, s, or h"
91
92 def thd(conn):
93 try:
94 r = conn.execute(self.db.model.logs.insert(),
95 dict(name=name, slug=slug, stepid=stepid,
96 complete=0, num_lines=0, type=type))
97 return r.inserted_primary_key[0]
98 except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
99 raise KeyError(
100 "log with slug '%r' already exists in this step" % (slug,))
101 return self.db.pool.do(thd)
102
103 def appendLog(self, logid, content):
104 # check for trailing newline and strip it for storage -- chunks omit
105 # the trailing newline
106 assert content[-1] == u'\n'
107 content = content[:-1]
108
109 def thd(conn):
110 q = sa.select([self.db.model.logs.c.num_lines])
111 q = q.where(self.db.model.logs.c.id == logid)
112 res = conn.execute(q)
113 row = res.fetchone()
114 res.close()
115 if not row:
116 return # ignore a missing log
117
118 # Break the content up into chunks. This takes advantage of the
119 # fact that no character but u'\n' maps to b'\n' in UTF-8.
120
121 first_line = chunk_first_line = row[0]
122 remaining = content.encode('utf-8')
123 while remaining:
124 chunk, remaining = self._splitBigChunk(remaining, logid)
125
126 last_line = chunk_first_line + chunk.count('\n')
127 conn.execute(self.db.model.logchunks.insert(),
128 dict(logid=logid, first_line=chunk_first_line,
129 last_line=last_line, content=chunk,
130 compressed=0))
131 chunk_first_line = last_line + 1
132
133 conn.execute(self.db.model.logs.update(whereclause=(self.db.model.logs.c.id == logid)),
134 num_lines=last_line + 1)
135 return (first_line, last_line)
136 return self.db.pool.do(thd)
137
138 def _splitBigChunk(self, content, logid):
139 """
140 Split CONTENT on a line boundary into a prefix smaller than 64k and
141 a suffix containing the remainder, omitting the splitting newline.
142 """
143 # if it's small enough, just return it
144 if len(content) < self.MAX_CHUNK_SIZE:
145 return content, None
146
147 # find the last newline before the limit
148 i = content.rfind('\n', 0, self.MAX_CHUNK_SIZE)
149 if i != -1:
150 return content[:i], content[i + 1:]
151
152 log.msg('truncating long line for log %d' % logid)
153
154 # first, truncate this down to something that decodes correctly
155 truncline = content[:self.MAX_CHUNK_SIZE]
156 while truncline:
157 try:
158 truncline.decode('utf-8')
159 break
160 except UnicodeDecodeError:
161 truncline = truncline[:-1]
162
163 # then find the beginning of the next line
164 i = content.find('\n', self.MAX_CHUNK_SIZE)
165 if i == -1:
166 return truncline, None
167 else:
168 return truncline, content[i + 1:]
169
170 def finishLog(self, logid):
171 def thd(conn):
172 tbl = self.db.model.logs
173 q = tbl.update(whereclause=(tbl.c.id == logid))
174 conn.execute(q, complete=1)
175 return self.db.pool.do(thd)
176
177 def compressLog(self, logid):
178 # TODO: compression not supported yet
179 return defer.succeed(None)
180
181 def _logdictFromRow(self, row):
182 rv = dict(row)
183 rv['complete'] = bool(rv['complete'])
184 return rv
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/master/buildbot/db/logs.py b/master/buildbot/db/logs.py
--- a/master/buildbot/db/logs.py
+++ b/master/buildbot/db/logs.py
@@ -80,7 +80,7 @@
idx = len(content) + 1
count = row.last_line - last_line
for _ in xrange(count):
- idx = content.rindex('\n', 0, idx - 1)
+ idx = content.rindex('\n', 0, idx)
content = content[:idx]
rv.append(content)
return u'\n'.join(rv) + u'\n' if rv else u''
|
{"golden_diff": "diff --git a/master/buildbot/db/logs.py b/master/buildbot/db/logs.py\n--- a/master/buildbot/db/logs.py\n+++ b/master/buildbot/db/logs.py\n@@ -80,7 +80,7 @@\n idx = len(content) + 1\n count = row.last_line - last_line\n for _ in xrange(count):\n- idx = content.rindex('\\n', 0, idx - 1)\n+ idx = content.rindex('\\n', 0, idx)\n content = content[:idx]\n rv.append(content)\n return u'\\n'.join(rv) + u'\\n' if rv else u''\n", "issue": "Correction of cfg-www.rst documentation\nchange StopBuildEndpointMatcher(role=\"owner\") to\r\nutil.StopBuildEndpointMatcher(role=\"owner\")\r\n\r\nissue ref: #3043\r\n\r\nSigned-off-by: desurd <david.desurmont@gmail.com>\r\n\r\n## Remove this paragraph\r\nPlease have a look at our developer documentation before submitting your Pull Request.\r\n\r\nhttp://trac.buildbot.net/wiki/Development\r\nAnd especially:\r\nhttp://trac.buildbot.net/wiki/SubmittingPatches\r\n\r\n## Contributor Checklist:\r\n\r\n* [ ] I have updated the unit tests\r\n* [ ] I have created a file in the master/buildbot/newsfragment directory (and read the README.txt in that directory)\r\n* [ ] I have updated the appropriate documentation\r\n\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\n\nfrom buildbot.db import base\nfrom twisted.internet import defer\nfrom twisted.python import log\n\n\nclass LogsConnectorComponent(base.DBConnectorComponent):\n\n # Postgres and MySQL will both allow bigger sizes than this. The limit\n # for MySQL appears to be max_packet_size (default 1M).\n MAX_CHUNK_SIZE = 65536\n\n def _getLog(self, whereclause):\n def thd(conn):\n q = self.db.model.logs.select(whereclause=whereclause)\n res = conn.execute(q)\n row = res.fetchone()\n\n rv = None\n if row:\n rv = self._logdictFromRow(row)\n res.close()\n return rv\n return self.db.pool.do(thd)\n\n def getLog(self, logid):\n return self._getLog(self.db.model.logs.c.id == logid)\n\n def getLogBySlug(self, stepid, slug):\n tbl = self.db.model.logs\n return self._getLog((tbl.c.slug == slug) & (tbl.c.stepid == stepid))\n\n def getLogs(self, stepid):\n def thd(conn):\n tbl = self.db.model.logs\n q = tbl.select()\n q = q.where(tbl.c.stepid == stepid)\n q = q.order_by(tbl.c.id)\n res = conn.execute(q)\n return [self._logdictFromRow(row) for row in res.fetchall()]\n return self.db.pool.do(thd)\n\n def getLogLines(self, logid, first_line, last_line):\n def thd(conn):\n # get a set of chunks that completely cover the requested range\n tbl = self.db.model.logchunks\n q = sa.select([tbl.c.first_line, tbl.c.last_line,\n tbl.c.content, tbl.c.compressed])\n q = q.where(tbl.c.logid == logid)\n q = q.where(tbl.c.first_line <= last_line)\n q = q.where(tbl.c.last_line >= first_line)\n q = q.order_by(tbl.c.first_line)\n rv = []\n for row in conn.execute(q):\n assert not row.compressed, \"compressed rows not supported yet\"\n content = row.content.decode('utf-8')\n if row.first_line < first_line:\n idx = -1\n count = first_line - row.first_line\n for _ in xrange(count):\n idx = content.index('\\n', idx + 1)\n content = content[idx + 1:]\n if row.last_line > last_line:\n idx = len(content) + 1\n count = row.last_line - last_line\n for _ in xrange(count):\n idx = content.rindex('\\n', 0, idx - 1)\n content = content[:idx]\n rv.append(content)\n return u'\\n'.join(rv) + u'\\n' if rv else u''\n return self.db.pool.do(thd)\n\n def addLog(self, stepid, name, slug, type):\n assert type in 'tsh', \"Log type must be one of t, s, or h\"\n\n def thd(conn):\n try:\n r = conn.execute(self.db.model.logs.insert(),\n dict(name=name, slug=slug, stepid=stepid,\n complete=0, num_lines=0, type=type))\n return r.inserted_primary_key[0]\n except (sa.exc.IntegrityError, sa.exc.ProgrammingError):\n raise KeyError(\n \"log with slug '%r' already exists in this step\" % (slug,))\n return self.db.pool.do(thd)\n\n def appendLog(self, logid, content):\n # check for trailing newline and strip it for storage -- chunks omit\n # the trailing newline\n assert content[-1] == u'\\n'\n content = content[:-1]\n\n def thd(conn):\n q = sa.select([self.db.model.logs.c.num_lines])\n q = q.where(self.db.model.logs.c.id == logid)\n res = conn.execute(q)\n row = res.fetchone()\n res.close()\n if not row:\n return # ignore a missing log\n\n # Break the content up into chunks. This takes advantage of the\n # fact that no character but u'\\n' maps to b'\\n' in UTF-8.\n\n first_line = chunk_first_line = row[0]\n remaining = content.encode('utf-8')\n while remaining:\n chunk, remaining = self._splitBigChunk(remaining, logid)\n\n last_line = chunk_first_line + chunk.count('\\n')\n conn.execute(self.db.model.logchunks.insert(),\n dict(logid=logid, first_line=chunk_first_line,\n last_line=last_line, content=chunk,\n compressed=0))\n chunk_first_line = last_line + 1\n\n conn.execute(self.db.model.logs.update(whereclause=(self.db.model.logs.c.id == logid)),\n num_lines=last_line + 1)\n return (first_line, last_line)\n return self.db.pool.do(thd)\n\n def _splitBigChunk(self, content, logid):\n \"\"\"\n Split CONTENT on a line boundary into a prefix smaller than 64k and\n a suffix containing the remainder, omitting the splitting newline.\n \"\"\"\n # if it's small enough, just return it\n if len(content) < self.MAX_CHUNK_SIZE:\n return content, None\n\n # find the last newline before the limit\n i = content.rfind('\\n', 0, self.MAX_CHUNK_SIZE)\n if i != -1:\n return content[:i], content[i + 1:]\n\n log.msg('truncating long line for log %d' % logid)\n\n # first, truncate this down to something that decodes correctly\n truncline = content[:self.MAX_CHUNK_SIZE]\n while truncline:\n try:\n truncline.decode('utf-8')\n break\n except UnicodeDecodeError:\n truncline = truncline[:-1]\n\n # then find the beginning of the next line\n i = content.find('\\n', self.MAX_CHUNK_SIZE)\n if i == -1:\n return truncline, None\n else:\n return truncline, content[i + 1:]\n\n def finishLog(self, logid):\n def thd(conn):\n tbl = self.db.model.logs\n q = tbl.update(whereclause=(tbl.c.id == logid))\n conn.execute(q, complete=1)\n return self.db.pool.do(thd)\n\n def compressLog(self, logid):\n # TODO: compression not supported yet\n return defer.succeed(None)\n\n def _logdictFromRow(self, row):\n rv = dict(row)\n rv['complete'] = bool(rv['complete'])\n return rv\n", "path": "master/buildbot/db/logs.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\n\nfrom buildbot.db import base\nfrom twisted.internet import defer\nfrom twisted.python import log\n\n\nclass LogsConnectorComponent(base.DBConnectorComponent):\n\n # Postgres and MySQL will both allow bigger sizes than this. The limit\n # for MySQL appears to be max_packet_size (default 1M).\n MAX_CHUNK_SIZE = 65536\n\n def _getLog(self, whereclause):\n def thd(conn):\n q = self.db.model.logs.select(whereclause=whereclause)\n res = conn.execute(q)\n row = res.fetchone()\n\n rv = None\n if row:\n rv = self._logdictFromRow(row)\n res.close()\n return rv\n return self.db.pool.do(thd)\n\n def getLog(self, logid):\n return self._getLog(self.db.model.logs.c.id == logid)\n\n def getLogBySlug(self, stepid, slug):\n tbl = self.db.model.logs\n return self._getLog((tbl.c.slug == slug) & (tbl.c.stepid == stepid))\n\n def getLogs(self, stepid):\n def thd(conn):\n tbl = self.db.model.logs\n q = tbl.select()\n q = q.where(tbl.c.stepid == stepid)\n q = q.order_by(tbl.c.id)\n res = conn.execute(q)\n return [self._logdictFromRow(row) for row in res.fetchall()]\n return self.db.pool.do(thd)\n\n def getLogLines(self, logid, first_line, last_line):\n def thd(conn):\n # get a set of chunks that completely cover the requested range\n tbl = self.db.model.logchunks\n q = sa.select([tbl.c.first_line, tbl.c.last_line,\n tbl.c.content, tbl.c.compressed])\n q = q.where(tbl.c.logid == logid)\n q = q.where(tbl.c.first_line <= last_line)\n q = q.where(tbl.c.last_line >= first_line)\n q = q.order_by(tbl.c.first_line)\n rv = []\n for row in conn.execute(q):\n assert not row.compressed, \"compressed rows not supported yet\"\n content = row.content.decode('utf-8')\n if row.first_line < first_line:\n idx = -1\n count = first_line - row.first_line\n for _ in xrange(count):\n idx = content.index('\\n', idx + 1)\n content = content[idx + 1:]\n if row.last_line > last_line:\n idx = len(content) + 1\n count = row.last_line - last_line\n for _ in xrange(count):\n idx = content.rindex('\\n', 0, idx)\n content = content[:idx]\n rv.append(content)\n return u'\\n'.join(rv) + u'\\n' if rv else u''\n return self.db.pool.do(thd)\n\n def addLog(self, stepid, name, slug, type):\n assert type in 'tsh', \"Log type must be one of t, s, or h\"\n\n def thd(conn):\n try:\n r = conn.execute(self.db.model.logs.insert(),\n dict(name=name, slug=slug, stepid=stepid,\n complete=0, num_lines=0, type=type))\n return r.inserted_primary_key[0]\n except (sa.exc.IntegrityError, sa.exc.ProgrammingError):\n raise KeyError(\n \"log with slug '%r' already exists in this step\" % (slug,))\n return self.db.pool.do(thd)\n\n def appendLog(self, logid, content):\n # check for trailing newline and strip it for storage -- chunks omit\n # the trailing newline\n assert content[-1] == u'\\n'\n content = content[:-1]\n\n def thd(conn):\n q = sa.select([self.db.model.logs.c.num_lines])\n q = q.where(self.db.model.logs.c.id == logid)\n res = conn.execute(q)\n row = res.fetchone()\n res.close()\n if not row:\n return # ignore a missing log\n\n # Break the content up into chunks. This takes advantage of the\n # fact that no character but u'\\n' maps to b'\\n' in UTF-8.\n\n first_line = chunk_first_line = row[0]\n remaining = content.encode('utf-8')\n while remaining:\n chunk, remaining = self._splitBigChunk(remaining, logid)\n\n last_line = chunk_first_line + chunk.count('\\n')\n conn.execute(self.db.model.logchunks.insert(),\n dict(logid=logid, first_line=chunk_first_line,\n last_line=last_line, content=chunk,\n compressed=0))\n chunk_first_line = last_line + 1\n\n conn.execute(self.db.model.logs.update(whereclause=(self.db.model.logs.c.id == logid)),\n num_lines=last_line + 1)\n return (first_line, last_line)\n return self.db.pool.do(thd)\n\n def _splitBigChunk(self, content, logid):\n \"\"\"\n Split CONTENT on a line boundary into a prefix smaller than 64k and\n a suffix containing the remainder, omitting the splitting newline.\n \"\"\"\n # if it's small enough, just return it\n if len(content) < self.MAX_CHUNK_SIZE:\n return content, None\n\n # find the last newline before the limit\n i = content.rfind('\\n', 0, self.MAX_CHUNK_SIZE)\n if i != -1:\n return content[:i], content[i + 1:]\n\n log.msg('truncating long line for log %d' % logid)\n\n # first, truncate this down to something that decodes correctly\n truncline = content[:self.MAX_CHUNK_SIZE]\n while truncline:\n try:\n truncline.decode('utf-8')\n break\n except UnicodeDecodeError:\n truncline = truncline[:-1]\n\n # then find the beginning of the next line\n i = content.find('\\n', self.MAX_CHUNK_SIZE)\n if i == -1:\n return truncline, None\n else:\n return truncline, content[i + 1:]\n\n def finishLog(self, logid):\n def thd(conn):\n tbl = self.db.model.logs\n q = tbl.update(whereclause=(tbl.c.id == logid))\n conn.execute(q, complete=1)\n return self.db.pool.do(thd)\n\n def compressLog(self, logid):\n # TODO: compression not supported yet\n return defer.succeed(None)\n\n def _logdictFromRow(self, row):\n rv = dict(row)\n rv['complete'] = bool(rv['complete'])\n return rv\n", "path": "master/buildbot/db/logs.py"}]}
| 2,494 | 139 |
gh_patches_debug_17208
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-2704
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/ChatGPT/setup.py`
Content:
```
1 from setuptools import find_packages, setup
2
3
4 def fetch_requirements(path):
5 with open(path, 'r') as fd:
6 return [r.strip() for r in fd.readlines()]
7
8
9 def fetch_readme():
10 with open('README.md', encoding='utf-8') as f:
11 return f.read()
12
13
14 def fetch_version():
15 with open('version.txt', 'r') as f:
16 return f.read().strip()
17
18
19 setup(
20 name='chatgpt',
21 version=fetch_version(),
22 packages=find_packages(exclude=(
23 'tests',
24 'benchmarks',
25 'requirements',
26 '*.egg-info',
27 )),
28 description='A RLFH implementation (ChatGPT) powered by ColossalAI',
29 long_description=fetch_readme(),
30 long_description_content_type='text/markdown',
31 license='Apache Software License 2.0',
32 url='https://github.com/hpcaitech/ChatGPT',
33 install_requires=fetch_requirements('requirements/requirements.txt'),
34 python_requires='>=3.6',
35 classifiers=[
36 'Programming Language :: Python :: 3',
37 'License :: OSI Approved :: Apache Software License',
38 'Environment :: GPU :: NVIDIA CUDA',
39 'Topic :: Scientific/Engineering :: Artificial Intelligence',
40 'Topic :: System :: Distributed Computing',
41 ],
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/applications/ChatGPT/setup.py b/applications/ChatGPT/setup.py
--- a/applications/ChatGPT/setup.py
+++ b/applications/ChatGPT/setup.py
@@ -22,7 +22,6 @@
packages=find_packages(exclude=(
'tests',
'benchmarks',
- 'requirements',
'*.egg-info',
)),
description='A RLFH implementation (ChatGPT) powered by ColossalAI',
@@ -30,7 +29,7 @@
long_description_content_type='text/markdown',
license='Apache Software License 2.0',
url='https://github.com/hpcaitech/ChatGPT',
- install_requires=fetch_requirements('requirements/requirements.txt'),
+ install_requires=fetch_requirements('requirements.txt'),
python_requires='>=3.6',
classifiers=[
'Programming Language :: Python :: 3',
|
{"golden_diff": "diff --git a/applications/ChatGPT/setup.py b/applications/ChatGPT/setup.py\n--- a/applications/ChatGPT/setup.py\n+++ b/applications/ChatGPT/setup.py\n@@ -22,7 +22,6 @@\n packages=find_packages(exclude=(\n 'tests',\n 'benchmarks',\n- 'requirements',\n '*.egg-info',\n )),\n description='A RLFH implementation (ChatGPT) powered by ColossalAI',\n@@ -30,7 +29,7 @@\n long_description_content_type='text/markdown',\n license='Apache Software License 2.0',\n url='https://github.com/hpcaitech/ChatGPT',\n- install_requires=fetch_requirements('requirements/requirements.txt'),\n+ install_requires=fetch_requirements('requirements.txt'),\n python_requires='>=3.6',\n classifiers=[\n 'Programming Language :: Python :: 3',\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\ndef fetch_readme():\n with open('README.md', encoding='utf-8') as f:\n return f.read()\n\n\ndef fetch_version():\n with open('version.txt', 'r') as f:\n return f.read().strip()\n\n\nsetup(\n name='chatgpt',\n version=fetch_version(),\n packages=find_packages(exclude=(\n 'tests',\n 'benchmarks',\n 'requirements',\n '*.egg-info',\n )),\n description='A RLFH implementation (ChatGPT) powered by ColossalAI',\n long_description=fetch_readme(),\n long_description_content_type='text/markdown',\n license='Apache Software License 2.0',\n url='https://github.com/hpcaitech/ChatGPT',\n install_requires=fetch_requirements('requirements/requirements.txt'),\n python_requires='>=3.6',\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: Apache Software License',\n 'Environment :: GPU :: NVIDIA CUDA',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: System :: Distributed Computing',\n ],\n)\n", "path": "applications/ChatGPT/setup.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\ndef fetch_readme():\n with open('README.md', encoding='utf-8') as f:\n return f.read()\n\n\ndef fetch_version():\n with open('version.txt', 'r') as f:\n return f.read().strip()\n\n\nsetup(\n name='chatgpt',\n version=fetch_version(),\n packages=find_packages(exclude=(\n 'tests',\n 'benchmarks',\n '*.egg-info',\n )),\n description='A RLFH implementation (ChatGPT) powered by ColossalAI',\n long_description=fetch_readme(),\n long_description_content_type='text/markdown',\n license='Apache Software License 2.0',\n url='https://github.com/hpcaitech/ChatGPT',\n install_requires=fetch_requirements('requirements.txt'),\n python_requires='>=3.6',\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: Apache Software License',\n 'Environment :: GPU :: NVIDIA CUDA',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: System :: Distributed Computing',\n ],\n)\n", "path": "applications/ChatGPT/setup.py"}]}
| 639 | 198 |
gh_patches_debug_49256
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-388
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test batch_size parameter is correctly applied for all distributions
With #323, we are now supporting `batch_size` parameter for both the class and functional forms of both the distributions. Let us add a simple test to validate that this is working as expected.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/log_normal.py`
Content:
```
1 import numpy as np
2 import torch
3 from torch.autograd import Variable
4
5 from pyro.distributions.distribution import Distribution
6
7
8 class LogNormal(Distribution):
9 """
10 :param mu: mean *(vector)*
11 :param sigma: standard deviations *(vector (0, Infinity))*
12
13 A distribution over probability vectors obtained by exp-transforming a random
14 variable drawn from ``Normal({mu: mu, sigma: sigma})``.
15 """
16 reparameterized = True
17
18 def __init__(self, mu, sigma, batch_size=None, *args, **kwargs):
19 """
20 Params:
21 `mu` - mean
22 `sigma` - root variance
23 """
24 self.mu = mu
25 self.sigma = sigma
26 if mu.size() != sigma.size():
27 raise ValueError("Expected mu.size() == sigma.size(), but got {} vs {}"
28 .format(mu.size(), sigma.size()))
29 if mu.dim() == 1 and batch_size is not None:
30 self.mu = mu.expand(batch_size, mu.size(0))
31 self.sigma = sigma.expand(batch_size, sigma.size(0))
32 super(LogNormal, self).__init__(*args, **kwargs)
33
34 def batch_shape(self, x=None):
35 event_dim = 1
36 mu = self.mu
37 if x is not None and x.size() != mu.size():
38 mu = self.mu.expand(x.size()[:-event_dim] + self.event_shape())
39 return mu.size()[:-event_dim]
40
41 def event_shape(self):
42 event_dim = 1
43 return self.mu.size()[-event_dim:]
44
45 def shape(self, x=None):
46 return self.batch_shape(x) + self.event_shape()
47
48 def sample(self):
49 """
50 Reparameterized log-normal sampler.
51 """
52 eps = Variable(torch.randn(1).type_as(self.mu.data))
53 z = self.mu + self.sigma * eps
54 return torch.exp(z)
55
56 def batch_log_pdf(self, x):
57 """
58 log-normal log-likelihood
59 """
60 mu = self.mu.expand(self.shape(x))
61 sigma = self.sigma.expand(self.shape(x))
62 ll_1 = Variable(torch.Tensor([-0.5 * np.log(2.0 * np.pi)])
63 .type_as(mu.data).expand_as(x))
64 ll_2 = -torch.log(sigma * x)
65 ll_3 = -0.5 * torch.pow((torch.log(x) - mu) / sigma, 2.0)
66 batch_log_pdf = torch.sum(ll_1 + ll_2 + ll_3, -1)
67 batch_log_pdf_shape = self.batch_shape(x) + (1,)
68 return batch_log_pdf.contiguous().view(batch_log_pdf_shape)
69
70 def analytic_mean(self):
71 return torch.exp(self.mu + 0.5 * torch.pow(self.sigma, 2.0))
72
73 def analytic_var(self):
74 return (torch.exp(torch.pow(self.sigma, 2.0)) - Variable(torch.ones(1))) * \
75 torch.pow(self.analytic_mean(), 2)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyro/distributions/log_normal.py b/pyro/distributions/log_normal.py
--- a/pyro/distributions/log_normal.py
+++ b/pyro/distributions/log_normal.py
@@ -49,7 +49,7 @@
"""
Reparameterized log-normal sampler.
"""
- eps = Variable(torch.randn(1).type_as(self.mu.data))
+ eps = Variable(torch.randn(self.mu.size()).type_as(self.mu.data))
z = self.mu + self.sigma * eps
return torch.exp(z)
|
{"golden_diff": "diff --git a/pyro/distributions/log_normal.py b/pyro/distributions/log_normal.py\n--- a/pyro/distributions/log_normal.py\n+++ b/pyro/distributions/log_normal.py\n@@ -49,7 +49,7 @@\n \"\"\"\n Reparameterized log-normal sampler.\n \"\"\"\n- eps = Variable(torch.randn(1).type_as(self.mu.data))\n+ eps = Variable(torch.randn(self.mu.size()).type_as(self.mu.data))\n z = self.mu + self.sigma * eps\n return torch.exp(z)\n", "issue": "Test batch_size parameter is correctly applied for all distributions\nWith #323, we are now supporting `batch_size` parameter for both the class and functional forms of both the distributions. Let us add a simple test to validate that this is working as expected. \n", "before_files": [{"content": "import numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nfrom pyro.distributions.distribution import Distribution\n\n\nclass LogNormal(Distribution):\n \"\"\"\n :param mu: mean *(vector)*\n :param sigma: standard deviations *(vector (0, Infinity))*\n\n A distribution over probability vectors obtained by exp-transforming a random\n variable drawn from ``Normal({mu: mu, sigma: sigma})``.\n \"\"\"\n reparameterized = True\n\n def __init__(self, mu, sigma, batch_size=None, *args, **kwargs):\n \"\"\"\n Params:\n `mu` - mean\n `sigma` - root variance\n \"\"\"\n self.mu = mu\n self.sigma = sigma\n if mu.size() != sigma.size():\n raise ValueError(\"Expected mu.size() == sigma.size(), but got {} vs {}\"\n .format(mu.size(), sigma.size()))\n if mu.dim() == 1 and batch_size is not None:\n self.mu = mu.expand(batch_size, mu.size(0))\n self.sigma = sigma.expand(batch_size, sigma.size(0))\n super(LogNormal, self).__init__(*args, **kwargs)\n\n def batch_shape(self, x=None):\n event_dim = 1\n mu = self.mu\n if x is not None and x.size() != mu.size():\n mu = self.mu.expand(x.size()[:-event_dim] + self.event_shape())\n return mu.size()[:-event_dim]\n\n def event_shape(self):\n event_dim = 1\n return self.mu.size()[-event_dim:]\n\n def shape(self, x=None):\n return self.batch_shape(x) + self.event_shape()\n\n def sample(self):\n \"\"\"\n Reparameterized log-normal sampler.\n \"\"\"\n eps = Variable(torch.randn(1).type_as(self.mu.data))\n z = self.mu + self.sigma * eps\n return torch.exp(z)\n\n def batch_log_pdf(self, x):\n \"\"\"\n log-normal log-likelihood\n \"\"\"\n mu = self.mu.expand(self.shape(x))\n sigma = self.sigma.expand(self.shape(x))\n ll_1 = Variable(torch.Tensor([-0.5 * np.log(2.0 * np.pi)])\n .type_as(mu.data).expand_as(x))\n ll_2 = -torch.log(sigma * x)\n ll_3 = -0.5 * torch.pow((torch.log(x) - mu) / sigma, 2.0)\n batch_log_pdf = torch.sum(ll_1 + ll_2 + ll_3, -1)\n batch_log_pdf_shape = self.batch_shape(x) + (1,)\n return batch_log_pdf.contiguous().view(batch_log_pdf_shape)\n\n def analytic_mean(self):\n return torch.exp(self.mu + 0.5 * torch.pow(self.sigma, 2.0))\n\n def analytic_var(self):\n return (torch.exp(torch.pow(self.sigma, 2.0)) - Variable(torch.ones(1))) * \\\n torch.pow(self.analytic_mean(), 2)\n", "path": "pyro/distributions/log_normal.py"}], "after_files": [{"content": "import numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nfrom pyro.distributions.distribution import Distribution\n\n\nclass LogNormal(Distribution):\n \"\"\"\n :param mu: mean *(vector)*\n :param sigma: standard deviations *(vector (0, Infinity))*\n\n A distribution over probability vectors obtained by exp-transforming a random\n variable drawn from ``Normal({mu: mu, sigma: sigma})``.\n \"\"\"\n reparameterized = True\n\n def __init__(self, mu, sigma, batch_size=None, *args, **kwargs):\n \"\"\"\n Params:\n `mu` - mean\n `sigma` - root variance\n \"\"\"\n self.mu = mu\n self.sigma = sigma\n if mu.size() != sigma.size():\n raise ValueError(\"Expected mu.size() == sigma.size(), but got {} vs {}\"\n .format(mu.size(), sigma.size()))\n if mu.dim() == 1 and batch_size is not None:\n self.mu = mu.expand(batch_size, mu.size(0))\n self.sigma = sigma.expand(batch_size, sigma.size(0))\n super(LogNormal, self).__init__(*args, **kwargs)\n\n def batch_shape(self, x=None):\n event_dim = 1\n mu = self.mu\n if x is not None and x.size() != mu.size():\n mu = self.mu.expand(x.size()[:-event_dim] + self.event_shape())\n return mu.size()[:-event_dim]\n\n def event_shape(self):\n event_dim = 1\n return self.mu.size()[-event_dim:]\n\n def shape(self, x=None):\n return self.batch_shape(x) + self.event_shape()\n\n def sample(self):\n \"\"\"\n Reparameterized log-normal sampler.\n \"\"\"\n eps = Variable(torch.randn(self.mu.size()).type_as(self.mu.data))\n z = self.mu + self.sigma * eps\n return torch.exp(z)\n\n def batch_log_pdf(self, x):\n \"\"\"\n log-normal log-likelihood\n \"\"\"\n mu = self.mu.expand(self.shape(x))\n sigma = self.sigma.expand(self.shape(x))\n ll_1 = Variable(torch.Tensor([-0.5 * np.log(2.0 * np.pi)])\n .type_as(mu.data).expand_as(x))\n ll_2 = -torch.log(sigma * x)\n ll_3 = -0.5 * torch.pow((torch.log(x) - mu) / sigma, 2.0)\n batch_log_pdf = torch.sum(ll_1 + ll_2 + ll_3, -1)\n batch_log_pdf_shape = self.batch_shape(x) + (1,)\n return batch_log_pdf.contiguous().view(batch_log_pdf_shape)\n\n def analytic_mean(self):\n return torch.exp(self.mu + 0.5 * torch.pow(self.sigma, 2.0))\n\n def analytic_var(self):\n return (torch.exp(torch.pow(self.sigma, 2.0)) - Variable(torch.ones(1))) * \\\n torch.pow(self.analytic_mean(), 2)\n", "path": "pyro/distributions/log_normal.py"}]}
| 1,107 | 115 |
gh_patches_debug_32700
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-python-6252
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot pickle mne.preprocessing.ica.ICA anymore
With the current master (installed via `pip install git+https://github.com/mne-tools/mne-python.git#egg=mne`), I get an error when pickling an `mne.preprocessing.ica.ICA` object:
```
import pickle
import mne
print(mne.__version__)
ica = mne.preprocessing.ICA()
raw = mne.io.read_raw_edf("/Users/clemens/Downloads/testfiles/S001R04.edf", preload=True)
ica.fit(raw)
pickle.loads(pickle.dumps(ica))
# Traceback (most recent call last):
# File "/Users/clemens/Repositories/mnelab/.venv/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3296, in run_code
# exec(code_obj, self.user_global_ns, self.user_ns)
# File "<ipython-input-8-a056623ee708>", line 1, in <module>
# pickle.loads(pickle.dumps(ica))
# TypeError: __new__() missing 1 required positional argument: 'val'
```
The same code works with the latest released version 0.17.2. I didn't see any change to the `ICA` object that might be causing this (but maybe I'm missing something). If indeed nothing changed, could this behavior be due to how pip installs stuff from GitHub (vs. "proper" packages from PyPI)? If so, how can I fix this (I need to pickle because I want to use the `multiprocessing` module)?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mne/utils/_bunch.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Bunch-related classes."""
3 # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
4 # Eric Larson <larson.eric.d@gmail.com>
5 #
6 # License: BSD (3-clause)
7
8 from copy import deepcopy
9
10
11 ###############################################################################
12 # Create a Bunch class that acts like a struct (mybunch.key = val )
13
14 class Bunch(dict):
15 """Dictionnary-like object thatexposes its keys as attributes."""
16
17 def __init__(self, **kwargs): # noqa: D102
18 dict.__init__(self, kwargs)
19 self.__dict__ = self
20
21
22 ###############################################################################
23 # A protected version that prevents overwriting
24
25 class BunchConst(Bunch):
26 """Class to prevent us from re-defining constants (DRY)."""
27
28 def __setattr__(self, attr, val): # noqa: D105
29 if attr != '__dict__' and hasattr(self, attr):
30 raise AttributeError('Attribute "%s" already set' % attr)
31 super().__setattr__(attr, val)
32
33
34 ###############################################################################
35 # A version that tweaks the __repr__ of its values based on keys
36
37 class BunchConstNamed(BunchConst):
38 """Class to provide nice __repr__ for our integer constants.
39
40 Only supports string keys and int or float values.
41 """
42
43 def __setattr__(self, attr, val): # noqa: D105
44 assert isinstance(attr, str)
45 if isinstance(val, int):
46 val = NamedInt(attr, val)
47 elif isinstance(val, float):
48 val = NamedFloat(attr, val)
49 else:
50 assert isinstance(val, BunchConstNamed), type(val)
51 super().__setattr__(attr, val)
52
53
54 class Named(object):
55 """Provide shared methods for giving named-representation subclasses."""
56
57 def __str__(self): # noqa: D105
58 return '%s (%s)' % (super().__str__(), self._name)
59
60 __repr__ = __str__
61
62 # see https://stackoverflow.com/a/15774013/2175965
63 def __copy__(self): # noqa: D105
64 cls = self.__class__
65 result = cls.__new__(cls)
66 result.__dict__.update(self.__dict__)
67 return result
68
69 def __deepcopy__(self, memo): # noqa: D105
70 cls = self.__class__
71 result = cls.__new__(cls, self._name, self)
72 memo[id(self)] = result
73 for k, v in self.__dict__.items():
74 setattr(result, k, deepcopy(v, memo))
75 return result
76
77
78 class NamedInt(Named, int):
79 """Int with a name in __repr__."""
80
81 def __new__(cls, name, val): # noqa: D102,D105
82 out = int.__new__(cls, val)
83 out._name = name
84 return out
85
86
87 class NamedFloat(Named, float):
88 """Float with a name in __repr__."""
89
90 def __new__(cls, name, val): # noqa: D102,D105
91 out = float.__new__(cls, val)
92 out._name = name
93 return out
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mne/utils/_bunch.py b/mne/utils/_bunch.py
--- a/mne/utils/_bunch.py
+++ b/mne/utils/_bunch.py
@@ -9,10 +9,10 @@
###############################################################################
-# Create a Bunch class that acts like a struct (mybunch.key = val )
+# Create a Bunch class that acts like a struct (mybunch.key = val)
class Bunch(dict):
- """Dictionnary-like object thatexposes its keys as attributes."""
+ """Dictionary-like object that exposes its keys as attributes."""
def __init__(self, **kwargs): # noqa: D102
dict.__init__(self, kwargs)
@@ -51,9 +51,14 @@
super().__setattr__(attr, val)
-class Named(object):
+class _Named(object):
"""Provide shared methods for giving named-representation subclasses."""
+ def __new__(cls, name, val): # noqa: D102,D105
+ out = _named_subclass(cls).__new__(cls, val)
+ out._name = name
+ return out
+
def __str__(self): # noqa: D105
return '%s (%s)' % (super().__str__(), self._name)
@@ -74,20 +79,25 @@
setattr(result, k, deepcopy(v, memo))
return result
+ def __getnewargs__(self): # noqa: D105
+ return self._name, _named_subclass(self)(self)
+
+
+def _named_subclass(klass):
+ if not isinstance(klass, type):
+ klass = klass.__class__
+ subklass = klass.mro()[-2]
+ assert subklass in (int, float)
+ return subklass
+
-class NamedInt(Named, int):
+class NamedInt(_Named, int):
"""Int with a name in __repr__."""
- def __new__(cls, name, val): # noqa: D102,D105
- out = int.__new__(cls, val)
- out._name = name
- return out
+ pass
-class NamedFloat(Named, float):
+class NamedFloat(_Named, float):
"""Float with a name in __repr__."""
- def __new__(cls, name, val): # noqa: D102,D105
- out = float.__new__(cls, val)
- out._name = name
- return out
+ pass
|
{"golden_diff": "diff --git a/mne/utils/_bunch.py b/mne/utils/_bunch.py\n--- a/mne/utils/_bunch.py\n+++ b/mne/utils/_bunch.py\n@@ -9,10 +9,10 @@\n \n \n ###############################################################################\n-# Create a Bunch class that acts like a struct (mybunch.key = val )\n+# Create a Bunch class that acts like a struct (mybunch.key = val)\n \n class Bunch(dict):\n- \"\"\"Dictionnary-like object thatexposes its keys as attributes.\"\"\"\n+ \"\"\"Dictionary-like object that exposes its keys as attributes.\"\"\"\n \n def __init__(self, **kwargs): # noqa: D102\n dict.__init__(self, kwargs)\n@@ -51,9 +51,14 @@\n super().__setattr__(attr, val)\n \n \n-class Named(object):\n+class _Named(object):\n \"\"\"Provide shared methods for giving named-representation subclasses.\"\"\"\n \n+ def __new__(cls, name, val): # noqa: D102,D105\n+ out = _named_subclass(cls).__new__(cls, val)\n+ out._name = name\n+ return out\n+\n def __str__(self): # noqa: D105\n return '%s (%s)' % (super().__str__(), self._name)\n \n@@ -74,20 +79,25 @@\n setattr(result, k, deepcopy(v, memo))\n return result\n \n+ def __getnewargs__(self): # noqa: D105\n+ return self._name, _named_subclass(self)(self)\n+\n+\n+def _named_subclass(klass):\n+ if not isinstance(klass, type):\n+ klass = klass.__class__\n+ subklass = klass.mro()[-2]\n+ assert subklass in (int, float)\n+ return subklass\n+\n \n-class NamedInt(Named, int):\n+class NamedInt(_Named, int):\n \"\"\"Int with a name in __repr__.\"\"\"\n \n- def __new__(cls, name, val): # noqa: D102,D105\n- out = int.__new__(cls, val)\n- out._name = name\n- return out\n+ pass\n \n \n-class NamedFloat(Named, float):\n+class NamedFloat(_Named, float):\n \"\"\"Float with a name in __repr__.\"\"\"\n \n- def __new__(cls, name, val): # noqa: D102,D105\n- out = float.__new__(cls, val)\n- out._name = name\n- return out\n+ pass\n", "issue": "Cannot pickle mne.preprocessing.ica.ICA anymore\nWith the current master (installed via `pip install git+https://github.com/mne-tools/mne-python.git#egg=mne`), I get an error when pickling an `mne.preprocessing.ica.ICA` object:\r\n\r\n```\r\nimport pickle\r\nimport mne\r\n\r\n\r\nprint(mne.__version__)\r\nica = mne.preprocessing.ICA()\r\nraw = mne.io.read_raw_edf(\"/Users/clemens/Downloads/testfiles/S001R04.edf\", preload=True)\r\nica.fit(raw)\r\npickle.loads(pickle.dumps(ica))\r\n# Traceback (most recent call last):\r\n# File \"/Users/clemens/Repositories/mnelab/.venv/lib/python3.7/site-packages/IPython/core/interactiveshell.py\", line 3296, in run_code\r\n# exec(code_obj, self.user_global_ns, self.user_ns)\r\n# File \"<ipython-input-8-a056623ee708>\", line 1, in <module>\r\n# pickle.loads(pickle.dumps(ica))\r\n# TypeError: __new__() missing 1 required positional argument: 'val'\r\n```\r\n\r\nThe same code works with the latest released version 0.17.2. I didn't see any change to the `ICA` object that might be causing this (but maybe I'm missing something). If indeed nothing changed, could this behavior be due to how pip installs stuff from GitHub (vs. \"proper\" packages from PyPI)? If so, how can I fix this (I need to pickle because I want to use the `multiprocessing` module)?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Bunch-related classes.\"\"\"\n# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Eric Larson <larson.eric.d@gmail.com>\n#\n# License: BSD (3-clause)\n\nfrom copy import deepcopy\n\n\n###############################################################################\n# Create a Bunch class that acts like a struct (mybunch.key = val )\n\nclass Bunch(dict):\n \"\"\"Dictionnary-like object thatexposes its keys as attributes.\"\"\"\n\n def __init__(self, **kwargs): # noqa: D102\n dict.__init__(self, kwargs)\n self.__dict__ = self\n\n\n###############################################################################\n# A protected version that prevents overwriting\n\nclass BunchConst(Bunch):\n \"\"\"Class to prevent us from re-defining constants (DRY).\"\"\"\n\n def __setattr__(self, attr, val): # noqa: D105\n if attr != '__dict__' and hasattr(self, attr):\n raise AttributeError('Attribute \"%s\" already set' % attr)\n super().__setattr__(attr, val)\n\n\n###############################################################################\n# A version that tweaks the __repr__ of its values based on keys\n\nclass BunchConstNamed(BunchConst):\n \"\"\"Class to provide nice __repr__ for our integer constants.\n\n Only supports string keys and int or float values.\n \"\"\"\n\n def __setattr__(self, attr, val): # noqa: D105\n assert isinstance(attr, str)\n if isinstance(val, int):\n val = NamedInt(attr, val)\n elif isinstance(val, float):\n val = NamedFloat(attr, val)\n else:\n assert isinstance(val, BunchConstNamed), type(val)\n super().__setattr__(attr, val)\n\n\nclass Named(object):\n \"\"\"Provide shared methods for giving named-representation subclasses.\"\"\"\n\n def __str__(self): # noqa: D105\n return '%s (%s)' % (super().__str__(), self._name)\n\n __repr__ = __str__\n\n # see https://stackoverflow.com/a/15774013/2175965\n def __copy__(self): # noqa: D105\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n def __deepcopy__(self, memo): # noqa: D105\n cls = self.__class__\n result = cls.__new__(cls, self._name, self)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n setattr(result, k, deepcopy(v, memo))\n return result\n\n\nclass NamedInt(Named, int):\n \"\"\"Int with a name in __repr__.\"\"\"\n\n def __new__(cls, name, val): # noqa: D102,D105\n out = int.__new__(cls, val)\n out._name = name\n return out\n\n\nclass NamedFloat(Named, float):\n \"\"\"Float with a name in __repr__.\"\"\"\n\n def __new__(cls, name, val): # noqa: D102,D105\n out = float.__new__(cls, val)\n out._name = name\n return out\n", "path": "mne/utils/_bunch.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Bunch-related classes.\"\"\"\n# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Eric Larson <larson.eric.d@gmail.com>\n#\n# License: BSD (3-clause)\n\nfrom copy import deepcopy\n\n\n###############################################################################\n# Create a Bunch class that acts like a struct (mybunch.key = val)\n\nclass Bunch(dict):\n \"\"\"Dictionary-like object that exposes its keys as attributes.\"\"\"\n\n def __init__(self, **kwargs): # noqa: D102\n dict.__init__(self, kwargs)\n self.__dict__ = self\n\n\n###############################################################################\n# A protected version that prevents overwriting\n\nclass BunchConst(Bunch):\n \"\"\"Class to prevent us from re-defining constants (DRY).\"\"\"\n\n def __setattr__(self, attr, val): # noqa: D105\n if attr != '__dict__' and hasattr(self, attr):\n raise AttributeError('Attribute \"%s\" already set' % attr)\n super().__setattr__(attr, val)\n\n\n###############################################################################\n# A version that tweaks the __repr__ of its values based on keys\n\nclass BunchConstNamed(BunchConst):\n \"\"\"Class to provide nice __repr__ for our integer constants.\n\n Only supports string keys and int or float values.\n \"\"\"\n\n def __setattr__(self, attr, val): # noqa: D105\n assert isinstance(attr, str)\n if isinstance(val, int):\n val = NamedInt(attr, val)\n elif isinstance(val, float):\n val = NamedFloat(attr, val)\n else:\n assert isinstance(val, BunchConstNamed), type(val)\n super().__setattr__(attr, val)\n\n\nclass _Named(object):\n \"\"\"Provide shared methods for giving named-representation subclasses.\"\"\"\n\n def __new__(cls, name, val): # noqa: D102,D105\n out = _named_subclass(cls).__new__(cls, val)\n out._name = name\n return out\n\n def __str__(self): # noqa: D105\n return '%s (%s)' % (super().__str__(), self._name)\n\n __repr__ = __str__\n\n # see https://stackoverflow.com/a/15774013/2175965\n def __copy__(self): # noqa: D105\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n def __deepcopy__(self, memo): # noqa: D105\n cls = self.__class__\n result = cls.__new__(cls, self._name, self)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n setattr(result, k, deepcopy(v, memo))\n return result\n\n def __getnewargs__(self): # noqa: D105\n return self._name, _named_subclass(self)(self)\n\n\ndef _named_subclass(klass):\n if not isinstance(klass, type):\n klass = klass.__class__\n subklass = klass.mro()[-2]\n assert subklass in (int, float)\n return subklass\n\n\nclass NamedInt(_Named, int):\n \"\"\"Int with a name in __repr__.\"\"\"\n\n pass\n\n\nclass NamedFloat(_Named, float):\n \"\"\"Float with a name in __repr__.\"\"\"\n\n pass\n", "path": "mne/utils/_bunch.py"}]}
| 1,514 | 583 |
gh_patches_debug_21333
|
rasdani/github-patches
|
git_diff
|
netket__netket-846
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs for sigma operators
The documentation of the ladder operator `netket.operator.spin.sigmap` says `σ^+ = σ^x + i σ^y`, but in fact it is defined as `σ^+ = 1/2(σ^x + i σ^y)` in the code.
The same error is in the documentation of `netket.operator.spin.sigmam` operator.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netket/operator/spin.py`
Content:
```
1 # Copyright 2021 The NetKet Authors - All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from netket.hilbert import AbstractHilbert
16 from netket.utils.types import DType
17
18 from ._local_operator import LocalOperator as _LocalOperator
19
20
21 def sigmax(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:
22 """
23 Builds the :math:`\\sigma^x` operator acting on the `site`-th of the Hilbert
24 space `hilbert`.
25
26 If `hilbert` is a non-Spin space of local dimension M, it is considered
27 as a (M-1)/2 - spin space.
28
29 :param hilbert: The hilbert space
30 :param site: the site on which this operator acts
31 :return: a nk.operator.LocalOperator
32 """
33 import numpy as np
34
35 N = hilbert.size_at_index(site)
36 S = (N - 1) / 2
37
38 D = [np.sqrt((S + 1) * 2 * a - a * (a + 1)) for a in np.arange(1, N)]
39 mat = np.diag(D, 1) + np.diag(D, -1)
40 return _LocalOperator(hilbert, mat, [site], dtype=dtype)
41
42
43 def sigmay(
44 hilbert: AbstractHilbert, site: int, dtype: DType = complex
45 ) -> _LocalOperator:
46 """
47 Builds the :math:`\\sigma^y` operator acting on the `site`-th of the Hilbert
48 space `hilbert`.
49
50 If `hilbert` is a non-Spin space of local dimension M, it is considered
51 as a (M-1)/2 - spin space.
52
53 :param hilbert: The hilbert space
54 :param site: the site on which this operator acts
55 :return: a nk.operator.LocalOperator
56 """
57 import numpy as np
58
59 N = hilbert.size_at_index(site)
60 S = (N - 1) / 2
61
62 D = np.array([1j * np.sqrt((S + 1) * 2 * a - a * (a + 1)) for a in np.arange(1, N)])
63 mat = np.diag(D, -1) + np.diag(-D, 1)
64 return _LocalOperator(hilbert, mat, [site], dtype=dtype)
65
66
67 def sigmaz(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:
68 """
69 Builds the :math:`\\sigma^z` operator acting on the `site`-th of the Hilbert
70 space `hilbert`.
71
72 If `hilbert` is a non-Spin space of local dimension M, it is considered
73 as a (M-1)/2 - spin space.
74
75 :param hilbert: The hilbert space
76 :param site: the site on which this operator acts
77 :return: a nk.operator.LocalOperator
78 """
79 import numpy as np
80
81 N = hilbert.size_at_index(site)
82 S = (N - 1) / 2
83
84 D = np.array([2 * m for m in np.arange(S, -(S + 1), -1)])
85 mat = np.diag(D, 0)
86 return _LocalOperator(hilbert, mat, [site], dtype=dtype)
87
88
89 def sigmam(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:
90 """
91 Builds the :math:`\\sigma^{-} = \\sigma^x - i \\sigma^y` operator acting on the
92 `site`-th of the Hilbert space `hilbert`.
93
94 If `hilbert` is a non-Spin space of local dimension M, it is considered
95 as a (M-1)/2 - spin space.
96
97 :param hilbert: The hilbert space
98 :param site: the site on which this operator acts
99 :return: a nk.operator.LocalOperator
100 """
101 import numpy as np
102
103 N = hilbert.size_at_index(site)
104 S = (N - 1) / 2
105
106 S2 = (S + 1) * S
107 D = np.array([np.sqrt(S2 - m * (m - 1)) for m in np.arange(S, -S, -1)])
108 mat = np.diag(D, -1)
109 return _LocalOperator(hilbert, mat, [site], dtype=dtype)
110
111
112 def sigmap(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:
113 """
114 Builds the :math:`\\sigma^{+} = \\sigma^x + i \\sigma^y` operator acting on the
115 `site`-th of the Hilbert space `hilbert`.
116
117 If `hilbert` is a non-Spin space of local dimension M, it is considered
118 as a (M-1)/2 - spin space.
119
120 :param hilbert: The hilbert space
121 :param site: the site on which this operator acts
122 :return: a nk.operator.LocalOperator
123 """
124 import numpy as np
125
126 N = hilbert.size_at_index(site)
127 S = (N - 1) / 2
128
129 S2 = (S + 1) * S
130 D = np.array([np.sqrt(S2 - m * (m + 1)) for m in np.arange(S - 1, -(S + 1), -1)])
131 mat = np.diag(D, 1)
132 return _LocalOperator(hilbert, mat, [site], dtype=dtype)
133
134
135 # clean up the module
136 del AbstractHilbert, DType
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netket/operator/spin.py b/netket/operator/spin.py
--- a/netket/operator/spin.py
+++ b/netket/operator/spin.py
@@ -88,7 +88,7 @@
def sigmam(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:
"""
- Builds the :math:`\\sigma^{-} = \\sigma^x - i \\sigma^y` operator acting on the
+ Builds the :math:`\\sigma^{-} = \\frac{1}{2}(\\sigma^x - i \\sigma^y)` operator acting on the
`site`-th of the Hilbert space `hilbert`.
If `hilbert` is a non-Spin space of local dimension M, it is considered
@@ -111,7 +111,7 @@
def sigmap(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:
"""
- Builds the :math:`\\sigma^{+} = \\sigma^x + i \\sigma^y` operator acting on the
+ Builds the :math:`\\sigma^{+} = \\frac{1}{2}(\\sigma^x + i \\sigma^y)` operator acting on the
`site`-th of the Hilbert space `hilbert`.
If `hilbert` is a non-Spin space of local dimension M, it is considered
|
{"golden_diff": "diff --git a/netket/operator/spin.py b/netket/operator/spin.py\n--- a/netket/operator/spin.py\n+++ b/netket/operator/spin.py\n@@ -88,7 +88,7 @@\n \n def sigmam(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n- Builds the :math:`\\\\sigma^{-} = \\\\sigma^x - i \\\\sigma^y` operator acting on the\n+ Builds the :math:`\\\\sigma^{-} = \\\\frac{1}{2}(\\\\sigma^x - i \\\\sigma^y)` operator acting on the\n `site`-th of the Hilbert space `hilbert`.\n \n If `hilbert` is a non-Spin space of local dimension M, it is considered\n@@ -111,7 +111,7 @@\n \n def sigmap(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n- Builds the :math:`\\\\sigma^{+} = \\\\sigma^x + i \\\\sigma^y` operator acting on the\n+ Builds the :math:`\\\\sigma^{+} = \\\\frac{1}{2}(\\\\sigma^x + i \\\\sigma^y)` operator acting on the\n `site`-th of the Hilbert space `hilbert`.\n \n If `hilbert` is a non-Spin space of local dimension M, it is considered\n", "issue": "Docs for sigma operators\nThe documentation of the ladder operator `netket.operator.spin.sigmap` says `\u03c3^+ = \u03c3^x + i \u03c3^y`, but in fact it is defined as `\u03c3^+ = 1/2(\u03c3^x + i \u03c3^y)` in the code.\r\n\r\nThe same error is in the documentation of `netket.operator.spin.sigmam` operator.\n", "before_files": [{"content": "# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom netket.hilbert import AbstractHilbert\nfrom netket.utils.types import DType\n\nfrom ._local_operator import LocalOperator as _LocalOperator\n\n\ndef sigmax(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^x` operator acting on the `site`-th of the Hilbert\n space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n D = [np.sqrt((S + 1) * 2 * a - a * (a + 1)) for a in np.arange(1, N)]\n mat = np.diag(D, 1) + np.diag(D, -1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef sigmay(\n hilbert: AbstractHilbert, site: int, dtype: DType = complex\n) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^y` operator acting on the `site`-th of the Hilbert\n space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n D = np.array([1j * np.sqrt((S + 1) * 2 * a - a * (a + 1)) for a in np.arange(1, N)])\n mat = np.diag(D, -1) + np.diag(-D, 1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef sigmaz(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^z` operator acting on the `site`-th of the Hilbert\n space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n D = np.array([2 * m for m in np.arange(S, -(S + 1), -1)])\n mat = np.diag(D, 0)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef sigmam(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^{-} = \\\\sigma^x - i \\\\sigma^y` operator acting on the\n `site`-th of the Hilbert space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n S2 = (S + 1) * S\n D = np.array([np.sqrt(S2 - m * (m - 1)) for m in np.arange(S, -S, -1)])\n mat = np.diag(D, -1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef sigmap(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^{+} = \\\\sigma^x + i \\\\sigma^y` operator acting on the\n `site`-th of the Hilbert space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n S2 = (S + 1) * S\n D = np.array([np.sqrt(S2 - m * (m + 1)) for m in np.arange(S - 1, -(S + 1), -1)])\n mat = np.diag(D, 1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\n# clean up the module\ndel AbstractHilbert, DType\n", "path": "netket/operator/spin.py"}], "after_files": [{"content": "# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom netket.hilbert import AbstractHilbert\nfrom netket.utils.types import DType\n\nfrom ._local_operator import LocalOperator as _LocalOperator\n\n\ndef sigmax(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^x` operator acting on the `site`-th of the Hilbert\n space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n D = [np.sqrt((S + 1) * 2 * a - a * (a + 1)) for a in np.arange(1, N)]\n mat = np.diag(D, 1) + np.diag(D, -1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef sigmay(\n hilbert: AbstractHilbert, site: int, dtype: DType = complex\n) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^y` operator acting on the `site`-th of the Hilbert\n space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n D = np.array([1j * np.sqrt((S + 1) * 2 * a - a * (a + 1)) for a in np.arange(1, N)])\n mat = np.diag(D, -1) + np.diag(-D, 1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef sigmaz(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^z` operator acting on the `site`-th of the Hilbert\n space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n D = np.array([2 * m for m in np.arange(S, -(S + 1), -1)])\n mat = np.diag(D, 0)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef sigmam(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^{-} = \\\\frac{1}{2}(\\\\sigma^x - i \\\\sigma^y)` operator acting on the\n `site`-th of the Hilbert space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n S2 = (S + 1) * S\n D = np.array([np.sqrt(S2 - m * (m - 1)) for m in np.arange(S, -S, -1)])\n mat = np.diag(D, -1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef sigmap(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the :math:`\\\\sigma^{+} = \\\\frac{1}{2}(\\\\sigma^x + i \\\\sigma^y)` operator acting on the\n `site`-th of the Hilbert space `hilbert`.\n\n If `hilbert` is a non-Spin space of local dimension M, it is considered\n as a (M-1)/2 - spin space.\n\n :param hilbert: The hilbert space\n :param site: the site on which this operator acts\n :return: a nk.operator.LocalOperator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n S = (N - 1) / 2\n\n S2 = (S + 1) * S\n D = np.array([np.sqrt(S2 - m * (m + 1)) for m in np.arange(S - 1, -(S + 1), -1)])\n mat = np.diag(D, 1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\n# clean up the module\ndel AbstractHilbert, DType\n", "path": "netket/operator/spin.py"}]}
| 2,047 | 327 |
gh_patches_debug_12781
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-5674
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Image Copy '--target-subscription' Argument Ignored
### Related command
<!--- Please provide the related command with az {command} if you can, so that we can quickly route to the related team for help. --->
```bash
az image copy --source-resource-group <source_rg_name> \
--source-object-name <source_image_name> \
--source-type image \
--target-subscription <target_subscription_id> \
--target-location <target_location> \
--target-resource-group <target_rg_name> \
--target-name <target_image_name> \
--temporary-resource-group-name <temp_rg_name>
```
### Extension name (the extension in question)
image-copy-extension
### Description of issue (in as much detail as possible)
My organization is using the `image-copy-extension` and we noticed on recent workflows that the extension is now ignoring the `--target-subscription` argument and seems to use the authentication context subscription as the target subscription for all temporary resource and final image creation instead.
We noticed this change because previously the temporary resource group we used was stored within the target subscription and this meant that the temporary storage account, blob, and snapshot all got created within this temporary resource group but now the extension seems to be looking for the temporary resource group in the default subscription associated with the authentication context instead of the target subscription provided to the `--target-subscription` argument.
My Python experience is still limited but I have looked at the [recent commit](https://github.com/Azure/azure-cli-extensions/commit/3a6621cc0c96aab5ddd2388f4a2e6a81f4ed2f0a) and I see that the `target_subscription` variable is now being set without checking if the argument has been passed and I'm not sure if this was changed for a reason or if this is just a bug that can be fixed by only setting the `target_subscription` variable if the argument is null.

Link to the line in question.
https://github.com/Azure/azure-cli-extensions/blob/42102c3a81ad23407e7d929521ea212805b4688d/src/image-copy/azext_imagecopy/custom.py#L37
There is also a debug line below the variable declaration that we've seen continues to display the subscription ID for the authentication context and not the target subscription we're providing as an argument.
-----
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/image-copy/azext_imagecopy/custom.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from multiprocessing import Pool
7
8 from knack.util import CLIError
9 from knack.log import get_logger
10
11 from azext_imagecopy.cli_utils import run_cli_command, prepare_cli_command, get_storage_account_id_from_blob_path
12 from azext_imagecopy.create_target import create_target_image
13
14 logger = get_logger(__name__)
15
16
17 # pylint: disable=too-many-statements
18 # pylint: disable=too-many-locals
19 # pylint: disable=too-many-branches
20 def imagecopy(cmd, source_resource_group_name, source_object_name, target_location,
21 target_resource_group_name, temporary_resource_group_name='image-copy-rg',
22 source_type='image', cleanup=False, parallel_degree=-1, tags=None, target_name=None,
23 target_subscription=None, export_as_snapshot='false', timeout=3600):
24 only_show_errors = cmd.cli_ctx.only_show_errors
25 if cleanup:
26 # If --cleanup is set, forbid using an existing temporary resource group name.
27 # It is dangerous to clean up an existing resource group.
28 cli_cmd = prepare_cli_command(['group', 'exists', '-n', temporary_resource_group_name],
29 output_as_json=False,
30 only_show_errors=only_show_errors)
31 cmd_output = run_cli_command(cli_cmd)
32 if 'true' in cmd_output:
33 raise CLIError('Don\'t specify an existing resource group in --temporary-resource-group-name '
34 'when --cleanup is set')
35
36 from azure.cli.core.commands.client_factory import get_subscription_id
37 target_subscription = get_subscription_id(cmd.cli_ctx)
38 logger.debug('subscription id - %s', target_subscription)
39
40 # get the os disk id from source vm/image
41 logger.warning("Getting OS disk ID of the source VM/image")
42 cli_cmd = prepare_cli_command([source_type, 'show',
43 '--name', source_object_name,
44 '--resource-group', source_resource_group_name],
45 only_show_errors=only_show_errors)
46
47 json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)
48
49 if json_cmd_output['storageProfile']['dataDisks']:
50 logger.warning(
51 "Data disks in the source detected, but are ignored by this extension!")
52
53 source_os_disk_id = None
54 source_os_disk_type = None
55
56 try:
57 source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id']
58 if source_os_disk_id is None:
59 raise TypeError
60 source_os_disk_type = "DISK"
61 logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
62 except TypeError:
63 try:
64 source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri']
65 if source_os_disk_id is None:
66 raise TypeError
67 source_os_disk_type = "BLOB"
68 logger.debug("found %s: %s", source_os_disk_type,
69 source_os_disk_id)
70 except TypeError:
71 try: # images created by e.g. image-copy extension
72 source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id']
73 if source_os_disk_id is None:
74 raise TypeError
75 source_os_disk_type = "SNAPSHOT"
76 logger.debug("found %s: %s", source_os_disk_type,
77 source_os_disk_id)
78 except TypeError:
79 pass
80
81 if source_os_disk_type is None or source_os_disk_id is None:
82 logger.error(
83 'Unable to locate a supported OS disk type in the provided source object')
84 raise CLIError('Invalid OS Disk Source Type')
85
86 source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']
87 logger.debug("source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s",
88 source_os_disk_type, source_os_disk_id, source_os_type)
89
90 # create source snapshots
91 # TODO: skip creating another snapshot when the source is a snapshot
92 logger.warning("Creating source snapshot")
93 source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'
94 snapshot_location = json_cmd_output['location']
95 hyper_v_generation = json_cmd_output['hyperVGeneration']
96 if source_os_disk_type == "BLOB":
97 source_storage_account_id = get_storage_account_id_from_blob_path(source_os_disk_id,
98 source_resource_group_name,
99 target_subscription)
100 cmd_content = ['snapshot', 'create',
101 '--name', source_os_disk_snapshot_name,
102 '--location', snapshot_location,
103 '--resource-group', source_resource_group_name,
104 '--source', source_os_disk_id,
105 '--source-storage-account-id', source_storage_account_id]
106 else:
107 cmd_content = ['snapshot', 'create',
108 '--name', source_os_disk_snapshot_name,
109 '--location', snapshot_location,
110 '--resource-group', source_resource_group_name,
111 '--source', source_os_disk_id]
112 if hyper_v_generation:
113 cmd_content = cmd_content + ['--hyper-v-generation', hyper_v_generation]
114 cli_cmd = prepare_cli_command(cmd_content, only_show_errors=only_show_errors)
115 run_cli_command(cli_cmd)
116
117 # Get SAS URL for the snapshotName
118 logger.warning(
119 "Getting sas url for the source snapshot with timeout: %d seconds", timeout)
120 if timeout < 3600:
121 logger.error("Timeout should be greater than 3600 seconds")
122 raise CLIError('Invalid Timeout')
123
124 cli_cmd = prepare_cli_command(['snapshot', 'grant-access',
125 '--name', source_os_disk_snapshot_name,
126 '--resource-group', source_resource_group_name,
127 '--duration-in-seconds', str(timeout)],
128 only_show_errors=only_show_errors)
129
130 json_output = run_cli_command(cli_cmd, return_as_json=True)
131
132 source_os_disk_snapshot_url = json_output['accessSas']
133 logger.debug("source os disk snapshot url: %s",
134 source_os_disk_snapshot_url)
135
136 # Start processing in the target locations
137
138 transient_resource_group_name = temporary_resource_group_name
139 # pick the first location for the temp group
140 transient_resource_group_location = target_location[0].strip()
141 create_resource_group(transient_resource_group_name,
142 transient_resource_group_location,
143 target_subscription,
144 only_show_errors)
145
146 target_locations_count = len(target_location)
147 logger.warning("Target location count: %s", target_locations_count)
148
149 create_resource_group(target_resource_group_name,
150 target_location[0].strip(),
151 target_subscription,
152 only_show_errors)
153
154 pool = None
155 try:
156
157 # try to get a handle on arm's 409s
158 azure_pool_frequency = 5
159 if target_locations_count >= 5:
160 azure_pool_frequency = 15
161 elif target_locations_count >= 3:
162 azure_pool_frequency = 10
163
164 if (target_locations_count == 1) or (parallel_degree == 1):
165 # Going to copy to targets one-by-one
166 logger.debug("Starting sync process for all locations")
167 for location in target_location:
168 location = location.strip()
169 create_target_image(location, transient_resource_group_name, source_type,
170 source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
171 source_os_type, target_resource_group_name, azure_pool_frequency,
172 tags, target_name, target_subscription, export_as_snapshot, timeout,
173 hyper_v_generation, only_show_errors)
174 else:
175 if parallel_degree == -1:
176 pool = Pool(target_locations_count)
177 else:
178 pool = Pool(min(parallel_degree, target_locations_count))
179
180 tasks = []
181 for location in target_location:
182 location = location.strip()
183 task_content = (location, transient_resource_group_name, source_type,
184 source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
185 source_os_type, target_resource_group_name, azure_pool_frequency,
186 tags, target_name, target_subscription, export_as_snapshot, timeout,
187 hyper_v_generation, only_show_errors)
188 tasks.append(task_content)
189
190 logger.warning("Starting async process for all locations")
191
192 for task in tasks:
193 pool.apply_async(create_target_image, task)
194
195 pool.close()
196 pool.join()
197
198 except KeyboardInterrupt:
199 logger.warning('User cancelled the operation')
200 if cleanup:
201 logger.warning('To cleanup temporary resources look for ones tagged with "image-copy-extension". \n'
202 'You can use the following command: az resource list --tag created_by=image-copy-extension')
203 if pool is not None:
204 pool.terminate()
205 return
206
207 # Cleanup
208 if cleanup:
209 logger.warning('Deleting transient resources')
210
211 # Delete resource group
212 cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes',
213 '--name', transient_resource_group_name],
214 subscription=target_subscription,
215 only_show_errors=only_show_errors)
216 run_cli_command(cli_cmd)
217
218 # Revoke sas for source snapshot
219 cli_cmd = prepare_cli_command(['snapshot', 'revoke-access',
220 '--name', source_os_disk_snapshot_name,
221 '--resource-group', source_resource_group_name],
222 only_show_errors=only_show_errors)
223 run_cli_command(cli_cmd)
224
225 # Delete source snapshot
226 # TODO: skip this if source is snapshot and not creating a new one
227 cli_cmd = prepare_cli_command(['snapshot', 'delete',
228 '--name', source_os_disk_snapshot_name,
229 '--resource-group', source_resource_group_name],
230 only_show_errors=only_show_errors)
231 run_cli_command(cli_cmd)
232
233 logger.warning('Image copy finished')
234
235
236 def create_resource_group(resource_group_name, location, subscription=None, only_show_errors=None):
237 # check if target resource group exists
238 cli_cmd = prepare_cli_command(['group', 'exists',
239 '--name', resource_group_name],
240 output_as_json=False,
241 subscription=subscription,
242 only_show_errors=only_show_errors)
243
244 cmd_output = run_cli_command(cli_cmd)
245
246 if 'true' in cmd_output:
247 return
248
249 # create the target resource group
250 logger.warning("Creating resource group: %s", resource_group_name)
251 cli_cmd = prepare_cli_command(['group', 'create',
252 '--name', resource_group_name,
253 '--location', location],
254 subscription=subscription,
255 only_show_errors=only_show_errors)
256
257 run_cli_command(cli_cmd)
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/image-copy/azext_imagecopy/custom.py b/src/image-copy/azext_imagecopy/custom.py
--- a/src/image-copy/azext_imagecopy/custom.py
+++ b/src/image-copy/azext_imagecopy/custom.py
@@ -33,8 +33,9 @@
raise CLIError('Don\'t specify an existing resource group in --temporary-resource-group-name '
'when --cleanup is set')
- from azure.cli.core.commands.client_factory import get_subscription_id
- target_subscription = get_subscription_id(cmd.cli_ctx)
+ if not target_subscription:
+ from azure.cli.core.commands.client_factory import get_subscription_id
+ target_subscription = get_subscription_id(cmd.cli_ctx)
logger.debug('subscription id - %s', target_subscription)
# get the os disk id from source vm/image
|
{"golden_diff": "diff --git a/src/image-copy/azext_imagecopy/custom.py b/src/image-copy/azext_imagecopy/custom.py\n--- a/src/image-copy/azext_imagecopy/custom.py\n+++ b/src/image-copy/azext_imagecopy/custom.py\n@@ -33,8 +33,9 @@\n raise CLIError('Don\\'t specify an existing resource group in --temporary-resource-group-name '\n 'when --cleanup is set')\n \n- from azure.cli.core.commands.client_factory import get_subscription_id\n- target_subscription = get_subscription_id(cmd.cli_ctx)\n+ if not target_subscription:\n+ from azure.cli.core.commands.client_factory import get_subscription_id\n+ target_subscription = get_subscription_id(cmd.cli_ctx)\n logger.debug('subscription id - %s', target_subscription)\n \n # get the os disk id from source vm/image\n", "issue": "Image Copy '--target-subscription' Argument Ignored\n### Related command\r\n<!--- Please provide the related command with az {command} if you can, so that we can quickly route to the related team for help. --->\r\n```bash\r\naz image copy --source-resource-group <source_rg_name> \\\r\n --source-object-name <source_image_name> \\\r\n --source-type image \\\r\n --target-subscription <target_subscription_id> \\\r\n --target-location <target_location> \\\r\n --target-resource-group <target_rg_name> \\\r\n --target-name <target_image_name> \\\r\n --temporary-resource-group-name <temp_rg_name>\r\n```\r\n\r\n### Extension name (the extension in question)\r\nimage-copy-extension\r\n\r\n### Description of issue (in as much detail as possible)\r\n\r\nMy organization is using the `image-copy-extension` and we noticed on recent workflows that the extension is now ignoring the `--target-subscription` argument and seems to use the authentication context subscription as the target subscription for all temporary resource and final image creation instead.\r\n\r\nWe noticed this change because previously the temporary resource group we used was stored within the target subscription and this meant that the temporary storage account, blob, and snapshot all got created within this temporary resource group but now the extension seems to be looking for the temporary resource group in the default subscription associated with the authentication context instead of the target subscription provided to the `--target-subscription` argument.\r\n\r\nMy Python experience is still limited but I have looked at the [recent commit](https://github.com/Azure/azure-cli-extensions/commit/3a6621cc0c96aab5ddd2388f4a2e6a81f4ed2f0a) and I see that the `target_subscription` variable is now being set without checking if the argument has been passed and I'm not sure if this was changed for a reason or if this is just a bug that can be fixed by only setting the `target_subscription` variable if the argument is null.\r\n\r\n\r\n\r\nLink to the line in question.\r\n\r\nhttps://github.com/Azure/azure-cli-extensions/blob/42102c3a81ad23407e7d929521ea212805b4688d/src/image-copy/azext_imagecopy/custom.py#L37\r\n\r\nThere is also a debug line below the variable declaration that we've seen continues to display the subscription ID for the authentication context and not the target subscription we're providing as an argument.\r\n\r\n-----\r\n\r\n\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom multiprocessing import Pool\n\nfrom knack.util import CLIError\nfrom knack.log import get_logger\n\nfrom azext_imagecopy.cli_utils import run_cli_command, prepare_cli_command, get_storage_account_id_from_blob_path\nfrom azext_imagecopy.create_target import create_target_image\n\nlogger = get_logger(__name__)\n\n\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\n# pylint: disable=too-many-branches\ndef imagecopy(cmd, source_resource_group_name, source_object_name, target_location,\n target_resource_group_name, temporary_resource_group_name='image-copy-rg',\n source_type='image', cleanup=False, parallel_degree=-1, tags=None, target_name=None,\n target_subscription=None, export_as_snapshot='false', timeout=3600):\n only_show_errors = cmd.cli_ctx.only_show_errors\n if cleanup:\n # If --cleanup is set, forbid using an existing temporary resource group name.\n # It is dangerous to clean up an existing resource group.\n cli_cmd = prepare_cli_command(['group', 'exists', '-n', temporary_resource_group_name],\n output_as_json=False,\n only_show_errors=only_show_errors)\n cmd_output = run_cli_command(cli_cmd)\n if 'true' in cmd_output:\n raise CLIError('Don\\'t specify an existing resource group in --temporary-resource-group-name '\n 'when --cleanup is set')\n\n from azure.cli.core.commands.client_factory import get_subscription_id\n target_subscription = get_subscription_id(cmd.cli_ctx)\n logger.debug('subscription id - %s', target_subscription)\n\n # get the os disk id from source vm/image\n logger.warning(\"Getting OS disk ID of the source VM/image\")\n cli_cmd = prepare_cli_command([source_type, 'show',\n '--name', source_object_name,\n '--resource-group', source_resource_group_name],\n only_show_errors=only_show_errors)\n\n json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)\n\n if json_cmd_output['storageProfile']['dataDisks']:\n logger.warning(\n \"Data disks in the source detected, but are ignored by this extension!\")\n\n source_os_disk_id = None\n source_os_disk_type = None\n\n try:\n source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id']\n if source_os_disk_id is None:\n raise TypeError\n source_os_disk_type = \"DISK\"\n logger.debug(\"found %s: %s\", source_os_disk_type, source_os_disk_id)\n except TypeError:\n try:\n source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri']\n if source_os_disk_id is None:\n raise TypeError\n source_os_disk_type = \"BLOB\"\n logger.debug(\"found %s: %s\", source_os_disk_type,\n source_os_disk_id)\n except TypeError:\n try: # images created by e.g. image-copy extension\n source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id']\n if source_os_disk_id is None:\n raise TypeError\n source_os_disk_type = \"SNAPSHOT\"\n logger.debug(\"found %s: %s\", source_os_disk_type,\n source_os_disk_id)\n except TypeError:\n pass\n\n if source_os_disk_type is None or source_os_disk_id is None:\n logger.error(\n 'Unable to locate a supported OS disk type in the provided source object')\n raise CLIError('Invalid OS Disk Source Type')\n\n source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']\n logger.debug(\"source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s\",\n source_os_disk_type, source_os_disk_id, source_os_type)\n\n # create source snapshots\n # TODO: skip creating another snapshot when the source is a snapshot\n logger.warning(\"Creating source snapshot\")\n source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'\n snapshot_location = json_cmd_output['location']\n hyper_v_generation = json_cmd_output['hyperVGeneration']\n if source_os_disk_type == \"BLOB\":\n source_storage_account_id = get_storage_account_id_from_blob_path(source_os_disk_id,\n source_resource_group_name,\n target_subscription)\n cmd_content = ['snapshot', 'create',\n '--name', source_os_disk_snapshot_name,\n '--location', snapshot_location,\n '--resource-group', source_resource_group_name,\n '--source', source_os_disk_id,\n '--source-storage-account-id', source_storage_account_id]\n else:\n cmd_content = ['snapshot', 'create',\n '--name', source_os_disk_snapshot_name,\n '--location', snapshot_location,\n '--resource-group', source_resource_group_name,\n '--source', source_os_disk_id]\n if hyper_v_generation:\n cmd_content = cmd_content + ['--hyper-v-generation', hyper_v_generation]\n cli_cmd = prepare_cli_command(cmd_content, only_show_errors=only_show_errors)\n run_cli_command(cli_cmd)\n\n # Get SAS URL for the snapshotName\n logger.warning(\n \"Getting sas url for the source snapshot with timeout: %d seconds\", timeout)\n if timeout < 3600:\n logger.error(\"Timeout should be greater than 3600 seconds\")\n raise CLIError('Invalid Timeout')\n\n cli_cmd = prepare_cli_command(['snapshot', 'grant-access',\n '--name', source_os_disk_snapshot_name,\n '--resource-group', source_resource_group_name,\n '--duration-in-seconds', str(timeout)],\n only_show_errors=only_show_errors)\n\n json_output = run_cli_command(cli_cmd, return_as_json=True)\n\n source_os_disk_snapshot_url = json_output['accessSas']\n logger.debug(\"source os disk snapshot url: %s\",\n source_os_disk_snapshot_url)\n\n # Start processing in the target locations\n\n transient_resource_group_name = temporary_resource_group_name\n # pick the first location for the temp group\n transient_resource_group_location = target_location[0].strip()\n create_resource_group(transient_resource_group_name,\n transient_resource_group_location,\n target_subscription,\n only_show_errors)\n\n target_locations_count = len(target_location)\n logger.warning(\"Target location count: %s\", target_locations_count)\n\n create_resource_group(target_resource_group_name,\n target_location[0].strip(),\n target_subscription,\n only_show_errors)\n\n pool = None\n try:\n\n # try to get a handle on arm's 409s\n azure_pool_frequency = 5\n if target_locations_count >= 5:\n azure_pool_frequency = 15\n elif target_locations_count >= 3:\n azure_pool_frequency = 10\n\n if (target_locations_count == 1) or (parallel_degree == 1):\n # Going to copy to targets one-by-one\n logger.debug(\"Starting sync process for all locations\")\n for location in target_location:\n location = location.strip()\n create_target_image(location, transient_resource_group_name, source_type,\n source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,\n source_os_type, target_resource_group_name, azure_pool_frequency,\n tags, target_name, target_subscription, export_as_snapshot, timeout,\n hyper_v_generation, only_show_errors)\n else:\n if parallel_degree == -1:\n pool = Pool(target_locations_count)\n else:\n pool = Pool(min(parallel_degree, target_locations_count))\n\n tasks = []\n for location in target_location:\n location = location.strip()\n task_content = (location, transient_resource_group_name, source_type,\n source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,\n source_os_type, target_resource_group_name, azure_pool_frequency,\n tags, target_name, target_subscription, export_as_snapshot, timeout,\n hyper_v_generation, only_show_errors)\n tasks.append(task_content)\n\n logger.warning(\"Starting async process for all locations\")\n\n for task in tasks:\n pool.apply_async(create_target_image, task)\n\n pool.close()\n pool.join()\n\n except KeyboardInterrupt:\n logger.warning('User cancelled the operation')\n if cleanup:\n logger.warning('To cleanup temporary resources look for ones tagged with \"image-copy-extension\". \\n'\n 'You can use the following command: az resource list --tag created_by=image-copy-extension')\n if pool is not None:\n pool.terminate()\n return\n\n # Cleanup\n if cleanup:\n logger.warning('Deleting transient resources')\n\n # Delete resource group\n cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes',\n '--name', transient_resource_group_name],\n subscription=target_subscription,\n only_show_errors=only_show_errors)\n run_cli_command(cli_cmd)\n\n # Revoke sas for source snapshot\n cli_cmd = prepare_cli_command(['snapshot', 'revoke-access',\n '--name', source_os_disk_snapshot_name,\n '--resource-group', source_resource_group_name],\n only_show_errors=only_show_errors)\n run_cli_command(cli_cmd)\n\n # Delete source snapshot\n # TODO: skip this if source is snapshot and not creating a new one\n cli_cmd = prepare_cli_command(['snapshot', 'delete',\n '--name', source_os_disk_snapshot_name,\n '--resource-group', source_resource_group_name],\n only_show_errors=only_show_errors)\n run_cli_command(cli_cmd)\n\n logger.warning('Image copy finished')\n\n\ndef create_resource_group(resource_group_name, location, subscription=None, only_show_errors=None):\n # check if target resource group exists\n cli_cmd = prepare_cli_command(['group', 'exists',\n '--name', resource_group_name],\n output_as_json=False,\n subscription=subscription,\n only_show_errors=only_show_errors)\n\n cmd_output = run_cli_command(cli_cmd)\n\n if 'true' in cmd_output:\n return\n\n # create the target resource group\n logger.warning(\"Creating resource group: %s\", resource_group_name)\n cli_cmd = prepare_cli_command(['group', 'create',\n '--name', resource_group_name,\n '--location', location],\n subscription=subscription,\n only_show_errors=only_show_errors)\n\n run_cli_command(cli_cmd)\n", "path": "src/image-copy/azext_imagecopy/custom.py"}], "after_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom multiprocessing import Pool\n\nfrom knack.util import CLIError\nfrom knack.log import get_logger\n\nfrom azext_imagecopy.cli_utils import run_cli_command, prepare_cli_command, get_storage_account_id_from_blob_path\nfrom azext_imagecopy.create_target import create_target_image\n\nlogger = get_logger(__name__)\n\n\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\n# pylint: disable=too-many-branches\ndef imagecopy(cmd, source_resource_group_name, source_object_name, target_location,\n target_resource_group_name, temporary_resource_group_name='image-copy-rg',\n source_type='image', cleanup=False, parallel_degree=-1, tags=None, target_name=None,\n target_subscription=None, export_as_snapshot='false', timeout=3600):\n only_show_errors = cmd.cli_ctx.only_show_errors\n if cleanup:\n # If --cleanup is set, forbid using an existing temporary resource group name.\n # It is dangerous to clean up an existing resource group.\n cli_cmd = prepare_cli_command(['group', 'exists', '-n', temporary_resource_group_name],\n output_as_json=False,\n only_show_errors=only_show_errors)\n cmd_output = run_cli_command(cli_cmd)\n if 'true' in cmd_output:\n raise CLIError('Don\\'t specify an existing resource group in --temporary-resource-group-name '\n 'when --cleanup is set')\n\n if not target_subscription:\n from azure.cli.core.commands.client_factory import get_subscription_id\n target_subscription = get_subscription_id(cmd.cli_ctx)\n logger.debug('subscription id - %s', target_subscription)\n\n # get the os disk id from source vm/image\n logger.warning(\"Getting OS disk ID of the source VM/image\")\n cli_cmd = prepare_cli_command([source_type, 'show',\n '--name', source_object_name,\n '--resource-group', source_resource_group_name],\n only_show_errors=only_show_errors)\n\n json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)\n\n if json_cmd_output['storageProfile']['dataDisks']:\n logger.warning(\n \"Data disks in the source detected, but are ignored by this extension!\")\n\n source_os_disk_id = None\n source_os_disk_type = None\n\n try:\n source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id']\n if source_os_disk_id is None:\n raise TypeError\n source_os_disk_type = \"DISK\"\n logger.debug(\"found %s: %s\", source_os_disk_type, source_os_disk_id)\n except TypeError:\n try:\n source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri']\n if source_os_disk_id is None:\n raise TypeError\n source_os_disk_type = \"BLOB\"\n logger.debug(\"found %s: %s\", source_os_disk_type,\n source_os_disk_id)\n except TypeError:\n try: # images created by e.g. image-copy extension\n source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id']\n if source_os_disk_id is None:\n raise TypeError\n source_os_disk_type = \"SNAPSHOT\"\n logger.debug(\"found %s: %s\", source_os_disk_type,\n source_os_disk_id)\n except TypeError:\n pass\n\n if source_os_disk_type is None or source_os_disk_id is None:\n logger.error(\n 'Unable to locate a supported OS disk type in the provided source object')\n raise CLIError('Invalid OS Disk Source Type')\n\n source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']\n logger.debug(\"source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s\",\n source_os_disk_type, source_os_disk_id, source_os_type)\n\n # create source snapshots\n # TODO: skip creating another snapshot when the source is a snapshot\n logger.warning(\"Creating source snapshot\")\n source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'\n snapshot_location = json_cmd_output['location']\n hyper_v_generation = json_cmd_output['hyperVGeneration']\n if source_os_disk_type == \"BLOB\":\n source_storage_account_id = get_storage_account_id_from_blob_path(source_os_disk_id,\n source_resource_group_name,\n target_subscription)\n cmd_content = ['snapshot', 'create',\n '--name', source_os_disk_snapshot_name,\n '--location', snapshot_location,\n '--resource-group', source_resource_group_name,\n '--source', source_os_disk_id,\n '--source-storage-account-id', source_storage_account_id]\n else:\n cmd_content = ['snapshot', 'create',\n '--name', source_os_disk_snapshot_name,\n '--location', snapshot_location,\n '--resource-group', source_resource_group_name,\n '--source', source_os_disk_id]\n if hyper_v_generation:\n cmd_content = cmd_content + ['--hyper-v-generation', hyper_v_generation]\n cli_cmd = prepare_cli_command(cmd_content, only_show_errors=only_show_errors)\n run_cli_command(cli_cmd)\n\n # Get SAS URL for the snapshotName\n logger.warning(\n \"Getting sas url for the source snapshot with timeout: %d seconds\", timeout)\n if timeout < 3600:\n logger.error(\"Timeout should be greater than 3600 seconds\")\n raise CLIError('Invalid Timeout')\n\n cli_cmd = prepare_cli_command(['snapshot', 'grant-access',\n '--name', source_os_disk_snapshot_name,\n '--resource-group', source_resource_group_name,\n '--duration-in-seconds', str(timeout)],\n only_show_errors=only_show_errors)\n\n json_output = run_cli_command(cli_cmd, return_as_json=True)\n\n source_os_disk_snapshot_url = json_output['accessSas']\n logger.debug(\"source os disk snapshot url: %s\",\n source_os_disk_snapshot_url)\n\n # Start processing in the target locations\n\n transient_resource_group_name = temporary_resource_group_name\n # pick the first location for the temp group\n transient_resource_group_location = target_location[0].strip()\n create_resource_group(transient_resource_group_name,\n transient_resource_group_location,\n target_subscription,\n only_show_errors)\n\n target_locations_count = len(target_location)\n logger.warning(\"Target location count: %s\", target_locations_count)\n\n create_resource_group(target_resource_group_name,\n target_location[0].strip(),\n target_subscription,\n only_show_errors)\n\n pool = None\n try:\n\n # try to get a handle on arm's 409s\n azure_pool_frequency = 5\n if target_locations_count >= 5:\n azure_pool_frequency = 15\n elif target_locations_count >= 3:\n azure_pool_frequency = 10\n\n if (target_locations_count == 1) or (parallel_degree == 1):\n # Going to copy to targets one-by-one\n logger.debug(\"Starting sync process for all locations\")\n for location in target_location:\n location = location.strip()\n create_target_image(location, transient_resource_group_name, source_type,\n source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,\n source_os_type, target_resource_group_name, azure_pool_frequency,\n tags, target_name, target_subscription, export_as_snapshot, timeout,\n hyper_v_generation, only_show_errors)\n else:\n if parallel_degree == -1:\n pool = Pool(target_locations_count)\n else:\n pool = Pool(min(parallel_degree, target_locations_count))\n\n tasks = []\n for location in target_location:\n location = location.strip()\n task_content = (location, transient_resource_group_name, source_type,\n source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,\n source_os_type, target_resource_group_name, azure_pool_frequency,\n tags, target_name, target_subscription, export_as_snapshot, timeout,\n hyper_v_generation, only_show_errors)\n tasks.append(task_content)\n\n logger.warning(\"Starting async process for all locations\")\n\n for task in tasks:\n pool.apply_async(create_target_image, task)\n\n pool.close()\n pool.join()\n\n except KeyboardInterrupt:\n logger.warning('User cancelled the operation')\n if cleanup:\n logger.warning('To cleanup temporary resources look for ones tagged with \"image-copy-extension\". \\n'\n 'You can use the following command: az resource list --tag created_by=image-copy-extension')\n if pool is not None:\n pool.terminate()\n return\n\n # Cleanup\n if cleanup:\n logger.warning('Deleting transient resources')\n\n # Delete resource group\n cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes',\n '--name', transient_resource_group_name],\n subscription=target_subscription,\n only_show_errors=only_show_errors)\n run_cli_command(cli_cmd)\n\n # Revoke sas for source snapshot\n cli_cmd = prepare_cli_command(['snapshot', 'revoke-access',\n '--name', source_os_disk_snapshot_name,\n '--resource-group', source_resource_group_name],\n only_show_errors=only_show_errors)\n run_cli_command(cli_cmd)\n\n # Delete source snapshot\n # TODO: skip this if source is snapshot and not creating a new one\n cli_cmd = prepare_cli_command(['snapshot', 'delete',\n '--name', source_os_disk_snapshot_name,\n '--resource-group', source_resource_group_name],\n only_show_errors=only_show_errors)\n run_cli_command(cli_cmd)\n\n logger.warning('Image copy finished')\n\n\ndef create_resource_group(resource_group_name, location, subscription=None, only_show_errors=None):\n # check if target resource group exists\n cli_cmd = prepare_cli_command(['group', 'exists',\n '--name', resource_group_name],\n output_as_json=False,\n subscription=subscription,\n only_show_errors=only_show_errors)\n\n cmd_output = run_cli_command(cli_cmd)\n\n if 'true' in cmd_output:\n return\n\n # create the target resource group\n logger.warning(\"Creating resource group: %s\", resource_group_name)\n cli_cmd = prepare_cli_command(['group', 'create',\n '--name', resource_group_name,\n '--location', location],\n subscription=subscription,\n only_show_errors=only_show_errors)\n\n run_cli_command(cli_cmd)\n", "path": "src/image-copy/azext_imagecopy/custom.py"}]}
| 3,746 | 178 |
gh_patches_debug_41690
|
rasdani/github-patches
|
git_diff
|
Cloud-CV__EvalAI-928
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log change in Challenge and Challenge Phase model when file fields are updated
__Problem__
We need a way to figure out if the `evaluation_script`field value is changed for Challenge Model or not. Same goes for `test_annotation` field in ChallengePhase Model.
__Please be more clear__ ?
When ever a model is saved, its saved function is called. So this task involves figuring out a way in save model to detect if the model is updated or added. If the model is updated, then further it should be figured out whether the `evaluation_script` is changed for Challenge model or not.
If `evaluation_script` of Challenge model is changed, then simply print a message stating the change. This print statement will later on act as a logging statement in case we need to debug something.
The above requested thing goes same for `test_annotation` field of ChallengePhase model.
__Why__ ?
Submission worker when starts loads all `evaluation_script` and `test_annotation` file in advance. In case if the Challenge Host updates any of the file, there is no way to communicate this to the worker
__Impact__ ?
It actually guarantees that whenever any submission is evaluated, it is evaluated against the write set of parameters.
__How__ ?
As of now, I think it can be done via following ways
* Either overriding the save method of each model and adding a hook over there to check if the instance is being updated or added. May be there is a flag or something which tells whether the instance is added or updated
* Either by [signals](https://docs.djangoproject.com/en/1.10/topics/signals/). Reason I am asking to use signal because we need to carry out the same set of action for more than one models(presently _Challenge_ and _ChallengePhase_). Doing it by signal will help us achieve more modularized code.
* Your suggestion ??
__TakeAways__
About how django model is saved, how signal works, and a brief intro about RabbitMQ and submission worker
Also I will prefer that __before coding__, its better to __discuss__ the approach over here only, so that we can get an idea and update if something is not right.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/base/models.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.db import models
4
5
6 class TimeStampedModel(models.Model):
7 """
8 An abstract base class model that provides self-managed `created_at` and
9 `modified_at` fields.
10 """
11 created_at = models.DateTimeField(auto_now_add=True)
12 modified_at = models.DateTimeField(auto_now=True)
13
14 class Meta:
15 abstract = True
16 app_label = 'base'
17
```
Path: `apps/challenges/models.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.contrib.auth.models import User
4 from django.utils import timezone
5 from django.contrib.postgres.fields import JSONField
6 from django.db import models
7
8 from base.models import (TimeStampedModel, )
9 from base.utils import RandomFileName
10 from participants.models import (ParticipantTeam, )
11
12
13 class Challenge(TimeStampedModel):
14
15 """Model representing a hosted Challenge"""
16 title = models.CharField(max_length=100)
17 short_description = models.TextField(null=True, blank=True)
18 description = models.TextField(null=True, blank=True)
19 terms_and_conditions = models.TextField(null=True, blank=True)
20 submission_guidelines = models.TextField(null=True, blank=True)
21 evaluation_details = models.TextField(null=True, blank=True)
22 image = models.ImageField(
23 upload_to='logos', null=True, blank=True, verbose_name="Logo")
24 start_date = models.DateTimeField(
25 null=True, blank=True, verbose_name="Start Date (UTC)")
26 end_date = models.DateTimeField(
27 null=True, blank=True, verbose_name="End Date (UTC)")
28 creator = models.ForeignKey(
29 'hosts.ChallengeHostTeam', related_name='challenge_creator')
30 published = models.BooleanField(
31 default=False, verbose_name="Publicly Available")
32 enable_forum = models.BooleanField(default=True)
33 anonymous_leaderboard = models.BooleanField(default=False)
34 participant_teams = models.ManyToManyField(ParticipantTeam, blank=True)
35 is_disabled = models.BooleanField(default=False)
36 evaluation_script = models.FileField(
37 default=False, upload_to=RandomFileName("evaluation_scripts")) # should be zip format
38 approved_by_admin = models.BooleanField(
39 default=False, verbose_name="Approved By Admin")
40
41 class Meta:
42 app_label = 'challenges'
43 db_table = 'challenge'
44
45 def __unicode__(self):
46 """Returns the title of Challenge"""
47 return self.title
48
49 def get_image_url(self):
50 """Returns the url of logo of Challenge"""
51 if self.image:
52 return self.image.url
53 return None
54
55 def get_evaluation_script_path(self):
56 """Returns the path of evaluation script"""
57 if self.evaluation_script:
58 return self.evaluation_script.url
59 return None
60
61 def get_start_date(self):
62 """Returns the start date of Challenge"""
63 return self.start_date
64
65 def get_end_date(self):
66 """Returns the end date of Challenge"""
67 return self.end_date
68
69 @property
70 def is_active(self):
71 """Returns if the challenge is active or not"""
72 if self.start_date < timezone.now() and self.end_date > timezone.now():
73 return True
74 return False
75
76
77 class DatasetSplit(TimeStampedModel):
78 name = models.CharField(max_length=100)
79 codename = models.CharField(max_length=100, unique=True)
80
81 def __unicode__(self):
82 return self.name
83
84 class Meta:
85 app_label = 'challenges'
86 db_table = 'dataset_split'
87
88
89 class ChallengePhase(TimeStampedModel):
90
91 """Model representing a Challenge Phase"""
92 name = models.CharField(max_length=100)
93 description = models.TextField()
94 leaderboard_public = models.BooleanField(default=False)
95 start_date = models.DateTimeField(
96 null=True, blank=True, verbose_name="Start Date (UTC)")
97 end_date = models.DateTimeField(
98 null=True, blank=True, verbose_name="End Date (UTC)")
99 challenge = models.ForeignKey('Challenge')
100 is_public = models.BooleanField(default=False)
101 is_submission_public = models.BooleanField(default=False)
102 test_annotation = models.FileField(upload_to=RandomFileName("test_annotations"), default=False)
103 max_submissions_per_day = models.PositiveIntegerField(default=100000)
104 max_submissions = models.PositiveIntegerField(default=100000)
105 codename = models.CharField(max_length=100, default="Phase Code Name")
106 dataset_split = models.ManyToManyField(DatasetSplit, blank=True, through='ChallengePhaseSplit')
107
108 class Meta:
109 app_label = 'challenges'
110 db_table = 'challenge_phase'
111 unique_together = (('codename', 'challenge'),)
112
113 def __unicode__(self):
114 """Returns the name of Phase"""
115 return self.name
116
117 def get_start_date(self):
118 """Returns the start date of Phase"""
119 return self.start_date
120
121 def get_end_date(self):
122 """Returns the end date of Challenge"""
123 return self.end_date
124
125 @property
126 def is_active(self):
127 """Returns if the challenge is active or not"""
128 if self.start_date < timezone.now() and self.end_date > timezone.now():
129 return True
130 return False
131
132
133 class Leaderboard(TimeStampedModel):
134
135 schema = JSONField()
136
137 def __unicode__(self):
138 return '{}'.format(self.id)
139
140 class Meta:
141 app_label = 'challenges'
142 db_table = 'leaderboard'
143
144
145 class ChallengePhaseSplit(TimeStampedModel):
146
147 # visibility options
148 HOST = 1
149 OWNER_AND_HOST = 2
150 PUBLIC = 3
151
152 VISIBILITY_OPTIONS = (
153 (HOST, 'host'),
154 (OWNER_AND_HOST, 'owner and host'),
155 (PUBLIC, 'public'),
156 )
157
158 challenge_phase = models.ForeignKey('ChallengePhase')
159 dataset_split = models.ForeignKey('DatasetSplit')
160 leaderboard = models.ForeignKey('Leaderboard')
161 visibility = models.PositiveSmallIntegerField(
162 choices=VISIBILITY_OPTIONS,
163 default=PUBLIC
164 )
165
166 def __unicode__(self):
167 return '{0} : {1}'.format(self.challenge_phase.name, self.dataset_split.name)
168
169 class Meta:
170 app_label = 'challenges'
171 db_table = 'challenge_phase_split'
172
173
174 class LeaderboardData(TimeStampedModel):
175
176 challenge_phase_split = models.ForeignKey('ChallengePhaseSplit')
177 submission = models.ForeignKey('jobs.Submission')
178 leaderboard = models.ForeignKey('Leaderboard')
179 result = JSONField()
180
181 def __unicode__(self):
182 return '{0} : {1}'.format(self.challenge_phase_split, self.submission)
183
184 class Meta:
185 app_label = 'challenges'
186 db_table = 'leaderboard_data'
187
188
189 class ChallengeConfiguration(TimeStampedModel):
190 """
191 Model to store zip file for challenge creation.
192 """
193 user = models.ForeignKey(User)
194 challenge = models.OneToOneField(Challenge, null=True, blank=True)
195 zip_configuration = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'))
196 is_created = models.BooleanField(default=False)
197 stdout_file = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'),
198 null=True, blank=True)
199 stderr_file = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'),
200 null=True, blank=True)
201
202 class Meta:
203 app_label = 'challenges'
204 db_table = 'challenge_zip_configuration'
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/base/models.py b/apps/base/models.py
--- a/apps/base/models.py
+++ b/apps/base/models.py
@@ -1,7 +1,12 @@
from __future__ import unicode_literals
+import logging
+
from django.db import models
+# Get an instance of a logger
+logger = logging.getLogger(__name__)
+
class TimeStampedModel(models.Model):
"""
@@ -14,3 +19,24 @@
class Meta:
abstract = True
app_label = 'base'
+
+
+def model_field_name(field_name, *args, **kwargs):
+ """
+ The decorator is used to pass model field names to create_post_model_field function for logging change.
+ """
+ def model_field_name_decorator(f, *args, **kwargs):
+ def model_field_name_wrapper(sender, instance, **kwargs):
+ f(sender, instance, field_name=field_name, **kwargs)
+ return model_field_name_wrapper
+ return model_field_name_decorator
+
+
+def create_post_model_field(sender, instance, field_name, **kwargs):
+ """
+ When any model field value changes, it is used to log the change.
+ """
+ if getattr(instance, '_original_{}'.format(field_name)) is False:
+ logger.info('{} for {} is added first time !'.format(field_name, instance.pk))
+ else:
+ logger.info('{} for {} changed !'.format(field_name, instance.pk))
diff --git a/apps/challenges/models.py b/apps/challenges/models.py
--- a/apps/challenges/models.py
+++ b/apps/challenges/models.py
@@ -4,8 +4,9 @@
from django.utils import timezone
from django.contrib.postgres.fields import JSONField
from django.db import models
+from django.db.models import signals
-from base.models import (TimeStampedModel, )
+from base.models import (TimeStampedModel, model_field_name, create_post_model_field, )
from base.utils import RandomFileName
from participants.models import (ParticipantTeam, )
@@ -13,6 +14,11 @@
class Challenge(TimeStampedModel):
"""Model representing a hosted Challenge"""
+
+ def __init__(self, *args, **kwargs):
+ super(Challenge, self).__init__(*args, **kwargs)
+ self._original_evaluation_script = self.evaluation_script
+
title = models.CharField(max_length=100)
short_description = models.TextField(null=True, blank=True)
description = models.TextField(null=True, blank=True)
@@ -74,6 +80,10 @@
return False
+signals.post_save.connect(model_field_name(field_name='evaluation_script')(create_post_model_field),
+ sender=Challenge, weak=False)
+
+
class DatasetSplit(TimeStampedModel):
name = models.CharField(max_length=100)
codename = models.CharField(max_length=100, unique=True)
@@ -89,6 +99,10 @@
class ChallengePhase(TimeStampedModel):
"""Model representing a Challenge Phase"""
+ def __init__(self, *args, **kwargs):
+ super(ChallengePhase, self).__init__(*args, **kwargs)
+ self._original_test_annotation = self.test_annotation
+
name = models.CharField(max_length=100)
description = models.TextField()
leaderboard_public = models.BooleanField(default=False)
@@ -130,6 +144,10 @@
return False
+signals.post_save.connect(model_field_name(field_name='test_annotation')(create_post_model_field),
+ sender=ChallengePhase, weak=False)
+
+
class Leaderboard(TimeStampedModel):
schema = JSONField()
|
{"golden_diff": "diff --git a/apps/base/models.py b/apps/base/models.py\n--- a/apps/base/models.py\n+++ b/apps/base/models.py\n@@ -1,7 +1,12 @@\n from __future__ import unicode_literals\n \n+import logging\n+\n from django.db import models\n \n+# Get an instance of a logger\n+logger = logging.getLogger(__name__)\n+\n \n class TimeStampedModel(models.Model):\n \"\"\"\n@@ -14,3 +19,24 @@\n class Meta:\n abstract = True\n app_label = 'base'\n+\n+\n+def model_field_name(field_name, *args, **kwargs):\n+ \"\"\"\n+ The decorator is used to pass model field names to create_post_model_field function for logging change.\n+ \"\"\"\n+ def model_field_name_decorator(f, *args, **kwargs):\n+ def model_field_name_wrapper(sender, instance, **kwargs):\n+ f(sender, instance, field_name=field_name, **kwargs)\n+ return model_field_name_wrapper\n+ return model_field_name_decorator\n+\n+\n+def create_post_model_field(sender, instance, field_name, **kwargs):\n+ \"\"\"\n+ When any model field value changes, it is used to log the change.\n+ \"\"\"\n+ if getattr(instance, '_original_{}'.format(field_name)) is False:\n+ logger.info('{} for {} is added first time !'.format(field_name, instance.pk))\n+ else:\n+ logger.info('{} for {} changed !'.format(field_name, instance.pk))\ndiff --git a/apps/challenges/models.py b/apps/challenges/models.py\n--- a/apps/challenges/models.py\n+++ b/apps/challenges/models.py\n@@ -4,8 +4,9 @@\n from django.utils import timezone\n from django.contrib.postgres.fields import JSONField\n from django.db import models\n+from django.db.models import signals\n \n-from base.models import (TimeStampedModel, )\n+from base.models import (TimeStampedModel, model_field_name, create_post_model_field, )\n from base.utils import RandomFileName\n from participants.models import (ParticipantTeam, )\n \n@@ -13,6 +14,11 @@\n class Challenge(TimeStampedModel):\n \n \"\"\"Model representing a hosted Challenge\"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super(Challenge, self).__init__(*args, **kwargs)\n+ self._original_evaluation_script = self.evaluation_script\n+\n title = models.CharField(max_length=100)\n short_description = models.TextField(null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n@@ -74,6 +80,10 @@\n return False\n \n \n+signals.post_save.connect(model_field_name(field_name='evaluation_script')(create_post_model_field),\n+ sender=Challenge, weak=False)\n+\n+\n class DatasetSplit(TimeStampedModel):\n name = models.CharField(max_length=100)\n codename = models.CharField(max_length=100, unique=True)\n@@ -89,6 +99,10 @@\n class ChallengePhase(TimeStampedModel):\n \n \"\"\"Model representing a Challenge Phase\"\"\"\n+ def __init__(self, *args, **kwargs):\n+ super(ChallengePhase, self).__init__(*args, **kwargs)\n+ self._original_test_annotation = self.test_annotation\n+\n name = models.CharField(max_length=100)\n description = models.TextField()\n leaderboard_public = models.BooleanField(default=False)\n@@ -130,6 +144,10 @@\n return False\n \n \n+signals.post_save.connect(model_field_name(field_name='test_annotation')(create_post_model_field),\n+ sender=ChallengePhase, weak=False)\n+\n+\n class Leaderboard(TimeStampedModel):\n \n schema = JSONField()\n", "issue": "Log change in Challenge and Challenge Phase model when file fields are updated\n__Problem__\r\n\r\nWe need a way to figure out if the `evaluation_script`field value is changed for Challenge Model or not. Same goes for `test_annotation` field in ChallengePhase Model.\r\n\r\n__Please be more clear__ ?\r\n\r\nWhen ever a model is saved, its saved function is called. So this task involves figuring out a way in save model to detect if the model is updated or added. If the model is updated, then further it should be figured out whether the `evaluation_script` is changed for Challenge model or not.\r\n\r\nIf `evaluation_script` of Challenge model is changed, then simply print a message stating the change. This print statement will later on act as a logging statement in case we need to debug something.\r\n\r\nThe above requested thing goes same for `test_annotation` field of ChallengePhase model.\r\n\r\n__Why__ ?\r\n\r\nSubmission worker when starts loads all `evaluation_script` and `test_annotation` file in advance. In case if the Challenge Host updates any of the file, there is no way to communicate this to the worker\r\n\r\n__Impact__ ?\r\n\r\nIt actually guarantees that whenever any submission is evaluated, it is evaluated against the write set of parameters.\r\n\r\n__How__ ?\r\n\r\nAs of now, I think it can be done via following ways\r\n\r\n* Either overriding the save method of each model and adding a hook over there to check if the instance is being updated or added. May be there is a flag or something which tells whether the instance is added or updated\r\n\r\n* Either by [signals](https://docs.djangoproject.com/en/1.10/topics/signals/). Reason I am asking to use signal because we need to carry out the same set of action for more than one models(presently _Challenge_ and _ChallengePhase_). Doing it by signal will help us achieve more modularized code.\r\n\r\n* Your suggestion ??\r\n\r\n__TakeAways__\r\n\r\nAbout how django model is saved, how signal works, and a brief intro about RabbitMQ and submission worker\r\n\r\nAlso I will prefer that __before coding__, its better to __discuss__ the approach over here only, so that we can get an idea and update if something is not right.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.db import models\n\n\nclass TimeStampedModel(models.Model):\n \"\"\"\n An abstract base class model that provides self-managed `created_at` and\n `modified_at` fields.\n \"\"\"\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n app_label = 'base'\n", "path": "apps/base/models.py"}, {"content": "from __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db import models\n\nfrom base.models import (TimeStampedModel, )\nfrom base.utils import RandomFileName\nfrom participants.models import (ParticipantTeam, )\n\n\nclass Challenge(TimeStampedModel):\n\n \"\"\"Model representing a hosted Challenge\"\"\"\n title = models.CharField(max_length=100)\n short_description = models.TextField(null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n terms_and_conditions = models.TextField(null=True, blank=True)\n submission_guidelines = models.TextField(null=True, blank=True)\n evaluation_details = models.TextField(null=True, blank=True)\n image = models.ImageField(\n upload_to='logos', null=True, blank=True, verbose_name=\"Logo\")\n start_date = models.DateTimeField(\n null=True, blank=True, verbose_name=\"Start Date (UTC)\")\n end_date = models.DateTimeField(\n null=True, blank=True, verbose_name=\"End Date (UTC)\")\n creator = models.ForeignKey(\n 'hosts.ChallengeHostTeam', related_name='challenge_creator')\n published = models.BooleanField(\n default=False, verbose_name=\"Publicly Available\")\n enable_forum = models.BooleanField(default=True)\n anonymous_leaderboard = models.BooleanField(default=False)\n participant_teams = models.ManyToManyField(ParticipantTeam, blank=True)\n is_disabled = models.BooleanField(default=False)\n evaluation_script = models.FileField(\n default=False, upload_to=RandomFileName(\"evaluation_scripts\")) # should be zip format\n approved_by_admin = models.BooleanField(\n default=False, verbose_name=\"Approved By Admin\")\n\n class Meta:\n app_label = 'challenges'\n db_table = 'challenge'\n\n def __unicode__(self):\n \"\"\"Returns the title of Challenge\"\"\"\n return self.title\n\n def get_image_url(self):\n \"\"\"Returns the url of logo of Challenge\"\"\"\n if self.image:\n return self.image.url\n return None\n\n def get_evaluation_script_path(self):\n \"\"\"Returns the path of evaluation script\"\"\"\n if self.evaluation_script:\n return self.evaluation_script.url\n return None\n\n def get_start_date(self):\n \"\"\"Returns the start date of Challenge\"\"\"\n return self.start_date\n\n def get_end_date(self):\n \"\"\"Returns the end date of Challenge\"\"\"\n return self.end_date\n\n @property\n def is_active(self):\n \"\"\"Returns if the challenge is active or not\"\"\"\n if self.start_date < timezone.now() and self.end_date > timezone.now():\n return True\n return False\n\n\nclass DatasetSplit(TimeStampedModel):\n name = models.CharField(max_length=100)\n codename = models.CharField(max_length=100, unique=True)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = 'challenges'\n db_table = 'dataset_split'\n\n\nclass ChallengePhase(TimeStampedModel):\n\n \"\"\"Model representing a Challenge Phase\"\"\"\n name = models.CharField(max_length=100)\n description = models.TextField()\n leaderboard_public = models.BooleanField(default=False)\n start_date = models.DateTimeField(\n null=True, blank=True, verbose_name=\"Start Date (UTC)\")\n end_date = models.DateTimeField(\n null=True, blank=True, verbose_name=\"End Date (UTC)\")\n challenge = models.ForeignKey('Challenge')\n is_public = models.BooleanField(default=False)\n is_submission_public = models.BooleanField(default=False)\n test_annotation = models.FileField(upload_to=RandomFileName(\"test_annotations\"), default=False)\n max_submissions_per_day = models.PositiveIntegerField(default=100000)\n max_submissions = models.PositiveIntegerField(default=100000)\n codename = models.CharField(max_length=100, default=\"Phase Code Name\")\n dataset_split = models.ManyToManyField(DatasetSplit, blank=True, through='ChallengePhaseSplit')\n\n class Meta:\n app_label = 'challenges'\n db_table = 'challenge_phase'\n unique_together = (('codename', 'challenge'),)\n\n def __unicode__(self):\n \"\"\"Returns the name of Phase\"\"\"\n return self.name\n\n def get_start_date(self):\n \"\"\"Returns the start date of Phase\"\"\"\n return self.start_date\n\n def get_end_date(self):\n \"\"\"Returns the end date of Challenge\"\"\"\n return self.end_date\n\n @property\n def is_active(self):\n \"\"\"Returns if the challenge is active or not\"\"\"\n if self.start_date < timezone.now() and self.end_date > timezone.now():\n return True\n return False\n\n\nclass Leaderboard(TimeStampedModel):\n\n schema = JSONField()\n\n def __unicode__(self):\n return '{}'.format(self.id)\n\n class Meta:\n app_label = 'challenges'\n db_table = 'leaderboard'\n\n\nclass ChallengePhaseSplit(TimeStampedModel):\n\n # visibility options\n HOST = 1\n OWNER_AND_HOST = 2\n PUBLIC = 3\n\n VISIBILITY_OPTIONS = (\n (HOST, 'host'),\n (OWNER_AND_HOST, 'owner and host'),\n (PUBLIC, 'public'),\n )\n\n challenge_phase = models.ForeignKey('ChallengePhase')\n dataset_split = models.ForeignKey('DatasetSplit')\n leaderboard = models.ForeignKey('Leaderboard')\n visibility = models.PositiveSmallIntegerField(\n choices=VISIBILITY_OPTIONS,\n default=PUBLIC\n )\n\n def __unicode__(self):\n return '{0} : {1}'.format(self.challenge_phase.name, self.dataset_split.name)\n\n class Meta:\n app_label = 'challenges'\n db_table = 'challenge_phase_split'\n\n\nclass LeaderboardData(TimeStampedModel):\n\n challenge_phase_split = models.ForeignKey('ChallengePhaseSplit')\n submission = models.ForeignKey('jobs.Submission')\n leaderboard = models.ForeignKey('Leaderboard')\n result = JSONField()\n\n def __unicode__(self):\n return '{0} : {1}'.format(self.challenge_phase_split, self.submission)\n\n class Meta:\n app_label = 'challenges'\n db_table = 'leaderboard_data'\n\n\nclass ChallengeConfiguration(TimeStampedModel):\n \"\"\"\n Model to store zip file for challenge creation.\n \"\"\"\n user = models.ForeignKey(User)\n challenge = models.OneToOneField(Challenge, null=True, blank=True)\n zip_configuration = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'))\n is_created = models.BooleanField(default=False)\n stdout_file = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'),\n null=True, blank=True)\n stderr_file = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'),\n null=True, blank=True)\n\n class Meta:\n app_label = 'challenges'\n db_table = 'challenge_zip_configuration'\n", "path": "apps/challenges/models.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom django.db import models\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n\nclass TimeStampedModel(models.Model):\n \"\"\"\n An abstract base class model that provides self-managed `created_at` and\n `modified_at` fields.\n \"\"\"\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n app_label = 'base'\n\n\ndef model_field_name(field_name, *args, **kwargs):\n \"\"\"\n The decorator is used to pass model field names to create_post_model_field function for logging change.\n \"\"\"\n def model_field_name_decorator(f, *args, **kwargs):\n def model_field_name_wrapper(sender, instance, **kwargs):\n f(sender, instance, field_name=field_name, **kwargs)\n return model_field_name_wrapper\n return model_field_name_decorator\n\n\ndef create_post_model_field(sender, instance, field_name, **kwargs):\n \"\"\"\n When any model field value changes, it is used to log the change.\n \"\"\"\n if getattr(instance, '_original_{}'.format(field_name)) is False:\n logger.info('{} for {} is added first time !'.format(field_name, instance.pk))\n else:\n logger.info('{} for {} changed !'.format(field_name, instance.pk))\n", "path": "apps/base/models.py"}, {"content": "from __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db import models\nfrom django.db.models import signals\n\nfrom base.models import (TimeStampedModel, model_field_name, create_post_model_field, )\nfrom base.utils import RandomFileName\nfrom participants.models import (ParticipantTeam, )\n\n\nclass Challenge(TimeStampedModel):\n\n \"\"\"Model representing a hosted Challenge\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Challenge, self).__init__(*args, **kwargs)\n self._original_evaluation_script = self.evaluation_script\n\n title = models.CharField(max_length=100)\n short_description = models.TextField(null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n terms_and_conditions = models.TextField(null=True, blank=True)\n submission_guidelines = models.TextField(null=True, blank=True)\n evaluation_details = models.TextField(null=True, blank=True)\n image = models.ImageField(\n upload_to='logos', null=True, blank=True, verbose_name=\"Logo\")\n start_date = models.DateTimeField(\n null=True, blank=True, verbose_name=\"Start Date (UTC)\")\n end_date = models.DateTimeField(\n null=True, blank=True, verbose_name=\"End Date (UTC)\")\n creator = models.ForeignKey(\n 'hosts.ChallengeHostTeam', related_name='challenge_creator')\n published = models.BooleanField(\n default=False, verbose_name=\"Publicly Available\")\n enable_forum = models.BooleanField(default=True)\n anonymous_leaderboard = models.BooleanField(default=False)\n participant_teams = models.ManyToManyField(ParticipantTeam, blank=True)\n is_disabled = models.BooleanField(default=False)\n evaluation_script = models.FileField(\n default=False, upload_to=RandomFileName(\"evaluation_scripts\")) # should be zip format\n approved_by_admin = models.BooleanField(\n default=False, verbose_name=\"Approved By Admin\")\n\n class Meta:\n app_label = 'challenges'\n db_table = 'challenge'\n\n def __unicode__(self):\n \"\"\"Returns the title of Challenge\"\"\"\n return self.title\n\n def get_image_url(self):\n \"\"\"Returns the url of logo of Challenge\"\"\"\n if self.image:\n return self.image.url\n return None\n\n def get_evaluation_script_path(self):\n \"\"\"Returns the path of evaluation script\"\"\"\n if self.evaluation_script:\n return self.evaluation_script.url\n return None\n\n def get_start_date(self):\n \"\"\"Returns the start date of Challenge\"\"\"\n return self.start_date\n\n def get_end_date(self):\n \"\"\"Returns the end date of Challenge\"\"\"\n return self.end_date\n\n @property\n def is_active(self):\n \"\"\"Returns if the challenge is active or not\"\"\"\n if self.start_date < timezone.now() and self.end_date > timezone.now():\n return True\n return False\n\n\nsignals.post_save.connect(model_field_name(field_name='evaluation_script')(create_post_model_field),\n sender=Challenge, weak=False)\n\n\nclass DatasetSplit(TimeStampedModel):\n name = models.CharField(max_length=100)\n codename = models.CharField(max_length=100, unique=True)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = 'challenges'\n db_table = 'dataset_split'\n\n\nclass ChallengePhase(TimeStampedModel):\n\n \"\"\"Model representing a Challenge Phase\"\"\"\n def __init__(self, *args, **kwargs):\n super(ChallengePhase, self).__init__(*args, **kwargs)\n self._original_test_annotation = self.test_annotation\n\n name = models.CharField(max_length=100)\n description = models.TextField()\n leaderboard_public = models.BooleanField(default=False)\n start_date = models.DateTimeField(\n null=True, blank=True, verbose_name=\"Start Date (UTC)\")\n end_date = models.DateTimeField(\n null=True, blank=True, verbose_name=\"End Date (UTC)\")\n challenge = models.ForeignKey('Challenge')\n is_public = models.BooleanField(default=False)\n is_submission_public = models.BooleanField(default=False)\n test_annotation = models.FileField(upload_to=RandomFileName(\"test_annotations\"), default=False)\n max_submissions_per_day = models.PositiveIntegerField(default=100000)\n max_submissions = models.PositiveIntegerField(default=100000)\n codename = models.CharField(max_length=100, default=\"Phase Code Name\")\n dataset_split = models.ManyToManyField(DatasetSplit, blank=True, through='ChallengePhaseSplit')\n\n class Meta:\n app_label = 'challenges'\n db_table = 'challenge_phase'\n unique_together = (('codename', 'challenge'),)\n\n def __unicode__(self):\n \"\"\"Returns the name of Phase\"\"\"\n return self.name\n\n def get_start_date(self):\n \"\"\"Returns the start date of Phase\"\"\"\n return self.start_date\n\n def get_end_date(self):\n \"\"\"Returns the end date of Challenge\"\"\"\n return self.end_date\n\n @property\n def is_active(self):\n \"\"\"Returns if the challenge is active or not\"\"\"\n if self.start_date < timezone.now() and self.end_date > timezone.now():\n return True\n return False\n\n\nsignals.post_save.connect(model_field_name(field_name='test_annotation')(create_post_model_field),\n sender=ChallengePhase, weak=False)\n\n\nclass Leaderboard(TimeStampedModel):\n\n schema = JSONField()\n\n def __unicode__(self):\n return '{}'.format(self.id)\n\n class Meta:\n app_label = 'challenges'\n db_table = 'leaderboard'\n\n\nclass ChallengePhaseSplit(TimeStampedModel):\n\n # visibility options\n HOST = 1\n OWNER_AND_HOST = 2\n PUBLIC = 3\n\n VISIBILITY_OPTIONS = (\n (HOST, 'host'),\n (OWNER_AND_HOST, 'owner and host'),\n (PUBLIC, 'public'),\n )\n\n challenge_phase = models.ForeignKey('ChallengePhase')\n dataset_split = models.ForeignKey('DatasetSplit')\n leaderboard = models.ForeignKey('Leaderboard')\n visibility = models.PositiveSmallIntegerField(\n choices=VISIBILITY_OPTIONS,\n default=PUBLIC\n )\n\n def __unicode__(self):\n return '{0} : {1}'.format(self.challenge_phase.name, self.dataset_split.name)\n\n class Meta:\n app_label = 'challenges'\n db_table = 'challenge_phase_split'\n\n\nclass LeaderboardData(TimeStampedModel):\n\n challenge_phase_split = models.ForeignKey('ChallengePhaseSplit')\n submission = models.ForeignKey('jobs.Submission')\n leaderboard = models.ForeignKey('Leaderboard')\n result = JSONField()\n\n def __unicode__(self):\n return '{0} : {1}'.format(self.challenge_phase_split, self.submission)\n\n class Meta:\n app_label = 'challenges'\n db_table = 'leaderboard_data'\n\n\nclass ChallengeConfiguration(TimeStampedModel):\n \"\"\"\n Model to store zip file for challenge creation.\n \"\"\"\n user = models.ForeignKey(User)\n challenge = models.OneToOneField(Challenge, null=True, blank=True)\n zip_configuration = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'))\n is_created = models.BooleanField(default=False)\n stdout_file = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'),\n null=True, blank=True)\n stderr_file = models.FileField(upload_to=RandomFileName('zip_configuration_files/challenge_zip'),\n null=True, blank=True)\n\n class Meta:\n app_label = 'challenges'\n db_table = 'challenge_zip_configuration'\n", "path": "apps/challenges/models.py"}]}
| 2,806 | 790 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.