problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
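For reference, a minimal sketch of reading these columns with the Hugging Face `datasets` library. The Hub id (taken from the `source` column) and the `train` split name are assumptions, not confirmed by this preview.

```python
from datasets import load_dataset

# Assumption: the dataset is published on the Hugging Face Hub under the id
# shown in the `source` column and exposes a single "train" split.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])         # e.g. "gh_patches_debug_29875"
print(row["in_source_id"])       # upstream repo/issue identifier, e.g. "streamlink__streamlink-1727"
print(row["prompt"][:300])       # issue statement plus the partial code base
print(row["golden_diff"][:300])  # reference patch in unified diff format
print(row["num_tokens_prompt"], row["num_tokens_diff"])
```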
gh_patches_debug_29875 | rasdani/github-patches | git_diff | streamlink__streamlink-1727 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Request: Add more functions to facebook plugin
### Checklist
- [x] This is a bug report.
- [ ] This is a feature request.
- [x] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
Reminder that with the new initial support of Mpeg Dash #880 and #990 might be fixable now, depending on what streamlink supports and how Facebook's videos and livestreaming has changed since this was last looked it.
</issue>
<code>
[start of src/streamlink/plugins/facebook.py]
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.stream import HLSStream
5
6 _playlist_url = "https://www.facebook.com/video/playback/playlist.m3u8?v={0}"
7
8 _url_re = re.compile(r"http(s)?://(www\.)?facebook\.com/[^/]+/videos/(?P<video_id>\d+)")
9
10
11 class Facebook(Plugin):
12 @classmethod
13 def can_handle_url(cls, url):
14 return _url_re.match(url)
15
16 @Plugin.broken(990)
17 def _get_streams(self):
18 match = _url_re.match(self.url)
19 video = match.group("video_id")
20
21 playlist = _playlist_url.format(video)
22
23 return HLSStream.parse_variant_playlist(self.session, playlist)
24
25
26 __plugin__ = Facebook
27
[end of src/streamlink/plugins/facebook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/facebook.py b/src/streamlink/plugins/facebook.py
--- a/src/streamlink/plugins/facebook.py
+++ b/src/streamlink/plugins/facebook.py
@@ -1,26 +1,42 @@
import re
from streamlink.plugin import Plugin
-from streamlink.stream import HLSStream
-
-_playlist_url = "https://www.facebook.com/video/playback/playlist.m3u8?v={0}"
-
-_url_re = re.compile(r"http(s)?://(www\.)?facebook\.com/[^/]+/videos/(?P<video_id>\d+)")
+from streamlink.plugin.api import http, useragents
+from streamlink.stream import DASHStream, HTTPStream
+from streamlink.utils import parse_json
class Facebook(Plugin):
+ _url_re = re.compile(r"https?://(?:www\.)?facebook\.com/[^/]+/videos")
+ _mpd_re = re.compile(r'''(sd|hd)_src["']?\s*:\s*(?P<quote>["'])(?P<url>.+?)(?P=quote)''')
+ _playlist_re = re.compile(r'''video:\[({url:".+?}\])''')
+ _plurl_re = re.compile(r'''url:"(.*?)"''')
+
@classmethod
def can_handle_url(cls, url):
- return _url_re.match(url)
+ return cls._url_re.match(url)
- @Plugin.broken(990)
def _get_streams(self):
- match = _url_re.match(self.url)
- video = match.group("video_id")
+ res = http.get(self.url, headers={"User-Agent": useragents.CHROME})
+ with open("temp.html", "w") as f:
+ f.write(res.text)
+
+ for match in self._mpd_re.finditer(res.text):
+ manifest_url = match.group("url")
+ if "\\/" in manifest_url:
+ # if the URL is json encoded, decode it
+ manifest_url = parse_json("\"{}\"".format(manifest_url))
+ for s in DASHStream.parse_manifest(self.session, manifest_url).items():
+ yield s
+ else:
+ match = self._playlist_re.search(res.text)
+ playlist = match and match.group(1)
+ if playlist:
+ for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:
+ yield "live", HTTPStream(self.session, url)
+
- playlist = _playlist_url.format(video)
- return HLSStream.parse_variant_playlist(self.session, playlist)
__plugin__ = Facebook
|
{"golden_diff": "diff --git a/src/streamlink/plugins/facebook.py b/src/streamlink/plugins/facebook.py\n--- a/src/streamlink/plugins/facebook.py\n+++ b/src/streamlink/plugins/facebook.py\n@@ -1,26 +1,42 @@\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.stream import HLSStream\n-\n-_playlist_url = \"https://www.facebook.com/video/playback/playlist.m3u8?v={0}\"\n-\n-_url_re = re.compile(r\"http(s)?://(www\\.)?facebook\\.com/[^/]+/videos/(?P<video_id>\\d+)\")\n+from streamlink.plugin.api import http, useragents\n+from streamlink.stream import DASHStream, HTTPStream\n+from streamlink.utils import parse_json\n \n \n class Facebook(Plugin):\n+ _url_re = re.compile(r\"https?://(?:www\\.)?facebook\\.com/[^/]+/videos\")\n+ _mpd_re = re.compile(r'''(sd|hd)_src[\"']?\\s*:\\s*(?P<quote>[\"'])(?P<url>.+?)(?P=quote)''')\n+ _playlist_re = re.compile(r'''video:\\[({url:\".+?}\\])''')\n+ _plurl_re = re.compile(r'''url:\"(.*?)\"''')\n+\n @classmethod\n def can_handle_url(cls, url):\n- return _url_re.match(url)\n+ return cls._url_re.match(url)\n \n- @Plugin.broken(990)\n def _get_streams(self):\n- match = _url_re.match(self.url)\n- video = match.group(\"video_id\")\n+ res = http.get(self.url, headers={\"User-Agent\": useragents.CHROME})\n+ with open(\"temp.html\", \"w\") as f:\n+ f.write(res.text)\n+\n+ for match in self._mpd_re.finditer(res.text):\n+ manifest_url = match.group(\"url\")\n+ if \"\\\\/\" in manifest_url:\n+ # if the URL is json encoded, decode it\n+ manifest_url = parse_json(\"\\\"{}\\\"\".format(manifest_url))\n+ for s in DASHStream.parse_manifest(self.session, manifest_url).items():\n+ yield s\n+ else:\n+ match = self._playlist_re.search(res.text)\n+ playlist = match and match.group(1)\n+ if playlist:\n+ for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:\n+ yield \"live\", HTTPStream(self.session, url)\n+\n \n- playlist = _playlist_url.format(video)\n \n- return HLSStream.parse_variant_playlist(self.session, playlist)\n \n \n __plugin__ = Facebook\n", "issue": "Request: Add more functions to facebook plugin\n### Checklist\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [x] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n### Description\r\nReminder that with the new initial support of Mpeg Dash #880 and #990 might be fixable now, depending on what streamlink supports and how Facebook's videos and livestreaming has changed since this was last looked it.\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HLSStream\n\n_playlist_url = \"https://www.facebook.com/video/playback/playlist.m3u8?v={0}\"\n\n_url_re = re.compile(r\"http(s)?://(www\\.)?facebook\\.com/[^/]+/videos/(?P<video_id>\\d+)\")\n\n\nclass Facebook(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @Plugin.broken(990)\n def _get_streams(self):\n match = _url_re.match(self.url)\n video = match.group(\"video_id\")\n\n playlist = _playlist_url.format(video)\n\n return HLSStream.parse_variant_playlist(self.session, playlist)\n\n\n__plugin__ = Facebook\n", "path": "src/streamlink/plugins/facebook.py"}]}
| 867 | 584 |
gh_patches_debug_7669 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-460 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FieldError when accessing event
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
When opening the editor page for an event, a `FieldError` occurs.
### Steps to Reproduce
1. Go to 'Events'
2. Click on an event or on the 'Create event' button
3. Error occurs (see screenshot below)
### Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
Event editor page opens up
### Actual Behavior
<!-- A clear and concise description of what actually happened. -->
Error page is loaded
### Additional Information
<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->

</issue>
<code>
[start of src/cms/views/events/event_view.py]
1 import logging
2
3 from django.contrib import messages
4 from django.contrib.auth.decorators import login_required
5 from django.contrib.auth.mixins import PermissionRequiredMixin
6 from django.core.exceptions import PermissionDenied
7 from django.shortcuts import render, redirect
8 from django.utils.decorators import method_decorator
9 from django.utils.translation import ugettext as _
10 from django.views.generic import TemplateView
11
12 from ...constants import status
13 from ...decorators import region_permission_required
14 from ...forms.events import EventForm, EventTranslationForm, RecurrenceRuleForm
15 from ...models import Region, Language, Event, EventTranslation, RecurrenceRule, POI
16
17 logger = logging.getLogger(__name__)
18
19
20 @method_decorator(login_required, name="dispatch")
21 @method_decorator(region_permission_required, name="dispatch")
22 class EventView(PermissionRequiredMixin, TemplateView):
23 permission_required = "cms.view_events"
24 raise_exception = True
25
26 template_name = "events/event_form.html"
27
28 # pylint: disable=too-many-locals
29 def get(self, request, *args, **kwargs):
30 language = Language.objects.get(code=kwargs.get("language_code"))
31
32 # get event and event translation objects if they exist, otherwise objects are None
33 event_instance = Event.objects.filter(id=kwargs.get("event_id")).first()
34 event_translation_instance = EventTranslation.objects.filter(
35 event=event_instance, language=language
36 ).first()
37 recurrence_rule_instance = RecurrenceRule.objects.filter(
38 event=event_instance
39 ).first()
40 poi_instance = POI.objects.filter(event=event_instance).first()
41
42 # Make form disabled if user has no permission to edit the page
43 if not request.user.has_perm("cms.edit_events"):
44 disabled = True
45 messages.warning(
46 request, _("You don't have the permission to edit this event.")
47 )
48 elif event_instance and event_instance.archived:
49 disabled = True
50 messages.warning(
51 request, _("You cannot edit this event because it is archived.")
52 )
53 else:
54 disabled = False
55
56 event_form = EventForm(instance=event_instance, disabled=disabled)
57 event_translation_form = EventTranslationForm(
58 instance=event_translation_instance, disabled=disabled
59 )
60 recurrence_rule_form = RecurrenceRuleForm(
61 instance=recurrence_rule_instance, disabled=disabled
62 )
63
64 return render(
65 request,
66 self.template_name,
67 {
68 "current_menu_item": "events",
69 "event_form": event_form,
70 "event_translation_form": event_translation_form,
71 "recurrence_rule_form": recurrence_rule_form,
72 "poi": poi_instance,
73 "language": language,
74 "languages": Region.get_current_region(request).languages
75 if event_instance
76 else [language],
77 },
78 )
79
80 # pylint: disable=too-many-locals,too-many-branches
81 def post(self, request, **kwargs):
82 region = Region.objects.get(slug=kwargs.get("region_slug"))
83 language = Language.objects.get(code=kwargs.get("language_code"))
84 poi = POI.objects.filter(id=request.POST.get("poi_id")).first()
85
86 event_instance = Event.objects.filter(id=kwargs.get("event_id")).first()
87 recurrence_rule_instance = RecurrenceRule.objects.filter(
88 event=event_instance
89 ).first()
90 event_translation_instance = EventTranslation.objects.filter(
91 event=event_instance, language=language
92 ).first()
93
94 if not request.user.has_perm("cms.edit_events"):
95 raise PermissionDenied
96
97 event_form = EventForm(data=request.POST, instance=event_instance,)
98 # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation
99 event_form_valid = event_form.is_valid()
100 recurrence_rule_form = RecurrenceRuleForm(
101 data=request.POST,
102 instance=recurrence_rule_instance,
103 event_start_date=event_form.cleaned_data.get("start_date", None),
104 )
105 event_translation_form = EventTranslationForm(
106 data=request.POST,
107 instance=event_translation_instance,
108 region=region,
109 language=language,
110 )
111
112 if (
113 not event_form_valid
114 or not event_translation_form.is_valid()
115 or (
116 event_form.cleaned_data["is_recurring"]
117 and not recurrence_rule_form.is_valid()
118 )
119 ):
120 forms = [event_form, event_translation_form]
121 if event_form.cleaned_data["is_recurring"]:
122 forms.append(recurrence_rule_form)
123 # Add error messages
124 for form in forms:
125 for field in form:
126 for error in field.errors:
127 messages.error(request, _(error))
128 for error in form.non_field_errors():
129 messages.error(request, _(error))
130
131 elif (
132 not event_form.has_changed()
133 and not event_translation_form.has_changed()
134 and (
135 not event_form.cleaned_data["is_recurring"]
136 or not recurrence_rule_form.has_changed()
137 )
138 and poi == event_instance.location
139 ):
140
141 messages.info(request, _("No changes detected."))
142
143 else:
144
145 if event_translation_form.instance.status == status.PUBLIC:
146 if not request.user.has_perm("cms.publish_events"):
147 raise PermissionDenied
148
149 if event_form.cleaned_data["is_recurring"]:
150 recurrence_rule = recurrence_rule_form.save()
151 else:
152 recurrence_rule = None
153
154 event = event_form.save(
155 region=region, recurrence_rule=recurrence_rule, location=poi
156 )
157 event_translation = event_translation_form.save(
158 event=event, user=request.user
159 )
160
161 published = event_translation.status == status.PUBLIC
162 if not event_instance:
163 if published:
164 messages.success(
165 request, _("Event was successfully created and published.")
166 )
167 else:
168 messages.success(request, _("Event was successfully created."))
169 return redirect(
170 "edit_event",
171 **{
172 "event_id": event.id,
173 "region_slug": region.slug,
174 "language_code": language.code,
175 }
176 )
177 if not event_translation_instance:
178 if published:
179 messages.success(
180 request,
181 _("Event translation was successfully created and published."),
182 )
183 else:
184 messages.success(
185 request, _("Event translation was successfully created.")
186 )
187 else:
188 if published:
189 messages.success(request, _("Event was successfully published."))
190 else:
191 messages.success(request, _("Event was successfully saved."))
192
193 return render(
194 request,
195 self.template_name,
196 {
197 "current_menu_item": "events",
198 "event_form": event_form,
199 "event_translation_form": event_translation_form,
200 "recurrence_rule_form": recurrence_rule_form,
201 "poi": poi,
202 "language": language,
203 "languages": region.languages if event_instance else [language],
204 },
205 )
206
[end of src/cms/views/events/event_view.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cms/views/events/event_view.py b/src/cms/views/events/event_view.py
--- a/src/cms/views/events/event_view.py
+++ b/src/cms/views/events/event_view.py
@@ -37,7 +37,7 @@
recurrence_rule_instance = RecurrenceRule.objects.filter(
event=event_instance
).first()
- poi_instance = POI.objects.filter(event=event_instance).first()
+ poi_instance = POI.objects.filter(events=event_instance).first()
# Make form disabled if user has no permission to edit the page
if not request.user.has_perm("cms.edit_events"):
|
{"golden_diff": "diff --git a/src/cms/views/events/event_view.py b/src/cms/views/events/event_view.py\n--- a/src/cms/views/events/event_view.py\n+++ b/src/cms/views/events/event_view.py\n@@ -37,7 +37,7 @@\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n- poi_instance = POI.objects.filter(event=event_instance).first()\n+ poi_instance = POI.objects.filter(events=event_instance).first()\n \n # Make form disabled if user has no permission to edit the page\n if not request.user.has_perm(\"cms.edit_events\"):\n", "issue": "FieldError when accessing event\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhen opening the editor page for an event, a `FieldError` occurs.\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to 'Events'\r\n2. Click on an event or on the 'Create event' button\r\n3. Error occurs (see screenshot below)\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nEvent editor page opens up\r\n\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nError page is loaded \r\n\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.events import EventForm, EventTranslationForm, RecurrenceRuleForm\nfrom ...models import Region, Language, Event, EventTranslation, RecurrenceRule, POI\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(region_permission_required, name=\"dispatch\")\nclass EventView(PermissionRequiredMixin, TemplateView):\n permission_required = \"cms.view_events\"\n raise_exception = True\n\n template_name = \"events/event_form.html\"\n\n # pylint: disable=too-many-locals\n def get(self, request, *args, **kwargs):\n language = Language.objects.get(code=kwargs.get(\"language_code\"))\n\n # get event and event translation objects if they exist, otherwise objects are None\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n poi_instance = POI.objects.filter(event=event_instance).first()\n\n # Make form disabled if user has no permission to edit the page\n if not request.user.has_perm(\"cms.edit_events\"):\n disabled = True\n messages.warning(\n request, _(\"You don't have the permission to edit this event.\")\n )\n elif event_instance and event_instance.archived:\n disabled = True\n messages.warning(\n request, _(\"You cannot edit this event because it is archived.\")\n )\n else:\n disabled = False\n\n event_form = EventForm(instance=event_instance, disabled=disabled)\n event_translation_form = EventTranslationForm(\n instance=event_translation_instance, 
disabled=disabled\n )\n recurrence_rule_form = RecurrenceRuleForm(\n instance=recurrence_rule_instance, disabled=disabled\n )\n\n return render(\n request,\n self.template_name,\n {\n \"current_menu_item\": \"events\",\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi_instance,\n \"language\": language,\n \"languages\": Region.get_current_region(request).languages\n if event_instance\n else [language],\n },\n )\n\n # pylint: disable=too-many-locals,too-many-branches\n def post(self, request, **kwargs):\n region = Region.objects.get(slug=kwargs.get(\"region_slug\"))\n language = Language.objects.get(code=kwargs.get(\"language_code\"))\n poi = POI.objects.filter(id=request.POST.get(\"poi_id\")).first()\n\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n\n if not request.user.has_perm(\"cms.edit_events\"):\n raise PermissionDenied\n\n event_form = EventForm(data=request.POST, instance=event_instance,)\n # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation\n event_form_valid = event_form.is_valid()\n recurrence_rule_form = RecurrenceRuleForm(\n data=request.POST,\n instance=recurrence_rule_instance,\n event_start_date=event_form.cleaned_data.get(\"start_date\", None),\n )\n event_translation_form = EventTranslationForm(\n data=request.POST,\n instance=event_translation_instance,\n region=region,\n language=language,\n )\n\n if (\n not event_form_valid\n or not event_translation_form.is_valid()\n or (\n event_form.cleaned_data[\"is_recurring\"]\n and not recurrence_rule_form.is_valid()\n )\n ):\n forms = [event_form, event_translation_form]\n if event_form.cleaned_data[\"is_recurring\"]:\n forms.append(recurrence_rule_form)\n # Add error messages\n for form in forms:\n for field in form:\n for error in field.errors:\n messages.error(request, _(error))\n for error in form.non_field_errors():\n messages.error(request, _(error))\n\n elif (\n not event_form.has_changed()\n and not event_translation_form.has_changed()\n and (\n not event_form.cleaned_data[\"is_recurring\"]\n or not recurrence_rule_form.has_changed()\n )\n and poi == event_instance.location\n ):\n\n messages.info(request, _(\"No changes detected.\"))\n\n else:\n\n if event_translation_form.instance.status == status.PUBLIC:\n if not request.user.has_perm(\"cms.publish_events\"):\n raise PermissionDenied\n\n if event_form.cleaned_data[\"is_recurring\"]:\n recurrence_rule = recurrence_rule_form.save()\n else:\n recurrence_rule = None\n\n event = event_form.save(\n region=region, recurrence_rule=recurrence_rule, location=poi\n )\n event_translation = event_translation_form.save(\n event=event, user=request.user\n )\n\n published = event_translation.status == status.PUBLIC\n if not event_instance:\n if published:\n messages.success(\n request, _(\"Event was successfully created and published.\")\n )\n else:\n messages.success(request, _(\"Event was successfully created.\"))\n return redirect(\n \"edit_event\",\n **{\n \"event_id\": event.id,\n \"region_slug\": region.slug,\n \"language_code\": language.code,\n }\n )\n if not event_translation_instance:\n if published:\n messages.success(\n request,\n _(\"Event translation was successfully created and 
published.\"),\n )\n else:\n messages.success(\n request, _(\"Event translation was successfully created.\")\n )\n else:\n if published:\n messages.success(request, _(\"Event was successfully published.\"))\n else:\n messages.success(request, _(\"Event was successfully saved.\"))\n\n return render(\n request,\n self.template_name,\n {\n \"current_menu_item\": \"events\",\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi,\n \"language\": language,\n \"languages\": region.languages if event_instance else [language],\n },\n )\n", "path": "src/cms/views/events/event_view.py"}]}
| 2,680 | 130 |
gh_patches_debug_126 | rasdani/github-patches | git_diff | holoviz__panel-3990 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clearing value of a DatetimePicker
#### Description of expected behavior and the observed behavior
Not sure if this is a bug or a new feature to Panel. Let's say I have a layout consisting of a button named "Edit", a DatetimePicker disabled with no default value, and a button named "Submit". At the time of initialization, the value of DatetimePicker is Null. The way these objects interact is as follows:
- Click "Edit" button, DatetimePicker is enabled so user can select a specific time value.
- Click "Submit" button, the selected time value will be pushed to the DB, and the DatetimePicker will be disabled and reset back to Null.
I have tried several ways with no success in clearing the value of the DatetimePicker.
#### Complete, minimal, self-contained example code that reproduces the issue
```
time_widget = pn.widgets.DatetimePicker(disabled=True)
time_widget.value = now()
# how to set value back to None?
time_widget.value = None/pandas.NaT/np.nan => all causes error
```
</issue>
<code>
[start of panel/models/datetime_picker.py]
1 from bokeh.core.enums import CalendarPosition
2 from bokeh.core.properties import (
3 Bool, Date, Datetime, Either, Enum, List, Nullable, String, Tuple,
4 )
5 from bokeh.models.widgets.inputs import InputWidget
6
7
8 class DatetimePicker(InputWidget):
9 ''' Calendar-based date picker widget.
10
11 '''
12
13 value = String(help="""
14 The initial or picked date.
15 """)
16
17 min_date = Nullable(Either(Date, Datetime), help="""
18 Optional earliest allowable date.
19 """)
20
21 max_date = Nullable(Either(Date, Datetime), help="""
22 Optional latest allowable date.
23 """)
24
25 disabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help="""
26 A list of dates of ``(start, end)`` date ranges to make unavailable for
27 selection. All other dates will be avalable.
28
29 .. note::
30 Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.
31 """)
32
33 enabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help="""
34 A list of dates of ``(start, end)`` date ranges to make available for
35 selection. All other dates will be unavailable.
36
37 .. note::
38 Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.
39 """)
40
41 position = Enum(CalendarPosition, default="auto", help="""
42 Where the calendar is rendered relative to the input when ``inline`` is False.
43 """)
44
45 inline = Bool(default=False, help="""
46 Whether the calendar sholud be displayed inline.
47 """)
48
49 enable_time = Bool(default=True)
50
51 enable_seconds = Bool(default=True)
52
53 military_time = Bool(default=True)
54
55 date_format = String("Y-m-d H:i:S")
56
57 mode = String(default="single", help="""
58 Should either be "single" or "range".""")
59
[end of panel/models/datetime_picker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/panel/models/datetime_picker.py b/panel/models/datetime_picker.py
--- a/panel/models/datetime_picker.py
+++ b/panel/models/datetime_picker.py
@@ -10,7 +10,7 @@
'''
- value = String(help="""
+ value = Nullable(String, help="""
The initial or picked date.
""")
|
{"golden_diff": "diff --git a/panel/models/datetime_picker.py b/panel/models/datetime_picker.py\n--- a/panel/models/datetime_picker.py\n+++ b/panel/models/datetime_picker.py\n@@ -10,7 +10,7 @@\n \n '''\n \n- value = String(help=\"\"\"\n+ value = Nullable(String, help=\"\"\"\n The initial or picked date.\n \"\"\")\n", "issue": "Clearing value of a DatetimePicker\n#### Description of expected behavior and the observed behavior\r\nNot sure if this is a bug or a new feature to Panel. Let's say I have a layout consisting of a button named \"Edit\", a DatetimePicker disabled with no default value, and a button named \"Submit\". At the time of initialization, the value of DatetimePicker is Null. The way these objects interact is as follows:\r\n- Click \"Edit\" button, DatetimePicker is enabled so user can select a specific time value.\r\n- Click \"Submit\" button, the selected time value will be pushed to the DB, and the DatetimePicker will be disabled and reset back to Null.\r\n\r\nI have tried several ways with no success in clearing the value of the DatetimePicker.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```\r\ntime_widget = pn.widgets.DatetimePicker(disabled=True)\r\ntime_widget.value = now()\r\n\r\n# how to set value back to None?\r\ntime_widget.value = None/pandas.NaT/np.nan => all causes error\r\n```\r\n\n", "before_files": [{"content": "from bokeh.core.enums import CalendarPosition\nfrom bokeh.core.properties import (\n Bool, Date, Datetime, Either, Enum, List, Nullable, String, Tuple,\n)\nfrom bokeh.models.widgets.inputs import InputWidget\n\n\nclass DatetimePicker(InputWidget):\n ''' Calendar-based date picker widget.\n\n '''\n\n value = String(help=\"\"\"\n The initial or picked date.\n \"\"\")\n\n min_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional earliest allowable date.\n \"\"\")\n\n max_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional latest allowable date.\n \"\"\")\n\n disabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make unavailable for\n selection. All other dates will be avalable.\n\n .. note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n enabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make available for\n selection. All other dates will be unavailable.\n\n .. note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n position = Enum(CalendarPosition, default=\"auto\", help=\"\"\"\n Where the calendar is rendered relative to the input when ``inline`` is False.\n \"\"\")\n\n inline = Bool(default=False, help=\"\"\"\n Whether the calendar sholud be displayed inline.\n \"\"\")\n\n enable_time = Bool(default=True)\n\n enable_seconds = Bool(default=True)\n\n military_time = Bool(default=True)\n\n date_format = String(\"Y-m-d H:i:S\")\n\n mode = String(default=\"single\", help=\"\"\"\n Should either be \"single\" or \"range\".\"\"\")\n", "path": "panel/models/datetime_picker.py"}]}
| 1,301 | 84 |
gh_patches_debug_8704 | rasdani/github-patches | git_diff | sublimelsp__LSP-1557 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[regression] lsp_execute does nothing due to empty session
Since this [commit](https://github.com/sublimelsp/LSP/commit/7d05794fa3cc4ecd3931d09a90e801addc70d9fa) the `capability` variable got deleted which means that `self.best_session(self.capability)` is unable to find session.
The consequence is that [LSP-metals.sublime-commands](https://github.com/scalameta/metals-sublime/blob/master/LSP-metals.sublime-commands) aren't executed.
</issue>
<code>
[start of plugin/execute_command.py]
1 import sublime
2 from .core.protocol import Error
3 from .core.protocol import ExecuteCommandParams
4 from .core.registry import LspTextCommand
5 from .core.registry import windows
6 from .core.typing import List, Optional, Any
7 from .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier
8
9
10 class LspExecuteCommand(LspTextCommand):
11
12 def run(self,
13 edit: sublime.Edit,
14 command_name: Optional[str] = None,
15 command_args: Optional[List[Any]] = None,
16 session_name: Optional[str] = None,
17 event: Optional[dict] = None) -> None:
18 # Handle VSCode-specific command for triggering AC/sighelp
19 if command_name == "editor.action.triggerSuggest":
20 # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.
21 return sublime.set_timeout(lambda: self.view.run_command("auto_complete"))
22 if command_name == "editor.action.triggerParameterHints":
23
24 def run_async() -> None:
25 listener = windows.listener_for_view(self.view)
26 if listener:
27 listener.do_signature_help_async(manual=False)
28
29 return sublime.set_timeout_async(run_async)
30 session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)
31 if session and command_name:
32 if command_args:
33 self._expand_variables(command_args)
34 params = {"command": command_name} # type: ExecuteCommandParams
35 if command_args:
36 params["arguments"] = command_args
37
38 def handle_response(response: Any) -> None:
39 assert command_name
40 if isinstance(response, Error):
41 sublime.message_dialog("command {} failed. Reason: {}".format(command_name, str(response)))
42 return
43 msg = "command {} completed".format(command_name)
44 if response:
45 msg += "with response: {}".format(response)
46 window = self.view.window()
47 if window:
48 window.status_message(msg)
49
50 session.execute_command(params, progress=True).then(handle_response)
51
52 def _expand_variables(self, command_args: List[Any]) -> None:
53 region = self.view.sel()[0]
54 for i, arg in enumerate(command_args):
55 if arg in ["$document_id", "${document_id}"]:
56 command_args[i] = text_document_identifier(self.view)
57 if arg in ["$file_uri", "${file_uri}"]:
58 command_args[i] = uri_from_view(self.view)
59 elif arg in ["$selection", "${selection}"]:
60 command_args[i] = self.view.substr(region)
61 elif arg in ["$offset", "${offset}"]:
62 command_args[i] = region.b
63 elif arg in ["$selection_begin", "${selection_begin}"]:
64 command_args[i] = region.begin()
65 elif arg in ["$selection_end", "${selection_end}"]:
66 command_args[i] = region.end()
67 elif arg in ["$position", "${position}"]:
68 command_args[i] = offset_to_point(self.view, region.b).to_lsp()
69 elif arg in ["$range", "${range}"]:
70 command_args[i] = region_to_range(self.view, region).to_lsp()
71
[end of plugin/execute_command.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugin/execute_command.py b/plugin/execute_command.py
--- a/plugin/execute_command.py
+++ b/plugin/execute_command.py
@@ -27,7 +27,7 @@
listener.do_signature_help_async(manual=False)
return sublime.set_timeout_async(run_async)
- session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)
+ session = self.session_by_name(session_name if session_name else self.session_name)
if session and command_name:
if command_args:
self._expand_variables(command_args)
|
{"golden_diff": "diff --git a/plugin/execute_command.py b/plugin/execute_command.py\n--- a/plugin/execute_command.py\n+++ b/plugin/execute_command.py\n@@ -27,7 +27,7 @@\n listener.do_signature_help_async(manual=False)\n \n return sublime.set_timeout_async(run_async)\n- session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)\n+ session = self.session_by_name(session_name if session_name else self.session_name)\n if session and command_name:\n if command_args:\n self._expand_variables(command_args)\n", "issue": "[regression] lsp_execute does nothing due to empty session\nSince this [commit](https://github.com/sublimelsp/LSP/commit/7d05794fa3cc4ecd3931d09a90e801addc70d9fa) the `capability` variable got deleted which means that `self.best_session(self.capability)` is unable to find session.\r\n\r\nThe consequence is that [LSP-metals.sublime-commands](https://github.com/scalameta/metals-sublime/blob/master/LSP-metals.sublime-commands) aren't executed.\r\n\r\n \r\n\n", "before_files": [{"content": "import sublime\nfrom .core.protocol import Error\nfrom .core.protocol import ExecuteCommandParams\nfrom .core.registry import LspTextCommand\nfrom .core.registry import windows\nfrom .core.typing import List, Optional, Any\nfrom .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier\n\n\nclass LspExecuteCommand(LspTextCommand):\n\n def run(self,\n edit: sublime.Edit,\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n session_name: Optional[str] = None,\n event: Optional[dict] = None) -> None:\n # Handle VSCode-specific command for triggering AC/sighelp\n if command_name == \"editor.action.triggerSuggest\":\n # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.\n return sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"))\n if command_name == \"editor.action.triggerParameterHints\":\n\n def run_async() -> None:\n listener = windows.listener_for_view(self.view)\n if listener:\n listener.do_signature_help_async(manual=False)\n\n return sublime.set_timeout_async(run_async)\n session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)\n if session and command_name:\n if command_args:\n self._expand_variables(command_args)\n params = {\"command\": command_name} # type: ExecuteCommandParams\n if command_args:\n params[\"arguments\"] = command_args\n\n def handle_response(response: Any) -> None:\n assert command_name\n if isinstance(response, Error):\n sublime.message_dialog(\"command {} failed. 
Reason: {}\".format(command_name, str(response)))\n return\n msg = \"command {} completed\".format(command_name)\n if response:\n msg += \"with response: {}\".format(response)\n window = self.view.window()\n if window:\n window.status_message(msg)\n\n session.execute_command(params, progress=True).then(handle_response)\n\n def _expand_variables(self, command_args: List[Any]) -> None:\n region = self.view.sel()[0]\n for i, arg in enumerate(command_args):\n if arg in [\"$document_id\", \"${document_id}\"]:\n command_args[i] = text_document_identifier(self.view)\n if arg in [\"$file_uri\", \"${file_uri}\"]:\n command_args[i] = uri_from_view(self.view)\n elif arg in [\"$selection\", \"${selection}\"]:\n command_args[i] = self.view.substr(region)\n elif arg in [\"$offset\", \"${offset}\"]:\n command_args[i] = region.b\n elif arg in [\"$selection_begin\", \"${selection_begin}\"]:\n command_args[i] = region.begin()\n elif arg in [\"$selection_end\", \"${selection_end}\"]:\n command_args[i] = region.end()\n elif arg in [\"$position\", \"${position}\"]:\n command_args[i] = offset_to_point(self.view, region.b).to_lsp()\n elif arg in [\"$range\", \"${range}\"]:\n command_args[i] = region_to_range(self.view, region).to_lsp()\n", "path": "plugin/execute_command.py"}]}
| 1,465 | 124 |
gh_patches_debug_12729 | rasdani/github-patches | git_diff | facebookresearch__fairscale-86 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[feat] OSS: Support nvidia's LARC
## 🚀 Feature
Make it possible to support LARC with OSS
## Motivation
LARC is a must have for large batch jobs, right now OSS will break on LARC because of the closure() being passed
## Pitch
Should be doable to gracefully handle optimizers with do not support closures in step()
## Alternatives
Not supporting LARC, reduces a lot of OSS interest
## Additional context
cc @mannatsingh @prigoyal @msbaines
</issue>
<code>
[start of fairscale/optim/oss.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import copy
7 from itertools import chain
8 import logging
9 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type
10
11 import torch
12 import torch.distributed as dist
13 from torch.optim import SGD, Optimizer
14
15 from .utils import broadcast_object, recursive_copy_to_device
16
17 if TYPE_CHECKING: # pragma: no cover
18 from torch.optim.optimizer import _params_t
19 else:
20 _params_t = Any
21
22
23 class OSS(Optimizer):
24 """Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
25 optimizer and shards its state as described by ZeRO_.
26 ::
27 opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
28
29 .. _ZeRO: https://arxiv.org/abs/1910.02054
30
31 We use a greedy algorithm to pack a number of parameters
32 at each rank. Each parameter belongs to a single rank and
33 is not divided among rank.
34
35 After each rank completed their parameter update, they broadcast
36 the new version of the parameters to all other ranks to synchronize
37 the parameters for next round forward/backward computation.
38
39 Args:
40 params (list of tensors):
41 parameters to be optimized
42 Keyword Args:
43 optim (torch.nn.Optimizer):
44 optimizer to shard (default: SGD)
45 group (group):
46 torch.distributed group (default: group.WORLD)
47 """
48
49 optim: Optimizer
50 in_super_constructor: bool
51
52 def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):
53 # Hold all the model params in the root .param_groups
54 self.in_super_constructor = True
55 super().__init__(params, defaults)
56 self.in_super_constructor = False
57
58 # Build the wrapped optimizer, responsible for a shard of the params
59 self.group = group
60 self.rank = dist.get_rank(group)
61 split_param_groups = self.partition_parameters()
62 self.optim = optim(split_param_groups[self.rank], **defaults)
63
64 # Optional consolidated optimizer state
65 self._all_states: List[Dict[str, Any]] = []
66
67 # Current device is set by the parameters allocated to this rank
68 self._device = split_param_groups[self.rank][0]["params"][0].device
69
70 # Sync local and global param_groups keys
71 for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
72 for k, v in local_group.items():
73 if k != "params":
74 global_group[k] = v
75
76 def partition_parameters(self) -> List[List[dict]]:
77 """Partitions parameters across distributed ranks.
78
79 Returns a list of param_groups (which is a list of dict) where each
80 element of the list contains the param_groups for a rank. Element 0
81 corresponds to rank 0, etc. We need all the ranks for the broadcast
82 inside step().
83 """
84 world_size = dist.get_world_size(self.group)
85 param_groups: List[List] = [list() for _ in range(world_size)]
86 sizes = [0] * world_size
87 for param_group in self.param_groups:
88 param_lists: List[List] = [list() for _ in range(world_size)]
89 for param in param_group["params"]:
90 # Add this param to rank with smallest size.
91 rank = sizes.index(min(sizes))
92 param_lists[rank].append(param)
93 sizes[rank] += param.numel()
94 for rank, params in enumerate(param_lists):
95 param_group_rank = copy.copy(param_group)
96 param_group_rank["params"] = params
97 param_groups[rank].append(param_group_rank)
98 return param_groups
99
100 # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.
101 # For example, the apex library contains fused optimizers with a step that supports extra kwargs.
102 def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:
103 # Sync oss param_groups attributes in case they've been updated by a scheduler.
104 self._sync_param_groups()
105
106 # Run the optimizer step on this shard only
107 loss = self.optim.step(closure=closure, **kwargs) # type: ignore
108
109 # Sync all the states. Broadcast requests are issued async, we check completeness before moving on
110 requests = []
111 for rank, param_groups in enumerate(self.partition_parameters()):
112 for param_group in param_groups:
113 for param in param_group["params"]:
114 requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))
115
116 _ = list(map(lambda x: x.wait(), requests))
117 return loss
118
119 def local_state_dict(self) -> dict:
120 """ Gets this rank's state_dict. """
121 return self.optim.state_dict()
122
123 def consolidate_state_dict(self, recipient_rank: int = 0) -> None:
124 """ Update the consolidated state_dict list, one per rank.
125
126 This needs to be called on all replicas """
127
128 # Sync lr and other attributes in case its been updated
129 self._sync_param_groups()
130
131 if self.rank == recipient_rank:
132 # Pull the sharded state from all the other replicas
133 # Store all the states in order, rank by rank
134 logging.debug("Pulling the sharded optimizer state from all replicas")
135 self._all_states = self._collect_sharded_states()
136 else:
137 # Acknowledge broadcasts, and send this rank's shard when needed
138 self._broadcast_state_dict()
139
140 def state_dict(self) -> Dict[str, Any]:
141 """
142 Return the last known global optimizer state, which consist of a list of the shards.
143
144 NOTE: This is limited to the replica which was responsible for the consolidation.
145 The state may also not be up to date, depending on when `consolidate_state_dict` was last called.
146 """
147
148 assert (
149 len(self._all_states) > 0
150 ), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand"
151
152 # Flatten the param_groups, save the partition which logs the rank <> shard correspondence
153 partition: List[Tuple[int, int]] = []
154 param_groups: List[Dict[Any, Any]] = []
155
156 start = 0
157 for i, s in enumerate(self._all_states):
158 param_groups.extend(s["param_groups"])
159 end = start + len(s["param_groups"])
160 partition.append((start, end))
161 start = end
162
163 return {
164 "state": [s["state"] for s in self._all_states],
165 "param_groups": param_groups,
166 "partition": partition,
167 }
168
169 def load_local_state_dict(self, state_dict: dict) -> None:
170 """ Loads this rank's state_dict. """
171
172 self.optim.load_state_dict(state_dict)
173
174 # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)
175 # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268
176 groups = self.optim.param_groups
177 saved_groups = state_dict["param_groups"]
178 id_map = {
179 old_id: p
180 for old_id, p in zip(chain(*(g["params"] for g in saved_groups)), chain(*(g["params"] for g in groups)))
181 }
182 for k, v in state_dict["state"].items():
183 if k in id_map:
184 param = id_map[k]
185 self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)
186
187 # Restore the global param_groups (the params themselves are already correct)
188 for global_group, local_group in zip(self.param_groups, groups):
189 for k, v in local_group.items():
190 if k != "params":
191 global_group[k] = v
192
193 def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
194 """ Restore the global parameter groups as well as the shard """
195
196 # Get this optimizer's param_groups shard
197 param_groups = state_dict["param_groups"][
198 state_dict["partition"][self.rank][0] : state_dict["partition"][self.rank][1]
199 ]
200 # Dispatch this rank's state dictionary to the wrapped shard optimizer
201 self.load_local_state_dict({"state": state_dict["state"][self.rank], "param_groups": param_groups})
202
203 def add_param_group(self, param_group: dict) -> None:
204 super().add_param_group(param_group)
205 if not self.in_super_constructor:
206 param_groups = self.partition_parameters()[self.rank]
207 if len(param_groups) == len(self.optim.param_groups) + 1:
208 self.optim.add_param_group(param_groups[-1])
209
210 def _sync_param_groups(self) -> None:
211 """Sync learning rate and other optimizer attributes (needed to support schedulers)."""
212 for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
213 for k in local_group.keys():
214 if k != "params":
215 # Params have been sharded and should not be synced here
216 local_group[k] = global_group[k]
217
218 def _collect_sharded_states(self) -> List[Dict[str, Any]]:
219 """
220 Collect all the state shards, in CPU memory.
221 """
222 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)
223 all_states: List[Dict[str, Any]] = []
224
225 for rank in range(dist.get_world_size(group=self.group)):
226 if rank == self.rank:
227 logging.debug("Saving self state")
228 all_states.append(
229 recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device("cpu"))
230 )
231
232 # Sync with other replicas
233 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)
234 else:
235 # Fetch the optim state from the other replicas
236 logging.debug("Receiving state from rank %s ", rank)
237 replica_state = broadcast_object(
238 empty_buffer, src_rank=rank, group=self.group, dist_device=self._device
239 )
240
241 all_states.append(
242 recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device("cpu"))
243 )
244
245 logging.debug("State from rank %s received", rank)
246
247 return all_states
248
249 def _broadcast_state_dict(self) -> None:
250 """
251 Broadcast this rank's state shard, discard others
252 """
253 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)
254
255 for rank in range(dist.get_world_size(group=self.group)):
256 if rank == self.rank:
257 # Send the state to the reference replica
258 logging.debug(
259 "Sending the sharded optimizer state to the reference replica from rank %s", rank,
260 )
261 broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)
262 else:
263 # Discard this tensor/rank, broadcast necessary for syncing
264 logging.debug("Discarding broadcast from rank %s", rank)
265 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)
266
[end of fairscale/optim/oss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py
--- a/fairscale/optim/oss.py
+++ b/fairscale/optim/oss.py
@@ -103,8 +103,11 @@
# Sync oss param_groups attributes in case they've been updated by a scheduler.
self._sync_param_groups()
- # Run the optimizer step on this shard only
- loss = self.optim.step(closure=closure, **kwargs) # type: ignore
+ # Run the optimizer step on this shard only:
+ if closure is not None:
+ loss = self.optim.step(closure=closure, **kwargs) # type: ignore
+ else:
+ loss = self.optim.step(**kwargs)
# Sync all the states. Broadcast requests are issued async, we check completeness before moving on
requests = []
|
{"golden_diff": "diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py\n--- a/fairscale/optim/oss.py\n+++ b/fairscale/optim/oss.py\n@@ -103,8 +103,11 @@\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups()\n \n- # Run the optimizer step on this shard only\n- loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n+ # Run the optimizer step on this shard only:\n+ if closure is not None:\n+ loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n+ else:\n+ loss = self.optim.step(**kwargs)\n \n # Sync all the states. Broadcast requests are issued async, we check completeness before moving on\n requests = []\n", "issue": "[feat] OSS: Support nvidia's LARC\n## \ud83d\ude80 Feature\r\nMake it possible to support LARC with OSS\r\n\r\n## Motivation\r\nLARC is a must have for large batch jobs, right now OSS will break on LARC because of the closure() being passed\r\n\r\n## Pitch\r\nShould be doable to gracefully handle optimizers with do not support closures in step()\r\n\r\n## Alternatives\r\nNot supporting LARC, reduces a lot of OSS interest\r\n\r\n## Additional context\r\n\r\ncc @mannatsingh @prigoyal @msbaines \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom itertools import chain\nimport logging\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, recursive_copy_to_device\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n We use a greedy algorithm to pack a number of parameters\n at each rank. 
Each parameter belongs to a single rank and\n is not divided among rank.\n\n After each rank completed their parameter update, they broadcast\n the new version of the parameters to all other ranks to synchronize\n the parameters for next round forward/backward computation.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n \"\"\"\n\n optim: Optimizer\n in_super_constructor: bool\n\n def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):\n # Hold all the model params in the root .param_groups\n self.in_super_constructor = True\n super().__init__(params, defaults)\n self.in_super_constructor = False\n\n # Build the wrapped optimizer, responsible for a shard of the params\n self.group = group\n self.rank = dist.get_rank(group)\n split_param_groups = self.partition_parameters()\n self.optim = optim(split_param_groups[self.rank], **defaults)\n\n # Optional consolidated optimizer state\n self._all_states: List[Dict[str, Any]] = []\n\n # Current device is set by the parameters allocated to this rank\n self._device = split_param_groups[self.rank][0][\"params\"][0].device\n\n # Sync local and global param_groups keys\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. We need all the ranks for the broadcast\n inside step().\n \"\"\"\n world_size = dist.get_world_size(self.group)\n param_groups: List[List] = [list() for _ in range(world_size)]\n sizes = [0] * world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n param_groups[rank].append(param_group_rank)\n return param_groups\n\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups()\n\n # Run the optimizer step on this shard only\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n\n # Sync all the states. Broadcast requests are issued async, we check completeness before moving on\n requests = []\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))\n\n _ = list(map(lambda x: x.wait(), requests))\n return loss\n\n def local_state_dict(self) -> dict:\n \"\"\" Gets this rank's state_dict. 
\"\"\"\n return self.optim.state_dict()\n\n def consolidate_state_dict(self, recipient_rank: int = 0) -> None:\n \"\"\" Update the consolidated state_dict list, one per rank.\n\n This needs to be called on all replicas \"\"\"\n\n # Sync lr and other attributes in case its been updated\n self._sync_param_groups()\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.debug(\"Pulling the sharded optimizer state from all replicas\")\n self._all_states = self._collect_sharded_states()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n\n NOTE: This is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n\n assert (\n len(self._all_states) > 0\n ), \"The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand\"\n\n # Flatten the param_groups, save the partition which logs the rank <> shard correspondence\n partition: List[Tuple[int, int]] = []\n param_groups: List[Dict[Any, Any]] = []\n\n start = 0\n for i, s in enumerate(self._all_states):\n param_groups.extend(s[\"param_groups\"])\n end = start + len(s[\"param_groups\"])\n partition.append((start, end))\n start = end\n\n return {\n \"state\": [s[\"state\"] for s in self._all_states],\n \"param_groups\": param_groups,\n \"partition\": partition,\n }\n\n def load_local_state_dict(self, state_dict: dict) -> None:\n \"\"\" Loads this rank's state_dict. \"\"\"\n\n self.optim.load_state_dict(state_dict)\n\n # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)\n # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268\n groups = self.optim.param_groups\n saved_groups = state_dict[\"param_groups\"]\n id_map = {\n old_id: p\n for old_id, p in zip(chain(*(g[\"params\"] for g in saved_groups)), chain(*(g[\"params\"] for g in groups)))\n }\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)\n\n # Restore the global param_groups (the params themselves are already correct)\n for global_group, local_group in zip(self.param_groups, groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Restore the global parameter groups as well as the shard \"\"\"\n\n # Get this optimizer's param_groups shard\n param_groups = state_dict[\"param_groups\"][\n state_dict[\"partition\"][self.rank][0] : state_dict[\"partition\"][self.rank][1]\n ]\n # Dispatch this rank's state dictionary to the wrapped shard optimizer\n self.load_local_state_dict({\"state\": state_dict[\"state\"][self.rank], \"param_groups\": param_groups})\n\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n def _sync_param_groups(self) -> None:\n \"\"\"Sync learning rate and other optimizer attributes 
(needed to support schedulers).\"\"\"\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k in local_group.keys():\n if k != \"params\":\n # Params have been sharded and should not be synced here\n local_group[k] = global_group[k]\n\n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"\n Collect all the state shards, in CPU memory.\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n all_states: List[Dict[str, Any]] = []\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n logging.debug(\"Saving self state\")\n all_states.append(\n recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n # Sync with other replicas\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Fetch the optim state from the other replicas\n logging.debug(\"Receiving state from rank %s \", rank)\n replica_state = broadcast_object(\n empty_buffer, src_rank=rank, group=self.group, dist_device=self._device\n )\n\n all_states.append(\n recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n logging.debug(\"State from rank %s received\", rank)\n\n return all_states\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"\n Broadcast this rank's state shard, discard others\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n # Send the state to the reference replica\n logging.debug(\n \"Sending the sharded optimizer state to the reference replica from rank %s\", rank,\n )\n broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Discard this tensor/rank, broadcast necessary for syncing\n logging.debug(\"Discarding broadcast from rank %s\", rank)\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n", "path": "fairscale/optim/oss.py"}]}
| 3,827 | 197 |
gh_patches_debug_34709
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-3422
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove ObtainThaliaAuthToken view
### Describe the change
Remove https://github.com/svthalia/concrexit/blob/4b664220b406361245e153433288ffbcf9d6f4a9/website/members/views.py#L35
### Motivation
We don't use it
</issue>
<code>
[start of website/thaliawebsite/api/v1/urls.py]
1 from django.conf import settings
2 from django.urls import include, path
3
4 from rest_framework.schemas import get_schema_view
5
6 from members.views import ObtainThaliaAuthToken
7 from thaliawebsite.api.openapi import OAuthSchemaGenerator
8
9 app_name = "thaliawebsite"
10
11 urlpatterns = [
12 path("token-auth/", ObtainThaliaAuthToken.as_view()),
13 path("", include("activemembers.api.v1.urls")),
14 path("", include("announcements.api.v1.urls")),
15 path("", include("events.api.v1.urls")),
16 path("", include("members.api.v1.urls")),
17 path("", include("partners.api.v1.urls")),
18 path("", include("pizzas.api.v1.urls")),
19 path("", include("photos.api.v1.urls")),
20 path("", include("pushnotifications.api.v1.urls")),
21 path("", include("payments.api.v1.urls")),
22 path(
23 "schema",
24 get_schema_view(
25 title="API v1",
26 version=settings.SOURCE_COMMIT,
27 url="/api/v1/",
28 urlconf="thaliawebsite.api.v1.urls",
29 generator_class=OAuthSchemaGenerator,
30 ),
31 name="schema",
32 ),
33 ]
34
[end of website/thaliawebsite/api/v1/urls.py]
[start of website/members/views.py]
1 """Views provided by the members package."""
2 import json
3 from datetime import date, datetime
4
5 from django.contrib.auth.decorators import login_required
6 from django.contrib.messages.views import SuccessMessageMixin
7 from django.db.models import Q, QuerySet
8 from django.http import Http404, HttpResponse
9 from django.shortcuts import get_object_or_404
10 from django.template.response import TemplateResponse
11 from django.urls import reverse_lazy
12 from django.utils.decorators import method_decorator
13 from django.utils.translation import gettext_lazy as _
14 from django.views.generic import CreateView, DetailView, UpdateView
15 from django.views.generic.base import TemplateResponseMixin, TemplateView, View
16
17 from rest_framework.authtoken.models import Token
18 from rest_framework.authtoken.views import ObtainAuthToken
19 from rest_framework.response import Response
20
21 import activemembers.services as activemembers_services
22 import events.services as event_services
23 import pizzas.services
24 from members import emails, services
25 from members.decorators import membership_required
26 from members.models import EmailChange, Member, Membership, Profile
27 from thaliawebsite.views import PagedView
28 from utils.media.services import fetch_thumbnails_db
29 from utils.snippets import datetime_to_lectureyear
30
31 from . import models
32 from .forms import ProfileForm
33 from .services import member_achievements, member_societies
34
35
36 class ObtainThaliaAuthToken(ObtainAuthToken):
37 """Custom override of the AuthToken view to force lowercase the username."""
38
39 def post(self, request, *args, **kwargs) -> HttpResponse:
40 serializer = self.serializer_class(
41 data={
42 "username": request.data.get("username").lower()
43 if "username" in request.data
44 else None,
45 "password": request.data.get("password"),
46 },
47 context={"request": request},
48 )
49
50 if not serializer.is_valid():
51 return Response({"error": "Unauthorized"}, status=401)
52
53 user = serializer.validated_data["user"]
54 token, _ = Token.objects.get_or_create(user=user)
55 return Response({"token": token.key})
56
57
58 @method_decorator(login_required, "dispatch")
59 @method_decorator(membership_required, "dispatch")
60 class MembersIndex(PagedView):
61 """View that renders the members overview."""
62
63 model = Member
64 paginate_by = 28
65 template_name = "members/index.html"
66 context_object_name = "members"
67 keywords = None
68 query_filter = ""
69 year_range = []
70
71 def setup(self, request, *args, **kwargs) -> None:
72 super().setup(request, *args, **kwargs)
73 current_lectureyear = datetime_to_lectureyear(date.today())
74 self.year_range = list(
75 reversed(range(current_lectureyear - 5, current_lectureyear + 1))
76 )
77 self.keywords = request.GET.get("keywords", "").split() or None
78 self.query_filter = kwargs.get("filter", None)
79
80 def get_queryset(self) -> QuerySet:
81 memberships_query = Q(until__gt=datetime.now()) | Q(until=None)
82 members_query = ~Q(id=None)
83
84 if self.query_filter and self.query_filter.isdigit():
85 members_query &= Q(profile__starting_year=int(self.query_filter))
86 memberships_query &= Q(type=Membership.MEMBER)
87 elif self.query_filter == "older":
88 members_query &= Q(profile__starting_year__lt=self.year_range[-1])
89 memberships_query &= Q(type=Membership.MEMBER)
90 elif self.query_filter == "former":
91 # Filter out all current active memberships
92 memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
93 memberships = Membership.objects.filter(memberships_query)
94 members_query &= ~Q(pk__in=memberships.values("user__pk"))
95 # Members_query contains users that are not currently (honorary)member
96 elif self.query_filter == "benefactors":
97 memberships_query &= Q(type=Membership.BENEFACTOR)
98 elif self.query_filter == "honorary":
99 memberships_query = Q(until__gt=datetime.now().date()) | Q(until=None)
100 memberships_query &= Q(type=Membership.HONORARY)
101
102 if self.keywords:
103 for key in self.keywords:
104 # Works because relevant options all have `nick` in their key
105 members_query &= (
106 (
107 Q(profile__nickname__icontains=key)
108 & Q(profile__display_name_preference__contains="nick")
109 )
110 | Q(first_name__icontains=key)
111 | Q(last_name__icontains=key)
112 | Q(username__icontains=key)
113 )
114
115 if self.query_filter == "former":
116 memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
117 memberships = Membership.objects.filter(memberships_query)
118 all_memberships = Membership.objects.all()
119 # Only keep members that were once members, or are legacy users
120 # that do not have any memberships at all
121 members_query &= Q(pk__in=memberships.values("user__pk")) | ~Q(
122 pk__in=all_memberships.values("user__pk")
123 )
124 else:
125 memberships = Membership.objects.filter(memberships_query)
126 members_query &= Q(pk__in=memberships.values("user__pk"))
127 members = (
128 Member.objects.filter(members_query)
129 .order_by("first_name")
130 .select_related("profile")
131 )
132 return members
133
134 def get_context_data(self, **kwargs) -> dict:
135 context = super().get_context_data(**kwargs)
136
137 context.update(
138 {
139 "filter": self.query_filter,
140 "year_range": self.year_range,
141 "keywords": self.keywords,
142 }
143 )
144
145 fetch_thumbnails_db(
146 [x.profile.photo for x in context["object_list"] if x.profile.photo]
147 )
148
149 return context
150
151
152 @method_decorator(login_required, "dispatch")
153 class ProfileDetailView(DetailView):
154 """View that renders a member's profile."""
155
156 context_object_name = "member"
157 model = Member
158 template_name = "members/user/profile.html"
159
160 def setup(self, request, *args, **kwargs) -> None:
161 if "pk" not in kwargs and request.member:
162 kwargs["pk"] = request.member.pk
163 super().setup(request, *args, **kwargs)
164
165 def get_context_data(self, **kwargs) -> dict:
166 context = super().get_context_data(**kwargs)
167 member = context["member"]
168
169 achievements = member_achievements(member)
170 societies = member_societies(member)
171
172 membership = member.current_membership
173 membership_type = _("Unknown membership history")
174 if membership:
175 membership_type = membership.get_type_display()
176 elif member.has_been_honorary_member():
177 membership_type = _("Former honorary member")
178 elif member.has_been_member():
179 membership_type = _("Former member")
180 elif member.latest_membership:
181 membership_type = _("Former benefactor")
182
183 context.update(
184 {
185 "achievements": achievements,
186 "societies": societies,
187 "membership_type": membership_type,
188 }
189 )
190
191 return context
192
193
194 @method_decorator(login_required, "dispatch")
195 class UserProfileUpdateView(SuccessMessageMixin, UpdateView):
196 """View that allows a user to update their profile."""
197
198 template_name = "members/user/edit_profile.html"
199 model = Profile
200 form_class = ProfileForm
201 success_url = reverse_lazy("members:edit-profile")
202 success_message = _("Your profile has been updated successfully.")
203
204 def get_object(self, queryset=None) -> Profile:
205 return get_object_or_404(models.Profile, user=self.request.user)
206
207
208 @method_decorator(login_required, "dispatch")
209 class StatisticsView(TemplateView):
210 """View that renders the statistics page."""
211
212 template_name = "members/statistics.html"
213
214 def get_context_data(self, **kwargs) -> dict:
215 context = super().get_context_data(**kwargs)
216 context.update(
217 {
218 "total_members": models.Member.current_members.count(),
219 "cohort_sizes": json.dumps(services.gen_stats_year()),
220 "member_type_distribution": json.dumps(
221 services.gen_stats_member_type()
222 ),
223 "committee_sizes": json.dumps(
224 activemembers_services.generate_statistics()
225 ),
226 "event_categories": json.dumps(
227 event_services.generate_category_statistics()
228 ),
229 "total_pizza_orders": json.dumps(
230 pizzas.services.gen_stats_pizza_orders()
231 ),
232 "active_members": json.dumps(services.gen_stats_active_members()),
233 }
234 )
235
236 return context
237
238
239 @method_decorator(login_required, name="dispatch")
240 class EmailChangeFormView(CreateView):
241 """View that renders the email change form."""
242
243 model = EmailChange
244 fields = ["email", "member"]
245 template_name = "members/user/email_change.html"
246
247 def get_initial(self) -> dict:
248 initial = super().get_initial()
249 initial["email"] = self.request.member.email
250 return initial
251
252 def post(self, request, *args, **kwargs) -> HttpResponse:
253 request.POST = request.POST.dict()
254 request.POST["member"] = request.member.pk
255 return super().post(request, *args, **kwargs)
256
257 def form_valid(self, form) -> HttpResponse:
258 change_request = form.save()
259 emails.send_email_change_confirmation_messages(change_request)
260 return TemplateResponse(
261 request=self.request, template="members/user/email_change_requested.html"
262 )
263
264
265 @method_decorator(login_required, name="dispatch")
266 class EmailChangeConfirmView(View, TemplateResponseMixin):
267 """View that renders an HTML template and confirms the old email address."""
268
269 template_name = "members/user/email_change_confirmed.html"
270
271 def get(self, request, *args, **kwargs) -> HttpResponse:
272 if not EmailChange.objects.filter(confirm_key=kwargs["key"]).exists():
273 raise Http404
274
275 change_request = EmailChange.objects.get(confirm_key=kwargs["key"])
276
277 services.confirm_email_change(change_request)
278
279 return self.render_to_response({})
280
281
282 @method_decorator(login_required, name="dispatch")
283 class EmailChangeVerifyView(View, TemplateResponseMixin):
284 """View that renders an HTML template and verifies the new email address."""
285
286 template_name = "members/user/email_change_verified.html"
287
288 def get(self, request, *args, **kwargs) -> HttpResponse:
289 if not EmailChange.objects.filter(verify_key=kwargs["key"]).exists():
290 raise Http404
291
292 change_request = EmailChange.objects.get(verify_key=kwargs["key"])
293
294 services.verify_email_change(change_request)
295
296 return self.render_to_response({})
297
[end of website/members/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/members/views.py b/website/members/views.py
--- a/website/members/views.py
+++ b/website/members/views.py
@@ -14,10 +14,6 @@
from django.views.generic import CreateView, DetailView, UpdateView
from django.views.generic.base import TemplateResponseMixin, TemplateView, View
-from rest_framework.authtoken.models import Token
-from rest_framework.authtoken.views import ObtainAuthToken
-from rest_framework.response import Response
-
import activemembers.services as activemembers_services
import events.services as event_services
import pizzas.services
@@ -33,28 +29,6 @@
from .services import member_achievements, member_societies
-class ObtainThaliaAuthToken(ObtainAuthToken):
- """Custom override of the AuthToken view to force lowercase the username."""
-
- def post(self, request, *args, **kwargs) -> HttpResponse:
- serializer = self.serializer_class(
- data={
- "username": request.data.get("username").lower()
- if "username" in request.data
- else None,
- "password": request.data.get("password"),
- },
- context={"request": request},
- )
-
- if not serializer.is_valid():
- return Response({"error": "Unauthorized"}, status=401)
-
- user = serializer.validated_data["user"]
- token, _ = Token.objects.get_or_create(user=user)
- return Response({"token": token.key})
-
-
@method_decorator(login_required, "dispatch")
@method_decorator(membership_required, "dispatch")
class MembersIndex(PagedView):
diff --git a/website/thaliawebsite/api/v1/urls.py b/website/thaliawebsite/api/v1/urls.py
--- a/website/thaliawebsite/api/v1/urls.py
+++ b/website/thaliawebsite/api/v1/urls.py
@@ -3,13 +3,11 @@
from rest_framework.schemas import get_schema_view
-from members.views import ObtainThaliaAuthToken
from thaliawebsite.api.openapi import OAuthSchemaGenerator
app_name = "thaliawebsite"
urlpatterns = [
- path("token-auth/", ObtainThaliaAuthToken.as_view()),
path("", include("activemembers.api.v1.urls")),
path("", include("announcements.api.v1.urls")),
path("", include("events.api.v1.urls")),
|
{"golden_diff": "diff --git a/website/members/views.py b/website/members/views.py\n--- a/website/members/views.py\n+++ b/website/members/views.py\n@@ -14,10 +14,6 @@\n from django.views.generic import CreateView, DetailView, UpdateView\n from django.views.generic.base import TemplateResponseMixin, TemplateView, View\n \n-from rest_framework.authtoken.models import Token\n-from rest_framework.authtoken.views import ObtainAuthToken\n-from rest_framework.response import Response\n-\n import activemembers.services as activemembers_services\n import events.services as event_services\n import pizzas.services\n@@ -33,28 +29,6 @@\n from .services import member_achievements, member_societies\n \n \n-class ObtainThaliaAuthToken(ObtainAuthToken):\n- \"\"\"Custom override of the AuthToken view to force lowercase the username.\"\"\"\n-\n- def post(self, request, *args, **kwargs) -> HttpResponse:\n- serializer = self.serializer_class(\n- data={\n- \"username\": request.data.get(\"username\").lower()\n- if \"username\" in request.data\n- else None,\n- \"password\": request.data.get(\"password\"),\n- },\n- context={\"request\": request},\n- )\n-\n- if not serializer.is_valid():\n- return Response({\"error\": \"Unauthorized\"}, status=401)\n-\n- user = serializer.validated_data[\"user\"]\n- token, _ = Token.objects.get_or_create(user=user)\n- return Response({\"token\": token.key})\n-\n-\n @method_decorator(login_required, \"dispatch\")\n @method_decorator(membership_required, \"dispatch\")\n class MembersIndex(PagedView):\ndiff --git a/website/thaliawebsite/api/v1/urls.py b/website/thaliawebsite/api/v1/urls.py\n--- a/website/thaliawebsite/api/v1/urls.py\n+++ b/website/thaliawebsite/api/v1/urls.py\n@@ -3,13 +3,11 @@\n \n from rest_framework.schemas import get_schema_view\n \n-from members.views import ObtainThaliaAuthToken\n from thaliawebsite.api.openapi import OAuthSchemaGenerator\n \n app_name = \"thaliawebsite\"\n \n urlpatterns = [\n- path(\"token-auth/\", ObtainThaliaAuthToken.as_view()),\n path(\"\", include(\"activemembers.api.v1.urls\")),\n path(\"\", include(\"announcements.api.v1.urls\")),\n path(\"\", include(\"events.api.v1.urls\")),\n", "issue": "Remove ObtainThaliaAuthToken view\n### Describe the change\r\nRemove https://github.com/svthalia/concrexit/blob/4b664220b406361245e153433288ffbcf9d6f4a9/website/members/views.py#L35\r\n\r\n\r\n### Motivation\r\nWe don't use it \r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.urls import include, path\n\nfrom rest_framework.schemas import get_schema_view\n\nfrom members.views import ObtainThaliaAuthToken\nfrom thaliawebsite.api.openapi import OAuthSchemaGenerator\n\napp_name = \"thaliawebsite\"\n\nurlpatterns = [\n path(\"token-auth/\", ObtainThaliaAuthToken.as_view()),\n path(\"\", include(\"activemembers.api.v1.urls\")),\n path(\"\", include(\"announcements.api.v1.urls\")),\n path(\"\", include(\"events.api.v1.urls\")),\n path(\"\", include(\"members.api.v1.urls\")),\n path(\"\", include(\"partners.api.v1.urls\")),\n path(\"\", include(\"pizzas.api.v1.urls\")),\n path(\"\", include(\"photos.api.v1.urls\")),\n path(\"\", include(\"pushnotifications.api.v1.urls\")),\n path(\"\", include(\"payments.api.v1.urls\")),\n path(\n \"schema\",\n get_schema_view(\n title=\"API v1\",\n version=settings.SOURCE_COMMIT,\n url=\"/api/v1/\",\n urlconf=\"thaliawebsite.api.v1.urls\",\n generator_class=OAuthSchemaGenerator,\n ),\n name=\"schema\",\n ),\n]\n", "path": "website/thaliawebsite/api/v1/urls.py"}, {"content": 
"\"\"\"Views provided by the members package.\"\"\"\nimport json\nfrom datetime import date, datetime\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.db.models import Q, QuerySet\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import CreateView, DetailView, UpdateView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView, View\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.response import Response\n\nimport activemembers.services as activemembers_services\nimport events.services as event_services\nimport pizzas.services\nfrom members import emails, services\nfrom members.decorators import membership_required\nfrom members.models import EmailChange, Member, Membership, Profile\nfrom thaliawebsite.views import PagedView\nfrom utils.media.services import fetch_thumbnails_db\nfrom utils.snippets import datetime_to_lectureyear\n\nfrom . import models\nfrom .forms import ProfileForm\nfrom .services import member_achievements, member_societies\n\n\nclass ObtainThaliaAuthToken(ObtainAuthToken):\n \"\"\"Custom override of the AuthToken view to force lowercase the username.\"\"\"\n\n def post(self, request, *args, **kwargs) -> HttpResponse:\n serializer = self.serializer_class(\n data={\n \"username\": request.data.get(\"username\").lower()\n if \"username\" in request.data\n else None,\n \"password\": request.data.get(\"password\"),\n },\n context={\"request\": request},\n )\n\n if not serializer.is_valid():\n return Response({\"error\": \"Unauthorized\"}, status=401)\n\n user = serializer.validated_data[\"user\"]\n token, _ = Token.objects.get_or_create(user=user)\n return Response({\"token\": token.key})\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass MembersIndex(PagedView):\n \"\"\"View that renders the members overview.\"\"\"\n\n model = Member\n paginate_by = 28\n template_name = \"members/index.html\"\n context_object_name = \"members\"\n keywords = None\n query_filter = \"\"\n year_range = []\n\n def setup(self, request, *args, **kwargs) -> None:\n super().setup(request, *args, **kwargs)\n current_lectureyear = datetime_to_lectureyear(date.today())\n self.year_range = list(\n reversed(range(current_lectureyear - 5, current_lectureyear + 1))\n )\n self.keywords = request.GET.get(\"keywords\", \"\").split() or None\n self.query_filter = kwargs.get(\"filter\", None)\n\n def get_queryset(self) -> QuerySet:\n memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n members_query = ~Q(id=None)\n\n if self.query_filter and self.query_filter.isdigit():\n members_query &= Q(profile__starting_year=int(self.query_filter))\n memberships_query &= Q(type=Membership.MEMBER)\n elif self.query_filter == \"older\":\n members_query &= Q(profile__starting_year__lt=self.year_range[-1])\n memberships_query &= Q(type=Membership.MEMBER)\n elif self.query_filter == \"former\":\n # Filter out all current active memberships\n memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n members_query 
&= ~Q(pk__in=memberships.values(\"user__pk\"))\n # Members_query contains users that are not currently (honorary)member\n elif self.query_filter == \"benefactors\":\n memberships_query &= Q(type=Membership.BENEFACTOR)\n elif self.query_filter == \"honorary\":\n memberships_query = Q(until__gt=datetime.now().date()) | Q(until=None)\n memberships_query &= Q(type=Membership.HONORARY)\n\n if self.keywords:\n for key in self.keywords:\n # Works because relevant options all have `nick` in their key\n members_query &= (\n (\n Q(profile__nickname__icontains=key)\n & Q(profile__display_name_preference__contains=\"nick\")\n )\n | Q(first_name__icontains=key)\n | Q(last_name__icontains=key)\n | Q(username__icontains=key)\n )\n\n if self.query_filter == \"former\":\n memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n all_memberships = Membership.objects.all()\n # Only keep members that were once members, or are legacy users\n # that do not have any memberships at all\n members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n pk__in=all_memberships.values(\"user__pk\")\n )\n else:\n memberships = Membership.objects.filter(memberships_query)\n members_query &= Q(pk__in=memberships.values(\"user__pk\"))\n members = (\n Member.objects.filter(members_query)\n .order_by(\"first_name\")\n .select_related(\"profile\")\n )\n return members\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n\n context.update(\n {\n \"filter\": self.query_filter,\n \"year_range\": self.year_range,\n \"keywords\": self.keywords,\n }\n )\n\n fetch_thumbnails_db(\n [x.profile.photo for x in context[\"object_list\"] if x.profile.photo]\n )\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass ProfileDetailView(DetailView):\n \"\"\"View that renders a member's profile.\"\"\"\n\n context_object_name = \"member\"\n model = Member\n template_name = \"members/user/profile.html\"\n\n def setup(self, request, *args, **kwargs) -> None:\n if \"pk\" not in kwargs and request.member:\n kwargs[\"pk\"] = request.member.pk\n super().setup(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n member = context[\"member\"]\n\n achievements = member_achievements(member)\n societies = member_societies(member)\n\n membership = member.current_membership\n membership_type = _(\"Unknown membership history\")\n if membership:\n membership_type = membership.get_type_display()\n elif member.has_been_honorary_member():\n membership_type = _(\"Former honorary member\")\n elif member.has_been_member():\n membership_type = _(\"Former member\")\n elif member.latest_membership:\n membership_type = _(\"Former benefactor\")\n\n context.update(\n {\n \"achievements\": achievements,\n \"societies\": societies,\n \"membership_type\": membership_type,\n }\n )\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass UserProfileUpdateView(SuccessMessageMixin, UpdateView):\n \"\"\"View that allows a user to update their profile.\"\"\"\n\n template_name = \"members/user/edit_profile.html\"\n model = Profile\n form_class = ProfileForm\n success_url = reverse_lazy(\"members:edit-profile\")\n success_message = _(\"Your profile has been updated successfully.\")\n\n def get_object(self, queryset=None) -> Profile:\n return get_object_or_404(models.Profile, user=self.request.user)\n\n\n@method_decorator(login_required, 
\"dispatch\")\nclass StatisticsView(TemplateView):\n \"\"\"View that renders the statistics page.\"\"\"\n\n template_name = \"members/statistics.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"total_members\": models.Member.current_members.count(),\n \"cohort_sizes\": json.dumps(services.gen_stats_year()),\n \"member_type_distribution\": json.dumps(\n services.gen_stats_member_type()\n ),\n \"committee_sizes\": json.dumps(\n activemembers_services.generate_statistics()\n ),\n \"event_categories\": json.dumps(\n event_services.generate_category_statistics()\n ),\n \"total_pizza_orders\": json.dumps(\n pizzas.services.gen_stats_pizza_orders()\n ),\n \"active_members\": json.dumps(services.gen_stats_active_members()),\n }\n )\n\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeFormView(CreateView):\n \"\"\"View that renders the email change form.\"\"\"\n\n model = EmailChange\n fields = [\"email\", \"member\"]\n template_name = \"members/user/email_change.html\"\n\n def get_initial(self) -> dict:\n initial = super().get_initial()\n initial[\"email\"] = self.request.member.email\n return initial\n\n def post(self, request, *args, **kwargs) -> HttpResponse:\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form) -> HttpResponse:\n change_request = form.save()\n emails.send_email_change_confirmation_messages(change_request)\n return TemplateResponse(\n request=self.request, template=\"members/user/email_change_requested.html\"\n )\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeConfirmView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and confirms the old email address.\"\"\"\n\n template_name = \"members/user/email_change_confirmed.html\"\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n if not EmailChange.objects.filter(confirm_key=kwargs[\"key\"]).exists():\n raise Http404\n\n change_request = EmailChange.objects.get(confirm_key=kwargs[\"key\"])\n\n services.confirm_email_change(change_request)\n\n return self.render_to_response({})\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeVerifyView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and verifies the new email address.\"\"\"\n\n template_name = \"members/user/email_change_verified.html\"\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n if not EmailChange.objects.filter(verify_key=kwargs[\"key\"]).exists():\n raise Http404\n\n change_request = EmailChange.objects.get(verify_key=kwargs[\"key\"])\n\n services.verify_email_change(change_request)\n\n return self.render_to_response({})\n", "path": "website/members/views.py"}]}
| 3,980 | 523 |
gh_patches_debug_38287
|
rasdani/github-patches
|
git_diff
|
microsoft__MLOS-358
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
flake8 and/or prettier setup for devcontainer
Per comments in #354 and #340
We should "just" add flake8 and some type of auto prettier to the devcontainer setup so that those types of things are caught during dev cycle instead of review nits as much as possible.
</issue>
<code>
[start of mlos_core/setup.py]
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Setup instructions for the mlos_core package.
7 """
8
9 from itertools import chain
10 from logging import warning
11 from typing import Dict, List
12
13 from setuptools import setup, find_packages
14
15 from _version import _VERSION # pylint: disable=import-private-name
16
17 try:
18 from setuptools_scm import get_version
19 version = get_version(root='..', relative_to=__file__)
20 if version is not None:
21 _VERSION = version
22 except ImportError:
23 warning("setuptools_scm not found, using version from _version.py")
24 except LookupError as e:
25 warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
26
27
28 extra_requires: Dict[str, List[str]] = {
29 'emukit': ['emukit'],
30 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)
31 }
32
33 # construct special 'full' extra that adds requirements for all built-in
34 # backend integrations and additional extra features.
35 extra_requires['full'] = list(set(chain(*extra_requires.values())))
36
37 extra_requires['full-tests'] = extra_requires['full'] + [
38 'pytest',
39 'pytest-forked',
40 'pytest-xdist',
41 'pytest-cov',
42 'pytest-local-badge',
43 ]
44
45 # pylint: disable=duplicate-code
46 MODULE_BASE_NAME = 'mlos_core'
47 setup(
48 name='mlos-core',
49 version=_VERSION,
50 packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
51 package_data={
52 '': ['py.typed', '**/*.pyi'],
53 },
54 install_requires=[
55 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
56 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
57 'scipy>=1.3.2',
58 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
59 'pandas>=1.0.3',
60 'ConfigSpace>=0.6.1',
61 ],
62 extras_require=extra_requires,
63 author='Microsoft',
64 author_email='mlos-maintainers@service.microsoft.com',
65 description=('MLOS Core Python interface for parameter optimization.'),
66 license='MIT',
67 keywords='',
68 url='https://aka.ms/mlos-core',
69 python_requires='>=3.8',
70 )
71
[end of mlos_core/setup.py]
[start of mlos_bench/setup.py]
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Setup instructions for the mlos_bench package.
7 """
8
9 from logging import warning
10 from itertools import chain
11 from typing import Dict, List
12
13 from setuptools import setup, find_packages
14
15 from _version import _VERSION # pylint: disable=import-private-name
16
17 try:
18 from setuptools_scm import get_version
19 version = get_version(root='..', relative_to=__file__)
20 if version is not None:
21 _VERSION = version
22 except ImportError:
23 warning("setuptools_scm not found, using version from _version.py")
24 except LookupError as e:
25 warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
26
27
28 extra_requires: Dict[str, List[str]] = {
29 # Additional tools for extra functionality.
30 'azure': ['azure-storage-file-share'],
31 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],
32 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],
33 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],
34 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.
35 # Transitive extra_requires from mlos-core.
36 'emukit': ['emukit'],
37 'skopt': ['scikit-optimize'],
38 }
39
40 # construct special 'full' extra that adds requirements for all built-in
41 # backend integrations and additional extra features.
42 extra_requires['full'] = list(set(chain(*extra_requires.values())))
43
44 extra_requires['full-tests'] = extra_requires['full'] + [
45 'pytest',
46 'pytest-forked',
47 'pytest-xdist',
48 'pytest-cov',
49 'pytest-local-badge',
50 ]
51
52 # pylint: disable=duplicate-code
53 MODULE_BASE_NAME = 'mlos_bench'
54 setup(
55 name='mlos-bench',
56 version=_VERSION,
57 packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
58 package_data={
59 '': ['py.typed', '**/*.pyi'],
60 'mlos_bench': [
61 'config/**/*.md',
62 'config/**/*.jsonc',
63 'config/**/*.json',
64 'config/**/*.py',
65 'config/**/*.sh',
66 'config/**/*.cmd',
67 'config/**/*.ps1',
68 ],
69 },
70 entry_points={
71 'console_scripts': [
72 'mlos_bench = mlos_bench.run:_main',
73 ],
74 },
75 install_requires=[
76 'mlos-core==' + _VERSION,
77 'requests',
78 'json5',
79 'jsonschema',
80 'importlib_resources;python_version<"3.10"',
81 ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.
82 extras_require=extra_requires,
83 author='Microsoft',
84 author_email='mlos-maintainers@service.microsoft.com',
85 description=('MLOS Bench Python interface for benchmark automation and optimization.'),
86 license='MIT',
87 keywords='',
88 url='https://aka.ms/mlos-core',
89 python_requires='>=3.8',
90 )
91
[end of mlos_bench/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mlos_bench/setup.py b/mlos_bench/setup.py
--- a/mlos_bench/setup.py
+++ b/mlos_bench/setup.py
@@ -18,14 +18,14 @@
from setuptools_scm import get_version
version = get_version(root='..', relative_to=__file__)
if version is not None:
- _VERSION = version
+ _VERSION = version # noqa: F811
except ImportError:
warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
-extra_requires: Dict[str, List[str]] = {
+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass
# Additional tools for extra functionality.
'azure': ['azure-storage-file-share'],
'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],
diff --git a/mlos_core/setup.py b/mlos_core/setup.py
--- a/mlos_core/setup.py
+++ b/mlos_core/setup.py
@@ -18,14 +18,14 @@
from setuptools_scm import get_version
version = get_version(root='..', relative_to=__file__)
if version is not None:
- _VERSION = version
+ _VERSION = version # noqa: F811
except ImportError:
warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
-extra_requires: Dict[str, List[str]] = {
+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass
'emukit': ['emukit'],
'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)
}
@@ -52,10 +52,10 @@
'': ['py.typed', '**/*.pyi'],
},
install_requires=[
- 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
- 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
+ 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
+ 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
'scipy>=1.3.2',
- 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
+ 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
'pandas>=1.0.3',
'ConfigSpace>=0.6.1',
],
|
{"golden_diff": "diff --git a/mlos_bench/setup.py b/mlos_bench/setup.py\n--- a/mlos_bench/setup.py\n+++ b/mlos_bench/setup.py\n@@ -18,14 +18,14 @@\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n- _VERSION = version\n+ _VERSION = version # noqa: F811\n except ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\n except LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n \n \n-extra_requires: Dict[str, List[str]] = {\n+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\ndiff --git a/mlos_core/setup.py b/mlos_core/setup.py\n--- a/mlos_core/setup.py\n+++ b/mlos_core/setup.py\n@@ -18,14 +18,14 @@\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n- _VERSION = version\n+ _VERSION = version # noqa: F811\n except ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\n except LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n \n \n-extra_requires: Dict[str, List[str]] = {\n+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)\n }\n@@ -52,10 +52,10 @@\n '': ['py.typed', '**/*.pyi'],\n },\n install_requires=[\n- 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n- 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n+ 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n+ 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n 'scipy>=1.3.2',\n- 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n+ 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'pandas>=1.0.3',\n 'ConfigSpace>=0.6.1',\n ],\n", "issue": "flake8 and/or prettier setup for devcontainer\nPer comments in #354 and #340 \r\nWe should \"just\" add flake8 and some type of auto prettier to the devcontainer setup so that those types of things are caught during dev cycle instead of review nits as much as possible.\n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_core package.\n\"\"\"\n\nfrom itertools import chain\nfrom logging import warning\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = {\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: 
temporarily work around some version mismatch issues (PR 850)\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_core'\nsetup(\n name='mlos-core',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n },\n install_requires=[\n 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n 'scipy>=1.3.2',\n 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'pandas>=1.0.3',\n 'ConfigSpace>=0.6.1',\n ],\n extras_require=extra_requires,\n author='Microsoft',\n author_email='mlos-maintainers@service.microsoft.com',\n description=('MLOS Core Python interface for parameter optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_core/setup.py"}, {"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_bench package.\n\"\"\"\n\nfrom logging import warning\nfrom itertools import chain\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = {\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\n 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],\n 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],\n 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.\n # Transitive extra_requires from mlos-core.\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize'],\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_bench'\nsetup(\n name='mlos-bench',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n 'mlos_bench': [\n 'config/**/*.md',\n 'config/**/*.jsonc',\n 'config/**/*.json',\n 'config/**/*.py',\n 'config/**/*.sh',\n 'config/**/*.cmd',\n 'config/**/*.ps1',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'mlos_bench = mlos_bench.run:_main',\n ],\n },\n install_requires=[\n 
'mlos-core==' + _VERSION,\n 'requests',\n 'json5',\n 'jsonschema',\n 'importlib_resources;python_version<\"3.10\"',\n ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.\n extras_require=extra_requires,\n author='Microsoft',\n author_email='mlos-maintainers@service.microsoft.com',\n description=('MLOS Bench Python interface for benchmark automation and optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_bench/setup.py"}]}
| 2,213 | 744 |
gh_patches_debug_13487
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-2143
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImageFolder non-descriptive error message
## 🐛 Bug
You get a weird Python error:
`TypeError: can only join an iterable`
This happens if you make an ImageFolder dataset with no samples in the directory and an `is_valid_file` function: the `extensions` variable is then set to `None` (because `is_valid_file` is provided), which causes the `TypeError` when the "Found 0 files" error message is built.
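(Illustrative aside, not part of the original report: the core failure is simply calling `str.join` on `None`.)

```python
extensions = None      # what ImageFolder passes along when is_valid_file is supplied
",".join(extensions)   # raises: TypeError: can only join an iterable
```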
## To Reproduce
Steps to reproduce the behavior:
1. Use ImageFolder in torchvision.datasets with no samples in the directory and an is_valid_file function.
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
```
from torchvision import datasets, transforms

# `name` (a filename prefix), `dir_in` (the empty dataset root) and IMG_SIZE are defined elsewhere.
is_type = lambda x: x.startswith(name)
print(dir_in)
data_set = datasets.ImageFolder(root=dir_in,
                                transform=transforms.Compose([transforms.CenterCrop(IMG_SIZE),
                                                              # transforms.Resize(IMAGE_SIZE),
                                                              transforms.ToTensor(),
                                                              transforms.Normalize(0.5, 0.5)]),
                                is_valid_file=is_type)
```
## Expected behavior
Should display a RuntimeError saying that no samples were found.
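For illustration, a minimal sketch of how the error path in `make_dataset`/`DatasetFolder` (see `folder.py` below) could build the message only when `extensions` is available — an assumed fix for the sake of the example, not necessarily the change applied upstream:

```python
extensions = None  # ImageFolder sets this to None when is_valid_file is given
root = "<root>"    # placeholder for the dataset directory in this sketch

# Join the extensions into the message only when they exist, so an empty
# directory produces a readable RuntimeError instead of a TypeError.
msg = "Found 0 files in subfolders of: " + root + "\n"
if extensions is not None:
    msg += "Supported extensions are: " + ",".join(extensions)
raise RuntimeError(msg)
```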
## Environment
Please copy and paste the output from our
[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py)
(or fill out the checklist below manually).
PyTorch version: 1.5.0
Is debug build: No
CUDA used to build PyTorch: 10.2
OS: Manjaro Linux
GCC version: (Arch Linux 9.3.0-1) 9.3.0
CMake version: version 3.17.1
Python version: 3.8
Is CUDA available: Yes
CUDA runtime version: 10.0.130
GPU models and configuration: GPU 0: Quadro M1200
Nvidia driver version: 440.82
cuDNN version: /usr/lib/libcudnn.so.7.6.5
Versions of relevant libraries:
[pip3] numpy==1.18.3
[pip3] torch==1.5.0
[pip3] torch-utils==0.1.2
[pip3] torchvision==0.5.0
[conda] Could not collect
</issue>
<code>
[start of torchvision/datasets/folder.py]
1 from .vision import VisionDataset
2
3 from PIL import Image
4
5 import os
6 import os.path
7
8
9 def has_file_allowed_extension(filename, extensions):
10 """Checks if a file is an allowed extension.
11
12 Args:
13 filename (string): path to a file
14 extensions (tuple of strings): extensions to consider (lowercase)
15
16 Returns:
17 bool: True if the filename ends with one of given extensions
18 """
19 return filename.lower().endswith(extensions)
20
21
22 def is_image_file(filename):
23 """Checks if a file is an allowed image extension.
24
25 Args:
26 filename (string): path to a file
27
28 Returns:
29 bool: True if the filename ends with a known image extension
30 """
31 return has_file_allowed_extension(filename, IMG_EXTENSIONS)
32
33
34 def make_dataset(directory, class_to_idx, extensions=None, is_valid_file=None):
35 instances = []
36 directory = os.path.expanduser(directory)
37 both_none = extensions is None and is_valid_file is None
38 both_something = extensions is not None and is_valid_file is not None
39 if both_none or both_something:
40 raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
41 if extensions is not None:
42 def is_valid_file(x):
43 return has_file_allowed_extension(x, extensions)
44 for target_class in sorted(class_to_idx.keys()):
45 class_index = class_to_idx[target_class]
46 target_dir = os.path.join(directory, target_class)
47 if not os.path.isdir(target_dir):
48 continue
49 for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
50 for fname in sorted(fnames):
51 path = os.path.join(root, fname)
52 if is_valid_file(path):
53 item = path, class_index
54 instances.append(item)
55 return instances
56
57
58 class DatasetFolder(VisionDataset):
59 """A generic data loader where the samples are arranged in this way: ::
60
61 root/class_x/xxx.ext
62 root/class_x/xxy.ext
63 root/class_x/xxz.ext
64
65 root/class_y/123.ext
66 root/class_y/nsdf3.ext
67 root/class_y/asd932_.ext
68
69 Args:
70 root (string): Root directory path.
71 loader (callable): A function to load a sample given its path.
72 extensions (tuple[string]): A list of allowed extensions.
73 both extensions and is_valid_file should not be passed.
74 transform (callable, optional): A function/transform that takes in
75 a sample and returns a transformed version.
76 E.g, ``transforms.RandomCrop`` for images.
77 target_transform (callable, optional): A function/transform that takes
78 in the target and transforms it.
79 is_valid_file (callable, optional): A function that takes path of a file
80 and check if the file is a valid file (used to check of corrupt files)
81 both extensions and is_valid_file should not be passed.
82
83 Attributes:
84 classes (list): List of the class names sorted alphabetically.
85 class_to_idx (dict): Dict with items (class_name, class_index).
86 samples (list): List of (sample path, class_index) tuples
87 targets (list): The class_index value for each image in the dataset
88 """
89
90 def __init__(self, root, loader, extensions=None, transform=None,
91 target_transform=None, is_valid_file=None):
92 super(DatasetFolder, self).__init__(root, transform=transform,
93 target_transform=target_transform)
94 classes, class_to_idx = self._find_classes(self.root)
95 samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
96 if len(samples) == 0:
97 raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
98 "Supported extensions are: " + ",".join(extensions)))
99
100 self.loader = loader
101 self.extensions = extensions
102
103 self.classes = classes
104 self.class_to_idx = class_to_idx
105 self.samples = samples
106 self.targets = [s[1] for s in samples]
107
108 def _find_classes(self, dir):
109 """
110 Finds the class folders in a dataset.
111
112 Args:
113 dir (string): Root directory path.
114
115 Returns:
116 tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
117
118 Ensures:
119 No class is a subdirectory of another.
120 """
121 classes = [d.name for d in os.scandir(dir) if d.is_dir()]
122 classes.sort()
123 class_to_idx = {classes[i]: i for i in range(len(classes))}
124 return classes, class_to_idx
125
126 def __getitem__(self, index):
127 """
128 Args:
129 index (int): Index
130
131 Returns:
132 tuple: (sample, target) where target is class_index of the target class.
133 """
134 path, target = self.samples[index]
135 sample = self.loader(path)
136 if self.transform is not None:
137 sample = self.transform(sample)
138 if self.target_transform is not None:
139 target = self.target_transform(target)
140
141 return sample, target
142
143 def __len__(self):
144 return len(self.samples)
145
146
147 IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
148
149
150 def pil_loader(path):
151 # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
152 with open(path, 'rb') as f:
153 img = Image.open(f)
154 return img.convert('RGB')
155
156
157 def accimage_loader(path):
158 import accimage
159 try:
160 return accimage.Image(path)
161 except IOError:
162 # Potentially a decoding problem, fall back to PIL.Image
163 return pil_loader(path)
164
165
166 def default_loader(path):
167 from torchvision import get_image_backend
168 if get_image_backend() == 'accimage':
169 return accimage_loader(path)
170 else:
171 return pil_loader(path)
172
173
174 class ImageFolder(DatasetFolder):
175 """A generic data loader where the images are arranged in this way: ::
176
177 root/dog/xxx.png
178 root/dog/xxy.png
179 root/dog/xxz.png
180
181 root/cat/123.png
182 root/cat/nsdf3.png
183 root/cat/asd932_.png
184
185 Args:
186 root (string): Root directory path.
187 transform (callable, optional): A function/transform that takes in an PIL image
188 and returns a transformed version. E.g, ``transforms.RandomCrop``
189 target_transform (callable, optional): A function/transform that takes in the
190 target and transforms it.
191 loader (callable, optional): A function to load an image given its path.
192 is_valid_file (callable, optional): A function that takes path of an Image file
193 and check if the file is a valid file (used to check of corrupt files)
194
195 Attributes:
196 classes (list): List of the class names sorted alphabetically.
197 class_to_idx (dict): Dict with items (class_name, class_index).
198 imgs (list): List of (image path, class_index) tuples
199 """
200
201 def __init__(self, root, transform=None, target_transform=None,
202 loader=default_loader, is_valid_file=None):
203 super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
204 transform=transform,
205 target_transform=target_transform,
206 is_valid_file=is_valid_file)
207 self.imgs = self.samples
208
[end of torchvision/datasets/folder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py
--- a/torchvision/datasets/folder.py
+++ b/torchvision/datasets/folder.py
@@ -94,8 +94,10 @@
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
- raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
- "Supported extensions are: " + ",".join(extensions)))
+ msg = "Found 0 files in subfolders of: {}\n".format(self.root)
+ if extensions is not None:
+ msg += "Supported extensions are: {}".format(",".join(extensions))
+ raise RuntimeError(msg)
self.loader = loader
self.extensions = extensions
|
{"golden_diff": "diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py\n--- a/torchvision/datasets/folder.py\n+++ b/torchvision/datasets/folder.py\n@@ -94,8 +94,10 @@\n classes, class_to_idx = self._find_classes(self.root)\n samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)\n if len(samples) == 0:\n- raise (RuntimeError(\"Found 0 files in subfolders of: \" + self.root + \"\\n\"\n- \"Supported extensions are: \" + \",\".join(extensions)))\n+ msg = \"Found 0 files in subfolders of: {}\\n\".format(self.root)\n+ if extensions is not None:\n+ msg += \"Supported extensions are: {}\".format(\",\".join(extensions))\n+ raise RuntimeError(msg)\n \n self.loader = loader\n self.extensions = extensions\n", "issue": "Imagefolder none descriptive error message\n## \ud83d\udc1b Bug\r\n\r\nYou get an weird python error:\r\n`TypeError: can only join an iterable`\r\n\r\nIf you make an Imagefolder dataset with no samples in the directory and a is_valid_file. Because the extensions variable is set to none because is_valid_file which causes the TypeError.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Use Imagefolder in torchvision.datasets with no samples in the directory and a is_valid_file function.\r\n\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\n is_type = lambda x: x.startswith(name)\r\n print(dir_in)\r\n data_set = datasets.ImageFolder(root=dir_in,\r\n transform=transforms.Compose([transforms.CenterCrop(IMG_SIZE),\r\n # transforms.Resize(IMAGE_SIZE),\r\n transforms.ToTensor(),\r\n transforms.Normalize(0.5, 0.5)]),\r\n is_valid_file=is_type)\r\n```\r\n\r\n## Expected behavior\r\n\r\nShould display an RunTimeError saying that no samples were found.\r\n\r\n## Environment\r\n\r\nPlease copy and paste the output from our\r\n[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py)\r\n(or fill out the checklist below manually).\r\nPyTorch version: 1.5.0\r\nIs debug build: No\r\nCUDA used to build PyTorch: 10.2\r\n\r\nOS: Manjaro Linux\r\nGCC version: (Arch Linux 9.3.0-1) 9.3.0\r\nCMake version: version 3.17.1\r\n\r\nPython version: 3.8\r\nIs CUDA available: Yes\r\nCUDA runtime version: 10.0.130\r\nGPU models and configuration: GPU 0: Quadro M1200\r\nNvidia driver version: 440.82\r\ncuDNN version: /usr/lib/libcudnn.so.7.6.5\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.18.3\r\n[pip3] torch==1.5.0\r\n[pip3] torch-utils==0.1.2\r\n[pip3] torchvision==0.5.0\r\n[conda] Could not collect\n", "before_files": [{"content": "from .vision import VisionDataset\n\nfrom PIL import Image\n\nimport os\nimport os.path\n\n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (tuple of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n return filename.lower().endswith(extensions)\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an allowed image extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n return has_file_allowed_extension(filename, IMG_EXTENSIONS)\n\n\ndef make_dataset(directory, class_to_idx, extensions=None, is_valid_file=None):\n instances = []\n directory = os.path.expanduser(directory)\n both_none = extensions is 
None and is_valid_file is None\n both_something = extensions is not None and is_valid_file is not None\n if both_none or both_something:\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\n if extensions is not None:\n def is_valid_file(x):\n return has_file_allowed_extension(x, extensions)\n for target_class in sorted(class_to_idx.keys()):\n class_index = class_to_idx[target_class]\n target_dir = os.path.join(directory, target_class)\n if not os.path.isdir(target_dir):\n continue\n for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):\n for fname in sorted(fnames):\n path = os.path.join(root, fname)\n if is_valid_file(path):\n item = path, class_index\n instances.append(item)\n return instances\n\n\nclass DatasetFolder(VisionDataset):\n \"\"\"A generic data loader where the samples are arranged in this way: ::\n\n root/class_x/xxx.ext\n root/class_x/xxy.ext\n root/class_x/xxz.ext\n\n root/class_y/123.ext\n root/class_y/nsdf3.ext\n root/class_y/asd932_.ext\n\n Args:\n root (string): Root directory path.\n loader (callable): A function to load a sample given its path.\n extensions (tuple[string]): A list of allowed extensions.\n both extensions and is_valid_file should not be passed.\n transform (callable, optional): A function/transform that takes in\n a sample and returns a transformed version.\n E.g, ``transforms.RandomCrop`` for images.\n target_transform (callable, optional): A function/transform that takes\n in the target and transforms it.\n is_valid_file (callable, optional): A function that takes path of a file\n and check if the file is a valid file (used to check of corrupt files)\n both extensions and is_valid_file should not be passed.\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n samples (list): List of (sample path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(self, root, loader, extensions=None, transform=None,\n target_transform=None, is_valid_file=None):\n super(DatasetFolder, self).__init__(root, transform=transform,\n target_transform=target_transform)\n classes, class_to_idx = self._find_classes(self.root)\n samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)\n if len(samples) == 0:\n raise (RuntimeError(\"Found 0 files in subfolders of: \" + self.root + \"\\n\"\n \"Supported extensions are: \" + \",\".join(extensions)))\n\n self.loader = loader\n self.extensions = extensions\n\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.samples = samples\n self.targets = [s[1] for s in samples]\n\n def _find_classes(self, dir):\n \"\"\"\n Finds the class folders in a dataset.\n\n Args:\n dir (string): Root directory path.\n\n Returns:\n tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\n\n Ensures:\n No class is a subdirectory of another.\n \"\"\"\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n 
target = self.target_transform(target)\n\n return sample, target\n\n def __len__(self):\n return len(self.samples)\n\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass ImageFolder(DatasetFolder):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n is_valid_file (callable, optional): A function that takes path of an Image file\n and check if the file is a valid file (used to check of corrupt files)\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader, is_valid_file=None):\n super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,\n transform=transform,\n target_transform=target_transform,\n is_valid_file=is_valid_file)\n self.imgs = self.samples\n", "path": "torchvision/datasets/folder.py"}]}
| 3,166 | 202 |
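As a quick illustration of the torchvision record above, here is a minimal repro sketch. It assumes a torchvision build that already contains the diff shown; the temporary directory layout and the `is_valid_file` lambda are invented for the example.

```python
# Hypothetical repro for the ImageFolder report above (assumes the patch is applied).
# With is_valid_file given, `extensions` is None inside DatasetFolder, so the old
# error path crashed while joining None; the patched path raises a readable
# RuntimeError instead.
import os
import tempfile

from torchvision import datasets

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "class_a"))  # one class folder, zero sample files

try:
    datasets.ImageFolder(root=root, is_valid_file=lambda path: path.endswith(".png"))
except RuntimeError as err:
    print(err)  # "Found 0 files in subfolders of: ..." with no extensions line
```

Before the patch the same call died inside `",".join(extensions)` with `extensions=None`, which is exactly the unhelpful `TypeError` the report complains about.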
gh_patches_debug_13934
|
rasdani/github-patches
|
git_diff
|
saulpw__visidata-1813
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature request: Provide command `reopen-closed-sheet` like Chrome's Ctrl-Shift-T
It often happens that I close a sheet by mistake and immediately curse because I've already done a bunch of manipulations on this tab that I'll now have to repeat. (I know I can guard a sheet but that doesn't fit my workflow.)
I wish VisiData had a feature like Chrome's Ctrl-Shift-T: basically a command that just reopens the sheet that was closed most recently, with all the manipulations that I've done to it.
Of course, this command is challenging because it means VisiData has to keep this information around. Maybe limit it to the last 3 sheets.
</issue>
<code>
[start of visidata/_open.py]
1 import os
2 import os.path
3
4 from visidata import VisiData, vd, Path, BaseSheet, TableSheet, TextSheet, SettableColumn
5
6
7 vd.option('filetype', '', 'specify file type', replay=True)
8
9
10 @VisiData.api
11 def inputFilename(vd, prompt, *args, **kwargs):
12 return vd.input(prompt, type="filename", *args, completer=_completeFilename, **kwargs).strip()
13
14
15 @VisiData.api
16 def inputPath(vd, *args, **kwargs):
17 return Path(vd.inputFilename(*args, **kwargs))
18
19
20 def _completeFilename(val, state):
21 i = val.rfind('/')
22 if i < 0: # no /
23 base = ''
24 partial = val
25 elif i == 0: # root /
26 base = '/'
27 partial = val[1:]
28 else:
29 base = val[:i]
30 partial = val[i+1:]
31
32 files = []
33 for f in os.listdir(Path(base or '.')):
34 if f.startswith(partial):
35 files.append(os.path.join(base, f))
36
37 files.sort()
38 return files[state%len(files)]
39
40 @VisiData.api
41 def guessFiletype(vd, p):
42 '''Call all vd.guess_<filetype>(p) functions and return best candidate sheet based on file contents.'''
43
44 guessfuncs = [getattr(vd, x) for x in dir(vd) if x.startswith('guess_')]
45 filetypes = []
46 for f in guessfuncs:
47 try:
48 filetype = f(p)
49 if filetype:
50 filetype['_guesser'] = f.__name__
51 filetypes.append(filetype)
52 except FileNotFoundError:
53 pass
54 except Exception as e:
55 vd.debug(f'{f.__name__}: {e}')
56
57 if filetypes:
58 return sorted(filetypes, key=lambda r: -r.get('_likelihood', 1))[0]
59
60
61 @VisiData.api
62 def guess_extension(vd, path):
63 # try auto-detect from extension
64 ext = path.suffix[1:].lower()
65 openfunc = getattr(vd, f'open_{ext}', vd.getGlobals().get(f'open_{ext}'))
66 if openfunc:
67 return dict(filetype=ext, _likelihood=3)
68
69
70 @VisiData.api
71 def openPath(vd, p, filetype=None, create=False):
72 '''Call ``open_<filetype>(p)`` or ``openurl_<p.scheme>(p, filetype)``. Return constructed but unloaded sheet of appropriate type.
73 If True, *create* will return a new, blank **Sheet** if file does not exist.'''
74 if p.scheme and not p.has_fp():
75 schemes = p.scheme.split('+')
76 openfuncname = 'openurl_' + schemes[-1]
77
78 openfunc = getattr(vd, openfuncname, None) or vd.getGlobals().get(openfuncname, None)
79 if not openfunc:
80 vd.fail(f'no loader for url scheme: {p.scheme}')
81
82 return openfunc(p, filetype=filetype)
83
84 if not p.exists() and not create:
85 return None
86
87 if not filetype:
88 filetype = p.ext or vd.options.filetype
89
90 filetype = filetype.lower()
91
92 if not p.exists():
93 if not create:
94 return None
95 newfunc = getattr(vd, 'new_' + filetype, vd.getGlobals().get('new_' + filetype))
96 if not newfunc:
97 vd.warning('%s does not exist, creating new sheet' % p)
98 return vd.newSheet(p.name, 1, source=p)
99
100 vd.status('creating blank %s' % (p.given))
101 return newfunc(p)
102
103 openfuncname = 'open_' + filetype
104 openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))
105 if not openfunc:
106 opts = vd.guessFiletype(p)
107 if opts and 'filetype' in opts:
108 filetype = opts['filetype']
109 openfuncname = 'open_' + filetype
110 openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))
111 if not openfunc:
112 vd.error(f'guessed {filetype} but no {openfuncname}')
113
114 vs = openfunc(p)
115 for k, v in opts.items():
116 if k != 'filetype' and not k.startswith('_'):
117 setattr(vs.options, k, v)
118 vd.warning('guessed "%s" filetype based on contents' % opts['filetype'])
119 return vs
120
121 vd.warning('unknown "%s" filetype' % filetype)
122
123 filetype = 'txt'
124 openfunc = vd.open_txt
125
126 vd.status('opening %s as %s' % (p.given, filetype))
127
128 return openfunc(p)
129
130
131 @VisiData.api
132 def openSource(vd, p, filetype=None, create=False, **kwargs):
133 '''Return unloaded sheet object for *p* opened as the given *filetype* and with *kwargs* as option overrides. *p* can be a Path or a string (filename, url, or "-" for stdin).
134 when true, *create* will return a blank sheet, if file does not exist.'''
135
136 if isinstance(p, BaseSheet):
137 return p
138
139 filetype = filetype or vd.options.getonly('filetype', str(p), '') #1710
140 filetype = filetype or vd.options.getonly('filetype', 'global', '')
141
142 vs = None
143 if isinstance(p, str):
144 if '://' in p:
145 vs = vd.openPath(Path(p), filetype=filetype) # convert to Path and recurse
146 elif p == '-':
147 vs = vd.openPath(vd.stdinSource, filetype=filetype)
148 else:
149 vs = vd.openPath(Path(p), filetype=filetype, create=create) # convert to Path and recurse
150 else:
151 vs = vd.openPath(p, filetype=filetype, create=create)
152
153 for optname, optval in kwargs.items():
154 vs.options[optname] = optval
155
156 return vs
157
158
159 #### enable external addons
160 @VisiData.api
161 def open_txt(vd, p):
162 'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'
163 if p.exists(): #1611
164 with p.open_text(encoding=vd.options.encoding) as fp:
165 delimiter = vd.options.delimiter
166 try:
167 if delimiter and delimiter in next(fp): # peek at the first line
168 return vd.open_tsv(p) # TSV often have .txt extension
169 except StopIteration:
170 return TableSheet(p.name, columns=[SettableColumn()], source=p)
171 return TextSheet(p.name, source=p)
172
173
174 @VisiData.api
175 def loadInternalSheet(vd, cls, p, **kwargs):
176 'Load internal sheet of given class.'
177 vs = cls(p.name, source=p, **kwargs)
178 vd.options._set('encoding', 'utf8', vs)
179 if p.exists():
180 # vd.sheets.insert(0, vs) # broke replay with macros.reload()
181 vs.reload.__wrapped__(vs)
182 # vd.sheets.pop(0)
183 return vs
184
185
186 BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename("open: "), create=True))', 'Open file or URL')
187 TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f"file {cursorDisplay} does not exist"))', 'Open file or URL from path in current cell')
188
189 vd.addMenuItems('''
190 File > Open file/url > open-file
191 ''')
192
[end of visidata/_open.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/visidata/_open.py b/visidata/_open.py
--- a/visidata/_open.py
+++ b/visidata/_open.py
@@ -185,7 +185,9 @@
BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename("open: "), create=True))', 'Open file or URL')
TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f"file {cursorDisplay} does not exist"))', 'Open file or URL from path in current cell')
+BaseSheet.addCommand('gU', 'undo-last-quit', 'push(allSheets[-1])', 'reopen most recently closed sheet')
vd.addMenuItems('''
File > Open file/url > open-file
+ File > Reopen last closed > undo-last-quit
''')
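As a conceptual sketch (not VisiData internals), the snippet below illustrates the idea the one-line command relies on: quitting a sheet only removes it from the visible stack, while `allSheets` is assumed to keep every sheet reachable, so reopening is just pushing the most recent entry back. All names in the sketch are illustrative.

```python
# Toy model of the two sheet collections involved; names here are illustrative only.
class SheetStacks:
    def __init__(self):
        self.visible = []     # sheets currently on screen
        self.all_sheets = []  # every sheet ever pushed, survives quitting

    def push(self, sheet):
        if sheet not in self.all_sheets:
            self.all_sheets.append(sheet)
        self.visible.append(sheet)

    def quit_sheet(self):
        return self.visible.pop()  # removed from view, still in all_sheets

    def reopen_last(self):
        self.push(self.all_sheets[-1])  # what the new undo-last-quit command does


stacks = SheetStacks()
stacks.push("trades")
stacks.push("trades_freq_table")
stacks.quit_sheet()        # closed by mistake
stacks.reopen_last()       # back again, manipulations intact
print(stacks.visible)      # ['trades', 'trades_freq_table']
```

The diff binds this to `gU`, mirroring the browser shortcut the request mentions.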
|
{"golden_diff": "diff --git a/visidata/_open.py b/visidata/_open.py\n--- a/visidata/_open.py\n+++ b/visidata/_open.py\n@@ -185,7 +185,9 @@\n \n BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename(\"open: \"), create=True))', 'Open file or URL')\n TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f\"file {cursorDisplay} does not exist\"))', 'Open file or URL from path in current cell')\n+BaseSheet.addCommand('gU', 'undo-last-quit', 'push(allSheets[-1])', 'reopen most recently closed sheet')\n \n vd.addMenuItems('''\n File > Open file/url > open-file\n+ File > Reopen last closed > undo-last-quit\n ''')\n", "issue": "Feature request: Provide command `reopen-closed-sheet` like Chrome's Ctrl-Shift-T\nIt often happens that I close a sheet by mistake and immediately curse because I've already done a bunch of manipulations on this tab that I'll now have to repeat. (I know I can guard a sheet but that doesn't fit my workflow.)\r\n\r\nI wish VisiData had a feature like Chrome's Ctrl-Shift-T. Basically a command that just opens that sheet that was closed most recently, with all the manipulations that I've done to it. \r\n\r\nOf course, this command is challenging because it means VisiData has to keep this information around. Maybe limit it to last 3 sheets.\n", "before_files": [{"content": "import os\nimport os.path\n\nfrom visidata import VisiData, vd, Path, BaseSheet, TableSheet, TextSheet, SettableColumn\n\n\nvd.option('filetype', '', 'specify file type', replay=True)\n\n\n@VisiData.api\ndef inputFilename(vd, prompt, *args, **kwargs):\n return vd.input(prompt, type=\"filename\", *args, completer=_completeFilename, **kwargs).strip()\n\n\n@VisiData.api\ndef inputPath(vd, *args, **kwargs):\n return Path(vd.inputFilename(*args, **kwargs))\n\n\ndef _completeFilename(val, state):\n i = val.rfind('/')\n if i < 0: # no /\n base = ''\n partial = val\n elif i == 0: # root /\n base = '/'\n partial = val[1:]\n else:\n base = val[:i]\n partial = val[i+1:]\n\n files = []\n for f in os.listdir(Path(base or '.')):\n if f.startswith(partial):\n files.append(os.path.join(base, f))\n\n files.sort()\n return files[state%len(files)]\n\n@VisiData.api\ndef guessFiletype(vd, p):\n '''Call all vd.guess_<filetype>(p) functions and return best candidate sheet based on file contents.'''\n\n guessfuncs = [getattr(vd, x) for x in dir(vd) if x.startswith('guess_')]\n filetypes = []\n for f in guessfuncs:\n try:\n filetype = f(p)\n if filetype:\n filetype['_guesser'] = f.__name__\n filetypes.append(filetype)\n except FileNotFoundError:\n pass\n except Exception as e:\n vd.debug(f'{f.__name__}: {e}')\n\n if filetypes:\n return sorted(filetypes, key=lambda r: -r.get('_likelihood', 1))[0]\n\n\n@VisiData.api\ndef guess_extension(vd, path):\n # try auto-detect from extension\n ext = path.suffix[1:].lower()\n openfunc = getattr(vd, f'open_{ext}', vd.getGlobals().get(f'open_{ext}'))\n if openfunc:\n return dict(filetype=ext, _likelihood=3)\n\n\n@VisiData.api\ndef openPath(vd, p, filetype=None, create=False):\n '''Call ``open_<filetype>(p)`` or ``openurl_<p.scheme>(p, filetype)``. 
Return constructed but unloaded sheet of appropriate type.\n If True, *create* will return a new, blank **Sheet** if file does not exist.'''\n if p.scheme and not p.has_fp():\n schemes = p.scheme.split('+')\n openfuncname = 'openurl_' + schemes[-1]\n\n openfunc = getattr(vd, openfuncname, None) or vd.getGlobals().get(openfuncname, None)\n if not openfunc:\n vd.fail(f'no loader for url scheme: {p.scheme}')\n\n return openfunc(p, filetype=filetype)\n\n if not p.exists() and not create:\n return None\n\n if not filetype:\n filetype = p.ext or vd.options.filetype\n\n filetype = filetype.lower()\n\n if not p.exists():\n if not create:\n return None\n newfunc = getattr(vd, 'new_' + filetype, vd.getGlobals().get('new_' + filetype))\n if not newfunc:\n vd.warning('%s does not exist, creating new sheet' % p)\n return vd.newSheet(p.name, 1, source=p)\n\n vd.status('creating blank %s' % (p.given))\n return newfunc(p)\n\n openfuncname = 'open_' + filetype\n openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))\n if not openfunc:\n opts = vd.guessFiletype(p)\n if opts and 'filetype' in opts:\n filetype = opts['filetype']\n openfuncname = 'open_' + filetype\n openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))\n if not openfunc:\n vd.error(f'guessed {filetype} but no {openfuncname}')\n\n vs = openfunc(p)\n for k, v in opts.items():\n if k != 'filetype' and not k.startswith('_'):\n setattr(vs.options, k, v)\n vd.warning('guessed \"%s\" filetype based on contents' % opts['filetype'])\n return vs\n\n vd.warning('unknown \"%s\" filetype' % filetype)\n\n filetype = 'txt'\n openfunc = vd.open_txt\n\n vd.status('opening %s as %s' % (p.given, filetype))\n\n return openfunc(p)\n\n\n@VisiData.api\ndef openSource(vd, p, filetype=None, create=False, **kwargs):\n '''Return unloaded sheet object for *p* opened as the given *filetype* and with *kwargs* as option overrides. 
*p* can be a Path or a string (filename, url, or \"-\" for stdin).\n when true, *create* will return a blank sheet, if file does not exist.'''\n\n if isinstance(p, BaseSheet):\n return p\n\n filetype = filetype or vd.options.getonly('filetype', str(p), '') #1710\n filetype = filetype or vd.options.getonly('filetype', 'global', '')\n\n vs = None\n if isinstance(p, str):\n if '://' in p:\n vs = vd.openPath(Path(p), filetype=filetype) # convert to Path and recurse\n elif p == '-':\n vs = vd.openPath(vd.stdinSource, filetype=filetype)\n else:\n vs = vd.openPath(Path(p), filetype=filetype, create=create) # convert to Path and recurse\n else:\n vs = vd.openPath(p, filetype=filetype, create=create)\n\n for optname, optval in kwargs.items():\n vs.options[optname] = optval\n\n return vs\n\n\n#### enable external addons\n@VisiData.api\ndef open_txt(vd, p):\n 'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'\n if p.exists(): #1611\n with p.open_text(encoding=vd.options.encoding) as fp:\n delimiter = vd.options.delimiter\n try:\n if delimiter and delimiter in next(fp): # peek at the first line\n return vd.open_tsv(p) # TSV often have .txt extension\n except StopIteration:\n return TableSheet(p.name, columns=[SettableColumn()], source=p)\n return TextSheet(p.name, source=p)\n\n\n@VisiData.api\ndef loadInternalSheet(vd, cls, p, **kwargs):\n 'Load internal sheet of given class.'\n vs = cls(p.name, source=p, **kwargs)\n vd.options._set('encoding', 'utf8', vs)\n if p.exists():\n# vd.sheets.insert(0, vs) # broke replay with macros.reload()\n vs.reload.__wrapped__(vs)\n# vd.sheets.pop(0)\n return vs\n\n\nBaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename(\"open: \"), create=True))', 'Open file or URL')\nTableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f\"file {cursorDisplay} does not exist\"))', 'Open file or URL from path in current cell')\n\nvd.addMenuItems('''\n File > Open file/url > open-file\n''')\n", "path": "visidata/_open.py"}]}
| 2,816 | 192 |
gh_patches_debug_31882
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-191
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enrollment page is accessible without passing eligibility verification
If a user knows the `/enrollment` endpoint and has configured their session with a Transit Agency, they can navigate to the credit card enrollment page and completely bypass the eligibility verification (EV) check. This is a serious bug!
## Steps to reproduce
1. Visit https://test-benefits.calitp.org
2. Click either of the Agency buttons
3. Manually update the URL to https://test-benefits.calitp.org/enrollment
4. Profit
## Remediation
This page must ensure the user has previously verified eligibility criteria
</issue>
<code>
[start of benefits/core/middleware.py]
1 """
2 The core application: middleware definitions for request/response cycle.
3 """
4 import logging
5
6 from django.http import HttpResponse
7 from django.utils.decorators import decorator_from_middleware
8 from django.utils.deprecation import MiddlewareMixin
9 from django.views import i18n
10
11 from benefits.settings import DEBUG
12 from . import analytics, session
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class AgencySessionRequired(MiddlewareMixin):
19 """Middleware raises an exception for sessions lacking an agency configuration."""
20
21 def process_request(self, request):
22 if session.active_agency(request):
23 logger.debug("Session configured with agency")
24 return None
25 else:
26 raise AttributeError("Session not configured with agency")
27
28
29 class DebugSession(MiddlewareMixin):
30 """Middleware to configure debug context in the request session."""
31
32 def process_request(self, request):
33 session.update(request, debug=DEBUG)
34 return None
35
36
37 class Healthcheck:
38 """Middleware intercepts and accepts /healthcheck requests."""
39
40 def __init__(self, get_response):
41 self.get_response = get_response
42
43 def __call__(self, request):
44 if request.path == "/healthcheck":
45 return HttpResponse("Healthy", content_type="text/plain")
46 return self.get_response(request)
47
48
49 class ViewedPageEvent(MiddlewareMixin):
50 """Middleware sends an analytics event for page views."""
51
52 def process_response(self, request, response):
53 event = analytics.ViewedPageEvent(request)
54 try:
55 analytics.send_event(event)
56 except Exception:
57 logger.warning(f"Failed to send event: {event}")
58 finally:
59 return response
60
61
62 pageview_decorator = decorator_from_middleware(ViewedPageEvent)
63
64
65 class ChangedLanguageEvent(MiddlewareMixin):
66 """Middleware hooks into django.views.i18n.set_language to send an analytics event."""
67
68 def process_view(self, request, view_func, view_args, view_kwargs):
69 if view_func == i18n.set_language:
70 new_lang = request.POST["language"]
71 event = analytics.ChangedLanguageEvent(request, new_lang)
72 analytics.send_event(event)
73 return None
74
[end of benefits/core/middleware.py]
[start of benefits/enrollment/views.py]
1 """
2 The enrollment application: view definitions for the benefits enrollment flow.
3 """
4 import logging
5
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.decorators import decorator_from_middleware
9 from django.utils.translation import pgettext, ugettext as _
10
11 from benefits.core import middleware, models, session, viewmodels
12 from benefits.core.views import PageTemplateResponse
13 from . import api, forms
14
15
16 logger = logging.getLogger(__name__)
17
18
19 def _check_access_token(request, agency):
20 """
21 Ensure the request's session is configured with an access token.
22 """
23 if not session.valid_token(request):
24 response = api.Client(agency).access_token()
25 session.update(request, token=response.access_token, token_exp=response.expiry)
26
27
28 def _index(request):
29 """Helper handles GET requests to enrollment index."""
30 agency = session.agency(request)
31
32 _check_access_token(request, agency)
33
34 tokenize_button = "tokenize_card"
35 tokenize_retry_form = forms.CardTokenizeFailForm("enrollment:retry")
36 tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix="")
37
38 page = viewmodels.Page(
39 title=_("enrollment.index.title"),
40 content_title=_("enrollment.index.content_title"),
41 icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")),
42 paragraphs=[_("enrollment.index.p1"), _("enrollment.index.p2")],
43 classes="text-lg-center",
44 forms=[tokenize_retry_form, tokenize_success_form],
45 buttons=[
46 viewmodels.Button.primary(
47 text=_("enrollment.buttons.paymentpartner"), id=tokenize_button, url=f"#{tokenize_button}"
48 ),
49 viewmodels.Button.link(
50 classes="btn-sm", text=_("enrollment.buttons.paymentoptions"), url=reverse("core:payment_options")
51 ),
52 ],
53 )
54 context = {}
55 context.update(page.context_dict())
56
57 # add agency details
58 agency_vm = viewmodels.TransitAgency(agency)
59 context.update(agency_vm.context_dict())
60
61 # and payment processor details
62 processor_vm = viewmodels.PaymentProcessor(
63 model=agency.payment_processor,
64 access_token=session.token(request),
65 element_id=f"#{tokenize_button}",
66 color="#046b99",
67 name=f"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}",
68 )
69 context.update(processor_vm.context_dict())
70 logger.warn(f"card_tokenize_url: {context['payment_processor'].card_tokenize_url}")
71
72 # the tokenize form URLs are injected to page-generated Javascript
73 context["forms"] = {
74 "tokenize_retry": reverse(tokenize_retry_form.action_url),
75 "tokenize_success": reverse(tokenize_success_form.action_url),
76 }
77
78 return TemplateResponse(request, "enrollment/index.html", context)
79
80
81 @decorator_from_middleware(middleware.AgencySessionRequired)
82 def index(request):
83 """View handler for the enrollment landing page."""
84 if request.method == "POST":
85 response = _enroll(request)
86 else:
87 response = _index(request)
88
89 return response
90
91
92 def _enroll(request):
93 """Helper calls the enrollment APIs."""
94 logger.debug("Read tokenized card")
95 form = forms.CardTokenizeSuccessForm(request.POST)
96 if not form.is_valid():
97 raise Exception("Invalid card token form")
98 card_token = form.cleaned_data.get("card_token")
99
100 eligibility = session.eligibility(request)
101 if eligibility:
102 logger.debug(f"Session contains an {models.EligibilityType.__name__}")
103 else:
104 raise Exception("Session contains no eligibility information")
105
106 agency = session.agency(request)
107
108 response = api.Client(agency).enroll(card_token, eligibility.group_id)
109
110 if response.success:
111 return success(request)
112 else:
113 raise Exception("Updated customer_id does not match enrolled customer_id")
114
115
116 @decorator_from_middleware(middleware.AgencySessionRequired)
117 def retry(request):
118 """View handler for a recoverable failure condition."""
119 if request.method == "POST":
120 form = forms.CardTokenizeFailForm(request.POST)
121 if form.is_valid():
122 agency = session.agency(request)
123 page = viewmodels.Page(
124 title=_("enrollment.retry.title"),
125 icon=viewmodels.Icon("bankcardquestion", pgettext("image alt text", "core.icons.bankcardquestion")),
126 content_title=_("enrollment.retry.title"),
127 paragraphs=[_("enrollment.retry.p1")],
128 buttons=viewmodels.Button.agency_contact_links(agency),
129 )
130 page.buttons.append(viewmodels.Button.primary(text=_("enrollment.retry.button"), url=session.origin(request)))
131 return PageTemplateResponse(request, page)
132 else:
133 raise Exception("Invalid retry submission.")
134 else:
135 raise Exception("This view method only supports POST.")
136
137
138 @middleware.pageview_decorator
139 def success(request):
140 """View handler for the final success page."""
141 request.path = "/enrollment/success"
142
143 page = viewmodels.Page(
144 title=_("enrollment.success.title"),
145 icon=viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")),
146 content_title=_("enrollment.success.title"),
147 paragraphs=[_("enrollment.success.p1"), _("enrollment.success.p2")],
148 )
149
150 return TemplateResponse(request, "enrollment/success.html", page.context_dict())
151
[end of benefits/enrollment/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/core/middleware.py b/benefits/core/middleware.py
--- a/benefits/core/middleware.py
+++ b/benefits/core/middleware.py
@@ -26,6 +26,17 @@
raise AttributeError("Session not configured with agency")
+class EligibleSessionRequired(MiddlewareMixin):
+ """Middleware raises an exception for sessions lacking confirmed eligibility."""
+
+ def process_request(self, request):
+ if session.eligible(request):
+ logger.debug("Session has confirmed eligibility")
+ return None
+ else:
+ raise AttributeError("Session has no confirmed eligibility")
+
+
class DebugSession(MiddlewareMixin):
"""Middleware to configure debug context in the request session."""
diff --git a/benefits/enrollment/views.py b/benefits/enrollment/views.py
--- a/benefits/enrollment/views.py
+++ b/benefits/enrollment/views.py
@@ -78,17 +78,6 @@
return TemplateResponse(request, "enrollment/index.html", context)
-@decorator_from_middleware(middleware.AgencySessionRequired)
-def index(request):
- """View handler for the enrollment landing page."""
- if request.method == "POST":
- response = _enroll(request)
- else:
- response = _index(request)
-
- return response
-
-
def _enroll(request):
"""Helper calls the enrollment APIs."""
logger.debug("Read tokenized card")
@@ -113,7 +102,18 @@
raise Exception("Updated customer_id does not match enrolled customer_id")
-@decorator_from_middleware(middleware.AgencySessionRequired)
+@decorator_from_middleware(middleware.EligibleSessionRequired)
+def index(request):
+ """View handler for the enrollment landing page."""
+ if request.method == "POST":
+ response = _enroll(request)
+ else:
+ response = _index(request)
+
+ return response
+
+
+@decorator_from_middleware(middleware.EligibleSessionRequired)
def retry(request):
"""View handler for a recoverable failure condition."""
if request.method == "POST":
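The fix is the usual gate-the-view-with-middleware pattern: `process_request` raises before the enrollment views run unless the session has confirmed eligibility. Below is a stripped-down sketch of the same pattern using stock Django pieces; the session flag used here is hypothetical and only stands in for the project's `session.eligible` helper.

```python
# Illustrative only; mirrors EligibleSessionRequired above with plain Django APIs.
from django.utils.decorators import decorator_from_middleware
from django.utils.deprecation import MiddlewareMixin


class EligibilityRequired(MiddlewareMixin):
    def process_request(self, request):
        if request.session.get("eligibility_verified"):  # hypothetical session flag
            return None  # eligibility confirmed; let the view run
        raise AttributeError("Session has no confirmed eligibility")


@decorator_from_middleware(EligibilityRequired)
def enrollment_index(request):
    ...  # only reachable after eligibility has been verified
```

In the diff both `index` and `retry` swap `AgencySessionRequired` for the stricter `EligibleSessionRequired`, so a direct request to `/enrollment` without verified eligibility can no longer reach the card tokenization flow.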
|
{"golden_diff": "diff --git a/benefits/core/middleware.py b/benefits/core/middleware.py\n--- a/benefits/core/middleware.py\n+++ b/benefits/core/middleware.py\n@@ -26,6 +26,17 @@\n raise AttributeError(\"Session not configured with agency\")\n \n \n+class EligibleSessionRequired(MiddlewareMixin):\n+ \"\"\"Middleware raises an exception for sessions lacking confirmed eligibility.\"\"\"\n+\n+ def process_request(self, request):\n+ if session.eligible(request):\n+ logger.debug(\"Session has confirmed eligibility\")\n+ return None\n+ else:\n+ raise AttributeError(\"Session has no confirmed eligibility\")\n+\n+\n class DebugSession(MiddlewareMixin):\n \"\"\"Middleware to configure debug context in the request session.\"\"\"\n \ndiff --git a/benefits/enrollment/views.py b/benefits/enrollment/views.py\n--- a/benefits/enrollment/views.py\n+++ b/benefits/enrollment/views.py\n@@ -78,17 +78,6 @@\n return TemplateResponse(request, \"enrollment/index.html\", context)\n \n \n-@decorator_from_middleware(middleware.AgencySessionRequired)\n-def index(request):\n- \"\"\"View handler for the enrollment landing page.\"\"\"\n- if request.method == \"POST\":\n- response = _enroll(request)\n- else:\n- response = _index(request)\n-\n- return response\n-\n-\n def _enroll(request):\n \"\"\"Helper calls the enrollment APIs.\"\"\"\n logger.debug(\"Read tokenized card\")\n@@ -113,7 +102,18 @@\n raise Exception(\"Updated customer_id does not match enrolled customer_id\")\n \n \n-@decorator_from_middleware(middleware.AgencySessionRequired)\n+@decorator_from_middleware(middleware.EligibleSessionRequired)\n+def index(request):\n+ \"\"\"View handler for the enrollment landing page.\"\"\"\n+ if request.method == \"POST\":\n+ response = _enroll(request)\n+ else:\n+ response = _index(request)\n+\n+ return response\n+\n+\n+@decorator_from_middleware(middleware.EligibleSessionRequired)\n def retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n", "issue": "Enrollment page is accessible without passing eligibility verification\nIf a user knows the `/enrollment` endpoint, and has configured their session with a Transit Agency, they can navigate to the credit card enrollment page and completely bypass the EV check. This is a serious bug! \r\n\r\n## Steps to reproduce\r\n\r\n1. Visit https://test-benefits.calitp.org\r\n2. Click either of the Agency buttons\r\n3. Manually update the URL to https://test-benefits.calitp.org/enrollment\r\n4. Profit\r\n\r\n## Remediation\r\n\r\nThis page must ensure the user has previously verified eligibility criteria\n", "before_files": [{"content": "\"\"\"\nThe core application: middleware definitions for request/response cycle.\n\"\"\"\nimport logging\n\nfrom django.http import HttpResponse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.views import i18n\n\nfrom benefits.settings import DEBUG\nfrom . 
import analytics, session\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AgencySessionRequired(MiddlewareMixin):\n \"\"\"Middleware raises an exception for sessions lacking an agency configuration.\"\"\"\n\n def process_request(self, request):\n if session.active_agency(request):\n logger.debug(\"Session configured with agency\")\n return None\n else:\n raise AttributeError(\"Session not configured with agency\")\n\n\nclass DebugSession(MiddlewareMixin):\n \"\"\"Middleware to configure debug context in the request session.\"\"\"\n\n def process_request(self, request):\n session.update(request, debug=DEBUG)\n return None\n\n\nclass Healthcheck:\n \"\"\"Middleware intercepts and accepts /healthcheck requests.\"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if request.path == \"/healthcheck\":\n return HttpResponse(\"Healthy\", content_type=\"text/plain\")\n return self.get_response(request)\n\n\nclass ViewedPageEvent(MiddlewareMixin):\n \"\"\"Middleware sends an analytics event for page views.\"\"\"\n\n def process_response(self, request, response):\n event = analytics.ViewedPageEvent(request)\n try:\n analytics.send_event(event)\n except Exception:\n logger.warning(f\"Failed to send event: {event}\")\n finally:\n return response\n\n\npageview_decorator = decorator_from_middleware(ViewedPageEvent)\n\n\nclass ChangedLanguageEvent(MiddlewareMixin):\n \"\"\"Middleware hooks into django.views.i18n.set_language to send an analytics event.\"\"\"\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n if view_func == i18n.set_language:\n new_lang = request.POST[\"language\"]\n event = analytics.ChangedLanguageEvent(request, new_lang)\n analytics.send_event(event)\n return None\n", "path": "benefits/core/middleware.py"}, {"content": "\"\"\"\nThe enrollment application: view definitions for the benefits enrollment flow.\n\"\"\"\nimport logging\n\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, ugettext as _\n\nfrom benefits.core import middleware, models, session, viewmodels\nfrom benefits.core.views import PageTemplateResponse\nfrom . 
import api, forms\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_access_token(request, agency):\n \"\"\"\n Ensure the request's session is configured with an access token.\n \"\"\"\n if not session.valid_token(request):\n response = api.Client(agency).access_token()\n session.update(request, token=response.access_token, token_exp=response.expiry)\n\n\ndef _index(request):\n \"\"\"Helper handles GET requests to enrollment index.\"\"\"\n agency = session.agency(request)\n\n _check_access_token(request, agency)\n\n tokenize_button = \"tokenize_card\"\n tokenize_retry_form = forms.CardTokenizeFailForm(\"enrollment:retry\")\n tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix=\"\")\n\n page = viewmodels.Page(\n title=_(\"enrollment.index.title\"),\n content_title=_(\"enrollment.index.content_title\"),\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n paragraphs=[_(\"enrollment.index.p1\"), _(\"enrollment.index.p2\")],\n classes=\"text-lg-center\",\n forms=[tokenize_retry_form, tokenize_success_form],\n buttons=[\n viewmodels.Button.primary(\n text=_(\"enrollment.buttons.paymentpartner\"), id=tokenize_button, url=f\"#{tokenize_button}\"\n ),\n viewmodels.Button.link(\n classes=\"btn-sm\", text=_(\"enrollment.buttons.paymentoptions\"), url=reverse(\"core:payment_options\")\n ),\n ],\n )\n context = {}\n context.update(page.context_dict())\n\n # add agency details\n agency_vm = viewmodels.TransitAgency(agency)\n context.update(agency_vm.context_dict())\n\n # and payment processor details\n processor_vm = viewmodels.PaymentProcessor(\n model=agency.payment_processor,\n access_token=session.token(request),\n element_id=f\"#{tokenize_button}\",\n color=\"#046b99\",\n name=f\"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}\",\n )\n context.update(processor_vm.context_dict())\n logger.warn(f\"card_tokenize_url: {context['payment_processor'].card_tokenize_url}\")\n\n # the tokenize form URLs are injected to page-generated Javascript\n context[\"forms\"] = {\n \"tokenize_retry\": reverse(tokenize_retry_form.action_url),\n \"tokenize_success\": reverse(tokenize_success_form.action_url),\n }\n\n return TemplateResponse(request, \"enrollment/index.html\", context)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef index(request):\n \"\"\"View handler for the enrollment landing page.\"\"\"\n if request.method == \"POST\":\n response = _enroll(request)\n else:\n response = _index(request)\n\n return response\n\n\ndef _enroll(request):\n \"\"\"Helper calls the enrollment APIs.\"\"\"\n logger.debug(\"Read tokenized card\")\n form = forms.CardTokenizeSuccessForm(request.POST)\n if not form.is_valid():\n raise Exception(\"Invalid card token form\")\n card_token = form.cleaned_data.get(\"card_token\")\n\n eligibility = session.eligibility(request)\n if eligibility:\n logger.debug(f\"Session contains an {models.EligibilityType.__name__}\")\n else:\n raise Exception(\"Session contains no eligibility information\")\n\n agency = session.agency(request)\n\n response = api.Client(agency).enroll(card_token, eligibility.group_id)\n\n if response.success:\n return success(request)\n else:\n raise Exception(\"Updated customer_id does not match enrolled customer_id\")\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n form = 
forms.CardTokenizeFailForm(request.POST)\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n title=_(\"enrollment.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n content_title=_(\"enrollment.retry.title\"),\n paragraphs=[_(\"enrollment.retry.p1\")],\n buttons=viewmodels.Button.agency_contact_links(agency),\n )\n page.buttons.append(viewmodels.Button.primary(text=_(\"enrollment.retry.button\"), url=session.origin(request)))\n return PageTemplateResponse(request, page)\n else:\n raise Exception(\"Invalid retry submission.\")\n else:\n raise Exception(\"This view method only supports POST.\")\n\n\n@middleware.pageview_decorator\ndef success(request):\n \"\"\"View handler for the final success page.\"\"\"\n request.path = \"/enrollment/success\"\n\n page = viewmodels.Page(\n title=_(\"enrollment.success.title\"),\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n content_title=_(\"enrollment.success.title\"),\n paragraphs=[_(\"enrollment.success.p1\"), _(\"enrollment.success.p2\")],\n )\n\n return TemplateResponse(request, \"enrollment/success.html\", page.context_dict())\n", "path": "benefits/enrollment/views.py"}]}
| 2,732 | 467 |
gh_patches_debug_20127
|
rasdani/github-patches
|
git_diff
|
rotki__rotki-591
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sign In Failed - TypeError
Good evening!
I'm here on Linux; I've just tried to log in to my Rotki database created with 1.0.4 (using 1.0.5 now). After I type in my password and log in, I get the message:
> **Sign In Failed**
> TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
Now when I attempt to go back to 1.0.4 I get
> **Sign In Failed**
> DBUpgradeError: Your database version is newer than the version expected by the executable. Did you perhaps try to revert to an older rotkehlchen version?Please only use the latest version of the software.
No big worries, I'm still evaluating the software to see if it can do what I need so there's not a ton of data in there. Just thought you should know. I'll be happy to help with debugging if I can. But kids, so... I'll do my best!
</issue>
<code>
[start of rotkehlchen/db/settings.py]
1 from typing import Any, Dict, NamedTuple, Union
2
3 from rotkehlchen.constants.assets import S_USD
4 from rotkehlchen.constants.timing import YEAR_IN_SECONDS
5 from rotkehlchen.db.utils import str_to_bool
6 from rotkehlchen.errors import DeserializationError
7 from rotkehlchen.typing import FiatAsset, Timestamp
8 from rotkehlchen.user_messages import MessagesAggregator
9
10 ROTKEHLCHEN_DB_VERSION = 8
11 DEFAULT_TAXFREE_AFTER_PERIOD = YEAR_IN_SECONDS
12 DEFAULT_INCLUDE_CRYPTO2CRYPTO = True
13 DEFAULT_INCLUDE_GAS_COSTS = True
14 DEFAULT_ANONYMIZED_LOGS = False
15 DEFAULT_PREMIUM_SHOULD_SYNC = False
16 DEFAULT_START_DATE = '01/08/2015'
17 DEFAULT_UI_FLOATING_PRECISION = 2
18 DEFAULT_BALANCE_SAVE_FREQUENCY = 24
19 DEFAULT_MAIN_CURRENCY = S_USD
20 DEFAULT_DATE_DISPLAY_FORMAT = '%d/%m/%Y %H:%M:%S %Z'
21 DEFAULT_SUBMIT_USAGE_ANALYTICS = True
22
23
24 class DBSettings(NamedTuple):
25 version: int = ROTKEHLCHEN_DB_VERSION
26 last_write_ts: Timestamp = Timestamp(0)
27 premium_should_sync: bool = DEFAULT_PREMIUM_SHOULD_SYNC
28 include_crypto2crypto: bool = DEFAULT_INCLUDE_CRYPTO2CRYPTO
29 anonymized_logs: bool = DEFAULT_ANONYMIZED_LOGS
30 last_data_upload_ts: Timestamp = Timestamp(0)
31 ui_floating_precision: int = DEFAULT_UI_FLOATING_PRECISION
32 taxfree_after_period: int = DEFAULT_TAXFREE_AFTER_PERIOD
33 balance_save_frequency: int = DEFAULT_BALANCE_SAVE_FREQUENCY
34 include_gas_costs: bool = DEFAULT_INCLUDE_GAS_COSTS
35 historical_data_start: str = DEFAULT_START_DATE
36 eth_rpc_endpoint: str = 'http://localhost:8545'
37 main_currency: FiatAsset = DEFAULT_MAIN_CURRENCY
38 date_display_format: str = DEFAULT_DATE_DISPLAY_FORMAT
39 last_balance_save: Timestamp = Timestamp(0)
40 submit_usage_analytics: bool = DEFAULT_SUBMIT_USAGE_ANALYTICS
41
42
43 def read_boolean(value: Union[str, bool]) -> bool:
44 if isinstance(value, bool):
45 return value
46 elif isinstance(value, str):
47 return str_to_bool(value)
48
49 raise DeserializationError(
50 f'Failed to read a boolean from {value} which is of type {type(value)}',
51 )
52
53
54 def db_settings_from_dict(
55 settings_dict: Dict[str, Any],
56 msg_aggregator: MessagesAggregator,
57 ) -> DBSettings:
58 specified_args: Dict[str, Any] = {}
59 for key, value in settings_dict.items():
60 if key == 'version':
61 specified_args[key] = int(value)
62 elif key == 'historical_data_start':
63 specified_args[key] = str(value)
64 elif key == 'eth_rpc_endpoint':
65 specified_args[key] = str(value)
66 elif key == 'ui_floating_precision':
67 specified_args[key] = int(value)
68 elif key == 'include_crypto2crypto':
69 specified_args[key] = read_boolean(value)
70 elif key == 'taxfree_after_period':
71 specified_args[key] = int(value)
72 elif key == 'balance_save_frequency':
73 specified_args[key] = int(value)
74 elif key == 'main_currency':
75 specified_args[key] = FiatAsset(str(value))
76 elif key == 'anonymized_logs':
77 specified_args[key] = read_boolean(value)
78 elif key == 'include_gas_costs':
79 specified_args[key] = read_boolean(value)
80 elif key == 'date_display_format':
81 specified_args[key] = str(value)
82 elif key == 'premium_should_sync':
83 specified_args[key] = read_boolean(value)
84 elif key == 'last_write_ts':
85 specified_args[key] = Timestamp(int(value))
86 elif key == 'last_data_upload_ts':
87 specified_args[key] = Timestamp(int(value))
88 elif key == 'last_balance_save':
89 specified_args[key] = Timestamp(int(value))
90 elif key == 'submit_usage_analytics':
91 specified_args[key] = read_boolean(value)
92 else:
93 msg_aggregator.add_warning(
94 f'Unknown DB setting {key} given. Ignoring it. Should not '
95 f'happen so please open an issue in Github.',
96 )
97
98 return DBSettings(**specified_args)
99
[end of rotkehlchen/db/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/rotkehlchen/db/settings.py b/rotkehlchen/db/settings.py
--- a/rotkehlchen/db/settings.py
+++ b/rotkehlchen/db/settings.py
@@ -68,7 +68,23 @@
elif key == 'include_crypto2crypto':
specified_args[key] = read_boolean(value)
elif key == 'taxfree_after_period':
- specified_args[key] = int(value)
+ # taxfree_after_period can also be None, to signify disabled setting
+ if value is None:
+ specified_args[key] = value
+ else:
+ int_value = int(value)
+ if int_value <= 0:
+ value = None
+ msg_aggregator.add_warning(
+ f'A negative or zero value ({int_value}) for taxfree_after_period '
+ f'ended up in the DB. Setting it to None. Please open an issue in '
+ f'Github: https://github.com/rotki/rotki/issues/new/choose',
+ )
+
+ else:
+ value = int_value
+
+ specified_args[key] = value
elif key == 'balance_save_frequency':
specified_args[key] = int(value)
elif key == 'main_currency':
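
The reference diff above reduces to a single deserialization rule for `taxfree_after_period`. A standalone sketch of that rule, with the warning side effect left out and a hypothetical helper name (illustrative only, not part of the rotki codebase):

```python
from typing import Optional, Union


def deserialize_taxfree_after_period(value: Union[str, int, None]) -> Optional[int]:
    """Mirror the patched branch: None means the setting is disabled, and a
    non-positive integer that slipped into the DB is coerced to None instead
    of crashing the login flow with a TypeError."""
    if value is None:
        return None
    int_value = int(value)
    if int_value <= 0:
        # The real patch also emits a warning via msg_aggregator at this point.
        return None
    return int_value
```
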
verification_info:
{"golden_diff": "diff --git a/rotkehlchen/db/settings.py b/rotkehlchen/db/settings.py\n--- a/rotkehlchen/db/settings.py\n+++ b/rotkehlchen/db/settings.py\n@@ -68,7 +68,23 @@\n elif key == 'include_crypto2crypto':\n specified_args[key] = read_boolean(value)\n elif key == 'taxfree_after_period':\n- specified_args[key] = int(value)\n+ # taxfree_after_period can also be None, to signify disabled setting\n+ if value is None:\n+ specified_args[key] = value\n+ else:\n+ int_value = int(value)\n+ if int_value <= 0:\n+ value = None\n+ msg_aggregator.add_warning(\n+ f'A negative or zero value ({int_value}) for taxfree_after_period '\n+ f'ended up in the DB. Setting it to None. Please open an issue in '\n+ f'Github: https://github.com/rotki/rotki/issues/new/choose',\n+ )\n+\n+ else:\n+ value = int_value\n+\n+ specified_args[key] = value\n elif key == 'balance_save_frequency':\n specified_args[key] = int(value)\n elif key == 'main_currency':\n", "issue": "Sign In Failed - TypeError\nGood evening!\r\n\r\nI'm here on Linux, I've just tried to log in to my Rotki database created with 1.0.4 (using 1.0.5 now). After I type in my password and log in, I get the message\r\n\r\n> **Sign In Failed**\r\n> TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'\r\n\r\nNow when I attempt to go back to 1.0.4 I get\r\n> **Sign In Failed**\r\n> DBUpgradeError: Your database version is newer than the version expected by the executable. Did you perhaps try to revert to an older rotkehlchen version?Please only use the latest version of the software.\r\n\r\nNo big worries, I'm still evaluating the software to see if it can do what I need so there's not a ton of data in there. Just thought you should know. I'll be happy to help with debugging if I can. But kids, so... 
I'll do my best!\n", "before_files": [{"content": "from typing import Any, Dict, NamedTuple, Union\n\nfrom rotkehlchen.constants.assets import S_USD\nfrom rotkehlchen.constants.timing import YEAR_IN_SECONDS\nfrom rotkehlchen.db.utils import str_to_bool\nfrom rotkehlchen.errors import DeserializationError\nfrom rotkehlchen.typing import FiatAsset, Timestamp\nfrom rotkehlchen.user_messages import MessagesAggregator\n\nROTKEHLCHEN_DB_VERSION = 8\nDEFAULT_TAXFREE_AFTER_PERIOD = YEAR_IN_SECONDS\nDEFAULT_INCLUDE_CRYPTO2CRYPTO = True\nDEFAULT_INCLUDE_GAS_COSTS = True\nDEFAULT_ANONYMIZED_LOGS = False\nDEFAULT_PREMIUM_SHOULD_SYNC = False\nDEFAULT_START_DATE = '01/08/2015'\nDEFAULT_UI_FLOATING_PRECISION = 2\nDEFAULT_BALANCE_SAVE_FREQUENCY = 24\nDEFAULT_MAIN_CURRENCY = S_USD\nDEFAULT_DATE_DISPLAY_FORMAT = '%d/%m/%Y %H:%M:%S %Z'\nDEFAULT_SUBMIT_USAGE_ANALYTICS = True\n\n\nclass DBSettings(NamedTuple):\n version: int = ROTKEHLCHEN_DB_VERSION\n last_write_ts: Timestamp = Timestamp(0)\n premium_should_sync: bool = DEFAULT_PREMIUM_SHOULD_SYNC\n include_crypto2crypto: bool = DEFAULT_INCLUDE_CRYPTO2CRYPTO\n anonymized_logs: bool = DEFAULT_ANONYMIZED_LOGS\n last_data_upload_ts: Timestamp = Timestamp(0)\n ui_floating_precision: int = DEFAULT_UI_FLOATING_PRECISION\n taxfree_after_period: int = DEFAULT_TAXFREE_AFTER_PERIOD\n balance_save_frequency: int = DEFAULT_BALANCE_SAVE_FREQUENCY\n include_gas_costs: bool = DEFAULT_INCLUDE_GAS_COSTS\n historical_data_start: str = DEFAULT_START_DATE\n eth_rpc_endpoint: str = 'http://localhost:8545'\n main_currency: FiatAsset = DEFAULT_MAIN_CURRENCY\n date_display_format: str = DEFAULT_DATE_DISPLAY_FORMAT\n last_balance_save: Timestamp = Timestamp(0)\n submit_usage_analytics: bool = DEFAULT_SUBMIT_USAGE_ANALYTICS\n\n\ndef read_boolean(value: Union[str, bool]) -> bool:\n if isinstance(value, bool):\n return value\n elif isinstance(value, str):\n return str_to_bool(value)\n\n raise DeserializationError(\n f'Failed to read a boolean from {value} which is of type {type(value)}',\n )\n\n\ndef db_settings_from_dict(\n settings_dict: Dict[str, Any],\n msg_aggregator: MessagesAggregator,\n) -> DBSettings:\n specified_args: Dict[str, Any] = {}\n for key, value in settings_dict.items():\n if key == 'version':\n specified_args[key] = int(value)\n elif key == 'historical_data_start':\n specified_args[key] = str(value)\n elif key == 'eth_rpc_endpoint':\n specified_args[key] = str(value)\n elif key == 'ui_floating_precision':\n specified_args[key] = int(value)\n elif key == 'include_crypto2crypto':\n specified_args[key] = read_boolean(value)\n elif key == 'taxfree_after_period':\n specified_args[key] = int(value)\n elif key == 'balance_save_frequency':\n specified_args[key] = int(value)\n elif key == 'main_currency':\n specified_args[key] = FiatAsset(str(value))\n elif key == 'anonymized_logs':\n specified_args[key] = read_boolean(value)\n elif key == 'include_gas_costs':\n specified_args[key] = read_boolean(value)\n elif key == 'date_display_format':\n specified_args[key] = str(value)\n elif key == 'premium_should_sync':\n specified_args[key] = read_boolean(value)\n elif key == 'last_write_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_data_upload_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_balance_save':\n specified_args[key] = Timestamp(int(value))\n elif key == 'submit_usage_analytics':\n specified_args[key] = read_boolean(value)\n else:\n msg_aggregator.add_warning(\n f'Unknown DB setting {key} given. Ignoring it. 
Should not '\n f'happen so please open an issue in Github.',\n )\n\n return DBSettings(**specified_args)\n", "path": "rotkehlchen/db/settings.py"}]}
num_tokens_prompt: 1,866
num_tokens_diff: 276

problem_id: gh_patches_debug_16550
source: rasdani/github-patches
task_type: git_diff
in_source_id: deepset-ai__haystack-2819
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PR stuck after approval and automated "Update Documentation & Code Style" commit
**Describe the bug**
See https://github.com/deepset-ai/haystack/pull/2838
This PR has approval, all tests passed and is ready to merge. However due to the "Update Documentation & Code Style" commit, the required tests have not been executed and it's stuck. Manually executing the Test workflow on this branch does not help: see https://github.com/deepset-ai/haystack/actions/runs/2692003395.
All I can do is make a minimal change to trigger the Test workflow correctly. That's cumbersome.
**Expected behavior**
Being able to merge after the "Update Documentation & Code Style" commit when all tests of the previous commit are green, or, maybe easier, being able to manually trigger the workflow to run all required tests for merging.
**To Reproduce**
- Create PR with docs change
- Wait till workflows pass correctly and someone approves
</issue>
<code>
[start of .github/utils/generate_openapi_specs.py]
1 import json
2 from pathlib import Path
3 import os
4 import sys
5 import shutil
6
7 sys.path.append(".")
8 from rest_api.utils import get_openapi_specs, get_app, get_pipelines # pylint: disable=wrong-import-position
9 from haystack import __version__ # pylint: disable=wrong-import-position
10
11 REST_PATH = Path("./rest_api").absolute()
12 PIPELINE_PATH = str(REST_PATH / "pipeline" / "pipeline_empty.haystack-pipeline.yml")
13 APP_PATH = str(REST_PATH / "application.py")
14 DOCS_PATH = Path("./docs") / "_src" / "api" / "openapi"
15
16 os.environ["PIPELINE_YAML_PATH"] = PIPELINE_PATH
17
18 print(f"Loading OpenAPI specs from {APP_PATH} with pipeline at {PIPELINE_PATH}")
19
20 # To initialize the app and the pipelines
21 get_app()
22 get_pipelines()
23
24 # Generate the openapi specs
25 specs = get_openapi_specs()
26
27 # Dump the specs into a JSON file
28 with open(DOCS_PATH / "openapi.json", "w") as f:
29 json.dump(specs, f, indent=4)
30
31 # Remove rc versions of the specs from the folder
32 for specs_file in os.listdir():
33 if os.path.isfile(specs_file) and "rc" in specs_file and Path(specs_file).suffix == ".json":
34 os.remove(specs_file)
35
36 # Add versioned copy
37 shutil.copy(DOCS_PATH / "openapi.json", DOCS_PATH / f"openapi-{__version__}.json")
38
[end of .github/utils/generate_openapi_specs.py]
[start of .github/utils/generate_json_schema.py]
1 import sys
2 import logging
3 from pathlib import Path
4
5 logging.basicConfig(level=logging.INFO)
6
7
8 sys.path.append(".")
9 from haystack.nodes._json_schema import update_json_schema
10
11 update_json_schema(destination_path=Path(__file__).parent.parent.parent / "haystack" / "json-schemas")
12
[end of .github/utils/generate_json_schema.py]
[start of .github/utils/convert_notebooks_into_webpages.py]
1 import re
2
3 from nbconvert import MarkdownExporter
4 import os
5 from pathlib import Path
6
7 headers = {
8 1: """<!---
9 title: "Tutorial 1"
10 metaTitle: "Build Your First QA System"
11 metaDescription: ""
12 slug: "/docs/tutorial1"
13 date: "2020-09-03"
14 id: "tutorial1md"
15 --->""",
16 2: """<!---
17 title: "Tutorial 2"
18 metaTitle: "Fine-tuning a model on your own data"
19 metaDescription: ""
20 slug: "/docs/tutorial2"
21 date: "2020-09-03"
22 id: "tutorial2md"
23 --->""",
24 3: """<!---
25 title: "Tutorial 3"
26 metaTitle: "Build a QA System Without Elasticsearch"
27 metaDescription: ""
28 slug: "/docs/tutorial3"
29 date: "2020-09-03"
30 id: "tutorial3md"
31 --->""",
32 4: """<!---
33 title: "Tutorial 4"
34 metaTitle: "Utilizing existing FAQs for Question Answering"
35 metaDescription: ""
36 slug: "/docs/tutorial4"
37 date: "2020-09-03"
38 id: "tutorial4md"
39 --->""",
40 5: """<!---
41 title: "Tutorial 5"
42 metaTitle: "Evaluation of a QA System"
43 metaDescription: ""
44 slug: "/docs/tutorial5"
45 date: "2020-09-03"
46 id: "tutorial5md"
47 --->""",
48 6: """<!---
49 title: "Tutorial 6"
50 metaTitle: "Better retrieval via Dense Passage Retrieval"
51 metaDescription: ""
52 slug: "/docs/tutorial6"
53 date: "2020-09-03"
54 id: "tutorial6md"
55 --->""",
56 7: """<!---
57 title: "Tutorial 7"
58 metaTitle: "Generative QA with RAG"
59 metaDescription: ""
60 slug: "/docs/tutorial7"
61 date: "2020-11-12"
62 id: "tutorial7md"
63 --->""",
64 8: """<!---
65 title: "Tutorial 8"
66 metaTitle: "Preprocessing"
67 metaDescription: ""
68 slug: "/docs/tutorial8"
69 date: "2021-01-08"
70 id: "tutorial8md"
71 --->""",
72 9: """<!---
73 title: "Tutorial 9"
74 metaTitle: "Training a Dense Passage Retrieval model"
75 metaDescription: ""
76 slug: "/docs/tutorial9"
77 date: "2021-01-08"
78 id: "tutorial9md"
79 --->""",
80 10: """<!---
81 title: "Tutorial 10"
82 metaTitle: "Knowledge Graph QA"
83 metaDescription: ""
84 slug: "/docs/tutorial10"
85 date: "2021-04-06"
86 id: "tutorial10md"
87 --->""",
88 11: """<!---
89 title: "Tutorial 11"
90 metaTitle: "Pipelines"
91 metaDescription: ""
92 slug: "/docs/tutorial11"
93 date: "2021-04-06"
94 id: "tutorial11md"
95 --->""",
96 12: """<!---
97 title: "Tutorial 12"
98 metaTitle: "Generative QA with LFQA"
99 metaDescription: ""
100 slug: "/docs/tutorial12"
101 date: "2021-04-06"
102 id: "tutorial12md"
103 --->""",
104 13: """<!---
105 title: "Tutorial 13"
106 metaTitle: "Question Generation"
107 metaDescription: ""
108 slug: "/docs/tutorial13"
109 date: "2021-08-23"
110 id: "tutorial13md"
111 --->""",
112 14: """<!---
113 title: "Tutorial 14"
114 metaTitle: "Query Classifier Tutorial"
115 metaDescription: ""
116 slug: "/docs/tutorial14"
117 date: "2021-08-23"
118 id: "tutorial14md"
119 --->""",
120 15: """<!---
121 title: "Tutorial 15"
122 metaTitle: "TableQA Tutorial"
123 metaDescription: ""
124 slug: "/docs/tutorial15"
125 date: "2021-10-28"
126 id: "tutorial15md"
127 --->""",
128 16: """<!---
129 title: "Tutorial 16"
130 metaTitle: "DocumentClassifier at Index Time Tutorial"
131 metaDescription: ""
132 slug: "/docs/tutorial16"
133 date: "2021-11-05"
134 id: "tutorial16md"
135 --->""",
136 17: """<!---
137 title: "Tutorial 17"
138 metaTitle: "Audio Tutorial"
139 metaDescription: ""
140 slug: "/docs/tutorial17"
141 date: "2022-06-15"
142 id: "tutorial17md"
143 --->""",
144 18: """<!---
145 title: "Tutorial 18"
146 metaTitle: "GPL Domain Adaptation"
147 metaDescription: ""
148 slug: "/docs/tutorial18"
149 date: "2022-06-22"
150 id: "tutorial18md"
151 --->""",
152 }
153
154
155 def atoi(text):
156 return int(text) if text.isdigit() else text
157
158
159 def natural_keys(text):
160 test = [atoi(c) for c in re.split("(\d+)", text)]
161 return test
162
163
164 dir = Path(__file__).parent.parent.parent / "tutorials"
165
166 notebooks = [x for x in os.listdir(dir) if x[-6:] == ".ipynb"]
167 # sort notebooks based on numbers within name of notebook
168 notebooks = sorted(notebooks, key=lambda x: natural_keys(x))
169
170
171 e = MarkdownExporter(exclude_output=True)
172 for i, nb in enumerate(notebooks):
173 body, resources = e.from_filename(dir / nb)
174 print(f"Processing {dir}/{nb}")
175
176 tutorials_path = Path(__file__).parent.parent.parent / "docs" / "_src" / "tutorials" / "tutorials"
177 with open(tutorials_path / f"{i + 1}.md", "w", encoding="utf-8") as f:
178 try:
179 f.write(headers[i + 1] + "\n\n")
180 except IndexError as e:
181 raise IndexError(
182 "Can't find the header for this tutorial. Have you added it in '.github/utils/convert_notebooks_into_webpages.py'?"
183 )
184 f.write(body)
185
[end of .github/utils/convert_notebooks_into_webpages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/.github/utils/convert_notebooks_into_webpages.py b/.github/utils/convert_notebooks_into_webpages.py
old mode 100644
new mode 100755
--- a/.github/utils/convert_notebooks_into_webpages.py
+++ b/.github/utils/convert_notebooks_into_webpages.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
import re
from nbconvert import MarkdownExporter
@@ -142,7 +144,7 @@
id: "tutorial17md"
--->""",
18: """<!---
-title: "Tutorial 18"
+title: "Tutorial 18"
metaTitle: "GPL Domain Adaptation"
metaDescription: ""
slug: "/docs/tutorial18"
diff --git a/.github/utils/generate_json_schema.py b/.github/utils/generate_json_schema.py
old mode 100644
new mode 100755
--- a/.github/utils/generate_json_schema.py
+++ b/.github/utils/generate_json_schema.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
import sys
import logging
from pathlib import Path
diff --git a/.github/utils/generate_openapi_specs.py b/.github/utils/generate_openapi_specs.py
old mode 100644
new mode 100755
--- a/.github/utils/generate_openapi_specs.py
+++ b/.github/utils/generate_openapi_specs.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
import json
from pathlib import Path
import os
verification_info:
{"golden_diff": "diff --git a/.github/utils/convert_notebooks_into_webpages.py b/.github/utils/convert_notebooks_into_webpages.py\nold mode 100644\nnew mode 100755\n--- a/.github/utils/convert_notebooks_into_webpages.py\n+++ b/.github/utils/convert_notebooks_into_webpages.py\n@@ -1,3 +1,5 @@\n+#!/usr/bin/env python3\n+\n import re\n \n from nbconvert import MarkdownExporter\n@@ -142,7 +144,7 @@\n id: \"tutorial17md\"\n --->\"\"\",\n 18: \"\"\"<!---\n-title: \"Tutorial 18\" \n+title: \"Tutorial 18\"\n metaTitle: \"GPL Domain Adaptation\"\n metaDescription: \"\"\n slug: \"/docs/tutorial18\"\ndiff --git a/.github/utils/generate_json_schema.py b/.github/utils/generate_json_schema.py\nold mode 100644\nnew mode 100755\n--- a/.github/utils/generate_json_schema.py\n+++ b/.github/utils/generate_json_schema.py\n@@ -1,3 +1,5 @@\n+#!/usr/bin/env python3\n+\n import sys\n import logging\n from pathlib import Path\ndiff --git a/.github/utils/generate_openapi_specs.py b/.github/utils/generate_openapi_specs.py\nold mode 100644\nnew mode 100755\n--- a/.github/utils/generate_openapi_specs.py\n+++ b/.github/utils/generate_openapi_specs.py\n@@ -1,3 +1,5 @@\n+#!/usr/bin/env python3\n+\n import json\n from pathlib import Path\n import os\n", "issue": "PR stuck after approval and automated \"Update Documentation & Code Style\" commit\n**Describe the bug**\r\nSee https://github.com/deepset-ai/haystack/pull/2838\r\nThis PR has approval, all tests passed and is ready to merge. However due to the \"Update Documentation & Code Style\" commit, the required tests have not been executed and it's stuck. Manually executing the Test workflow on this branch does not help: see https://github.com/deepset-ai/haystack/actions/runs/2692003395.\r\n\r\nAll I can do, is make a minimal change to trigger the Test workflow correctly. 
That's cumbersome.\r\n\r\n**Expected behavior**\r\nBeing able to merge after \"Update Documentation & Code Style\" commit when all tests of the previous commit are green or maybe easier: being able to manually trigger the workflow to run all required tests for merging.\r\n\r\n**To Reproduce**\r\n- Create PR with docs change\r\n- Wait till workflows pass correctly and someone approves\n", "before_files": [{"content": "import json\nfrom pathlib import Path\nimport os\nimport sys\nimport shutil\n\nsys.path.append(\".\")\nfrom rest_api.utils import get_openapi_specs, get_app, get_pipelines # pylint: disable=wrong-import-position\nfrom haystack import __version__ # pylint: disable=wrong-import-position\n\nREST_PATH = Path(\"./rest_api\").absolute()\nPIPELINE_PATH = str(REST_PATH / \"pipeline\" / \"pipeline_empty.haystack-pipeline.yml\")\nAPP_PATH = str(REST_PATH / \"application.py\")\nDOCS_PATH = Path(\"./docs\") / \"_src\" / \"api\" / \"openapi\"\n\nos.environ[\"PIPELINE_YAML_PATH\"] = PIPELINE_PATH\n\nprint(f\"Loading OpenAPI specs from {APP_PATH} with pipeline at {PIPELINE_PATH}\")\n\n# To initialize the app and the pipelines\nget_app()\nget_pipelines()\n\n# Generate the openapi specs\nspecs = get_openapi_specs()\n\n# Dump the specs into a JSON file\nwith open(DOCS_PATH / \"openapi.json\", \"w\") as f:\n json.dump(specs, f, indent=4)\n\n# Remove rc versions of the specs from the folder\nfor specs_file in os.listdir():\n if os.path.isfile(specs_file) and \"rc\" in specs_file and Path(specs_file).suffix == \".json\":\n os.remove(specs_file)\n\n# Add versioned copy\nshutil.copy(DOCS_PATH / \"openapi.json\", DOCS_PATH / f\"openapi-{__version__}.json\")\n", "path": ".github/utils/generate_openapi_specs.py"}, {"content": "import sys\nimport logging\nfrom pathlib import Path\n\nlogging.basicConfig(level=logging.INFO)\n\n\nsys.path.append(\".\")\nfrom haystack.nodes._json_schema import update_json_schema\n\nupdate_json_schema(destination_path=Path(__file__).parent.parent.parent / \"haystack\" / \"json-schemas\")\n", "path": ".github/utils/generate_json_schema.py"}, {"content": "import re\n\nfrom nbconvert import MarkdownExporter\nimport os\nfrom pathlib import Path\n\nheaders = {\n 1: \"\"\"<!---\ntitle: \"Tutorial 1\"\nmetaTitle: \"Build Your First QA System\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial1\"\ndate: \"2020-09-03\"\nid: \"tutorial1md\"\n--->\"\"\",\n 2: \"\"\"<!---\ntitle: \"Tutorial 2\"\nmetaTitle: \"Fine-tuning a model on your own data\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial2\"\ndate: \"2020-09-03\"\nid: \"tutorial2md\"\n--->\"\"\",\n 3: \"\"\"<!---\ntitle: \"Tutorial 3\"\nmetaTitle: \"Build a QA System Without Elasticsearch\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial3\"\ndate: \"2020-09-03\"\nid: \"tutorial3md\"\n--->\"\"\",\n 4: \"\"\"<!---\ntitle: \"Tutorial 4\"\nmetaTitle: \"Utilizing existing FAQs for Question Answering\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial4\"\ndate: \"2020-09-03\"\nid: \"tutorial4md\"\n--->\"\"\",\n 5: \"\"\"<!---\ntitle: \"Tutorial 5\"\nmetaTitle: \"Evaluation of a QA System\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial5\"\ndate: \"2020-09-03\"\nid: \"tutorial5md\"\n--->\"\"\",\n 6: \"\"\"<!---\ntitle: \"Tutorial 6\"\nmetaTitle: \"Better retrieval via Dense Passage Retrieval\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial6\"\ndate: \"2020-09-03\"\nid: \"tutorial6md\"\n--->\"\"\",\n 7: \"\"\"<!---\ntitle: \"Tutorial 7\"\nmetaTitle: \"Generative QA with RAG\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial7\"\ndate: 
\"2020-11-12\"\nid: \"tutorial7md\"\n--->\"\"\",\n 8: \"\"\"<!---\ntitle: \"Tutorial 8\"\nmetaTitle: \"Preprocessing\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial8\"\ndate: \"2021-01-08\"\nid: \"tutorial8md\"\n--->\"\"\",\n 9: \"\"\"<!---\ntitle: \"Tutorial 9\"\nmetaTitle: \"Training a Dense Passage Retrieval model\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial9\"\ndate: \"2021-01-08\"\nid: \"tutorial9md\"\n--->\"\"\",\n 10: \"\"\"<!---\ntitle: \"Tutorial 10\"\nmetaTitle: \"Knowledge Graph QA\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial10\"\ndate: \"2021-04-06\"\nid: \"tutorial10md\"\n--->\"\"\",\n 11: \"\"\"<!---\ntitle: \"Tutorial 11\"\nmetaTitle: \"Pipelines\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial11\"\ndate: \"2021-04-06\"\nid: \"tutorial11md\"\n--->\"\"\",\n 12: \"\"\"<!---\ntitle: \"Tutorial 12\"\nmetaTitle: \"Generative QA with LFQA\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial12\"\ndate: \"2021-04-06\"\nid: \"tutorial12md\"\n--->\"\"\",\n 13: \"\"\"<!---\ntitle: \"Tutorial 13\"\nmetaTitle: \"Question Generation\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial13\"\ndate: \"2021-08-23\"\nid: \"tutorial13md\"\n--->\"\"\",\n 14: \"\"\"<!---\ntitle: \"Tutorial 14\"\nmetaTitle: \"Query Classifier Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial14\"\ndate: \"2021-08-23\"\nid: \"tutorial14md\"\n--->\"\"\",\n 15: \"\"\"<!---\ntitle: \"Tutorial 15\"\nmetaTitle: \"TableQA Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial15\"\ndate: \"2021-10-28\"\nid: \"tutorial15md\"\n--->\"\"\",\n 16: \"\"\"<!---\ntitle: \"Tutorial 16\"\nmetaTitle: \"DocumentClassifier at Index Time Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial16\"\ndate: \"2021-11-05\"\nid: \"tutorial16md\"\n--->\"\"\",\n 17: \"\"\"<!---\ntitle: \"Tutorial 17\"\nmetaTitle: \"Audio Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial17\"\ndate: \"2022-06-15\"\nid: \"tutorial17md\"\n--->\"\"\",\n 18: \"\"\"<!---\ntitle: \"Tutorial 18\" \nmetaTitle: \"GPL Domain Adaptation\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial18\"\ndate: \"2022-06-22\"\nid: \"tutorial18md\"\n--->\"\"\",\n}\n\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\n\ndef natural_keys(text):\n test = [atoi(c) for c in re.split(\"(\\d+)\", text)]\n return test\n\n\ndir = Path(__file__).parent.parent.parent / \"tutorials\"\n\nnotebooks = [x for x in os.listdir(dir) if x[-6:] == \".ipynb\"]\n# sort notebooks based on numbers within name of notebook\nnotebooks = sorted(notebooks, key=lambda x: natural_keys(x))\n\n\ne = MarkdownExporter(exclude_output=True)\nfor i, nb in enumerate(notebooks):\n body, resources = e.from_filename(dir / nb)\n print(f\"Processing {dir}/{nb}\")\n\n tutorials_path = Path(__file__).parent.parent.parent / \"docs\" / \"_src\" / \"tutorials\" / \"tutorials\"\n with open(tutorials_path / f\"{i + 1}.md\", \"w\", encoding=\"utf-8\") as f:\n try:\n f.write(headers[i + 1] + \"\\n\\n\")\n except IndexError as e:\n raise IndexError(\n \"Can't find the header for this tutorial. Have you added it in '.github/utils/convert_notebooks_into_webpages.py'?\"\n )\n f.write(body)\n", "path": ".github/utils/convert_notebooks_into_webpages.py"}]}
num_tokens_prompt: 3,097
num_tokens_diff: 368

problem_id: gh_patches_debug_16283
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-mmlab__mmdetection-6279
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]bug in ConvFCBBoxHead's init cfg.
https://github.com/open-mmlab/mmdetection/blob/c88509cb9a73d6bd1edcba64eb924d3cf3cfe85d/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py#L103
This line will override initializers for fc_cls and fc_reg because they are also nn.Linear.
Or is this what's intended? But I see that the old way to initialize fc_cls and fc_reg was with Normal.
</issue>
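
A minimal, framework-free sketch of the clobbering the issue describes. Plain PyTorch is used purely for illustration; the real initialization order is handled by mmcv's weight-init utilities, so treat the module layout and names below as assumptions:

```python
import torch.nn as nn

# Toy stand-in for ConvFCBBoxHead: shared fcs plus the two prediction heads.
class ToyHead(nn.Module):
    def __init__(self):
        super().__init__()
        self.shared_fcs = nn.ModuleList([nn.Linear(16, 16)])
        self.fc_cls = nn.Linear(16, 5)
        self.fc_reg = nn.Linear(16, 4)

head = ToyHead()

# Step 1: the parent class intends Normal(std=0.01 / 0.001) for the heads.
nn.init.normal_(head.fc_cls.weight, std=0.01)
nn.init.normal_(head.fc_reg.weight, std=0.001)

# Step 2: a later rule keyed on layer='Linear' matches *every* nn.Linear,
# so it silently re-initializes fc_cls and fc_reg as well.
for module in head.modules():
    if isinstance(module, nn.Linear):
        nn.init.xavier_uniform_(module.weight)

# The heads no longer carry the Normal init, which is exactly the override
# the issue points at; the merged fix drops the broad layer='Linear' rule.
print(head.fc_cls.weight.std(), head.fc_reg.weight.std())
```
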
<code>
[start of mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import torch.nn as nn
3 from mmcv.cnn import ConvModule
4
5 from mmdet.models.builder import HEADS
6 from mmdet.models.utils import build_linear_layer
7 from .bbox_head import BBoxHead
8
9
10 @HEADS.register_module()
11 class ConvFCBBoxHead(BBoxHead):
12 r"""More general bbox head, with shared conv and fc layers and two optional
13 separated branches.
14
15 .. code-block:: none
16
17 /-> cls convs -> cls fcs -> cls
18 shared convs -> shared fcs
19 \-> reg convs -> reg fcs -> reg
20 """ # noqa: W605
21
22 def __init__(self,
23 num_shared_convs=0,
24 num_shared_fcs=0,
25 num_cls_convs=0,
26 num_cls_fcs=0,
27 num_reg_convs=0,
28 num_reg_fcs=0,
29 conv_out_channels=256,
30 fc_out_channels=1024,
31 conv_cfg=None,
32 norm_cfg=None,
33 init_cfg=None,
34 *args,
35 **kwargs):
36 super(ConvFCBBoxHead, self).__init__(
37 *args, init_cfg=init_cfg, **kwargs)
38 assert (num_shared_convs + num_shared_fcs + num_cls_convs +
39 num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
40 if num_cls_convs > 0 or num_reg_convs > 0:
41 assert num_shared_fcs == 0
42 if not self.with_cls:
43 assert num_cls_convs == 0 and num_cls_fcs == 0
44 if not self.with_reg:
45 assert num_reg_convs == 0 and num_reg_fcs == 0
46 self.num_shared_convs = num_shared_convs
47 self.num_shared_fcs = num_shared_fcs
48 self.num_cls_convs = num_cls_convs
49 self.num_cls_fcs = num_cls_fcs
50 self.num_reg_convs = num_reg_convs
51 self.num_reg_fcs = num_reg_fcs
52 self.conv_out_channels = conv_out_channels
53 self.fc_out_channels = fc_out_channels
54 self.conv_cfg = conv_cfg
55 self.norm_cfg = norm_cfg
56
57 # add shared convs and fcs
58 self.shared_convs, self.shared_fcs, last_layer_dim = \
59 self._add_conv_fc_branch(
60 self.num_shared_convs, self.num_shared_fcs, self.in_channels,
61 True)
62 self.shared_out_channels = last_layer_dim
63
64 # add cls specific branch
65 self.cls_convs, self.cls_fcs, self.cls_last_dim = \
66 self._add_conv_fc_branch(
67 self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
68
69 # add reg specific branch
70 self.reg_convs, self.reg_fcs, self.reg_last_dim = \
71 self._add_conv_fc_branch(
72 self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
73
74 if self.num_shared_fcs == 0 and not self.with_avg_pool:
75 if self.num_cls_fcs == 0:
76 self.cls_last_dim *= self.roi_feat_area
77 if self.num_reg_fcs == 0:
78 self.reg_last_dim *= self.roi_feat_area
79
80 self.relu = nn.ReLU(inplace=True)
81 # reconstruct fc_cls and fc_reg since input channels are changed
82 if self.with_cls:
83 if self.custom_cls_channels:
84 cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
85 else:
86 cls_channels = self.num_classes + 1
87 self.fc_cls = build_linear_layer(
88 self.cls_predictor_cfg,
89 in_features=self.cls_last_dim,
90 out_features=cls_channels)
91 if self.with_reg:
92 out_dim_reg = (4 if self.reg_class_agnostic else 4 *
93 self.num_classes)
94 self.fc_reg = build_linear_layer(
95 self.reg_predictor_cfg,
96 in_features=self.reg_last_dim,
97 out_features=out_dim_reg)
98
99 if init_cfg is None:
100 self.init_cfg += [
101 dict(
102 type='Xavier',
103 layer='Linear',
104 override=[
105 dict(name='shared_fcs'),
106 dict(name='cls_fcs'),
107 dict(name='reg_fcs')
108 ])
109 ]
110
111 def _add_conv_fc_branch(self,
112 num_branch_convs,
113 num_branch_fcs,
114 in_channels,
115 is_shared=False):
116 """Add shared or separable branch.
117
118 convs -> avg pool (optional) -> fcs
119 """
120 last_layer_dim = in_channels
121 # add branch specific conv layers
122 branch_convs = nn.ModuleList()
123 if num_branch_convs > 0:
124 for i in range(num_branch_convs):
125 conv_in_channels = (
126 last_layer_dim if i == 0 else self.conv_out_channels)
127 branch_convs.append(
128 ConvModule(
129 conv_in_channels,
130 self.conv_out_channels,
131 3,
132 padding=1,
133 conv_cfg=self.conv_cfg,
134 norm_cfg=self.norm_cfg))
135 last_layer_dim = self.conv_out_channels
136 # add branch specific fc layers
137 branch_fcs = nn.ModuleList()
138 if num_branch_fcs > 0:
139 # for shared branch, only consider self.with_avg_pool
140 # for separated branches, also consider self.num_shared_fcs
141 if (is_shared
142 or self.num_shared_fcs == 0) and not self.with_avg_pool:
143 last_layer_dim *= self.roi_feat_area
144 for i in range(num_branch_fcs):
145 fc_in_channels = (
146 last_layer_dim if i == 0 else self.fc_out_channels)
147 branch_fcs.append(
148 nn.Linear(fc_in_channels, self.fc_out_channels))
149 last_layer_dim = self.fc_out_channels
150 return branch_convs, branch_fcs, last_layer_dim
151
152 def forward(self, x):
153 # shared part
154 if self.num_shared_convs > 0:
155 for conv in self.shared_convs:
156 x = conv(x)
157
158 if self.num_shared_fcs > 0:
159 if self.with_avg_pool:
160 x = self.avg_pool(x)
161
162 x = x.flatten(1)
163
164 for fc in self.shared_fcs:
165 x = self.relu(fc(x))
166 # separate branches
167 x_cls = x
168 x_reg = x
169
170 for conv in self.cls_convs:
171 x_cls = conv(x_cls)
172 if x_cls.dim() > 2:
173 if self.with_avg_pool:
174 x_cls = self.avg_pool(x_cls)
175 x_cls = x_cls.flatten(1)
176 for fc in self.cls_fcs:
177 x_cls = self.relu(fc(x_cls))
178
179 for conv in self.reg_convs:
180 x_reg = conv(x_reg)
181 if x_reg.dim() > 2:
182 if self.with_avg_pool:
183 x_reg = self.avg_pool(x_reg)
184 x_reg = x_reg.flatten(1)
185 for fc in self.reg_fcs:
186 x_reg = self.relu(fc(x_reg))
187
188 cls_score = self.fc_cls(x_cls) if self.with_cls else None
189 bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
190 return cls_score, bbox_pred
191
192
193 @HEADS.register_module()
194 class Shared2FCBBoxHead(ConvFCBBoxHead):
195
196 def __init__(self, fc_out_channels=1024, *args, **kwargs):
197 super(Shared2FCBBoxHead, self).__init__(
198 num_shared_convs=0,
199 num_shared_fcs=2,
200 num_cls_convs=0,
201 num_cls_fcs=0,
202 num_reg_convs=0,
203 num_reg_fcs=0,
204 fc_out_channels=fc_out_channels,
205 *args,
206 **kwargs)
207
208
209 @HEADS.register_module()
210 class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
211
212 def __init__(self, fc_out_channels=1024, *args, **kwargs):
213 super(Shared4Conv1FCBBoxHead, self).__init__(
214 num_shared_convs=4,
215 num_shared_fcs=1,
216 num_cls_convs=0,
217 num_cls_fcs=0,
218 num_reg_convs=0,
219 num_reg_fcs=0,
220 fc_out_channels=fc_out_channels,
221 *args,
222 **kwargs)
223
[end of mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
--- a/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
+++ b/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
@@ -97,10 +97,16 @@
out_features=out_dim_reg)
if init_cfg is None:
+ # when init_cfg is None,
+ # It has been set to
+ # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],
+ # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]
+ # after `super(ConvFCBBoxHead, self).__init__()`
+ # we only need to append additional configuration
+ # for `shared_fcs`, `cls_fcs` and `reg_fcs`
self.init_cfg += [
dict(
type='Xavier',
- layer='Linear',
override=[
dict(name='shared_fcs'),
dict(name='cls_fcs'),
verification_info:
{"golden_diff": "diff --git a/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py\n--- a/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py\n+++ b/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py\n@@ -97,10 +97,16 @@\n out_features=out_dim_reg)\n \n if init_cfg is None:\n+ # when init_cfg is None,\n+ # It has been set to\n+ # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],\n+ # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]\n+ # after `super(ConvFCBBoxHead, self).__init__()`\n+ # we only need to append additional configuration\n+ # for `shared_fcs`, `cls_fcs` and `reg_fcs`\n self.init_cfg += [\n dict(\n type='Xavier',\n- layer='Linear',\n override=[\n dict(name='shared_fcs'),\n dict(name='cls_fcs'),\n", "issue": "[BUG]bug in ConvFCBBoxHead's init cfg.\nhttps://github.com/open-mmlab/mmdetection/blob/c88509cb9a73d6bd1edcba64eb924d3cf3cfe85d/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py#L103\r\n\r\nThis line will override initializers for fc_cls and fc_reg because they are also nn.Linear. \r\nOr is it what's intended? But I see the old way to initialize fc_cls and fc_reg is using Normal.\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.utils import build_linear_layer\nfrom .bbox_head import BBoxHead\n\n\n@HEADS.register_module()\nclass ConvFCBBoxHead(BBoxHead):\n r\"\"\"More general bbox head, with shared conv and fc layers and two optional\n separated branches.\n\n .. code-block:: none\n\n /-> cls convs -> cls fcs -> cls\n shared convs -> shared fcs\n \\-> reg convs -> reg fcs -> reg\n \"\"\" # noqa: W605\n\n def __init__(self,\n num_shared_convs=0,\n num_shared_fcs=0,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n conv_out_channels=256,\n fc_out_channels=1024,\n conv_cfg=None,\n norm_cfg=None,\n init_cfg=None,\n *args,\n **kwargs):\n super(ConvFCBBoxHead, self).__init__(\n *args, init_cfg=init_cfg, **kwargs)\n assert (num_shared_convs + num_shared_fcs + num_cls_convs +\n num_cls_fcs + num_reg_convs + num_reg_fcs > 0)\n if num_cls_convs > 0 or num_reg_convs > 0:\n assert num_shared_fcs == 0\n if not self.with_cls:\n assert num_cls_convs == 0 and num_cls_fcs == 0\n if not self.with_reg:\n assert num_reg_convs == 0 and num_reg_fcs == 0\n self.num_shared_convs = num_shared_convs\n self.num_shared_fcs = num_shared_fcs\n self.num_cls_convs = num_cls_convs\n self.num_cls_fcs = num_cls_fcs\n self.num_reg_convs = num_reg_convs\n self.num_reg_fcs = num_reg_fcs\n self.conv_out_channels = conv_out_channels\n self.fc_out_channels = fc_out_channels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n # add shared convs and fcs\n self.shared_convs, self.shared_fcs, last_layer_dim = \\\n self._add_conv_fc_branch(\n self.num_shared_convs, self.num_shared_fcs, self.in_channels,\n True)\n self.shared_out_channels = last_layer_dim\n\n # add cls specific branch\n self.cls_convs, self.cls_fcs, self.cls_last_dim = \\\n self._add_conv_fc_branch(\n self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)\n\n # add reg specific branch\n self.reg_convs, self.reg_fcs, self.reg_last_dim = \\\n self._add_conv_fc_branch(\n self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)\n\n if self.num_shared_fcs == 0 and not self.with_avg_pool:\n if self.num_cls_fcs == 0:\n self.cls_last_dim *= self.roi_feat_area\n if 
self.num_reg_fcs == 0:\n self.reg_last_dim *= self.roi_feat_area\n\n self.relu = nn.ReLU(inplace=True)\n # reconstruct fc_cls and fc_reg since input channels are changed\n if self.with_cls:\n if self.custom_cls_channels:\n cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n else:\n cls_channels = self.num_classes + 1\n self.fc_cls = build_linear_layer(\n self.cls_predictor_cfg,\n in_features=self.cls_last_dim,\n out_features=cls_channels)\n if self.with_reg:\n out_dim_reg = (4 if self.reg_class_agnostic else 4 *\n self.num_classes)\n self.fc_reg = build_linear_layer(\n self.reg_predictor_cfg,\n in_features=self.reg_last_dim,\n out_features=out_dim_reg)\n\n if init_cfg is None:\n self.init_cfg += [\n dict(\n type='Xavier',\n layer='Linear',\n override=[\n dict(name='shared_fcs'),\n dict(name='cls_fcs'),\n dict(name='reg_fcs')\n ])\n ]\n\n def _add_conv_fc_branch(self,\n num_branch_convs,\n num_branch_fcs,\n in_channels,\n is_shared=False):\n \"\"\"Add shared or separable branch.\n\n convs -> avg pool (optional) -> fcs\n \"\"\"\n last_layer_dim = in_channels\n # add branch specific conv layers\n branch_convs = nn.ModuleList()\n if num_branch_convs > 0:\n for i in range(num_branch_convs):\n conv_in_channels = (\n last_layer_dim if i == 0 else self.conv_out_channels)\n branch_convs.append(\n ConvModule(\n conv_in_channels,\n self.conv_out_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n last_layer_dim = self.conv_out_channels\n # add branch specific fc layers\n branch_fcs = nn.ModuleList()\n if num_branch_fcs > 0:\n # for shared branch, only consider self.with_avg_pool\n # for separated branches, also consider self.num_shared_fcs\n if (is_shared\n or self.num_shared_fcs == 0) and not self.with_avg_pool:\n last_layer_dim *= self.roi_feat_area\n for i in range(num_branch_fcs):\n fc_in_channels = (\n last_layer_dim if i == 0 else self.fc_out_channels)\n branch_fcs.append(\n nn.Linear(fc_in_channels, self.fc_out_channels))\n last_layer_dim = self.fc_out_channels\n return branch_convs, branch_fcs, last_layer_dim\n\n def forward(self, x):\n # shared part\n if self.num_shared_convs > 0:\n for conv in self.shared_convs:\n x = conv(x)\n\n if self.num_shared_fcs > 0:\n if self.with_avg_pool:\n x = self.avg_pool(x)\n\n x = x.flatten(1)\n\n for fc in self.shared_fcs:\n x = self.relu(fc(x))\n # separate branches\n x_cls = x\n x_reg = x\n\n for conv in self.cls_convs:\n x_cls = conv(x_cls)\n if x_cls.dim() > 2:\n if self.with_avg_pool:\n x_cls = self.avg_pool(x_cls)\n x_cls = x_cls.flatten(1)\n for fc in self.cls_fcs:\n x_cls = self.relu(fc(x_cls))\n\n for conv in self.reg_convs:\n x_reg = conv(x_reg)\n if x_reg.dim() > 2:\n if self.with_avg_pool:\n x_reg = self.avg_pool(x_reg)\n x_reg = x_reg.flatten(1)\n for fc in self.reg_fcs:\n x_reg = self.relu(fc(x_reg))\n\n cls_score = self.fc_cls(x_cls) if self.with_cls else None\n bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n return cls_score, bbox_pred\n\n\n@HEADS.register_module()\nclass Shared2FCBBoxHead(ConvFCBBoxHead):\n\n def __init__(self, fc_out_channels=1024, *args, **kwargs):\n super(Shared2FCBBoxHead, self).__init__(\n num_shared_convs=0,\n num_shared_fcs=2,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)\n\n\n@HEADS.register_module()\nclass Shared4Conv1FCBBoxHead(ConvFCBBoxHead):\n\n def __init__(self, fc_out_channels=1024, *args, **kwargs):\n super(Shared4Conv1FCBBoxHead, self).__init__(\n num_shared_convs=4,\n 
num_shared_fcs=1,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)\n", "path": "mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py"}]}
num_tokens_prompt: 3,077
num_tokens_diff: 261

problem_id: gh_patches_debug_6341
source: rasdani/github-patches
task_type: git_diff
in_source_id: Kinto__kinto-2214
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Straighten out commit_veto situation
In https://github.com/Kinto/kinto-signer/issues/591, we discovered that failing requests do not necessarily abort the transaction. (Exceptions do, but non-exceptional responses with e.g. 503 status codes don't.) There seems to be some confusion about this throughout the codebase; for example see https://github.com/Kinto/kinto-signer/blob/master/kinto_signer/listeners.py#L23-L24. At some point it must have seemed like Pyramid's [commit_veto hook](https://docs.pylonsproject.org/projects/pyramid_tm/en/latest/#adding-a-commit-veto-hook) would solve this automatically for us, but actually rereading this I see that "By default, `pyramid_tm` does not configure a commit veto into the system; you must do it explicitly."
However, the docstring for our [notify_resource_events_before](https://github.com/Kinto/kinto/blob/master/kinto/core/events.py#L152) *claims* to be a commit veto hook, muddying the waters somewhat. This code was added in https://github.com/Kinto/kinto/pull/1263, when we discovered that running "before resource changed" events as a "before commit" hook was invalid and we had to figure out a better approach.
- It seems like at a very minimum, the docstring here is invalid. I believe this is not really a veto but rather just part of the events machinery.
- Should there be a `commit_veto` hook? Personally I would find it surprising that 4xx/5xx responses always automatically abort the transaction, even if they were not driven by exceptions.
</issue>
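
For context, a sketch of what an explicit veto hook could look like if one were wanted. The hook shape follows the pyramid_tm documentation linked above; the registration path and dotted name are assumptions for illustration, not existing Kinto code:

```python
def commit_veto(request, response):
    """Abort the transaction for non-exceptional 4xx/5xx responses.

    pyramid_tm calls the veto with (request, response); a truthy return
    value aborts the commit. Exceptions already abort on their own.
    """
    # An explicit 'x-tm' header lets a view opt in or out on its own.
    xtm = response.headers.get("x-tm")
    if xtm is not None:
        return xtm != "commit"
    return response.status.startswith(("4", "5"))


def includeme(config):
    # Assumed wiring: pyramid_tm reads the 'tm.commit_veto' setting as a
    # dotted name; 'kinto.core.events.commit_veto' is a hypothetical location.
    config.add_settings({"tm.commit_veto": "kinto.core.events.commit_veto"})
```
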
<code>
[start of kinto/core/events.py]
1 import logging
2 import warnings
3 from collections import OrderedDict
4
5 import transaction
6 from pyramid.events import NewRequest
7 import pyramid.tweens
8 from enum import Enum
9
10 from kinto.core.utils import strip_uri_prefix
11
12
13 logger = logging.getLogger(__name__)
14
15
16 class ACTIONS(Enum):
17 CREATE = "create"
18 DELETE = "delete"
19 READ = "read"
20 UPDATE = "update"
21
22 @staticmethod
23 def from_string_list(elements):
24 return tuple(ACTIONS(el) for el in elements)
25
26
27 class _ResourceEvent:
28 def __init__(self, payload, request):
29 self.payload = payload
30 self.request = request
31
32 def __repr__(self):
33 return f"<{self.__class__.__name__} action={self.payload['action']} uri={self.payload['uri']}>"
34
35 @property
36 def read_records(self):
37 message = "`read_records` is deprecated, use `read_objects` instead."
38 warnings.warn(message, DeprecationWarning)
39 return self.read_objects
40
41 @property
42 def impacted_records(self):
43 message = "`impacted_records` is deprecated, use `impacted_objects` instead."
44 warnings.warn(message, DeprecationWarning)
45 return self.impacted_objects
46
47
48 class ResourceRead(_ResourceEvent):
49 """Triggered when a resource is being read.
50 """
51
52 def __init__(self, payload, read_objects, request):
53 super().__init__(payload, request)
54 self.read_objects = read_objects
55
56
57 class ResourceChanged(_ResourceEvent):
58 """Triggered when a resource is being changed.
59 """
60
61 def __init__(self, payload, impacted_objects, request):
62 super().__init__(payload, request)
63 self.impacted_objects = impacted_objects
64
65
66 class AfterResourceRead(_ResourceEvent):
67 """Triggered after a resource was successfully read.
68 """
69
70 def __init__(self, payload, read_objects, request):
71 super().__init__(payload, request)
72 self.read_objects = read_objects
73
74
75 class AfterResourceChanged(_ResourceEvent):
76 """Triggered after a resource was successfully changed.
77 """
78
79 def __init__(self, payload, impacted_objects, request):
80 super().__init__(payload, request)
81 self.impacted_objects = impacted_objects
82
83
84 class EventCollector(object):
85 """A collection to gather events emitted over the course of a request.
86
87 Events are gathered by parent id, resource type, and event
88 type. This serves as a primitive normalization so that we can emit
89 fewer events.
90 """
91
92 def __init__(self):
93 self.event_dict = OrderedDict()
94 """The events as collected so far.
95
96 The key of the event_dict is a triple (resource_name,
97 parent_id, action). The value is a triple (impacted, request,
98 payload). If the same (resource_name, parent_id, action) is
99 encountered, we just extend the existing impacted with the new
100 impacted. N.B. this means all values in the payload must not
101 be specific to a single impacted_object. See
102 https://github.com/Kinto/kinto/issues/945 and
103 https://github.com/Kinto/kinto/issues/1731.
104 """
105
106 def add_event(self, resource_name, parent_id, action, payload, impacted, request):
107 key = (resource_name, parent_id, action)
108 if key not in self.event_dict:
109 value = (payload, impacted, request)
110 self.event_dict[key] = value
111 else:
112 old_value = self.event_dict[key]
113 (old_payload, old_impacted, old_request) = old_value
114 # May be a good idea to assert that old_payload == payload here.
115 self.event_dict[key] = (old_payload, old_impacted + impacted, old_request)
116
117 def drain(self):
118 """Return an iterator that removes elements from this EventCollector.
119
120 This can be used to process events while still allowing events
121 to be added (for instance, as part of a cascade where events
122 add other events).
123
124 Items yielded will be of a tuple suitable for using as
125 arguments to EventCollector.add_event.
126 """
127 return EventCollectorDrain(self)
128
129
130 class EventCollectorDrain(object):
131 """An iterator that drains an EventCollector.
132
133 Get one using EventCollector.drain()."""
134
135 def __init__(self, event_collector):
136 self.event_collector = event_collector
137
138 def __iter__(self):
139 return self
140
141 def __next__(self):
142 if self.event_collector.event_dict:
143 # Get the "first" key in insertion order, so as to process
144 # events in the same order they were queued.
145 key = next(iter(self.event_collector.event_dict.keys()))
146 value = self.event_collector.event_dict.pop(key)
147 return key + value
148 else:
149 raise StopIteration
150
151
152 def notify_resource_events_before(handler, registry):
153 """pyramid_tm "commit veto" hook to run ResourceChanged events.
154
155 This hook being a "commit veto" let us tell pyramid_tm to abort
156 the transaction if the ResourceChanged listeners raise.
157 """
158
159 def tween(request):
160 response = handler(request)
161 for event in request.get_resource_events():
162 request.registry.notify(event)
163
164 return response
165
166 return tween
167
168
169 def setup_transaction_hook(config):
170 """
171 Resource events are plugged with the transactions of ``pyramid_tm``.
172
173 Once a transaction is committed, ``AfterResourceRead`` and
174 ``AfterResourceChanged`` events are sent.
175 """
176
177 def _notify_resource_events_after(success, request):
178 """Notify the accumulated resource events if transaction succeeds.
179 """
180 if not success: # pragma: no cover
181 return
182
183 for event in request.get_resource_events(after_commit=True):
184 try:
185 request.registry.notify(event)
186 except Exception:
187 logger.error("Unable to notify", exc_info=True)
188
189 def on_new_request(event):
190 """When a new request comes in, hook on transaction commit.
191 """
192 # Since there is one transaction per batch, ignore subrequests.
193 if hasattr(event.request, "parent"):
194 return
195 current = transaction.get()
196 current.addAfterCommitHook(_notify_resource_events_after, args=(event.request,))
197
198 config.add_subscriber(on_new_request, NewRequest)
199 config.add_tween(
200 "kinto.core.events.notify_resource_events_before", under=pyramid.tweens.EXCVIEW
201 )
202
203
204 def get_resource_events(request, after_commit=False):
205 """Generator to iterate the list of events triggered on resources.
206
207 The list is sorted chronologically (see OrderedDict).
208
209 This drains the resource_events currently in the request, which
210 allows us to process new events as they are added by current
211 events. However, once the iteration is over, we merge all the
212 events we've emitted into a new resource_events, which we store on
213 the request so we can reprocess the same events in an after-commit
214 tween.
215
216 This generator must be completely consumed!
217 """
218 by_resource = request.bound_data.get("resource_events", EventCollector())
219 afterwards = EventCollector()
220
221 for event_call in by_resource.drain():
222 afterwards.add_event(*event_call)
223 (_, _, action, payload, impacted, request) = event_call
224
225 if after_commit:
226 if action == ACTIONS.READ:
227 event_cls = AfterResourceRead
228 else:
229 event_cls = AfterResourceChanged
230 else:
231 if action == ACTIONS.READ:
232 event_cls = ResourceRead
233 else:
234 event_cls = ResourceChanged
235
236 yield event_cls(payload, impacted, request)
237
238 request.bound_data["resource_events"] = afterwards
239
240
241 def notify_resource_event(
242 request, parent_id, timestamp, data, action, old=None, resource_name=None, resource_data=None
243 ):
244 """Request helper to stack a resource event.
245
246 If a similar event (same resource, same action) already occured during the
247 current transaction (e.g. batch) then just extend the impacted objects of
248 the previous one.
249
250 :param resource_name: The name of the resource on which the event
251 happened (taken from the request if not provided).
252 :param resource_data: Information about the resource on which the
253 event is being emitted. Usually contains information about how
254 to find this object in the hierarchy (for instance,
255 ``bucket_id`` and ``collection_id`` for a record). Taken from
256 the request matchdict if absent.
257 :type resource_data: dict
258
259 """
260 if action == ACTIONS.READ:
261 if not isinstance(data, list):
262 data = [data]
263 impacted = data
264 elif action == ACTIONS.CREATE:
265 impacted = [{"new": data}]
266 elif action == ACTIONS.DELETE:
267 if not isinstance(data, list):
268 impacted = [{"new": data, "old": old}]
269 else:
270 impacted = []
271 for i, new in enumerate(data):
272 impacted.append({"new": new, "old": old[i]})
273 else: # ACTIONS.UPDATE:
274 impacted = [{"new": data, "old": old}]
275
276 # Get previously triggered events.
277 events = request.bound_data.setdefault("resource_events", EventCollector())
278
279 resource_name = resource_name or request.current_resource_name
280 matchdict = resource_data or dict(request.matchdict)
281
282 payload = {
283 "timestamp": timestamp,
284 "action": action.value,
285 # Deprecated: don't actually use URI (see #945).
286 "uri": strip_uri_prefix(request.path),
287 "user_id": request.prefixed_userid,
288 "resource_name": resource_name,
289 }
290
291 # Deprecated: don't actually use `resource_name_id` either (see #945).
292 if "id" in request.matchdict:
293 matchdict[resource_name + "_id"] = matchdict.pop("id")
294
295 payload.update(**matchdict)
296
297 events.add_event(resource_name, parent_id, action, payload, impacted, request)
298
[end of kinto/core/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/kinto/core/events.py b/kinto/core/events.py
--- a/kinto/core/events.py
+++ b/kinto/core/events.py
@@ -150,10 +150,12 @@
def notify_resource_events_before(handler, registry):
- """pyramid_tm "commit veto" hook to run ResourceChanged events.
+ """Tween that runs ResourceChanged events.
+
+ This tween runs under EXCVIEW, so exceptions raised by
+ ResourceChanged events are caught and dealt the same as any other
+ exceptions.
- This hook being a "commit veto" let us tell pyramid_tm to abort
- the transaction if the ResourceChanged listeners raise.
"""
def tween(request):
verification_info:
{"golden_diff": "diff --git a/kinto/core/events.py b/kinto/core/events.py\n--- a/kinto/core/events.py\n+++ b/kinto/core/events.py\n@@ -150,10 +150,12 @@\n \n \n def notify_resource_events_before(handler, registry):\n- \"\"\"pyramid_tm \"commit veto\" hook to run ResourceChanged events.\n+ \"\"\"Tween that runs ResourceChanged events.\n+\n+ This tween runs under EXCVIEW, so exceptions raised by\n+ ResourceChanged events are caught and dealt the same as any other\n+ exceptions.\n \n- This hook being a \"commit veto\" let us tell pyramid_tm to abort\n- the transaction if the ResourceChanged listeners raise.\n \"\"\"\n \n def tween(request):\n", "issue": "Straighten out commit_veto situation\nIn https://github.com/Kinto/kinto-signer/issues/591, we discovered that failing requests do not necessarily abort the transaction. (Exceptions do, but non-exceptional responses with e.g. 503 status codes don't.) There seems to be some confusion about this throughout the codebase; for example see https://github.com/Kinto/kinto-signer/blob/master/kinto_signer/listeners.py#L23-L24. At some point it must have seemed like Pyramid's [commit_veto hook](https://docs.pylonsproject.org/projects/pyramid_tm/en/latest/#adding-a-commit-veto-hook) would solve this automatically for us, but actually rereading this I see that \"By default, `pyramid_tm` does not configure a commit veto into the system; you must do it explicitly.\"\r\n\r\nHowever, the docstring for our [notify_resource_events_before](https://github.com/Kinto/kinto/blob/master/kinto/core/events.py#L152) *claims* to be a commit veto hook, muddying the waters somewhat. This code was added in https://github.com/Kinto/kinto/pull/1263, when we discovered that running \"before resource changed\" events as a \"before commit\" hook was invalid and we had to figure out a better approach.\r\n\r\n- It seems like at a very minimum, the docstring here is invalid. I believe this is not really a veto but rather just part of the events machinery. \r\n- Should there be a `commit_veto` hook? 
Personally I would find it surprising that 4xx/5xx responses always automatically abort the transaction, even if they were not driven by exceptions.\r\n\n", "before_files": [{"content": "import logging\nimport warnings\nfrom collections import OrderedDict\n\nimport transaction\nfrom pyramid.events import NewRequest\nimport pyramid.tweens\nfrom enum import Enum\n\nfrom kinto.core.utils import strip_uri_prefix\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ACTIONS(Enum):\n CREATE = \"create\"\n DELETE = \"delete\"\n READ = \"read\"\n UPDATE = \"update\"\n\n @staticmethod\n def from_string_list(elements):\n return tuple(ACTIONS(el) for el in elements)\n\n\nclass _ResourceEvent:\n def __init__(self, payload, request):\n self.payload = payload\n self.request = request\n\n def __repr__(self):\n return f\"<{self.__class__.__name__} action={self.payload['action']} uri={self.payload['uri']}>\"\n\n @property\n def read_records(self):\n message = \"`read_records` is deprecated, use `read_objects` instead.\"\n warnings.warn(message, DeprecationWarning)\n return self.read_objects\n\n @property\n def impacted_records(self):\n message = \"`impacted_records` is deprecated, use `impacted_objects` instead.\"\n warnings.warn(message, DeprecationWarning)\n return self.impacted_objects\n\n\nclass ResourceRead(_ResourceEvent):\n \"\"\"Triggered when a resource is being read.\n \"\"\"\n\n def __init__(self, payload, read_objects, request):\n super().__init__(payload, request)\n self.read_objects = read_objects\n\n\nclass ResourceChanged(_ResourceEvent):\n \"\"\"Triggered when a resource is being changed.\n \"\"\"\n\n def __init__(self, payload, impacted_objects, request):\n super().__init__(payload, request)\n self.impacted_objects = impacted_objects\n\n\nclass AfterResourceRead(_ResourceEvent):\n \"\"\"Triggered after a resource was successfully read.\n \"\"\"\n\n def __init__(self, payload, read_objects, request):\n super().__init__(payload, request)\n self.read_objects = read_objects\n\n\nclass AfterResourceChanged(_ResourceEvent):\n \"\"\"Triggered after a resource was successfully changed.\n \"\"\"\n\n def __init__(self, payload, impacted_objects, request):\n super().__init__(payload, request)\n self.impacted_objects = impacted_objects\n\n\nclass EventCollector(object):\n \"\"\"A collection to gather events emitted over the course of a request.\n\n Events are gathered by parent id, resource type, and event\n type. This serves as a primitive normalization so that we can emit\n fewer events.\n \"\"\"\n\n def __init__(self):\n self.event_dict = OrderedDict()\n \"\"\"The events as collected so far.\n\n The key of the event_dict is a triple (resource_name,\n parent_id, action). The value is a triple (impacted, request,\n payload). If the same (resource_name, parent_id, action) is\n encountered, we just extend the existing impacted with the new\n impacted. N.B. this means all values in the payload must not\n be specific to a single impacted_object. 
See\n https://github.com/Kinto/kinto/issues/945 and\n https://github.com/Kinto/kinto/issues/1731.\n \"\"\"\n\n def add_event(self, resource_name, parent_id, action, payload, impacted, request):\n key = (resource_name, parent_id, action)\n if key not in self.event_dict:\n value = (payload, impacted, request)\n self.event_dict[key] = value\n else:\n old_value = self.event_dict[key]\n (old_payload, old_impacted, old_request) = old_value\n # May be a good idea to assert that old_payload == payload here.\n self.event_dict[key] = (old_payload, old_impacted + impacted, old_request)\n\n def drain(self):\n \"\"\"Return an iterator that removes elements from this EventCollector.\n\n This can be used to process events while still allowing events\n to be added (for instance, as part of a cascade where events\n add other events).\n\n Items yielded will be of a tuple suitable for using as\n arguments to EventCollector.add_event.\n \"\"\"\n return EventCollectorDrain(self)\n\n\nclass EventCollectorDrain(object):\n \"\"\"An iterator that drains an EventCollector.\n\n Get one using EventCollector.drain().\"\"\"\n\n def __init__(self, event_collector):\n self.event_collector = event_collector\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.event_collector.event_dict:\n # Get the \"first\" key in insertion order, so as to process\n # events in the same order they were queued.\n key = next(iter(self.event_collector.event_dict.keys()))\n value = self.event_collector.event_dict.pop(key)\n return key + value\n else:\n raise StopIteration\n\n\ndef notify_resource_events_before(handler, registry):\n \"\"\"pyramid_tm \"commit veto\" hook to run ResourceChanged events.\n\n This hook being a \"commit veto\" let us tell pyramid_tm to abort\n the transaction if the ResourceChanged listeners raise.\n \"\"\"\n\n def tween(request):\n response = handler(request)\n for event in request.get_resource_events():\n request.registry.notify(event)\n\n return response\n\n return tween\n\n\ndef setup_transaction_hook(config):\n \"\"\"\n Resource events are plugged with the transactions of ``pyramid_tm``.\n\n Once a transaction is committed, ``AfterResourceRead`` and\n ``AfterResourceChanged`` events are sent.\n \"\"\"\n\n def _notify_resource_events_after(success, request):\n \"\"\"Notify the accumulated resource events if transaction succeeds.\n \"\"\"\n if not success: # pragma: no cover\n return\n\n for event in request.get_resource_events(after_commit=True):\n try:\n request.registry.notify(event)\n except Exception:\n logger.error(\"Unable to notify\", exc_info=True)\n\n def on_new_request(event):\n \"\"\"When a new request comes in, hook on transaction commit.\n \"\"\"\n # Since there is one transaction per batch, ignore subrequests.\n if hasattr(event.request, \"parent\"):\n return\n current = transaction.get()\n current.addAfterCommitHook(_notify_resource_events_after, args=(event.request,))\n\n config.add_subscriber(on_new_request, NewRequest)\n config.add_tween(\n \"kinto.core.events.notify_resource_events_before\", under=pyramid.tweens.EXCVIEW\n )\n\n\ndef get_resource_events(request, after_commit=False):\n \"\"\"Generator to iterate the list of events triggered on resources.\n\n The list is sorted chronologically (see OrderedDict).\n\n This drains the resource_events currently in the request, which\n allows us to process new events as they are added by current\n events. 
However, once the iteration is over, we merge all the\n events we've emitted into a new resource_events, which we store on\n the request so we can reprocess the same events in an after-commit\n tween.\n\n This generator must be completely consumed!\n \"\"\"\n by_resource = request.bound_data.get(\"resource_events\", EventCollector())\n afterwards = EventCollector()\n\n for event_call in by_resource.drain():\n afterwards.add_event(*event_call)\n (_, _, action, payload, impacted, request) = event_call\n\n if after_commit:\n if action == ACTIONS.READ:\n event_cls = AfterResourceRead\n else:\n event_cls = AfterResourceChanged\n else:\n if action == ACTIONS.READ:\n event_cls = ResourceRead\n else:\n event_cls = ResourceChanged\n\n yield event_cls(payload, impacted, request)\n\n request.bound_data[\"resource_events\"] = afterwards\n\n\ndef notify_resource_event(\n request, parent_id, timestamp, data, action, old=None, resource_name=None, resource_data=None\n):\n \"\"\"Request helper to stack a resource event.\n\n If a similar event (same resource, same action) already occured during the\n current transaction (e.g. batch) then just extend the impacted objects of\n the previous one.\n\n :param resource_name: The name of the resource on which the event\n happened (taken from the request if not provided).\n :param resource_data: Information about the resource on which the\n event is being emitted. Usually contains information about how\n to find this object in the hierarchy (for instance,\n ``bucket_id`` and ``collection_id`` for a record). Taken from\n the request matchdict if absent.\n :type resource_data: dict\n\n \"\"\"\n if action == ACTIONS.READ:\n if not isinstance(data, list):\n data = [data]\n impacted = data\n elif action == ACTIONS.CREATE:\n impacted = [{\"new\": data}]\n elif action == ACTIONS.DELETE:\n if not isinstance(data, list):\n impacted = [{\"new\": data, \"old\": old}]\n else:\n impacted = []\n for i, new in enumerate(data):\n impacted.append({\"new\": new, \"old\": old[i]})\n else: # ACTIONS.UPDATE:\n impacted = [{\"new\": data, \"old\": old}]\n\n # Get previously triggered events.\n events = request.bound_data.setdefault(\"resource_events\", EventCollector())\n\n resource_name = resource_name or request.current_resource_name\n matchdict = resource_data or dict(request.matchdict)\n\n payload = {\n \"timestamp\": timestamp,\n \"action\": action.value,\n # Deprecated: don't actually use URI (see #945).\n \"uri\": strip_uri_prefix(request.path),\n \"user_id\": request.prefixed_userid,\n \"resource_name\": resource_name,\n }\n\n # Deprecated: don't actually use `resource_name_id` either (see #945).\n if \"id\" in request.matchdict:\n matchdict[resource_name + \"_id\"] = matchdict.pop(\"id\")\n\n payload.update(**matchdict)\n\n events.add_event(resource_name, parent_id, action, payload, impacted, request)\n", "path": "kinto/core/events.py"}]}
| 3,828 | 158 |
gh_patches_debug_42181
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-2830
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MSBuild helper useEnv option causes missing include paths in some cases
In using MSBuild helper for the first time, I received a number of errors about headers that could not be found. After spending significant time validating that the project works normally when running msbuild manually, I determined that the breaking flag was `/useEnv:True`.
I briefly looked at the implementation and it was not immediately obvious to me where the problem might be. I have worked around it by using the `get_command` method from the MSBuild helper, which returns a command where the default of `useEnv` is `False`.
It's mysterious why it causes this problem, but if everything is correct, then the requested change would be to simply expose `useEnv` as a parameter in the `build` method of the `MSBuild` helper rather than forcing it to `True`.
I will try to provide a way to reproduce the error, but it's a private project and I don't know if I have time to isolate the problem any time soon. I spent too much time just identifying it.
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [ ] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
</issue>
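For illustration, a minimal `conanfile.py` sketch of the workaround described in the report above might look like the following. The package name, solution file, and settings are hypothetical; the point is simply that the MSBuild command is assembled via `get_command`, whose `use_env` default is `False`, so `/p:UseEnv=true` never gets added.

```python
from conans import ConanFile, MSBuild, tools


class ExampleConan(ConanFile):
    # Hypothetical recipe illustrating the workaround from the issue above.
    name = "example"
    version = "0.1"
    settings = "os", "compiler", "build_type", "arch"

    def build(self):
        msbuild = MSBuild(self)
        # MSBuild.build() currently forces /p:UseEnv=true, so build the command
        # manually instead; get_command() leaves the flag out (use_env=False).
        with tools.environment_append(msbuild.build_env.vars):
            vcvars = tools.vcvars_command(self.settings)
            command = msbuild.get_command("Example.sln")
            self.run("%s && %s" % (vcvars, command))
```

The same recipe could switch back to a plain `msbuild.build("Example.sln", use_env=False)` call if the `build` method exposes the parameter, as the issue requests.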
<code>
[start of conans/client/build/msbuild.py]
1 import re
2
3 from conans import tools
4 from conans.client.build.visual_environment import (VisualStudioBuildEnvironment,
5 vs_build_type_flags, vs_std_cpp)
6 from conans.client.tools.oss import cpu_count
7 from conans.client.tools.win import vcvars_command
8 from conans.errors import ConanException
9 from conans.util.env_reader import get_env
10 from conans.util.files import tmp_file
11 from conans.model.conan_file import ConanFile
12
13
14 class MSBuild(object):
15
16 def __init__(self, conanfile):
17 if isinstance(conanfile, ConanFile):
18 self._conanfile = conanfile
19 self._settings = self._conanfile.settings
20 self._output = self._conanfile.output
21 self.build_env = VisualStudioBuildEnvironment(self._conanfile)
22 else: # backwards compatible with build_sln_command
23 self._settings = conanfile
24 self.build_env = None
25
26 def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,
27 parallel=True, force_vcvars=False, toolset=None, platforms=None):
28 with tools.environment_append(self.build_env.vars):
29 # Path for custom properties file
30 props_file_contents = self._get_props_file_contents()
31 with tmp_file(props_file_contents) as props_file_path:
32 vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars)
33 command = self.get_command(project_file, props_file_path,
34 targets=targets, upgrade_project=upgrade_project,
35 build_type=build_type, arch=arch, parallel=parallel,
36 toolset=toolset, platforms=platforms,
37 use_env=True)
38 command = "%s && %s" % (vcvars, command)
39 return self._conanfile.run(command)
40
41 def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,
42 build_type=None, arch=None, parallel=True, toolset=None, platforms=None,
43 use_env=False):
44
45 targets = targets or []
46 command = ""
47
48 if upgrade_project and not get_env("CONAN_SKIP_VS_PROJECTS_UPGRADE", False):
49 command += "devenv %s /upgrade && " % project_file
50 else:
51 self._output.info("Skipped sln project upgrade")
52
53 build_type = build_type or self._settings.get_safe("build_type")
54 arch = arch or self._settings.get_safe("arch")
55 if not build_type:
56 raise ConanException("Cannot build_sln_command, build_type not defined")
57 if not arch:
58 raise ConanException("Cannot build_sln_command, arch not defined")
59
60
61
62 command += "msbuild %s /p:Configuration=%s" % (project_file, build_type)
63 msvc_arch = {'x86': 'x86',
64 'x86_64': 'x64',
65 'armv7': 'ARM',
66 'armv8': 'ARM64'}
67 if platforms:
68 msvc_arch.update(platforms)
69 msvc_arch = msvc_arch.get(str(arch))
70 try:
71 sln = tools.load(project_file)
72 pattern = re.compile(r"GlobalSection\(SolutionConfigurationPlatforms\)"
73 r"(.*?)EndGlobalSection", re.DOTALL)
74 solution_global = pattern.search(sln).group(1)
75 lines = solution_global.splitlines()
76 lines = [s.split("=")[0].strip() for s in lines]
77 except Exception:
78 pass
79 else:
80 config = "%s|%s" % (build_type, msvc_arch)
81 if config not in "".join(lines):
82 self._output.warn("***** The configuration %s does not exist in this solution *****" % config)
83 self._output.warn("Use 'platforms' argument to define your architectures")
84
85 if use_env:
86 command += ' /p:UseEnv=true'
87
88 if msvc_arch:
89 command += ' /p:Platform="%s"' % msvc_arch
90
91 if parallel:
92 command += ' /m:%s' % cpu_count()
93
94 if targets:
95 command += " /target:%s" % ";".join(targets)
96
97 if toolset:
98 command += " /p:PlatformToolset=%s" % toolset
99
100 if props_file_path:
101 command += ' /p:ForceImportBeforeCppTargets="%s"' % props_file_path
102
103 return command
104
105 def _get_props_file_contents(self):
106 # how to specify runtime in command line:
107 # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project
108 runtime_library = {"MT": "MultiThreaded",
109 "MTd": "MultiThreadedDebug",
110 "MD": "MultiThreadedDLL",
111 "MDd": "MultiThreadedDebugDLL"}.get(self._settings.get_safe("compiler.runtime"), "")
112
113 if self.build_env:
114 # Take the flags from the build env, the user was able to alter them if needed
115 flags = self.build_env.flags
116 flags.append(self.build_env.std)
117 else: # To be removed when build_sln_command is deprecated
118 flags = vs_build_type_flags(self._settings)
119 flags.append(vs_std_cpp(self._settings))
120
121 template = """<?xml version="1.0" encoding="utf-8"?>
122 <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
123 <ItemDefinitionGroup>
124 <ClCompile>
125 <RuntimeLibrary>{runtime}</RuntimeLibrary>
126 <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
127 </ClCompile>
128 </ItemDefinitionGroup>
129 </Project>""".format(**{"runtime": runtime_library,
130 "compiler_flags": " ".join([flag for flag in flags if flag])})
131 return template
132
[end of conans/client/build/msbuild.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conans/client/build/msbuild.py b/conans/client/build/msbuild.py
--- a/conans/client/build/msbuild.py
+++ b/conans/client/build/msbuild.py
@@ -24,7 +24,7 @@
self.build_env = None
def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,
- parallel=True, force_vcvars=False, toolset=None, platforms=None):
+ parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True):
with tools.environment_append(self.build_env.vars):
# Path for custom properties file
props_file_contents = self._get_props_file_contents()
@@ -34,7 +34,7 @@
targets=targets, upgrade_project=upgrade_project,
build_type=build_type, arch=arch, parallel=parallel,
toolset=toolset, platforms=platforms,
- use_env=True)
+ use_env=use_env)
command = "%s && %s" % (vcvars, command)
return self._conanfile.run(command)
@@ -43,10 +43,10 @@
use_env=False):
targets = targets or []
- command = ""
+ command = []
if upgrade_project and not get_env("CONAN_SKIP_VS_PROJECTS_UPGRADE", False):
- command += "devenv %s /upgrade && " % project_file
+ command.append("devenv %s /upgrade &&" % project_file)
else:
self._output.info("Skipped sln project upgrade")
@@ -57,9 +57,7 @@
if not arch:
raise ConanException("Cannot build_sln_command, arch not defined")
-
-
- command += "msbuild %s /p:Configuration=%s" % (project_file, build_type)
+ command.append("msbuild %s /p:Configuration=%s" % (project_file, build_type))
msvc_arch = {'x86': 'x86',
'x86_64': 'x64',
'armv7': 'ARM',
@@ -83,24 +81,24 @@
self._output.warn("Use 'platforms' argument to define your architectures")
if use_env:
- command += ' /p:UseEnv=true'
+ command.append('/p:UseEnv=true')
if msvc_arch:
- command += ' /p:Platform="%s"' % msvc_arch
+ command.append('/p:Platform="%s"' % msvc_arch)
if parallel:
- command += ' /m:%s' % cpu_count()
+ command.append('/m:%s' % cpu_count())
if targets:
- command += " /target:%s" % ";".join(targets)
+ command.append("/target:%s" % ";".join(targets))
if toolset:
- command += " /p:PlatformToolset=%s" % toolset
+ command.append("/p:PlatformToolset=%s" % toolset)
if props_file_path:
- command += ' /p:ForceImportBeforeCppTargets="%s"' % props_file_path
+ command.append('/p:ForceImportBeforeCppTargets="%s"' % props_file_path)
- return command
+ return " ".join(command)
def _get_props_file_contents(self):
# how to specify runtime in command line:
|
{"golden_diff": "diff --git a/conans/client/build/msbuild.py b/conans/client/build/msbuild.py\n--- a/conans/client/build/msbuild.py\n+++ b/conans/client/build/msbuild.py\n@@ -24,7 +24,7 @@\n self.build_env = None\n \n def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,\n- parallel=True, force_vcvars=False, toolset=None, platforms=None):\n+ parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True):\n with tools.environment_append(self.build_env.vars):\n # Path for custom properties file\n props_file_contents = self._get_props_file_contents()\n@@ -34,7 +34,7 @@\n targets=targets, upgrade_project=upgrade_project,\n build_type=build_type, arch=arch, parallel=parallel,\n toolset=toolset, platforms=platforms,\n- use_env=True)\n+ use_env=use_env)\n command = \"%s && %s\" % (vcvars, command)\n return self._conanfile.run(command)\n \n@@ -43,10 +43,10 @@\n use_env=False):\n \n targets = targets or []\n- command = \"\"\n+ command = []\n \n if upgrade_project and not get_env(\"CONAN_SKIP_VS_PROJECTS_UPGRADE\", False):\n- command += \"devenv %s /upgrade && \" % project_file\n+ command.append(\"devenv %s /upgrade &&\" % project_file)\n else:\n self._output.info(\"Skipped sln project upgrade\")\n \n@@ -57,9 +57,7 @@\n if not arch:\n raise ConanException(\"Cannot build_sln_command, arch not defined\")\n \n-\n-\n- command += \"msbuild %s /p:Configuration=%s\" % (project_file, build_type)\n+ command.append(\"msbuild %s /p:Configuration=%s\" % (project_file, build_type))\n msvc_arch = {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n@@ -83,24 +81,24 @@\n self._output.warn(\"Use 'platforms' argument to define your architectures\")\n \n if use_env:\n- command += ' /p:UseEnv=true'\n+ command.append('/p:UseEnv=true')\n \n if msvc_arch:\n- command += ' /p:Platform=\"%s\"' % msvc_arch\n+ command.append('/p:Platform=\"%s\"' % msvc_arch)\n \n if parallel:\n- command += ' /m:%s' % cpu_count()\n+ command.append('/m:%s' % cpu_count())\n \n if targets:\n- command += \" /target:%s\" % \";\".join(targets)\n+ command.append(\"/target:%s\" % \";\".join(targets))\n \n if toolset:\n- command += \" /p:PlatformToolset=%s\" % toolset\n+ command.append(\"/p:PlatformToolset=%s\" % toolset)\n \n if props_file_path:\n- command += ' /p:ForceImportBeforeCppTargets=\"%s\"' % props_file_path\n+ command.append('/p:ForceImportBeforeCppTargets=\"%s\"' % props_file_path)\n \n- return command\n+ return \" \".join(command)\n \n def _get_props_file_contents(self):\n # how to specify runtime in command line:\n", "issue": "MSBuild helper useEnv option causes missing include paths in some casess\nIn using MSBuild helper for the first time, I received a number of errors about headers that could not be found. After spending significant time validating that the project works normally when running msbuild manually, I determined that the breaking flag was `/useEnv:True`. \r\n\r\nI briefly looked at the implementation and it was not immediately obvious to me where the problem might be. I have worked around it by using the `get_command` method from the MSBuild helper, which returns a command where the default of `useEnv` is `False`. 
\r\n\r\nIt's mysterious why it causes this problem, but if everything is correct, then the requested change would be to simply expose `useEnv` as a parameter in the `build` method of the `MSBuild` helper rather than forcing it to `True`.\r\n\r\nI will try to provide a way to reproduce the error, but it's a private project and I don't know if I have time to isolate the problem any time soon. I spent to much time just identifying it. \r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import re\n\nfrom conans import tools\nfrom conans.client.build.visual_environment import (VisualStudioBuildEnvironment,\n vs_build_type_flags, vs_std_cpp)\nfrom conans.client.tools.oss import cpu_count\nfrom conans.client.tools.win import vcvars_command\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import tmp_file\nfrom conans.model.conan_file import ConanFile\n\n\nclass MSBuild(object):\n\n def __init__(self, conanfile):\n if isinstance(conanfile, ConanFile):\n self._conanfile = conanfile\n self._settings = self._conanfile.settings\n self._output = self._conanfile.output\n self.build_env = VisualStudioBuildEnvironment(self._conanfile)\n else: # backwards compatible with build_sln_command\n self._settings = conanfile\n self.build_env = None\n\n def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,\n parallel=True, force_vcvars=False, toolset=None, platforms=None):\n with tools.environment_append(self.build_env.vars):\n # Path for custom properties file\n props_file_contents = self._get_props_file_contents()\n with tmp_file(props_file_contents) as props_file_path:\n vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars)\n command = self.get_command(project_file, props_file_path,\n targets=targets, upgrade_project=upgrade_project,\n build_type=build_type, arch=arch, parallel=parallel,\n toolset=toolset, platforms=platforms,\n use_env=True)\n command = \"%s && %s\" % (vcvars, command)\n return self._conanfile.run(command)\n\n def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,\n build_type=None, arch=None, parallel=True, toolset=None, platforms=None,\n use_env=False):\n\n targets = targets or []\n command = \"\"\n\n if upgrade_project and not get_env(\"CONAN_SKIP_VS_PROJECTS_UPGRADE\", False):\n command += \"devenv %s /upgrade && \" % project_file\n else:\n self._output.info(\"Skipped sln project upgrade\")\n\n build_type = build_type or self._settings.get_safe(\"build_type\")\n arch = arch or self._settings.get_safe(\"arch\")\n if not build_type:\n raise ConanException(\"Cannot build_sln_command, build_type not defined\")\n if not arch:\n raise ConanException(\"Cannot build_sln_command, arch not defined\")\n\n\n\n command += \"msbuild %s /p:Configuration=%s\" % (project_file, build_type)\n msvc_arch = {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n 'armv8': 'ARM64'}\n if platforms:\n msvc_arch.update(platforms)\n msvc_arch = msvc_arch.get(str(arch))\n try:\n sln = tools.load(project_file)\n pattern = re.compile(r\"GlobalSection\\(SolutionConfigurationPlatforms\\)\"\n r\"(.*?)EndGlobalSection\", re.DOTALL)\n 
solution_global = pattern.search(sln).group(1)\n lines = solution_global.splitlines()\n lines = [s.split(\"=\")[0].strip() for s in lines]\n except Exception:\n pass\n else:\n config = \"%s|%s\" % (build_type, msvc_arch)\n if config not in \"\".join(lines):\n self._output.warn(\"***** The configuration %s does not exist in this solution *****\" % config)\n self._output.warn(\"Use 'platforms' argument to define your architectures\")\n\n if use_env:\n command += ' /p:UseEnv=true'\n\n if msvc_arch:\n command += ' /p:Platform=\"%s\"' % msvc_arch\n\n if parallel:\n command += ' /m:%s' % cpu_count()\n\n if targets:\n command += \" /target:%s\" % \";\".join(targets)\n\n if toolset:\n command += \" /p:PlatformToolset=%s\" % toolset\n\n if props_file_path:\n command += ' /p:ForceImportBeforeCppTargets=\"%s\"' % props_file_path\n\n return command\n\n def _get_props_file_contents(self):\n # how to specify runtime in command line:\n # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project\n runtime_library = {\"MT\": \"MultiThreaded\",\n \"MTd\": \"MultiThreadedDebug\",\n \"MD\": \"MultiThreadedDLL\",\n \"MDd\": \"MultiThreadedDebugDLL\"}.get(self._settings.get_safe(\"compiler.runtime\"), \"\")\n\n if self.build_env:\n # Take the flags from the build env, the user was able to alter them if needed\n flags = self.build_env.flags\n flags.append(self.build_env.std)\n else: # To be removed when build_sln_command is deprecated\n flags = vs_build_type_flags(self._settings)\n flags.append(vs_std_cpp(self._settings))\n\n template = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <Project xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ItemDefinitionGroup>\n <ClCompile>\n <RuntimeLibrary>{runtime}</RuntimeLibrary>\n <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n </ItemDefinitionGroup>\n </Project>\"\"\".format(**{\"runtime\": runtime_library,\n \"compiler_flags\": \" \".join([flag for flag in flags if flag])})\n return template\n", "path": "conans/client/build/msbuild.py"}]}
| 2,395 | 763 |
gh_patches_debug_5620
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-551
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flask instrumentation example doesn't work
**Describe your environment**
Running on MacOS against the latest `master` version of the `opentelemetry-*` packages.
**Steps to reproduce**
Instrument the app as described in the [docs](https://open-telemetry.github.io/opentelemetry-python/ext/flask/flask.html) and [docstring](https://github.com/open-telemetry/opentelemetry-python/blob/master/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py) -
```python
from flask import Flask
from opentelemetry.ext.flask import instrument_app
app = Flask(__name__)
instrument_app(app)
```
**What is the expected behavior?**
I would expect the flask app to start without any errors when running it locally.
**What is the actual behavior?**
I get an error running the app -
```
from opentelemetry.ext.flask import instrument_app
E ImportError: cannot import name 'instrument_app'
```
This is consistent with the fact that the `instrument_app` symbol no longer appears to be in [`opentelemetry.ext.flask`](https://github.com/open-telemetry/opentelemetry-python/blob/master/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py).
<hr>
The last time I ran this code was against v0.3 of the packages, so I imagine a lot has changed since then. It would be helpful to document the new correct way to instrument flask apps as it isn't currently clear to me what the correct way to do this is from looking at the docs and the code. Thanks!
</issue>
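For reference, a hedged sketch of how the snippet in this report maps onto the instrumentor-based API that the package now exposes (see the `FlaskInstrumentor` class in the listing below). The key assumption is that `instrument()` has to run before `Flask` is imported, because the instrumentor works by replacing `flask.Flask`:

```python
from opentelemetry.ext.flask import FlaskInstrumentor

# Patch flask.Flask before the application class is imported and instantiated.
FlaskInstrumentor().instrument()

from flask import Flask  # noqa: E402  -- deliberately imported after instrument()

app = Flask(__name__)


@app.route("/")
def hello():
    return "Hello!"


if __name__ == "__main__":
    app.run(debug=True)
```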
<code>
[start of ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Note: This package is not named "flask" because of
16 # https://github.com/PyCQA/pylint/issues/2648
17
18 """
19 This library builds on the OpenTelemetry WSGI middleware to track web requests
20 in Flask applications. In addition to opentelemetry-ext-wsgi, it supports
21 flask-specific features such as:
22
23 * The Flask endpoint name is used as the Span name.
24 * The ``http.route`` Span attribute is set so that one can see which URL rule
25 matched a request.
26
27 Usage
28 -----
29
30 .. code-block:: python
31
32 from flask import Flask
33 from opentelemetry.ext.flask import instrument_app
34
35 app = Flask(__name__)
36 instrument_app(app) # This is where the magic happens. ✨
37
38 @app.route("/")
39 def hello():
40 return "Hello!"
41
42 if __name__ == "__main__":
43 app.run(debug=True)
44
45 API
46 ---
47 """
48
49 import logging
50
51 import flask
52
53 import opentelemetry.ext.wsgi as otel_wsgi
54 from opentelemetry import context, propagators, trace
55 from opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor
56 from opentelemetry.ext.flask.version import __version__
57 from opentelemetry.util import time_ns
58
59 logger = logging.getLogger(__name__)
60
61 _ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key"
62 _ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key"
63 _ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key"
64 _ENVIRON_TOKEN = "opentelemetry-flask.token"
65
66
67 class _InstrumentedFlask(flask.Flask):
68 def __init__(self, *args, **kwargs):
69
70 super().__init__(*args, **kwargs)
71
72 # Single use variable here to avoid recursion issues.
73 wsgi = self.wsgi_app
74
75 def wrapped_app(environ, start_response):
76 # We want to measure the time for route matching, etc.
77 # In theory, we could start the span here and use
78 # update_name later but that API is "highly discouraged" so
79 # we better avoid it.
80 environ[_ENVIRON_STARTTIME_KEY] = time_ns()
81
82 def _start_response(status, response_headers, *args, **kwargs):
83 span = flask.request.environ.get(_ENVIRON_SPAN_KEY)
84 if span:
85 otel_wsgi.add_response_attributes(
86 span, status, response_headers
87 )
88 else:
89 logger.warning(
90 "Flask environ's OpenTelemetry span "
91 "missing at _start_response(%s)",
92 status,
93 )
94
95 return start_response(
96 status, response_headers, *args, **kwargs
97 )
98
99 return wsgi(environ, _start_response)
100
101 self.wsgi_app = wrapped_app
102
103 @self.before_request
104 def _before_flask_request():
105 environ = flask.request.environ
106 span_name = (
107 flask.request.endpoint
108 or otel_wsgi.get_default_span_name(environ)
109 )
110 token = context.attach(
111 propagators.extract(otel_wsgi.get_header_from_environ, environ)
112 )
113
114 tracer = trace.get_tracer(__name__, __version__)
115
116 attributes = otel_wsgi.collect_request_attributes(environ)
117 if flask.request.url_rule:
118 # For 404 that result from no route found, etc, we
119 # don't have a url_rule.
120 attributes["http.route"] = flask.request.url_rule.rule
121 span = tracer.start_span(
122 span_name,
123 kind=trace.SpanKind.SERVER,
124 attributes=attributes,
125 start_time=environ.get(_ENVIRON_STARTTIME_KEY),
126 )
127 activation = tracer.use_span(span, end_on_exit=True)
128 activation.__enter__()
129 environ[_ENVIRON_ACTIVATION_KEY] = activation
130 environ[_ENVIRON_SPAN_KEY] = span
131 environ[_ENVIRON_TOKEN] = token
132
133 @self.teardown_request
134 def _teardown_flask_request(exc):
135 activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)
136 if not activation:
137 logger.warning(
138 "Flask environ's OpenTelemetry activation missing"
139 "at _teardown_flask_request(%s)",
140 exc,
141 )
142 return
143
144 if exc is None:
145 activation.__exit__(None, None, None)
146 else:
147 activation.__exit__(
148 type(exc), exc, getattr(exc, "__traceback__", None)
149 )
150 context.detach(flask.request.environ.get(_ENVIRON_TOKEN))
151
152
153 class FlaskInstrumentor(BaseInstrumentor):
154 """A instrumentor for flask.Flask
155
156 See `BaseInstrumentor`
157 """
158
159 def __init__(self):
160 super().__init__()
161 self._original_flask = None
162
163 def _instrument(self):
164 self._original_flask = flask.Flask
165 flask.Flask = _InstrumentedFlask
166
167 def _uninstrument(self):
168 flask.Flask = self._original_flask
169
[end of ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
--- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
+++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
@@ -29,11 +29,11 @@
.. code-block:: python
+ from opentelemetry.ext.flask import FlaskInstrumentor
+ FlaskInstrumentor().instrument() # This needs to be executed before importing Flask
from flask import Flask
- from opentelemetry.ext.flask import instrument_app
app = Flask(__name__)
- instrument_app(app) # This is where the magic happens. ✨
@app.route("/")
def hello():
|
{"golden_diff": "diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n--- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n+++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n@@ -29,11 +29,11 @@\n \n .. code-block:: python\n \n+ from opentelemetry.ext.flask import FlaskInstrumentor\n+ FlaskInstrumentor().instrument() # This needs to be executed before importing Flask\n from flask import Flask\n- from opentelemetry.ext.flask import instrument_app\n \n app = Flask(__name__)\n- instrument_app(app) # This is where the magic happens. \u2728\n \n @app.route(\"/\")\n def hello():\n", "issue": "Flask instrumentation example doesn't work\n**Describe your environment**\r\nRunning on MacOS against the latest `mater` version of the `opentelemetry-*` packages.\r\n\r\n**Steps to reproduce**\r\nInstrument the app as described in the [docs](https://open-telemetry.github.io/opentelemetry-python/ext/flask/flask.html) and [docstring](https://github.com/open-telemetry/opentelemetry-python/blob/master/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py) -\r\n\r\n```python\r\nfrom flask import Flask\r\nfrom opentelemetry.ext.flask import instrument_app\r\n \r\napp = Flask(__name__)\r\ninstrument_app(app)\r\n````\r\n\r\n**What is the expected behavior?**\r\nI would expect the flask app to start without any errors when running it locally.\r\n\r\n**What is the actual behavior?**\r\nI get an error running the app -\r\n\r\n```\r\n from opentelemetry.ext.flask import instrument_app\r\nE ImportError: cannot import name 'instrument_app'\r\n```\r\n\r\nThis is consistent with the fact that the `instrument_app` symbol no longer appears to be in [`opentelemetry.ext.flask`](https://github.com/open-telemetry/opentelemetry-python/blob/master/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py).\r\n\r\n<hr>\r\nThe last time I ran this code was against v0.3 of the packages, so I imagine a lot has changed since then. It would be helpful to document the new correct way to instrument flask apps as it isn't currently clear to me what the correct way to do this is from looking at the docs and the code. Thanks! \r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: This package is not named \"flask\" because of\n# https://github.com/PyCQA/pylint/issues/2648\n\n\"\"\"\nThis library builds on the OpenTelemetry WSGI middleware to track web requests\nin Flask applications. In addition to opentelemetry-ext-wsgi, it supports\nflask-specific features such as:\n\n* The Flask endpoint name is used as the Span name.\n* The ``http.route`` Span attribute is set so that one can see which URL rule\n matched a request.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n from flask import Flask\n from opentelemetry.ext.flask import instrument_app\n\n app = Flask(__name__)\n instrument_app(app) # This is where the magic happens. \u2728\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\nAPI\n---\n\"\"\"\n\nimport logging\n\nimport flask\n\nimport opentelemetry.ext.wsgi as otel_wsgi\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.ext.flask.version import __version__\nfrom opentelemetry.util import time_ns\n\nlogger = logging.getLogger(__name__)\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-flask.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-flask.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-flask.activation_key\"\n_ENVIRON_TOKEN = \"opentelemetry-flask.token\"\n\n\nclass _InstrumentedFlask(flask.Flask):\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n # Single use variable here to avoid recursion issues.\n wsgi = self.wsgi_app\n\n def wrapped_app(environ, start_response):\n # We want to measure the time for route matching, etc.\n # In theory, we could start the span here and use\n # update_name later but that API is \"highly discouraged\" so\n # we better avoid it.\n environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n\n def _start_response(status, response_headers, *args, **kwargs):\n span = flask.request.environ.get(_ENVIRON_SPAN_KEY)\n if span:\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n else:\n logger.warning(\n \"Flask environ's OpenTelemetry span \"\n \"missing at _start_response(%s)\",\n status,\n )\n\n return start_response(\n status, response_headers, *args, **kwargs\n )\n\n return wsgi(environ, _start_response)\n\n self.wsgi_app = wrapped_app\n\n @self.before_request\n def _before_flask_request():\n environ = flask.request.environ\n span_name = (\n flask.request.endpoint\n or otel_wsgi.get_default_span_name(environ)\n )\n token = context.attach(\n propagators.extract(otel_wsgi.get_header_from_environ, environ)\n )\n\n tracer = trace.get_tracer(__name__, __version__)\n\n attributes = otel_wsgi.collect_request_attributes(environ)\n if flask.request.url_rule:\n # For 404 that result from no route found, etc, we\n # don't have a url_rule.\n attributes[\"http.route\"] = flask.request.url_rule.rule\n span = tracer.start_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n start_time=environ.get(_ENVIRON_STARTTIME_KEY),\n )\n activation = tracer.use_span(span, end_on_exit=True)\n activation.__enter__()\n environ[_ENVIRON_ACTIVATION_KEY] = activation\n environ[_ENVIRON_SPAN_KEY] = span\n environ[_ENVIRON_TOKEN] = token\n\n @self.teardown_request\n def _teardown_flask_request(exc):\n activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)\n if not activation:\n logger.warning(\n \"Flask environ's OpenTelemetry activation missing\"\n \"at _teardown_flask_request(%s)\",\n exc,\n )\n return\n\n if exc is None:\n activation.__exit__(None, None, None)\n else:\n activation.__exit__(\n type(exc), exc, getattr(exc, \"__traceback__\", None)\n )\n context.detach(flask.request.environ.get(_ENVIRON_TOKEN))\n\n\nclass FlaskInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for flask.Flask\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_flask = None\n\n def _instrument(self):\n self._original_flask = flask.Flask\n flask.Flask = 
_InstrumentedFlask\n\n def _uninstrument(self):\n flask.Flask = self._original_flask\n", "path": "ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py"}]}
| 2,502 | 192 |
gh_patches_debug_59676
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-90
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'Is reporter a Mozillian' as a feature
The first implementation will simply check if an email contains "@mozilla.com" or "@mozilla.org".
</issue>
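A sketch of what the described check could look like as a feature-extractor class in the style of `bug_features.py` below; the `creator_detail`/`email` keys are an assumption about the Bugzilla bug payload stored by bugbug:

```python
class is_mozillian(object):
    # Sketch only: assumes the reporter's address is available at
    # bug['creator_detail']['email'] in the Bugzilla data bugbug stores.
    def __call__(self, bug):
        email = bug['creator_detail']['email']
        # The issue asks for a simple "contains" check; endswith() would be a
        # stricter variant that avoids matching e.g. "@mozilla.com.evil.org".
        return any(domain in email for domain in ('@mozilla.com', '@mozilla.org'))
```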
<code>
[start of bugbug/bug_features.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import re
7
8 import pandas as pd
9 from sklearn.base import BaseEstimator
10 from sklearn.base import TransformerMixin
11
12 from bugbug import bug_snapshot
13
14
15 def field(bug, field):
16 if field in bug and bug[field] != '---':
17 return bug[field]
18
19 return None
20
21
22 class has_str(object):
23 def __call__(self, bug):
24 return field(bug, 'cf_has_str')
25
26
27 class has_regression_range(object):
28 def __call__(self, bug):
29 return field(bug, 'cf_has_regression_range')
30
31
32 class has_crash_signature(object):
33 def __call__(self, bug):
34 return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''
35
36
37 class keywords(object):
38 def __init__(self, to_ignore=set()):
39 self.to_ignore = to_ignore
40
41 def __call__(self, bug):
42 keywords = []
43 subkeywords = []
44 for keyword in bug['keywords']:
45 if keyword in self.to_ignore:
46 continue
47
48 keywords.append(keyword)
49
50 if keyword.startswith('sec-'):
51 subkeywords.append('sec-')
52 elif keyword.startswith('csectype-'):
53 subkeywords.append('csectype-')
54 return keywords + subkeywords
55
56
57 class severity(object):
58 def __call__(self, bug):
59 return field(bug, 'severity')
60
61
62 class is_coverity_issue(object):
63 def __call__(self, bug):
64 return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None
65
66
67 class has_url(object):
68 def __call__(self, bug):
69 return bug['url'] != ''
70
71
72 class has_w3c_url(object):
73 def __call__(self, bug):
74 return 'w3c' in bug['url']
75
76
77 class has_github_url(object):
78 def __call__(self, bug):
79 return 'github' in bug['url']
80
81
82 class whiteboard(object):
83 def __call__(self, bug):
84 ret = []
85
86 # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only
87
88 for elem in ['memshrink', '[ux]']:
89 if elem in bug['whiteboard'].lower():
90 ret.append(elem)
91
92 return ret
93
94
95 class patches(object):
96 def __call__(self, bug):
97 return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])
98
99
100 class landings(object):
101 def __call__(self, bug):
102 return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])
103
104
105 class title(object):
106 def __call__(self, bug):
107 ret = []
108
109 keywords = [
110 'fail',
111 ]
112 for keyword in keywords:
113 if keyword in bug['summary'].lower():
114 ret.append(keyword)
115
116 return ret
117
118
119 class product(object):
120 def __call__(self, bug):
121 return bug['product']
122
123
124 class component(object):
125 def __call__(self, bug):
126 return bug['component']
127
128
129 def cleanup_url(text):
130 text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
131 return re.sub(r'http\S+', '__URL__', text)
132
133
134 def cleanup_fileref(text):
135 return re.sub(r'\w+\.py\b|\w+\.json\b|\w+\.js\b|\w+\.jsm\b|\w+\.html\b|\w+\.css\b|\w+\.c\b|\w+\.cpp\b|\w+\.h\b', '__FILE_REFERENCE__', text)
136
137
138 def cleanup_responses(text):
139 return re.sub('>[^\n]+', ' ', text)
140
141
142 def cleanup_hex(text):
143 return re.sub(r'\b0[xX][0-9a-fA-F]+\b', '__HEX_NUMBER__', text)
144
145
146 def cleanup_dll(text):
147 return re.sub(r'\w+\.dll\b', '__DLL_NAME__', text)
148
149
150 def cleanup_synonyms(text):
151 synonyms = [
152 ('safemode', ['safemode', 'safe mode']),
153 ('str', ['str', 'steps to reproduce', 'repro steps']),
154 ('uaf', ['uaf', 'use after free', 'use-after-free']),
155 ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),
156 ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),
157 ]
158
159 for synonym_group, synonym_list in synonyms:
160 text = re.sub('|'.join(synonym_list), synonym_group, text, flags=re.IGNORECASE)
161
162 return text
163
164
165 def cleanup_crash(text):
166 return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\b', '__CRASH_STATS_LINK__', text)
167
168
169 class BugExtractor(BaseEstimator, TransformerMixin):
170 def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_messages_map=None):
171 self.feature_extractors = feature_extractors
172 self.cleanup_functions = cleanup_functions
173 self.rollback = rollback
174 self.rollback_when = rollback_when
175 self.commit_messages_map = commit_messages_map
176
177 def fit(self, x, y=None):
178 return self
179
180 def transform(self, bugs):
181 results = []
182
183 for bug in bugs:
184 bug_id = bug['id']
185
186 if self.rollback:
187 bug = bug_snapshot.rollback(bug, self.rollback_when)
188
189 data = {}
190
191 for f in self.feature_extractors:
192 res = f(bug)
193
194 if res is None:
195 continue
196
197 if isinstance(res, list):
198 for item in res:
199 data[f.__class__.__name__ + '-' + item] = 'True'
200 continue
201
202 if isinstance(res, bool):
203 res = str(res)
204
205 data[f.__class__.__name__] = res
206
207 # TODO: Try simply using all possible fields instead of extracting features manually.
208
209 for cleanup_function in self.cleanup_functions:
210 bug['summary'] = cleanup_function(bug['summary'])
211 for c in bug['comments']:
212 c['text'] = cleanup_function(c['text'])
213
214 result = {
215 'data': data,
216 'title': bug['summary'],
217 'first_comment': bug['comments'][0]['text'],
218 'comments': ' '.join([c['text'] for c in bug['comments']]),
219 }
220
221 if self.commit_messages_map is not None:
222 result['commits'] = self.commit_messages_map[bug_id] if bug_id in self.commit_messages_map else ''
223
224 results.append(result)
225
226 return pd.DataFrame(results)
227
[end of bugbug/bug_features.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py
--- a/bugbug/bug_features.py
+++ b/bugbug/bug_features.py
@@ -126,6 +126,11 @@
return bug['component']
+class is_mozillian(object):
+ def __call__(self, bug):
+ return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])
+
+
def cleanup_url(text):
text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
return re.sub(r'http\S+', '__URL__', text)
|
{"golden_diff": "diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py\n--- a/bugbug/bug_features.py\n+++ b/bugbug/bug_features.py\n@@ -126,6 +126,11 @@\n return bug['component']\n \n \n+class is_mozillian(object):\n+ def __call__(self, bug):\n+ return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n+\n+\n def cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n", "issue": "'Is reporter a Mozillian' as a feature\nThe first implementation will simply check if an email contains \"@mozilla.com\" or \"@mozilla.org\".\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n ret = []\n\n # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only\n\n for elem in ['memshrink', '[ux]']:\n if elem in bug['whiteboard'].lower():\n ret.append(elem)\n\n return ret\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\ndef cleanup_url(text):\n text = 
re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+\\.dll\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', ['safemode', 'safe mode']),\n ('str', ['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_messages_map=None):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_messages_map = commit_messages_map\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, bugs):\n results = []\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n for f in self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n if self.commit_messages_map is not None:\n result['commits'] = self.commit_messages_map[bug_id] if bug_id in self.commit_messages_map else ''\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}]}
| 2,765 | 158 |
gh_patches_debug_27214
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6134
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove contributor notification post account creation
Once a user has successfully signed up, we show a banner similar to the one below either just below the page header, or generally at the top of the page.

Because of the changes to account roles, these no longer make sense and should be removed.
</issue>
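As a sketch of the intended cleanup (an assumption, not the actual patch), dropping the banner would leave `on_user_signed_up` in `kuma/users/signal_handlers.py` with only its welcome-email logic, keeping the module's existing imports:

```python
@receiver(user_signed_up, dispatch_uid='users.user_signed_up')
def on_user_signed_up(sender, request, user, **kwargs):
    """
    Signal handler to be called when a given user has signed up.
    """
    if switch_is_active('welcome_email'):
        # only send if the user has already verified
        # at least one email address
        if user.emailaddress_set.filter(verified=True).exists():
            transaction.on_commit(
                lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
            )
```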
<code>
[start of kuma/users/signal_handlers.py]
1 from allauth.account.signals import email_confirmed, user_signed_up
2 from allauth.socialaccount.signals import social_account_removed
3 from django.contrib import messages
4 from django.core.exceptions import ObjectDoesNotExist
5 from django.db import transaction
6 from django.db.models.signals import post_delete, post_save, pre_delete
7 from django.dispatch import receiver
8 from django.utils.translation import ugettext_lazy as _
9 from waffle import switch_is_active
10
11 from kuma.core.urlresolvers import reverse
12 from kuma.payments.utils import cancel_stripe_customer_subscription
13 from kuma.wiki.jobs import DocumentContributorsJob
14
15 from .models import User, UserBan
16 from .tasks import send_welcome_email
17
18
19 @receiver(user_signed_up, dispatch_uid='users.user_signed_up')
20 def on_user_signed_up(sender, request, user, **kwargs):
21 """
22 Signal handler to be called when a given user has signed up.
23 """
24 url = reverse('wiki.document', args=['MDN/Getting_started'])
25 msg = _('You have completed the first step of '
26 '<a href="%s">getting started with MDN</a>') % url
27 messages.success(request, msg)
28 if switch_is_active('welcome_email'):
29 # only send if the user has already verified
30 # at least one email address
31 if user.emailaddress_set.filter(verified=True).exists():
32 transaction.on_commit(
33 lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
34 )
35
36
37 @receiver(email_confirmed, dispatch_uid='users.email_confirmed')
38 def on_email_confirmed(sender, request, email_address, **kwargs):
39 """
40 Signal handler to be called when a given email address was confirmed
41 by a user.
42 """
43 if switch_is_active('welcome_email'):
44 # only send if the user has exactly one verified (the given)
45 # email address, in other words if it was just confirmed
46 user = email_address.user
47 previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)
48 if not previous_emails.exists():
49 transaction.on_commit(
50 lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
51 )
52
53
54 @receiver(social_account_removed, dispatch_uid='users.social_account_removed')
55 def on_social_account_removed(sender, request, socialaccount, **kwargs):
56 """
57 Invoked just after a user successfully removed a social account
58
59 We use it to reset the name of the socialaccount provider in
60 the user's session to one that he also has.
61 """
62 user = socialaccount.user
63 try:
64 all_socialaccounts = user.socialaccount_set.all()
65 next_socialaccount = all_socialaccounts[0]
66 request.session['sociallogin_provider'] = next_socialaccount.provider
67 request.session.modified = True
68 except (ObjectDoesNotExist, IndexError):
69 pass
70
71
72 @receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')
73 def on_ban_save(sender, instance, **kwargs):
74 """
75 Signal handler to be called when a given user ban is saved.
76 """
77 user = instance.user
78 user.is_active = not instance.is_active
79 user.save()
80 invalidate_document_contribution(user)
81
82
83 @receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')
84 def on_ban_delete(sender, instance, **kwargs):
85 """
86 Signal handler to be called when a user ban is deleted.
87 """
88 user = instance.user
89 user.is_active = True
90 user.save()
91 invalidate_document_contribution(user)
92
93
94 def invalidate_document_contribution(user):
95 """
96 Invalidate the contributor list for Documents the user has edited.
97
98 This will remove them if they have been banned, and add them if they
99 have been unbanned.
100 """
101 revisions = user.created_revisions
102 doc_ids = set(revisions.values_list('document_id', flat=True))
103 job = DocumentContributorsJob()
104 for doc_id in doc_ids:
105 job.invalidate(doc_id)
106
107
108 @receiver(pre_delete, sender=User, dispatch_uid='users.unsubscribe_payments')
109 def unsubscribe_payments_on_user_delete(sender, instance, **kwargs):
110 """Cancel Stripe subscriptions before deleting User."""
111 user = instance
112 if user.stripe_customer_id:
113 # This may raise an exception if the Stripe API call fails.
114 # This will stop User deletion while an admin investigates.
115 cancel_stripe_customer_subscription(user.stripe_customer_id)
116
[end of kuma/users/signal_handlers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kuma/users/signal_handlers.py b/kuma/users/signal_handlers.py
--- a/kuma/users/signal_handlers.py
+++ b/kuma/users/signal_handlers.py
@@ -1,14 +1,11 @@
from allauth.account.signals import email_confirmed, user_signed_up
from allauth.socialaccount.signals import social_account_removed
-from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.models.signals import post_delete, post_save, pre_delete
from django.dispatch import receiver
-from django.utils.translation import ugettext_lazy as _
from waffle import switch_is_active
-from kuma.core.urlresolvers import reverse
from kuma.payments.utils import cancel_stripe_customer_subscription
from kuma.wiki.jobs import DocumentContributorsJob
@@ -21,10 +18,6 @@
"""
Signal handler to be called when a given user has signed up.
"""
- url = reverse('wiki.document', args=['MDN/Getting_started'])
- msg = _('You have completed the first step of '
- '<a href="%s">getting started with MDN</a>') % url
- messages.success(request, msg)
if switch_is_active('welcome_email'):
# only send if the user has already verified
# at least one email address
|
{"golden_diff": "diff --git a/kuma/users/signal_handlers.py b/kuma/users/signal_handlers.py\n--- a/kuma/users/signal_handlers.py\n+++ b/kuma/users/signal_handlers.py\n@@ -1,14 +1,11 @@\n from allauth.account.signals import email_confirmed, user_signed_up\n from allauth.socialaccount.signals import social_account_removed\n-from django.contrib import messages\n from django.core.exceptions import ObjectDoesNotExist\n from django.db import transaction\n from django.db.models.signals import post_delete, post_save, pre_delete\n from django.dispatch import receiver\n-from django.utils.translation import ugettext_lazy as _\n from waffle import switch_is_active\n \n-from kuma.core.urlresolvers import reverse\n from kuma.payments.utils import cancel_stripe_customer_subscription\n from kuma.wiki.jobs import DocumentContributorsJob\n \n@@ -21,10 +18,6 @@\n \"\"\"\n Signal handler to be called when a given user has signed up.\n \"\"\"\n- url = reverse('wiki.document', args=['MDN/Getting_started'])\n- msg = _('You have completed the first step of '\n- '<a href=\"%s\">getting started with MDN</a>') % url\n- messages.success(request, msg)\n if switch_is_active('welcome_email'):\n # only send if the user has already verified\n # at least one email address\n", "issue": "Remove contributor notification post account creation\nOnce a user has successfully signed up, we show a banner similar to the one below either just below the page header, or generally at the top of the page.\r\n\r\n\r\n\r\n\r\nBecause of the changes to account roles, these no longer makes sense and should be removed.\n", "before_files": [{"content": "from allauth.account.signals import email_confirmed, user_signed_up\nfrom allauth.socialaccount.signals import social_account_removed\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models.signals import post_delete, post_save, pre_delete\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom waffle import switch_is_active\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.payments.utils import cancel_stripe_customer_subscription\nfrom kuma.wiki.jobs import DocumentContributorsJob\n\nfrom .models import User, UserBan\nfrom .tasks import send_welcome_email\n\n\n@receiver(user_signed_up, dispatch_uid='users.user_signed_up')\ndef on_user_signed_up(sender, request, user, **kwargs):\n \"\"\"\n Signal handler to be called when a given user has signed up.\n \"\"\"\n url = reverse('wiki.document', args=['MDN/Getting_started'])\n msg = _('You have completed the first step of '\n '<a href=\"%s\">getting started with MDN</a>') % url\n messages.success(request, msg)\n if switch_is_active('welcome_email'):\n # only send if the user has already verified\n # at least one email address\n if user.emailaddress_set.filter(verified=True).exists():\n transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(email_confirmed, dispatch_uid='users.email_confirmed')\ndef on_email_confirmed(sender, request, email_address, **kwargs):\n \"\"\"\n Signal handler to be called when a given email address was confirmed\n by a user.\n \"\"\"\n if switch_is_active('welcome_email'):\n # only send if the user has exactly one verified (the given)\n # email address, in other words if it was just confirmed\n user = email_address.user\n previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)\n if not previous_emails.exists():\n 
transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(social_account_removed, dispatch_uid='users.social_account_removed')\ndef on_social_account_removed(sender, request, socialaccount, **kwargs):\n \"\"\"\n Invoked just after a user successfully removed a social account\n\n We use it to reset the name of the socialaccount provider in\n the user's session to one that he also has.\n \"\"\"\n user = socialaccount.user\n try:\n all_socialaccounts = user.socialaccount_set.all()\n next_socialaccount = all_socialaccounts[0]\n request.session['sociallogin_provider'] = next_socialaccount.provider\n request.session.modified = True\n except (ObjectDoesNotExist, IndexError):\n pass\n\n\n@receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')\ndef on_ban_save(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a given user ban is saved.\n \"\"\"\n user = instance.user\n user.is_active = not instance.is_active\n user.save()\n invalidate_document_contribution(user)\n\n\n@receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')\ndef on_ban_delete(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a user ban is deleted.\n \"\"\"\n user = instance.user\n user.is_active = True\n user.save()\n invalidate_document_contribution(user)\n\n\ndef invalidate_document_contribution(user):\n \"\"\"\n Invalidate the contributor list for Documents the user has edited.\n\n This will remove them if they have been banned, and add them if they\n have been unbanned.\n \"\"\"\n revisions = user.created_revisions\n doc_ids = set(revisions.values_list('document_id', flat=True))\n job = DocumentContributorsJob()\n for doc_id in doc_ids:\n job.invalidate(doc_id)\n\n\n@receiver(pre_delete, sender=User, dispatch_uid='users.unsubscribe_payments')\ndef unsubscribe_payments_on_user_delete(sender, instance, **kwargs):\n \"\"\"Cancel Stripe subscriptions before deleting User.\"\"\"\n user = instance\n if user.stripe_customer_id:\n # This may raise an exception if the Stripe API call fails.\n # This will stop User deletion while an admin investigates.\n cancel_stripe_customer_subscription(user.stripe_customer_id)\n", "path": "kuma/users/signal_handlers.py"}]}
| 1,855 | 288 |
gh_patches_debug_26405
|
rasdani/github-patches
|
git_diff
|
xonsh__xonsh-4156
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
abbrevs should only be expanded at the start of the command
<!--- Provide a general summary of the issue in the Title above -->
<!--- If you have a question along the lines of "How do I do this Bash command in xonsh"
please first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html
If you don't find an answer there, please do open an issue! -->
## xonfig
<details>
```
+------------------+---------------------+
| xonsh | 0.9.18 |
| Git SHA | 26475146 |
| Commit Date | May 5 06:42:28 2020 |
| Python | 3.8.3 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 3.0.5 |
| shell type | prompt_toolkit |
| pygments | 2.6.1 |
| on posix | True |
| on linux | True |
| distro | manjaro |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
+------------------+---------------------+
```
</details>
<!--- Tell us what should happen -->
As of now, abbrevs expands in all places, so it may clash with subcommands. For example, if we have
~~`aliases['ps'] = 'procs'`~~
```
abbrevs['ps'] = 'procs'
```
and then type `docker ps`, it will get expanded to `docker procs`.
So the default behaviour should be to expand only at the start of the command. Special handling for `sudo` can be added.
Also, we could have special symbols in aliases to mark an abbreviation as expandable in all places, e.g. `aliases['*ps*']`, similar to a glob pattern.
</issue>
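A start-of-command restriction can already be approximated with the callback form of `abbrevs` that the xontrib's docstring below documents; a minimal sketch, assuming the typed line is available to the callback as `buffer.text` (the function name is illustrative):

```python
def expand_ps(buffer, word):
    # Expand only when the abbreviation is the first word on the line,
    # so "ps" becomes "procs" but "docker ps" stays untouched.
    return "procs" if buffer.text.lstrip().startswith(word) else word

# registered in an interactive xonsh session as:
#   abbrevs["ps"] = expand_ps
```

Making this check the default for plain string abbreviations is essentially what the issue asks for.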
<code>
[start of xontrib/abbrevs.py]
1 """
2 Command abbreviations.
3
4 This expands input words from `abbrevs` disctionary as you type.
5 Adds ``abbrevs`` dictionary to hold user-defined "command abbreviations.
6 The dictionary is searched as you type the matching words are replaced
7 at the command line by the corresponding dictionary contents once you hit
8 'Space' or 'Return' key.
9
10 For instance a frequently used command such as ``git status`` can be abbreviated to ``gst`` as follows::
11
12 $ xontrib load abbrevs
13 $ abbrevs['gst'] = 'git status'
14 $ gst # Once you hit <space> or <return> 'gst' gets expanded to 'git status'.
15
16 one can set a callback function that receives current buffer and word to customize the expanded word based on context
17
18 .. code-block:: python
19
20 $ abbrevs['ps'] = lambda buffer, word: "procs" if buffer.text.startswith(word) else word
21
22
23 It is also possible to set the cursor position after expansion with,
24
25 $ abbrevs['gp'] = "git push <edit> --force"
26 """
27
28 import builtins
29 import typing as tp
30
31 from prompt_toolkit.filters import completion_is_selected, IsMultiline
32 from prompt_toolkit.keys import Keys
33 from xonsh.built_ins import DynamicAccessProxy
34 from xonsh.events import events
35 from xonsh.tools import check_for_partial_string
36
37 __all__ = ()
38
39 builtins.__xonsh__.abbrevs = dict()
40 proxy = DynamicAccessProxy("abbrevs", "__xonsh__.abbrevs")
41 setattr(builtins, "abbrevs", proxy)
42
43
44 class _LastExpanded(tp.NamedTuple):
45 word: str
46 expanded: str
47
48
49 last_expanded: tp.Optional[_LastExpanded] = None
50 EDIT_SYMBOL = "<edit>"
51
52
53 def get_abbreviated(key: str, buffer) -> str:
54 abbrevs = getattr(builtins, "abbrevs", None)
55 abbr = abbrevs[key]
56 if callable(abbr):
57 text = abbr(buffer=buffer, word=key)
58 else:
59 text = abbr
60 return text
61
62
63 def expand_abbrev(buffer) -> bool:
64 """expand the given abbr text. Return true if cursor position changed."""
65 global last_expanded
66 last_expanded = None
67 abbrevs = getattr(builtins, "abbrevs", None)
68 if abbrevs is None:
69 return False
70 document = buffer.document
71 word = document.get_word_before_cursor(WORD=True)
72 if word in abbrevs.keys():
73 partial = document.text[: document.cursor_position]
74 startix, endix, quote = check_for_partial_string(partial)
75 if startix is not None and endix is None:
76 return False
77 buffer.delete_before_cursor(count=len(word))
78 text = get_abbreviated(word, buffer)
79 buffer.insert_text(text)
80 last_expanded = _LastExpanded(word, text)
81 if EDIT_SYMBOL in text:
82 set_cursor_position(buffer, text)
83 return True
84 return False
85
86
87 def revert_abbrev(buffer) -> bool:
88 global last_expanded
89 if last_expanded is None:
90 return False
91 document = buffer.document
92 expansion = last_expanded.expanded + " "
93 if not document.text_before_cursor.endswith(expansion):
94 return False
95 buffer.delete_before_cursor(count=len(expansion))
96 buffer.insert_text(last_expanded.word)
97 last_expanded = None
98 return True
99
100
101 def set_cursor_position(buffer, expanded: str) -> None:
102 pos = expanded.rfind(EDIT_SYMBOL)
103 if pos == -1:
104 return
105 buffer.cursor_position = buffer.cursor_position - (len(expanded) - pos)
106 buffer.delete(len(EDIT_SYMBOL))
107
108
109 @events.on_ptk_create
110 def custom_keybindings(bindings, **kw):
111
112 from xonsh.ptk_shell.key_bindings import carriage_return
113 from prompt_toolkit.filters import EmacsInsertMode, ViInsertMode
114
115 handler = bindings.add
116 insert_mode = ViInsertMode() | EmacsInsertMode()
117
118 @handler(" ", filter=IsMultiline() & insert_mode)
119 def handle_space(event):
120 buffer = event.app.current_buffer
121
122 add_space = True
123 if not revert_abbrev(buffer):
124 position_changed = expand_abbrev(buffer)
125 if position_changed:
126 add_space = False
127 if add_space:
128 buffer.insert_text(" ")
129
130 @handler(
131 Keys.ControlJ, filter=IsMultiline() & insert_mode & ~completion_is_selected
132 )
133 @handler(
134 Keys.ControlM, filter=IsMultiline() & insert_mode & ~completion_is_selected
135 )
136 def multiline_carriage_return(event):
137 buffer = event.app.current_buffer
138 current_char = buffer.document.current_char
139 if not current_char or current_char.isspace():
140 expand_abbrev(buffer)
141 carriage_return(buffer, event.cli)
142
[end of xontrib/abbrevs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/xontrib/abbrevs.py b/xontrib/abbrevs.py
--- a/xontrib/abbrevs.py
+++ b/xontrib/abbrevs.py
@@ -28,6 +28,7 @@
import builtins
import typing as tp
+from prompt_toolkit.buffer import Buffer
from prompt_toolkit.filters import completion_is_selected, IsMultiline
from prompt_toolkit.keys import Keys
from xonsh.built_ins import DynamicAccessProxy
@@ -60,7 +61,7 @@
return text
-def expand_abbrev(buffer) -> bool:
+def expand_abbrev(buffer: Buffer) -> bool:
"""expand the given abbr text. Return true if cursor position changed."""
global last_expanded
last_expanded = None
@@ -74,9 +75,11 @@
startix, endix, quote = check_for_partial_string(partial)
if startix is not None and endix is None:
return False
- buffer.delete_before_cursor(count=len(word))
text = get_abbreviated(word, buffer)
+
+ buffer.delete_before_cursor(count=len(word))
buffer.insert_text(text)
+
last_expanded = _LastExpanded(word, text)
if EDIT_SYMBOL in text:
set_cursor_position(buffer, text)
|
{"golden_diff": "diff --git a/xontrib/abbrevs.py b/xontrib/abbrevs.py\n--- a/xontrib/abbrevs.py\n+++ b/xontrib/abbrevs.py\n@@ -28,6 +28,7 @@\n import builtins\n import typing as tp\n \n+from prompt_toolkit.buffer import Buffer\n from prompt_toolkit.filters import completion_is_selected, IsMultiline\n from prompt_toolkit.keys import Keys\n from xonsh.built_ins import DynamicAccessProxy\n@@ -60,7 +61,7 @@\n return text\n \n \n-def expand_abbrev(buffer) -> bool:\n+def expand_abbrev(buffer: Buffer) -> bool:\n \"\"\"expand the given abbr text. Return true if cursor position changed.\"\"\"\n global last_expanded\n last_expanded = None\n@@ -74,9 +75,11 @@\n startix, endix, quote = check_for_partial_string(partial)\n if startix is not None and endix is None:\n return False\n- buffer.delete_before_cursor(count=len(word))\n text = get_abbreviated(word, buffer)\n+\n+ buffer.delete_before_cursor(count=len(word))\n buffer.insert_text(text)\n+\n last_expanded = _LastExpanded(word, text)\n if EDIT_SYMBOL in text:\n set_cursor_position(buffer, text)\n", "issue": "abbrevs should only be expanded at the start of the command\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n+------------------+---------------------+\r\n| xonsh | 0.9.18 |\r\n| Git SHA | 26475146 |\r\n| Commit Date | May 5 06:42:28 2020 |\r\n| Python | 3.8.3 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 3.0.5 |\r\n| shell type | prompt_toolkit |\r\n| pygments | 2.6.1 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | manjaro |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n+------------------+---------------------+\r\n```\r\n\r\n</details>\r\n\r\n<!--- Tell us what should happen -->\r\nas of abbrevs expands in all places. So it may clash will subcommands. For example, if we have \r\n\r\n~~`aliases['ps'] = 'procs'`~~\r\n```\r\nabbrevs['ps'] = 'procs'\r\n``` \r\n\r\nand do type `docker ps` then it will get expanded to `docker procs`. \r\n\r\nSo the default behaviour should be to expand only at the start of the command. special handling for sudo can be added. \r\nAlso we could have special symbols in aliases to mark it as expandable in all places like `aliases['*ps*']` like the glob pattern. \r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCommand abbreviations.\n\nThis expands input words from `abbrevs` disctionary as you type.\nAdds ``abbrevs`` dictionary to hold user-defined \"command abbreviations.\nThe dictionary is searched as you type the matching words are replaced\nat the command line by the corresponding dictionary contents once you hit\n'Space' or 'Return' key.\n\nFor instance a frequently used command such as ``git status`` can be abbreviated to ``gst`` as follows::\n\n $ xontrib load abbrevs\n $ abbrevs['gst'] = 'git status'\n $ gst # Once you hit <space> or <return> 'gst' gets expanded to 'git status'.\n\none can set a callback function that receives current buffer and word to customize the expanded word based on context\n\n.. 
code-block:: python\n\n $ abbrevs['ps'] = lambda buffer, word: \"procs\" if buffer.text.startswith(word) else word\n\n\nIt is also possible to set the cursor position after expansion with,\n\n $ abbrevs['gp'] = \"git push <edit> --force\"\n\"\"\"\n\nimport builtins\nimport typing as tp\n\nfrom prompt_toolkit.filters import completion_is_selected, IsMultiline\nfrom prompt_toolkit.keys import Keys\nfrom xonsh.built_ins import DynamicAccessProxy\nfrom xonsh.events import events\nfrom xonsh.tools import check_for_partial_string\n\n__all__ = ()\n\nbuiltins.__xonsh__.abbrevs = dict()\nproxy = DynamicAccessProxy(\"abbrevs\", \"__xonsh__.abbrevs\")\nsetattr(builtins, \"abbrevs\", proxy)\n\n\nclass _LastExpanded(tp.NamedTuple):\n word: str\n expanded: str\n\n\nlast_expanded: tp.Optional[_LastExpanded] = None\nEDIT_SYMBOL = \"<edit>\"\n\n\ndef get_abbreviated(key: str, buffer) -> str:\n abbrevs = getattr(builtins, \"abbrevs\", None)\n abbr = abbrevs[key]\n if callable(abbr):\n text = abbr(buffer=buffer, word=key)\n else:\n text = abbr\n return text\n\n\ndef expand_abbrev(buffer) -> bool:\n \"\"\"expand the given abbr text. Return true if cursor position changed.\"\"\"\n global last_expanded\n last_expanded = None\n abbrevs = getattr(builtins, \"abbrevs\", None)\n if abbrevs is None:\n return False\n document = buffer.document\n word = document.get_word_before_cursor(WORD=True)\n if word in abbrevs.keys():\n partial = document.text[: document.cursor_position]\n startix, endix, quote = check_for_partial_string(partial)\n if startix is not None and endix is None:\n return False\n buffer.delete_before_cursor(count=len(word))\n text = get_abbreviated(word, buffer)\n buffer.insert_text(text)\n last_expanded = _LastExpanded(word, text)\n if EDIT_SYMBOL in text:\n set_cursor_position(buffer, text)\n return True\n return False\n\n\ndef revert_abbrev(buffer) -> bool:\n global last_expanded\n if last_expanded is None:\n return False\n document = buffer.document\n expansion = last_expanded.expanded + \" \"\n if not document.text_before_cursor.endswith(expansion):\n return False\n buffer.delete_before_cursor(count=len(expansion))\n buffer.insert_text(last_expanded.word)\n last_expanded = None\n return True\n\n\ndef set_cursor_position(buffer, expanded: str) -> None:\n pos = expanded.rfind(EDIT_SYMBOL)\n if pos == -1:\n return\n buffer.cursor_position = buffer.cursor_position - (len(expanded) - pos)\n buffer.delete(len(EDIT_SYMBOL))\n\n\n@events.on_ptk_create\ndef custom_keybindings(bindings, **kw):\n\n from xonsh.ptk_shell.key_bindings import carriage_return\n from prompt_toolkit.filters import EmacsInsertMode, ViInsertMode\n\n handler = bindings.add\n insert_mode = ViInsertMode() | EmacsInsertMode()\n\n @handler(\" \", filter=IsMultiline() & insert_mode)\n def handle_space(event):\n buffer = event.app.current_buffer\n\n add_space = True\n if not revert_abbrev(buffer):\n position_changed = expand_abbrev(buffer)\n if position_changed:\n add_space = False\n if add_space:\n buffer.insert_text(\" \")\n\n @handler(\n Keys.ControlJ, filter=IsMultiline() & insert_mode & ~completion_is_selected\n )\n @handler(\n Keys.ControlM, filter=IsMultiline() & insert_mode & ~completion_is_selected\n )\n def multiline_carriage_return(event):\n buffer = event.app.current_buffer\n current_char = buffer.document.current_char\n if not current_char or current_char.isspace():\n expand_abbrev(buffer)\n carriage_return(buffer, event.cli)\n", "path": "xontrib/abbrevs.py"}]}
| 2,382 | 283 |
gh_patches_debug_11004
|
rasdani/github-patches
|
git_diff
|
OpenNMT__OpenNMT-py-1841
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Alpha channel and grayscale in image-to-text with -image_channel_size=3
For training image to text, the argument `-image_channel_size=3` implies that the images already have the right number of channels. However, some of my images are black and white and saved with only one channel, or saved in RGB but with an alpha channel.
I could fix it with a change in `onmt/inputters/image_dataset.py` [here](https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/image_dataset.py#L78):
from this:
```
if self.channel_size == 1:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(Image.open(img_path))
```
to this:
```
if self.channel_size == 1:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 1)))
```
The flag passed to `cv2.imread` with a value of 1 tells cv2 to convert to RGB no matter what the original image is.
Should I do a PR?
</issue>
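The same normalisation can also be done purely on the Pillow side, with no OpenCV call at all; a minimal sketch, assuming `img_path` points at the image file (the standalone helper and its name are illustrative):

```python
from PIL import Image
from torchvision import transforms


def load_rgb_tensor(img_path):
    # convert("RGB") drops an alpha channel and replicates grayscale data,
    # so the resulting tensor is always (3, H, W) however the file was saved.
    img = Image.open(img_path).convert("RGB")
    return transforms.ToTensor()(img)
```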
<code>
[start of onmt/inputters/image_dataset.py]
1 # -*- coding: utf-8 -*-
2
3 import os
4
5 import torch
6 from torchtext.data import Field
7
8 from onmt.inputters.datareader_base import DataReaderBase
9
10 # domain specific dependencies
11 try:
12 from PIL import Image
13 from torchvision import transforms
14 import cv2
15 except ImportError:
16 Image, transforms, cv2 = None, None, None
17
18
19 class ImageDataReader(DataReaderBase):
20 """Read image data from disk.
21
22 Args:
23 truncate (tuple[int] or NoneType): maximum img size. Use
24 ``(0,0)`` or ``None`` for unlimited.
25 channel_size (int): Number of channels per image.
26
27 Raises:
28 onmt.inputters.datareader_base.MissingDependencyException: If
29 importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.
30 """
31
32 def __init__(self, truncate=None, channel_size=3):
33 self._check_deps()
34 self.truncate = truncate
35 self.channel_size = channel_size
36
37 @classmethod
38 def from_opt(cls, opt):
39 return cls(channel_size=opt.image_channel_size)
40
41 @classmethod
42 def _check_deps(cls):
43 if any([Image is None, transforms is None, cv2 is None]):
44 cls._raise_missing_dep(
45 "PIL", "torchvision", "cv2")
46
47 def read(self, images, side, img_dir=None):
48 """Read data into dicts.
49
50 Args:
51 images (str or Iterable[str]): Sequence of image paths or
52 path to file containing audio paths.
53 In either case, the filenames may be relative to ``src_dir``
54 (default behavior) or absolute.
55 side (str): Prefix used in return dict. Usually
56 ``"src"`` or ``"tgt"``.
57 img_dir (str): Location of source image files. See ``images``.
58
59 Yields:
60 a dictionary containing image data, path and index for each line.
61 """
62 if isinstance(images, str):
63 images = DataReaderBase._read_file(images)
64
65 for i, filename in enumerate(images):
66 filename = filename.decode("utf-8").strip()
67 img_path = os.path.join(img_dir, filename)
68 if not os.path.exists(img_path):
69 img_path = filename
70
71 assert os.path.exists(img_path), \
72 'img path %s not found' % filename
73
74 if self.channel_size == 1:
75 img = transforms.ToTensor()(
76 Image.fromarray(cv2.imread(img_path, 0)))
77 else:
78 img = transforms.ToTensor()(Image.open(img_path))
79 if self.truncate and self.truncate != (0, 0):
80 if not (img.size(1) <= self.truncate[0]
81 and img.size(2) <= self.truncate[1]):
82 continue
83 yield {side: img, side + '_path': filename, 'indices': i}
84
85
86 def img_sort_key(ex):
87 """Sort using the size of the image: (width, height)."""
88 return ex.src.size(2), ex.src.size(1)
89
90
91 def batch_img(data, vocab):
92 """Pad and batch a sequence of images."""
93 c = data[0].size(0)
94 h = max([t.size(1) for t in data])
95 w = max([t.size(2) for t in data])
96 imgs = torch.zeros(len(data), c, h, w).fill_(1)
97 for i, img in enumerate(data):
98 imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
99 return imgs
100
101
102 def image_fields(**kwargs):
103 img = Field(
104 use_vocab=False, dtype=torch.float,
105 postprocessing=batch_img, sequential=False)
106 return img
107
[end of onmt/inputters/image_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/onmt/inputters/image_dataset.py b/onmt/inputters/image_dataset.py
--- a/onmt/inputters/image_dataset.py
+++ b/onmt/inputters/image_dataset.py
@@ -75,7 +75,8 @@
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
- img = transforms.ToTensor()(Image.open(img_path))
+ img = Image.open(img_path).convert('RGB')
+ img = transforms.ToTensor()(img)
if self.truncate and self.truncate != (0, 0):
if not (img.size(1) <= self.truncate[0]
and img.size(2) <= self.truncate[1]):
|
{"golden_diff": "diff --git a/onmt/inputters/image_dataset.py b/onmt/inputters/image_dataset.py\n--- a/onmt/inputters/image_dataset.py\n+++ b/onmt/inputters/image_dataset.py\n@@ -75,7 +75,8 @@\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n- img = transforms.ToTensor()(Image.open(img_path))\n+ img = Image.open(img_path).convert('RGB')\n+ img = transforms.ToTensor()(img)\n if self.truncate and self.truncate != (0, 0):\n if not (img.size(1) <= self.truncate[0]\n and img.size(2) <= self.truncate[1]):\n", "issue": "Alpha channel and grayscale in image-to-text with -image_channel_size=3\nFor training image to text, the argument `-image_channel_size=3` imply that the images already have the good number of channel. However, some of my images are black and white and saved with only one channel or saved in RGB but with the alpha channel.\r\nI could fix it with a change in `onmt/inputters/image_dataset.py` [here](https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/image_dataset.py#L78):\r\n\r\nfrom this:\r\n```\r\n if self.channel_size == 1:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 0)))\r\n else:\r\n img = transforms.ToTensor()(Image.open(img_path))\r\n```\r\nto this:\r\n```\r\n if self.channel_size == 1:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 0)))\r\n else:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 1)))\r\n```\r\nThe flag in `cv2.imread` with value of 1 tell cv2 to convert to RGB no matter what the original image is.\r\n\r\nShould I do a PR ?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport torch\nfrom torchtext.data import Field\n\nfrom onmt.inputters.datareader_base import DataReaderBase\n\n# domain specific dependencies\ntry:\n from PIL import Image\n from torchvision import transforms\n import cv2\nexcept ImportError:\n Image, transforms, cv2 = None, None, None\n\n\nclass ImageDataReader(DataReaderBase):\n \"\"\"Read image data from disk.\n\n Args:\n truncate (tuple[int] or NoneType): maximum img size. Use\n ``(0,0)`` or ``None`` for unlimited.\n channel_size (int): Number of channels per image.\n\n Raises:\n onmt.inputters.datareader_base.MissingDependencyException: If\n importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.\n \"\"\"\n\n def __init__(self, truncate=None, channel_size=3):\n self._check_deps()\n self.truncate = truncate\n self.channel_size = channel_size\n\n @classmethod\n def from_opt(cls, opt):\n return cls(channel_size=opt.image_channel_size)\n\n @classmethod\n def _check_deps(cls):\n if any([Image is None, transforms is None, cv2 is None]):\n cls._raise_missing_dep(\n \"PIL\", \"torchvision\", \"cv2\")\n\n def read(self, images, side, img_dir=None):\n \"\"\"Read data into dicts.\n\n Args:\n images (str or Iterable[str]): Sequence of image paths or\n path to file containing audio paths.\n In either case, the filenames may be relative to ``src_dir``\n (default behavior) or absolute.\n side (str): Prefix used in return dict. Usually\n ``\"src\"`` or ``\"tgt\"``.\n img_dir (str): Location of source image files. 
See ``images``.\n\n Yields:\n a dictionary containing image data, path and index for each line.\n \"\"\"\n if isinstance(images, str):\n images = DataReaderBase._read_file(images)\n\n for i, filename in enumerate(images):\n filename = filename.decode(\"utf-8\").strip()\n img_path = os.path.join(img_dir, filename)\n if not os.path.exists(img_path):\n img_path = filename\n\n assert os.path.exists(img_path), \\\n 'img path %s not found' % filename\n\n if self.channel_size == 1:\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n img = transforms.ToTensor()(Image.open(img_path))\n if self.truncate and self.truncate != (0, 0):\n if not (img.size(1) <= self.truncate[0]\n and img.size(2) <= self.truncate[1]):\n continue\n yield {side: img, side + '_path': filename, 'indices': i}\n\n\ndef img_sort_key(ex):\n \"\"\"Sort using the size of the image: (width, height).\"\"\"\n return ex.src.size(2), ex.src.size(1)\n\n\ndef batch_img(data, vocab):\n \"\"\"Pad and batch a sequence of images.\"\"\"\n c = data[0].size(0)\n h = max([t.size(1) for t in data])\n w = max([t.size(2) for t in data])\n imgs = torch.zeros(len(data), c, h, w).fill_(1)\n for i, img in enumerate(data):\n imgs[i, :, 0:img.size(1), 0:img.size(2)] = img\n return imgs\n\n\ndef image_fields(**kwargs):\n img = Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=batch_img, sequential=False)\n return img\n", "path": "onmt/inputters/image_dataset.py"}]}
| 1,831 | 158 |
gh_patches_debug_3789
|
rasdani/github-patches
|
git_diff
|
pallets__werkzeug-1564
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Detect opera as browser in user_agent
### Expected Behavior
`user_agent.browser` is reported as `opera` and `user_agent.version` as `60` when browsing using Opera 60 in MacOs 10.13.5.
```python
@web.route('/test')
def test():
browser = request.user_agent.browser
version = request.user_agent.version and int(request.user_agent.version.split('.')[0])
platform = request.user_agent.platform
uas = request.user_agent.string
jsonify(dict(browser=browser, version=version, platform=platform, uas=uas))
```
### Actual Behavior
When using **Opera** the report is as follows (note the 'OPR/60'):
```
{
"browser": "chrome",
"platform": "macos",
"uas": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36 OPR/60.0.3255.95",
"version": 73
}
```
When using **Chrome** the report is:
```
{
"browser": "chrome",
"platform": "macos",
"uas": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"version": 74
}
```
### Environment
* Python version: 3.7.0
* Flask version: 1.0.2
* Werkzeug version: 0.14.1
</issue>
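The string above never matches an `opera` pattern because recent Opera builds advertise themselves as `OPR/<version>`, so matching falls through to the `chrome|crios` rule. A quick check of both patterns against the reported user agent, reusing the parser's version-matching template (for illustration only):

```python
import re

ua = (
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36 OPR/60.0.3255.95"
)
template = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"

print(re.search(template % "opera", ua, re.I))                # None
print(re.search(template % r"opera|opr", ua, re.I).group(1))  # 60.0.3255.95
```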
<code>
[start of src/werkzeug/useragents.py]
1 # -*- coding: utf-8 -*-
2 """
3 werkzeug.useragents
4 ~~~~~~~~~~~~~~~~~~~
5
6 This module provides a helper to inspect user agent strings. This module
7 is far from complete but should work for most of the currently available
8 browsers.
9
10
11 :copyright: 2007 Pallets
12 :license: BSD-3-Clause
13 """
14 import re
15
16
17 class UserAgentParser(object):
18 """A simple user agent parser. Used by the `UserAgent`."""
19
20 platforms = (
21 ("cros", "chromeos"),
22 ("iphone|ios", "iphone"),
23 ("ipad", "ipad"),
24 (r"darwin|mac|os\s*x", "macos"),
25 ("win", "windows"),
26 (r"android", "android"),
27 ("netbsd", "netbsd"),
28 ("openbsd", "openbsd"),
29 ("freebsd", "freebsd"),
30 ("dragonfly", "dragonflybsd"),
31 ("(sun|i86)os", "solaris"),
32 (r"x11|lin(\b|ux)?", "linux"),
33 (r"nintendo\s+wii", "wii"),
34 ("irix", "irix"),
35 ("hp-?ux", "hpux"),
36 ("aix", "aix"),
37 ("sco|unix_sv", "sco"),
38 ("bsd", "bsd"),
39 ("amiga", "amiga"),
40 ("blackberry|playbook", "blackberry"),
41 ("symbian", "symbian"),
42 )
43 browsers = (
44 ("googlebot", "google"),
45 ("msnbot", "msn"),
46 ("yahoo", "yahoo"),
47 ("ask jeeves", "ask"),
48 (r"aol|america\s+online\s+browser", "aol"),
49 ("opera", "opera"),
50 ("edge", "edge"),
51 ("chrome|crios", "chrome"),
52 ("seamonkey", "seamonkey"),
53 ("firefox|firebird|phoenix|iceweasel", "firefox"),
54 ("galeon", "galeon"),
55 ("safari|version", "safari"),
56 ("webkit", "webkit"),
57 ("camino", "camino"),
58 ("konqueror", "konqueror"),
59 ("k-meleon", "kmeleon"),
60 ("netscape", "netscape"),
61 (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
62 ("lynx", "lynx"),
63 ("links", "links"),
64 ("Baiduspider", "baidu"),
65 ("bingbot", "bing"),
66 ("mozilla", "mozilla"),
67 )
68
69 _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
70 _language_re = re.compile(
71 r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
72 r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
73 )
74
75 def __init__(self):
76 self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
77 self.browsers = [
78 (b, re.compile(self._browser_version_re % a, re.I))
79 for a, b in self.browsers
80 ]
81
82 def __call__(self, user_agent):
83 for platform, regex in self.platforms: # noqa: B007
84 match = regex.search(user_agent)
85 if match is not None:
86 break
87 else:
88 platform = None
89 for browser, regex in self.browsers: # noqa: B007
90 match = regex.search(user_agent)
91 if match is not None:
92 version = match.group(1)
93 break
94 else:
95 browser = version = None
96 match = self._language_re.search(user_agent)
97 if match is not None:
98 language = match.group(1) or match.group(2)
99 else:
100 language = None
101 return platform, browser, version, language
102
103
104 class UserAgent(object):
105 """Represents a user agent. Pass it a WSGI environment or a user agent
106 string and you can inspect some of the details from the user agent
107 string via the attributes. The following attributes exist:
108
109 .. attribute:: string
110
111 the raw user agent string
112
113 .. attribute:: platform
114
115 the browser platform. The following platforms are currently
116 recognized:
117
118 - `aix`
119 - `amiga`
120 - `android`
121 - `blackberry`
122 - `bsd`
123 - `chromeos`
124 - `dragonflybsd`
125 - `freebsd`
126 - `hpux`
127 - `ipad`
128 - `iphone`
129 - `irix`
130 - `linux`
131 - `macos`
132 - `netbsd`
133 - `openbsd`
134 - `sco`
135 - `solaris`
136 - `symbian`
137 - `wii`
138 - `windows`
139
140 .. attribute:: browser
141
142 the name of the browser. The following browsers are currently
143 recognized:
144
145 - `aol` *
146 - `ask` *
147 - `baidu` *
148 - `bing` *
149 - `camino`
150 - `chrome`
151 - `edge`
152 - `firefox`
153 - `galeon`
154 - `google` *
155 - `kmeleon`
156 - `konqueror`
157 - `links`
158 - `lynx`
159 - `mozilla`
160 - `msie`
161 - `msn`
162 - `netscape`
163 - `opera`
164 - `safari`
165 - `seamonkey`
166 - `webkit`
167 - `yahoo` *
168
169 (Browsers marked with a star (``*``) are crawlers.)
170
171 .. attribute:: version
172
173 the version of the browser
174
175 .. attribute:: language
176
177 the language of the browser
178 """
179
180 _parser = UserAgentParser()
181
182 def __init__(self, environ_or_string):
183 if isinstance(environ_or_string, dict):
184 environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
185 self.string = environ_or_string
186 self.platform, self.browser, self.version, self.language = self._parser(
187 environ_or_string
188 )
189
190 def to_header(self):
191 return self.string
192
193 def __str__(self):
194 return self.string
195
196 def __nonzero__(self):
197 return bool(self.browser)
198
199 __bool__ = __nonzero__
200
201 def __repr__(self):
202 return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
203
[end of src/werkzeug/useragents.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py
--- a/src/werkzeug/useragents.py
+++ b/src/werkzeug/useragents.py
@@ -46,7 +46,7 @@
("yahoo", "yahoo"),
("ask jeeves", "ask"),
(r"aol|america\s+online\s+browser", "aol"),
- ("opera", "opera"),
+ (r"opera|opr", "opera"),
("edge", "edge"),
("chrome|crios", "chrome"),
("seamonkey", "seamonkey"),
|
{"golden_diff": "diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py\n--- a/src/werkzeug/useragents.py\n+++ b/src/werkzeug/useragents.py\n@@ -46,7 +46,7 @@\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n- (\"opera\", \"opera\"),\n+ (r\"opera|opr\", \"opera\"),\n (\"edge\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n", "issue": "Detect opera as browser in user_agent\n### Expected Behavior\r\n\r\n`user_agent.browser` is reported as `opera` and `user_agent.version` as `60` when browsing using Opera 60 in MacOs 10.13.5.\r\n\r\n```python\r\n@web.route('/test')\r\ndef test():\r\n browser = request.user_agent.browser\r\n version = request.user_agent.version and int(request.user_agent.version.split('.')[0])\r\n platform = request.user_agent.platform\r\n uas = request.user_agent.string\r\n jsonify(dict(browser=browser, version=version, platform=platform, uas=uas))\r\n```\r\n\r\n### Actual Behavior\r\n\r\nWhen using **Opera** the report is as follows (note the 'OPR/60'):\r\n\r\n```\r\n{\r\n \"browser\": \"chrome\", \r\n \"platform\": \"macos\", \r\n \"uas\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36 OPR/60.0.3255.95\", \r\n \"version\": 73\r\n}\r\n```\r\n\r\nWhen using **Chrome** the report is:\r\n\r\n```\r\n{\r\n \"browser\": \"chrome\", \r\n \"platform\": \"macos\", \r\n \"uas\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\", \r\n \"version\": 74\r\n}\r\n```\r\n\r\n### Environment\r\n\r\n* Python version: 3.7.0\r\n* Flask version: 1.0.2\r\n* Werkzeug version: 0.14.1\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.useragents\n ~~~~~~~~~~~~~~~~~~~\n\n This module provides a helper to inspect user agent strings. This module\n is far from complete but should work for most of the currently available\n browsers.\n\n\n :copyright: 2007 Pallets\n :license: BSD-3-Clause\n\"\"\"\nimport re\n\n\nclass UserAgentParser(object):\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platforms = (\n (\"cros\", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browsers = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (\"opera\", \"opera\"),\n (\"edge\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? 
rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:%s)[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self):\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]\n self.browsers = [\n (b, re.compile(self._browser_version_re % a, re.I))\n for a, b in self.browsers\n ]\n\n def __call__(self, user_agent):\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent(object):\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. The following platforms are currently\n recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. The following browsers are currently\n recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser\n\n .. attribute:: language\n\n the language of the browser\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string):\n if isinstance(environ_or_string, dict):\n environ_or_string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n self.string = environ_or_string\n self.platform, self.browser, self.version, self.language = self._parser(\n environ_or_string\n )\n\n def to_header(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n def __nonzero__(self):\n return bool(self.browser)\n\n __bool__ = __nonzero__\n\n def __repr__(self):\n return \"<%s %r/%s>\" % (self.__class__.__name__, self.browser, self.version)\n", "path": "src/werkzeug/useragents.py"}]}
| 3,022 | 140 |
gh_patches_debug_39793
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2965
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider tijuanaflats is broken
During the global build at 2021-05-26-14-42-23, spider **tijuanaflats** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tijuanaflats.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson))
</issue>
<code>
[start of locations/spiders/tijuanaflats.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import re
4
5 from locations.items import GeojsonPointItem
6
7
8 class TijuanaFlatsSpider(scrapy.Spider):
9 name = "tijuanaflats"
10 item_attributes = { 'brand': "Tijuana Flats" }
11 allowed_domains = ['tijuanaflats.com']
12 start_urls = (
13 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',
14 )
15
16 def parse(self, response):
17 response.selector.remove_namespaces()
18 city_urls = response.xpath('//url/loc/text()').extract()
19 for path in city_urls:
20 yield scrapy.Request(
21 path.strip(),
22 callback=self.parse_store,
23 )
24
25 def parse_store(self, response):
26
27 if response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract():
28 storeHours = str(response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract())
29 storeHours = storeHours.replace('[','').replace(']','').replace("'",'').replace(',',' - ')
30 else:
31 storeHours = response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract()
32
33
34 properties = {
35 'name': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
36 'website': response.request.url,
37 'ref': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
38 'addr_full': response.xpath('//div[@class="wpsl-location-address"]/span[1]/text()').extract_first() + " " + response.xpath('//div[@class="wpsl-location-address"]/span[2]/text()').extract_first(),
39 'city': response.xpath('//div[@class="wpsl-location-address"]/span[3]/text()').extract_first().rstrip(', '),
40 'state': response.xpath('//div[@class="wpsl-location-address"]/span[4]/text()').extract_first().strip(),
41 'postcode': response.xpath('//div[@class="wpsl-location-address"]/span[5]/text()').extract_first().strip(),
42 'opening_hours': storeHours,
43 'lat': float(response.xpath('//script/text()').extract()[-3].split('"lat":"')[1].split('"')[0]),
44 'lon': float(response.xpath('//script/text()').extract()[-3].split('"lng":"')[1].split('"')[0]),
45 }
46
47 yield GeojsonPointItem(**properties)
[end of locations/spiders/tijuanaflats.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/tijuanaflats.py b/locations/spiders/tijuanaflats.py
--- a/locations/spiders/tijuanaflats.py
+++ b/locations/spiders/tijuanaflats.py
@@ -1,47 +1,45 @@
# -*- coding: utf-8 -*-
+import json
+
import scrapy
-import re
from locations.items import GeojsonPointItem
class TijuanaFlatsSpider(scrapy.Spider):
name = "tijuanaflats"
- item_attributes = { 'brand': "Tijuana Flats" }
- allowed_domains = ['tijuanaflats.com']
- start_urls = (
- 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',
- )
+ item_attributes = {"brand": "Tijuana Flats", "brand_wikidata": "Q7801833"}
+ allowed_domains = ["tijuanaflats.com"]
+ start_urls = ("https://www.tijuanaflats.com/locations",)
def parse(self, response):
- response.selector.remove_namespaces()
- city_urls = response.xpath('//url/loc/text()').extract()
- for path in city_urls:
- yield scrapy.Request(
- path.strip(),
- callback=self.parse_store,
+ data = json.loads(
+ response.xpath(
+ '//tjs-view-locations/attribute::*[name()=":locations"]'
+ ).extract_first()
+ )
+ for row in data:
+ for ent in row["yoast_json_ld"][0]["@graph"]:
+ if ent["@type"] == "WebPage" and row["slug"] in ent["url"]:
+ name = ent["name"]
+
+ # extract text from html snippet
+ hours_of_operation = scrapy.Selector(text=row["acf"]["hours_of_operation"])
+ opening_hours = "; ".join(
+ a.strip() for a in hours_of_operation.xpath("//text()").extract()
)
- def parse_store(self, response):
-
- if response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract():
- storeHours = str(response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract())
- storeHours = storeHours.replace('[','').replace(']','').replace("'",'').replace(',',' - ')
- else:
- storeHours = response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract()
-
-
- properties = {
- 'name': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
- 'website': response.request.url,
- 'ref': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
- 'addr_full': response.xpath('//div[@class="wpsl-location-address"]/span[1]/text()').extract_first() + " " + response.xpath('//div[@class="wpsl-location-address"]/span[2]/text()').extract_first(),
- 'city': response.xpath('//div[@class="wpsl-location-address"]/span[3]/text()').extract_first().rstrip(', '),
- 'state': response.xpath('//div[@class="wpsl-location-address"]/span[4]/text()').extract_first().strip(),
- 'postcode': response.xpath('//div[@class="wpsl-location-address"]/span[5]/text()').extract_first().strip(),
- 'opening_hours': storeHours,
- 'lat': float(response.xpath('//script/text()').extract()[-3].split('"lat":"')[1].split('"')[0]),
- 'lon': float(response.xpath('//script/text()').extract()[-3].split('"lng":"')[1].split('"')[0]),
- }
-
- yield GeojsonPointItem(**properties)
\ No newline at end of file
+ properties = {
+ "ref": row["slug"],
+ "name": name,
+ "lat": row["acf"]["physical_location"]["lat"],
+ "lon": row["acf"]["physical_location"]["lng"],
+ "addr_full": row["acf"]["address_1"],
+ "city": row["acf"]["city"],
+ "state": row["acf"]["state"],
+ "postcode": row["acf"]["zip"],
+ "phone": row["acf"]["contact_phone"],
+ "website": f'https://www.tijuanaflats.com/locations/{row["slug"]}',
+ "opening_hours": opening_hours,
+ }
+ yield GeojsonPointItem(**properties)
|
{"golden_diff": "diff --git a/locations/spiders/tijuanaflats.py b/locations/spiders/tijuanaflats.py\n--- a/locations/spiders/tijuanaflats.py\n+++ b/locations/spiders/tijuanaflats.py\n@@ -1,47 +1,45 @@\n # -*- coding: utf-8 -*-\n+import json\n+\n import scrapy\n-import re\n \n from locations.items import GeojsonPointItem\n \n \n class TijuanaFlatsSpider(scrapy.Spider):\n name = \"tijuanaflats\"\n- item_attributes = { 'brand': \"Tijuana Flats\" }\n- allowed_domains = ['tijuanaflats.com']\n- start_urls = (\n- 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',\n- )\n+ item_attributes = {\"brand\": \"Tijuana Flats\", \"brand_wikidata\": \"Q7801833\"}\n+ allowed_domains = [\"tijuanaflats.com\"]\n+ start_urls = (\"https://www.tijuanaflats.com/locations\",)\n \n def parse(self, response):\n- response.selector.remove_namespaces()\n- city_urls = response.xpath('//url/loc/text()').extract()\n- for path in city_urls:\n- yield scrapy.Request(\n- path.strip(),\n- callback=self.parse_store,\n+ data = json.loads(\n+ response.xpath(\n+ '//tjs-view-locations/attribute::*[name()=\":locations\"]'\n+ ).extract_first()\n+ )\n+ for row in data:\n+ for ent in row[\"yoast_json_ld\"][0][\"@graph\"]:\n+ if ent[\"@type\"] == \"WebPage\" and row[\"slug\"] in ent[\"url\"]:\n+ name = ent[\"name\"]\n+\n+ # extract text from html snippet\n+ hours_of_operation = scrapy.Selector(text=row[\"acf\"][\"hours_of_operation\"])\n+ opening_hours = \"; \".join(\n+ a.strip() for a in hours_of_operation.xpath(\"//text()\").extract()\n )\n \n- def parse_store(self, response):\n-\n- if response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract():\n- storeHours = str(response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract())\n- storeHours = storeHours.replace('[','').replace(']','').replace(\"'\",'').replace(',',' - ')\n- else:\n- storeHours = response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract()\n-\n-\n- properties = {\n- 'name': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n- 'website': response.request.url,\n- 'ref': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n- 'addr_full': response.xpath('//div[@class=\"wpsl-location-address\"]/span[1]/text()').extract_first() + \" \" + response.xpath('//div[@class=\"wpsl-location-address\"]/span[2]/text()').extract_first(),\n- 'city': response.xpath('//div[@class=\"wpsl-location-address\"]/span[3]/text()').extract_first().rstrip(', '),\n- 'state': response.xpath('//div[@class=\"wpsl-location-address\"]/span[4]/text()').extract_first().strip(),\n- 'postcode': response.xpath('//div[@class=\"wpsl-location-address\"]/span[5]/text()').extract_first().strip(),\n- 'opening_hours': storeHours,\n- 'lat': float(response.xpath('//script/text()').extract()[-3].split('\"lat\":\"')[1].split('\"')[0]),\n- 'lon': float(response.xpath('//script/text()').extract()[-3].split('\"lng\":\"')[1].split('\"')[0]),\n- }\n-\n- yield GeojsonPointItem(**properties)\n\\ No newline at end of file\n+ properties = {\n+ \"ref\": row[\"slug\"],\n+ \"name\": name,\n+ \"lat\": row[\"acf\"][\"physical_location\"][\"lat\"],\n+ \"lon\": row[\"acf\"][\"physical_location\"][\"lng\"],\n+ \"addr_full\": row[\"acf\"][\"address_1\"],\n+ \"city\": row[\"acf\"][\"city\"],\n+ \"state\": row[\"acf\"][\"state\"],\n+ \"postcode\": row[\"acf\"][\"zip\"],\n+ \"phone\": row[\"acf\"][\"contact_phone\"],\n+ \"website\": f'https://www.tijuanaflats.com/locations/{row[\"slug\"]}',\n+ \"opening_hours\": opening_hours,\n+ }\n+ yield GeojsonPointItem(**properties)\n", 
"issue": "Spider tijuanaflats is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tijuanaflats** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tijuanaflats.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass TijuanaFlatsSpider(scrapy.Spider):\n name = \"tijuanaflats\"\n item_attributes = { 'brand': \"Tijuana Flats\" }\n allowed_domains = ['tijuanaflats.com']\n start_urls = (\n 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n if response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract():\n storeHours = str(response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract())\n storeHours = storeHours.replace('[','').replace(']','').replace(\"'\",'').replace(',',' - ')\n else:\n storeHours = response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract()\n\n\n properties = {\n 'name': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n 'website': response.request.url,\n 'ref': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n 'addr_full': response.xpath('//div[@class=\"wpsl-location-address\"]/span[1]/text()').extract_first() + \" \" + response.xpath('//div[@class=\"wpsl-location-address\"]/span[2]/text()').extract_first(),\n 'city': response.xpath('//div[@class=\"wpsl-location-address\"]/span[3]/text()').extract_first().rstrip(', '),\n 'state': response.xpath('//div[@class=\"wpsl-location-address\"]/span[4]/text()').extract_first().strip(),\n 'postcode': response.xpath('//div[@class=\"wpsl-location-address\"]/span[5]/text()').extract_first().strip(),\n 'opening_hours': storeHours,\n 'lat': float(response.xpath('//script/text()').extract()[-3].split('\"lat\":\"')[1].split('\"')[0]),\n 'lon': float(response.xpath('//script/text()').extract()[-3].split('\"lng\":\"')[1].split('\"')[0]),\n }\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/tijuanaflats.py"}]}
| 1,355 | 1,003 |
gh_patches_debug_836
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-2472
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't show Steam Linux Runtime when importing games
Link to the tool on steamdb: https://steamdb.info/app/1070560/
</issue>
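For illustration, the kind of filter the request implies can be sketched as below. The appid `1070560` comes from the linked steamdb page; the helper name and surrounding structure are assumptions made for the sketch, not the project's actual API (the real logic lives in the `SteamGame` class shown in the code section that follows).

```python
# Minimal sketch of an importability filter that also skips the
# Steam Linux Runtime tool (appid 1070560, per the linked steamdb page).
# Function and constant names here are illustrative only.

EXCLUDED_APPIDS = {
    "228980",   # Steamworks Common Redistributables
    "1070560",  # Steam Linux Runtime
}


def is_importable(appid, name, installed):
    """Return True if a Steam app should be offered for import as a game."""
    if not installed:
        return False
    if appid in EXCLUDED_APPIDS:
        return False
    # Proton builds are compatibility tools, not games.
    if name.startswith("Proton "):
        return False
    return True


print(is_importable("1070560", "Steam Linux Runtime", True))  # False
print(is_importable("620", "Portal 2", True))                 # True
```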
<code>
[start of lutris/services/steam.py]
1 """Steam service"""
2 import os
3 import re
4
5 from lutris import pga
6 from lutris.config import make_game_config_id, LutrisConfig
7 from lutris.util.steam.appmanifest import AppManifest, get_appmanifests
8 from lutris.util.steam.config import get_steamapps_paths
9 from lutris.services.service_game import ServiceGame
10
11 NAME = "Steam"
12 ICON = "steam"
13 ONLINE = False
14
15
16 class SteamGame(ServiceGame):
17 """ServiceGame for Steam games"""
18
19 store = "steam"
20 installer_slug = "steam"
21 excluded_appids = [
22 "228980", # Steamworks Common Redistributables
23 ]
24
25 @classmethod
26 def new_from_steam_game(cls, appmanifest, game_id=None):
27 """Return a Steam game instance from an AppManifest"""
28 steam_game = SteamGame()
29 steam_game.appid = str(appmanifest.steamid)
30 steam_game.game_id = game_id
31 steam_game.name = appmanifest.name
32 steam_game.slug = appmanifest.slug
33 steam_game.runner = appmanifest.get_runner_name()
34 return steam_game
35
36 @classmethod
37 def new_from_lutris_id(cls, game_id):
38 steam_game = SteamGame()
39 steam_game.game_id = game_id
40 return steam_game
41
42 @property
43 def config_id(self):
44 return make_game_config_id(self.slug)
45
46 @classmethod
47 def is_importable(cls, appmanifest):
48 """Return whether a Steam game should be imported"""
49 if not appmanifest.is_installed():
50 return False
51 if appmanifest.steamid in cls.excluded_appids:
52 return False
53 if re.match(r"^Proton \d*", appmanifest.name):
54 return False
55 return True
56
57 def install(self, updated_info=None):
58 """Add an installed game to the library
59
60 Params:
61 updated_info (dict): Optional dictonary containing existing data not to overwrite
62 """
63 if updated_info:
64 name = updated_info["name"]
65 slug = updated_info["slug"]
66 else:
67 name = self.name
68 slug = self.slug
69 self.game_id = pga.add_or_update(
70 id=self.game_id,
71 name=name,
72 runner=self.runner,
73 slug=slug,
74 steamid=int(self.appid),
75 installed=1,
76 configpath=self.config_id,
77 installer_slug=self.installer_slug,
78 )
79 self.create_config()
80 return self.game_id
81
82 def create_config(self):
83 """Create the game configuration for a Steam game"""
84 game_config = LutrisConfig(
85 runner_slug=self.runner, game_config_id=self.config_id
86 )
87 game_config.raw_game_config.update({"appid": self.appid})
88 game_config.save()
89
90
91 class SteamSyncer:
92 platform = "linux"
93
94 def __init__(self):
95 self._lutris_games = None
96 self._lutris_steamids = None
97
98 @property
99 def runner(self):
100 return "steam" if self.platform == "linux" else "winesteam"
101
102 @property
103 def lutris_games(self):
104 if not self._lutris_games:
105 self._lutris_games = pga.get_games_where(
106 steamid__isnull=False, steamid__not=""
107 )
108 return self._lutris_games
109
110 @property
111 def lutris_steamids(self):
112 if not self._lutris_steamids:
113 self._lutris_steamids = {str(game["steamid"]) for game in self.lutris_games}
114 return self._lutris_steamids
115
116 def load(self, force_reload=False):
117 """Return importable Steam games"""
118 games = []
119 steamapps_paths = get_steamapps_paths()
120 for steamapps_path in steamapps_paths[self.platform]:
121 for appmanifest_file in get_appmanifests(steamapps_path):
122 app_manifest = AppManifest(
123 os.path.join(steamapps_path, appmanifest_file)
124 )
125 if SteamGame.is_importable(app_manifest):
126 games.append(SteamGame.new_from_steam_game(app_manifest))
127 return games
128
129 def get_pga_game(self, game):
130 """Return a PGA game if one is found"""
131 for pga_game in self.lutris_games:
132 if (
133 str(pga_game["steamid"]) == game.appid
134 and (pga_game["runner"] == self.runner or not pga_game["runner"])
135 and not pga_game["installed"]
136 ):
137 return pga_game
138
139 def sync(self, games, full=False):
140 """Syncs Steam games to Lutris"""
141 available_ids = set() # Set of Steam appids seen while browsing AppManifests
142 added_games = []
143 for game in games:
144 steamid = game.appid
145 available_ids.add(steamid)
146 pga_game = self.get_pga_game(game)
147
148 if pga_game:
149 if (
150 steamid in self.lutris_steamids
151 and pga_game["installed"] != 1
152 and pga_game["installed"]
153 ):
154 added_games.append(game.install())
155
156 if steamid not in self.lutris_steamids:
157 added_games.append(game.install())
158 else:
159 if pga_game:
160 added_games.append(game.install(pga_game))
161
162 if not full:
163 return added_games, games
164
165 removed_games = []
166 unavailable_ids = self.lutris_steamids.difference(available_ids)
167 for steamid in unavailable_ids:
168 for pga_game in self.lutris_games:
169 if (
170 str(pga_game["steamid"]) == steamid
171 and pga_game["installed"]
172 and pga_game["runner"] == self.runner
173 ):
174 game = SteamGame.new_from_lutris_id(pga_game["id"])
175 game.uninstall()
176 removed_games.append(pga_game["id"])
177 return (added_games, removed_games)
178
179
180 SYNCER = SteamSyncer
181
[end of lutris/services/steam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lutris/services/steam.py b/lutris/services/steam.py
--- a/lutris/services/steam.py
+++ b/lutris/services/steam.py
@@ -20,6 +20,7 @@
installer_slug = "steam"
excluded_appids = [
"228980", # Steamworks Common Redistributables
+ "1070560", # Steam Linux Runtime
]
@classmethod
|
{"golden_diff": "diff --git a/lutris/services/steam.py b/lutris/services/steam.py\n--- a/lutris/services/steam.py\n+++ b/lutris/services/steam.py\n@@ -20,6 +20,7 @@\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n+ \"1070560\", # Steam Linux Runtime\n ]\n \n @classmethod\n", "issue": "Don't show Steam Linux Runtime when importing games\nLink to the tool on steamdb: https://steamdb.info/app/1070560/\n", "before_files": [{"content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_game import ServiceGame\n\nNAME = \"Steam\"\nICON = \"steam\"\nONLINE = False\n\n\nclass SteamGame(ServiceGame):\n \"\"\"ServiceGame for Steam games\"\"\"\n\n store = \"steam\"\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n ]\n\n @classmethod\n def new_from_steam_game(cls, appmanifest, game_id=None):\n \"\"\"Return a Steam game instance from an AppManifest\"\"\"\n steam_game = SteamGame()\n steam_game.appid = str(appmanifest.steamid)\n steam_game.game_id = game_id\n steam_game.name = appmanifest.name\n steam_game.slug = appmanifest.slug\n steam_game.runner = appmanifest.get_runner_name()\n return steam_game\n\n @classmethod\n def new_from_lutris_id(cls, game_id):\n steam_game = SteamGame()\n steam_game.game_id = game_id\n return steam_game\n\n @property\n def config_id(self):\n return make_game_config_id(self.slug)\n\n @classmethod\n def is_importable(cls, appmanifest):\n \"\"\"Return whether a Steam game should be imported\"\"\"\n if not appmanifest.is_installed():\n return False\n if appmanifest.steamid in cls.excluded_appids:\n return False\n if re.match(r\"^Proton \\d*\", appmanifest.name):\n return False\n return True\n\n def install(self, updated_info=None):\n \"\"\"Add an installed game to the library\n\n Params:\n updated_info (dict): Optional dictonary containing existing data not to overwrite\n \"\"\"\n if updated_info:\n name = updated_info[\"name\"]\n slug = updated_info[\"slug\"]\n else:\n name = self.name\n slug = self.slug\n self.game_id = pga.add_or_update(\n id=self.game_id,\n name=name,\n runner=self.runner,\n slug=slug,\n steamid=int(self.appid),\n installed=1,\n configpath=self.config_id,\n installer_slug=self.installer_slug,\n )\n self.create_config()\n return self.game_id\n\n def create_config(self):\n \"\"\"Create the game configuration for a Steam game\"\"\"\n game_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=self.config_id\n )\n game_config.raw_game_config.update({\"appid\": self.appid})\n game_config.save()\n\n\nclass SteamSyncer:\n platform = \"linux\"\n\n def __init__(self):\n self._lutris_games = None\n self._lutris_steamids = None\n\n @property\n def runner(self):\n return \"steam\" if self.platform == \"linux\" else \"winesteam\"\n\n @property\n def lutris_games(self):\n if not self._lutris_games:\n self._lutris_games = pga.get_games_where(\n steamid__isnull=False, steamid__not=\"\"\n )\n return self._lutris_games\n\n @property\n def lutris_steamids(self):\n if not self._lutris_steamids:\n self._lutris_steamids = {str(game[\"steamid\"]) for game in self.lutris_games}\n return self._lutris_steamids\n\n def load(self, force_reload=False):\n \"\"\"Return importable Steam games\"\"\"\n games = []\n steamapps_paths = 
get_steamapps_paths()\n for steamapps_path in steamapps_paths[self.platform]:\n for appmanifest_file in get_appmanifests(steamapps_path):\n app_manifest = AppManifest(\n os.path.join(steamapps_path, appmanifest_file)\n )\n if SteamGame.is_importable(app_manifest):\n games.append(SteamGame.new_from_steam_game(app_manifest))\n return games\n\n def get_pga_game(self, game):\n \"\"\"Return a PGA game if one is found\"\"\"\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == game.appid\n and (pga_game[\"runner\"] == self.runner or not pga_game[\"runner\"])\n and not pga_game[\"installed\"]\n ):\n return pga_game\n\n def sync(self, games, full=False):\n \"\"\"Syncs Steam games to Lutris\"\"\"\n available_ids = set() # Set of Steam appids seen while browsing AppManifests\n added_games = []\n for game in games:\n steamid = game.appid\n available_ids.add(steamid)\n pga_game = self.get_pga_game(game)\n\n if pga_game:\n if (\n steamid in self.lutris_steamids\n and pga_game[\"installed\"] != 1\n and pga_game[\"installed\"]\n ):\n added_games.append(game.install())\n\n if steamid not in self.lutris_steamids:\n added_games.append(game.install())\n else:\n if pga_game:\n added_games.append(game.install(pga_game))\n\n if not full:\n return added_games, games\n\n removed_games = []\n unavailable_ids = self.lutris_steamids.difference(available_ids)\n for steamid in unavailable_ids:\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == steamid\n and pga_game[\"installed\"]\n and pga_game[\"runner\"] == self.runner\n ):\n game = SteamGame.new_from_lutris_id(pga_game[\"id\"])\n game.uninstall()\n removed_games.append(pga_game[\"id\"])\n return (added_games, removed_games)\n\n\nSYNCER = SteamSyncer\n", "path": "lutris/services/steam.py"}]}
| 2,297 | 107 |
gh_patches_debug_27088
|
rasdani/github-patches
|
git_diff
|
coala__coala-5000
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change default_coafile naming convention.
Should we change the name of `default_coafile` in `coalib` to `system_coafile`?
It will avoid confusion due to the following lines in `Constants.py`
```
system_coafile = os.path.join(coalib_root, 'default_coafile')
user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')
default_coafile = '.coafile'
```
</issue>
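One backwards-compatible way to handle such a rename is to look for the new filename first and fall back to the legacy one with a deprecation warning. The sketch below assumes that approach and uses only standard-library calls; it is not the project's actual resolution logic.

```python
import logging
import os


def resolve_system_coafile(coalib_root):
    """Prefer the new 'system_coafile' name, fall back to the legacy file."""
    new_path = os.path.join(coalib_root, "system_coafile")
    if os.path.isfile(new_path):
        return new_path
    logging.warning(
        "The filename 'default_coafile' is deprecated; "
        "please rename it to 'system_coafile'."
    )
    return os.path.join(coalib_root, "default_coafile")
```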
<code>
[start of coalib/misc/Constants.py]
1 # -*- coding: utf-8 -*-
2
3 import appdirs
4 import os
5 import re
6
7 from coalib import VERSION
8
9
10 THIS_IS_A_BUG = ('This is a bug. We are sorry for the inconvenience. '
11 'Please contact the developers for assistance.')
12
13 CRASH_MESSAGE = ('An unknown error occurred. This is a bug. We are '
14 'sorry for the inconvenience. Please contact the '
15 'developers for assistance. During execution of '
16 'coala an exception was raised. This should never '
17 'happen. When asked for, the following information '
18 'may help investigating:')
19
20 VERSION_CONFLICT_MESSAGE = ('There is a conflict in the version of a '
21 'dependency you have installed and the '
22 'requirements of coala. This may be resolved by '
23 'creating a separate virtual environment for '
24 'coala or running `pip3 install "%s"`. Be aware '
25 'that the latter solution might break other '
26 'python packages that depend on the currently '
27 'installed version.')
28
29 OBJ_NOT_ACCESSIBLE = '{} is not accessible and will be ignored!'
30
31 TRUE_STRINGS = ['1',
32 'on',
33 'okay',
34 'ok',
35 'okey-dokey',
36 'y',
37 'yes',
38 'yeah',
39 'yea',
40 'ya',
41 'ye',
42 'yessir',
43 'sure',
44 'true',
45 'tru',
46 'uh-huh',
47 'definitely',
48 'yup',
49 'yep',
50 'right',
51 'aye',
52 'alright',
53 'alrighty',
54 'hell yeah',
55 'affirmative',
56 'certainly',
57 'definitely',
58 'absolutely',
59 'roger',
60 'righto',
61 'ja',
62 'da',
63 'si',
64 'oui',
65 'amen',
66 'totally',
67 '10-4',
68 'positive']
69
70 FALSE_STRINGS = ['0',
71 'off',
72 'n',
73 'no',
74 'nix',
75 'nope',
76 'nop',
77 'nah',
78 'nay',
79 'false',
80 'uh-uh',
81 'wrong',
82 'none',
83 'nay',
84 'hell no',
85 'fat chance',
86 'not a chance in hell',
87 'not in a million years',
88 'out of the question',
89 'no siree',
90 'no way',
91 'nein',
92 'njet',
93 'nee',
94 'non',
95 'hakuna',
96 'negative']
97
98 # This string contains many unicode characters to challenge tests.
99 COMPLEX_TEST_STRING = ('4 r34l ch4ll3n63: 123 ÄÖü ABc @€¥ §&% {[( ←↓→↑ '
100 'ĦŊħ ß°^ \\\n\u2192')
101
102 # Path to the coalib directory
103 coalib_root = os.path.join(os.path.dirname(__file__),
104 os.path.pardir)
105
106 # Path to the language definition files
107 language_definitions = os.path.join(coalib_root,
108 'bearlib',
109 'languages',
110 'definitions')
111
112 system_coafile = os.path.join(coalib_root, 'default_coafile')
113
114 user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')
115
116 default_coafile = '.coafile'
117
118 USER_DATA_DIR = appdirs.user_data_dir('coala', version=VERSION)
119
120 GLOBBING_SPECIAL_CHARS = '()[]|?*'
121
122 URL_REGEX = re.compile(
123 r'^(?:(?:http|ftp)[s]?://)?' # scheme
124 r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+' # domain name
125 r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
126 r'localhost|' # OR localhost
127 r'(?:\d{1,3}\.){3}\d{1,3})' # OR an ip
128 r'(?::\d+)?' # optional port number
129 r'(?:/?|[/?]\S+)$', # path
130 re.IGNORECASE)
131
[end of coalib/misc/Constants.py]
[start of setup.py]
1 #!/usr/bin/env python3
2
3 import datetime
4 import locale
5 import platform
6 import sys
7 from os import getenv
8 from subprocess import call
9
10 import setuptools.command.build_py
11 from setuptools import find_packages, setup
12 from setuptools.command.test import test as TestCommand
13
14 from coalib import VERSION, assert_supported_version, get_version
15 from coalib.misc.BuildManPage import BuildManPage
16
17 try:
18 lc = locale.getlocale()
19 pf = platform.system()
20 if pf != 'Windows' and lc == (None, None):
21 locale.setlocale(locale.LC_ALL, 'C.UTF-8')
22 except (ValueError, UnicodeError):
23 locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
24
25
26 assert_supported_version()
27
28
29 class BuildPyCommand(setuptools.command.build_py.build_py):
30
31 def run(self):
32 if platform.system() != 'Windows':
33 self.run_command('build_manpage')
34 setuptools.command.build_py.build_py.run(self)
35
36
37 class PyTestCommand(TestCommand):
38
39 def run_tests(self):
40 # import here, cause outside the eggs aren't loaded
41 import pytest
42 errno = pytest.main([])
43 sys.exit(errno)
44
45
46 class BuildDocsCommand(setuptools.command.build_py.build_py):
47 apidoc_command = (
48 'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'
49 )
50 doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')
51
52 def run(self):
53 errOne = call(self.apidoc_command)
54 errTwo = call(self.doc_command)
55 sys.exit(errOne or errTwo)
56
57
58 # Generate API documentation only if we are running on readthedocs.io
59 on_rtd = getenv('READTHEDOCS', None) is not None
60 if on_rtd:
61 call(BuildDocsCommand.apidoc_command)
62 if 'dev' in VERSION:
63 current_version = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
64 call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',
65 '-b {}'.format(current_version)])
66 VERSION = get_version()
67
68 with open('requirements.txt') as requirements:
69 required = requirements.read().splitlines()
70
71 with open('test-requirements.txt') as requirements:
72 test_required = requirements.read().splitlines()
73
74 with open('README.rst') as readme:
75 long_description = readme.read()
76
77
78 if __name__ == '__main__':
79 if platform.system() != 'Windows':
80 data_files = [('.', ['coala.1'])]
81 else:
82 data_files = [('.', [])]
83
84 setup(name='coala',
85 version=VERSION,
86 description='Linting and Fixing Code for All Languages',
87 author='The coala developers',
88 author_email='coala.analyzer@gmail.com',
89 maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\xfcger'
90 if not on_rtd else 'L.S., F.N., M.K.',
91 maintainer_email=('lasse.schuirmann@gmail.com, '
92 'fabian@neuschmidt.de, '
93 'makman@alice.de'),
94 url='http://coala.io/',
95 platforms='any',
96 packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),
97 install_requires=required,
98 tests_require=test_required,
99 package_data={'coalib': ['default_coafile', 'VERSION',
100 'bearlib/languages/documentation/*.coalang']
101 },
102 license='AGPL-3.0',
103 data_files=data_files,
104 long_description=long_description,
105 entry_points={
106 'console_scripts': [
107 'coala = coalib.coala:main',
108 'coala-ci = coalib.coala_ci:main',
109 'coala-json = coalib.coala_json:main',
110 'coala-format = coalib.coala_format:main',
111 'coala-delete-orig = coalib.coala_delete_orig:main']},
112 # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
113 classifiers=[
114 'Development Status :: 4 - Beta',
115
116 'Environment :: Console',
117 'Environment :: MacOS X',
118 'Environment :: Win32 (MS Windows)',
119 'Environment :: X11 Applications :: Gnome',
120
121 'Intended Audience :: Science/Research',
122 'Intended Audience :: Developers',
123
124 'License :: OSI Approved :: GNU Affero General Public License '
125 'v3 or later (AGPLv3+)',
126
127 'Operating System :: OS Independent',
128
129 'Programming Language :: Python :: Implementation :: CPython',
130 'Programming Language :: Python :: 3.4',
131 'Programming Language :: Python :: 3.5',
132 'Programming Language :: Python :: 3 :: Only',
133
134 'Topic :: Scientific/Engineering :: Information Analysis',
135 'Topic :: Software Development :: Quality Assurance',
136 'Topic :: Text Processing :: Linguistic'],
137 cmdclass={'build_manpage': BuildManPage,
138 'build_py': BuildPyCommand,
139 'docs': BuildDocsCommand,
140 'test': PyTestCommand})
141
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/coalib/misc/Constants.py b/coalib/misc/Constants.py
--- a/coalib/misc/Constants.py
+++ b/coalib/misc/Constants.py
@@ -3,6 +3,7 @@
import appdirs
import os
import re
+import logging
from coalib import VERSION
@@ -109,7 +110,17 @@
'languages',
'definitions')
-system_coafile = os.path.join(coalib_root, 'default_coafile')
+
+def get_system_coafile(coalib_root):
+ if os.path.isfile(os.path.join(coalib_root, 'system_coafile')):
+ return os.path.join(coalib_root, 'system_coafile')
+ else:
+ logging.warning('Filename deafult_coafile has been deprecated. '
+ 'Please use system_coafile instead.')
+ return os.path.join(coalib_root, 'default_coafile')
+
+
+system_coafile = get_system_coafile(coalib_root)
user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -96,7 +96,7 @@
packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),
install_requires=required,
tests_require=test_required,
- package_data={'coalib': ['default_coafile', 'VERSION',
+ package_data={'coalib': ['system_coafile', 'VERSION',
'bearlib/languages/documentation/*.coalang']
},
license='AGPL-3.0',
|
{"golden_diff": "diff --git a/coalib/misc/Constants.py b/coalib/misc/Constants.py\n--- a/coalib/misc/Constants.py\n+++ b/coalib/misc/Constants.py\n@@ -3,6 +3,7 @@\n import appdirs\n import os\n import re\n+import logging\n \n from coalib import VERSION\n \n@@ -109,7 +110,17 @@\n 'languages',\n 'definitions')\n \n-system_coafile = os.path.join(coalib_root, 'default_coafile')\n+\n+def get_system_coafile(coalib_root):\n+ if os.path.isfile(os.path.join(coalib_root, 'system_coafile')):\n+ return os.path.join(coalib_root, 'system_coafile')\n+ else:\n+ logging.warning('Filename deafult_coafile has been deprecated. '\n+ 'Please use system_coafile instead.')\n+ return os.path.join(coalib_root, 'default_coafile')\n+\n+\n+system_coafile = get_system_coafile(coalib_root)\n \n user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,7 +96,7 @@\n packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),\n install_requires=required,\n tests_require=test_required,\n- package_data={'coalib': ['default_coafile', 'VERSION',\n+ package_data={'coalib': ['system_coafile', 'VERSION',\n 'bearlib/languages/documentation/*.coalang']\n },\n license='AGPL-3.0',\n", "issue": "Change default_coafile naming convention.\nShould we change the name of `default_coafile` in `coalib` to `system_coafile`?\r\nIt will avoid confusion due to the following lines in `Constants.py`\r\n```\r\nsystem_coafile = os.path.join(coalib_root, 'default_coafile')\r\n\r\nuser_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\r\n\r\ndefault_coafile = '.coafile'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport appdirs\nimport os\nimport re\n\nfrom coalib import VERSION\n\n\nTHIS_IS_A_BUG = ('This is a bug. We are sorry for the inconvenience. '\n 'Please contact the developers for assistance.')\n\nCRASH_MESSAGE = ('An unknown error occurred. This is a bug. We are '\n 'sorry for the inconvenience. Please contact the '\n 'developers for assistance. During execution of '\n 'coala an exception was raised. This should never '\n 'happen. When asked for, the following information '\n 'may help investigating:')\n\nVERSION_CONFLICT_MESSAGE = ('There is a conflict in the version of a '\n 'dependency you have installed and the '\n 'requirements of coala. This may be resolved by '\n 'creating a separate virtual environment for '\n 'coala or running `pip3 install \"%s\"`. 
Be aware '\n 'that the latter solution might break other '\n 'python packages that depend on the currently '\n 'installed version.')\n\nOBJ_NOT_ACCESSIBLE = '{} is not accessible and will be ignored!'\n\nTRUE_STRINGS = ['1',\n 'on',\n 'okay',\n 'ok',\n 'okey-dokey',\n 'y',\n 'yes',\n 'yeah',\n 'yea',\n 'ya',\n 'ye',\n 'yessir',\n 'sure',\n 'true',\n 'tru',\n 'uh-huh',\n 'definitely',\n 'yup',\n 'yep',\n 'right',\n 'aye',\n 'alright',\n 'alrighty',\n 'hell yeah',\n 'affirmative',\n 'certainly',\n 'definitely',\n 'absolutely',\n 'roger',\n 'righto',\n 'ja',\n 'da',\n 'si',\n 'oui',\n 'amen',\n 'totally',\n '10-4',\n 'positive']\n\nFALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n 'nix',\n 'nope',\n 'nop',\n 'nah',\n 'nay',\n 'false',\n 'uh-uh',\n 'wrong',\n 'none',\n 'nay',\n 'hell no',\n 'fat chance',\n 'not a chance in hell',\n 'not in a million years',\n 'out of the question',\n 'no siree',\n 'no way',\n 'nein',\n 'njet',\n 'nee',\n 'non',\n 'hakuna',\n 'negative']\n\n# This string contains many unicode characters to challenge tests.\nCOMPLEX_TEST_STRING = ('4 r34l ch4ll3n63: 123 \u00c4\u00d6\u00fc ABc @\u20ac\u00a5 \u00a7&% {[( \u2190\u2193\u2192\u2191 '\n '\u0126\u014a\u0127 \u00df\u00b0^ \\\\\\n\\u2192')\n\n# Path to the coalib directory\ncoalib_root = os.path.join(os.path.dirname(__file__),\n os.path.pardir)\n\n# Path to the language definition files\nlanguage_definitions = os.path.join(coalib_root,\n 'bearlib',\n 'languages',\n 'definitions')\n\nsystem_coafile = os.path.join(coalib_root, 'default_coafile')\n\nuser_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\n\ndefault_coafile = '.coafile'\n\nUSER_DATA_DIR = appdirs.user_data_dir('coala', version=VERSION)\n\nGLOBBING_SPECIAL_CHARS = '()[]|?*'\n\nURL_REGEX = re.compile(\n r'^(?:(?:http|ftp)[s]?://)?' # scheme\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+' # domain name\n r'(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|' # OR localhost\n r'(?:\\d{1,3}\\.){3}\\d{1,3})' # OR an ip\n r'(?::\\d+)?' 
# optional port number\n r'(?:/?|[/?]\\S+)$', # path\n re.IGNORECASE)\n", "path": "coalib/misc/Constants.py"}, {"content": "#!/usr/bin/env python3\n\nimport datetime\nimport locale\nimport platform\nimport sys\nfrom os import getenv\nfrom subprocess import call\n\nimport setuptools.command.build_py\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom coalib import VERSION, assert_supported_version, get_version\nfrom coalib.misc.BuildManPage import BuildManPage\n\ntry:\n lc = locale.getlocale()\n pf = platform.system()\n if pf != 'Windows' and lc == (None, None):\n locale.setlocale(locale.LC_ALL, 'C.UTF-8')\nexcept (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nassert_supported_version()\n\n\nclass BuildPyCommand(setuptools.command.build_py.build_py):\n\n def run(self):\n if platform.system() != 'Windows':\n self.run_command('build_manpage')\n setuptools.command.build_py.build_py.run(self)\n\n\nclass PyTestCommand(TestCommand):\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main([])\n sys.exit(errno)\n\n\nclass BuildDocsCommand(setuptools.command.build_py.build_py):\n apidoc_command = (\n 'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'\n )\n doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')\n\n def run(self):\n errOne = call(self.apidoc_command)\n errTwo = call(self.doc_command)\n sys.exit(errOne or errTwo)\n\n\n# Generate API documentation only if we are running on readthedocs.io\non_rtd = getenv('READTHEDOCS', None) is not None\nif on_rtd:\n call(BuildDocsCommand.apidoc_command)\n if 'dev' in VERSION:\n current_version = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',\n '-b {}'.format(current_version)])\n VERSION = get_version()\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n\nwith open('test-requirements.txt') as requirements:\n test_required = requirements.read().splitlines()\n\nwith open('README.rst') as readme:\n long_description = readme.read()\n\n\nif __name__ == '__main__':\n if platform.system() != 'Windows':\n data_files = [('.', ['coala.1'])]\n else:\n data_files = [('.', [])]\n\n setup(name='coala',\n version=VERSION,\n description='Linting and Fixing Code for All Languages',\n author='The coala developers',\n author_email='coala.analyzer@gmail.com',\n maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\\xfcger'\n if not on_rtd else 'L.S., F.N., M.K.',\n maintainer_email=('lasse.schuirmann@gmail.com, '\n 'fabian@neuschmidt.de, '\n 'makman@alice.de'),\n url='http://coala.io/',\n platforms='any',\n packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),\n install_requires=required,\n tests_require=test_required,\n package_data={'coalib': ['default_coafile', 'VERSION',\n 'bearlib/languages/documentation/*.coalang']\n },\n license='AGPL-3.0',\n data_files=data_files,\n long_description=long_description,\n entry_points={\n 'console_scripts': [\n 'coala = coalib.coala:main',\n 'coala-ci = coalib.coala_ci:main',\n 'coala-json = coalib.coala_json:main',\n 'coala-format = coalib.coala_format:main',\n 'coala-delete-orig = coalib.coala_delete_orig:main']},\n # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 
'Environment :: X11 Applications :: Gnome',\n\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: GNU Affero General Public License '\n 'v3 or later (AGPLv3+)',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Text Processing :: Linguistic'],\n cmdclass={'build_manpage': BuildManPage,\n 'build_py': BuildPyCommand,\n 'docs': BuildDocsCommand,\n 'test': PyTestCommand})\n", "path": "setup.py"}]}
| 3,300 | 367 |
gh_patches_debug_7645
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-3327
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/context/random/__init__.py]
1 from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,
2 sync_states, moe_set_seed, reset_seeds)
3
4 __all__ = [
5 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',
6 'sync_states', 'moe_set_seed', 'reset_seeds'
7 ]
8
[end of colossalai/context/random/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/colossalai/context/random/__init__.py b/colossalai/context/random/__init__.py
--- a/colossalai/context/random/__init__.py
+++ b/colossalai/context/random/__init__.py
@@ -1,5 +1,16 @@
-from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,
- sync_states, moe_set_seed, reset_seeds)
+from ._helper import (
+ add_seed,
+ get_current_mode,
+ get_seeds,
+ get_states,
+ moe_set_seed,
+ reset_seeds,
+ seed,
+ set_mode,
+ set_seed_states,
+ sync_states,
+ with_seed,
+)
__all__ = [
'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',
|
{"golden_diff": "diff --git a/colossalai/context/random/__init__.py b/colossalai/context/random/__init__.py\n--- a/colossalai/context/random/__init__.py\n+++ b/colossalai/context/random/__init__.py\n@@ -1,5 +1,16 @@\n-from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,\n- sync_states, moe_set_seed, reset_seeds)\n+from ._helper import (\n+ add_seed,\n+ get_current_mode,\n+ get_seeds,\n+ get_states,\n+ moe_set_seed,\n+ reset_seeds,\n+ seed,\n+ set_mode,\n+ set_seed_states,\n+ sync_states,\n+ with_seed,\n+)\n \n __all__ = [\n 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,\n sync_states, moe_set_seed, reset_seeds)\n\n__all__ = [\n 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',\n 'sync_states', 'moe_set_seed', 'reset_seeds'\n]\n", "path": "colossalai/context/random/__init__.py"}]}
| 675 | 217 |
gh_patches_debug_1703
|
rasdani/github-patches
|
git_diff
|
unionai-oss__pandera-1591
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error Importing Pandera with Polars extra
**Describe the bug**
I get an error when importing pandera after installing the latest 0.19.0b2 version with the polars extra in a clean environment. I can import it successfully if I install without the polars extra.
- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
- [ ] (optional) I have confirmed this bug exists on the main branch of pandera.
#### Code Sample, a copy-pastable example
I installed pandera 0.19.0b2 in a clean virtual environment using `pip install pandera[polars]==0.19.0b2` and attempted to import pandera:
```python
import pandera as pa
```
I got the following error message:
```
>>> import pandera as pa
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".venv/lib/python3.11/site-packages/pandera/__init__.py", line 6, in <module>
from pandera import errors, external_config, typing
File ".venv/lib/python3.11/site-packages/pandera/external_config.py", line 23, in <module>
import pyspark.pandas
ModuleNotFoundError: No module named 'pyspark'
```
#### Versions:
- Pandera: 0.19.0b2
- Python: 3.11.7
- Ubuntu: 22.04
</issue>
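The traceback points at the unconditional `import pyspark.pandas` in `external_config.py` as the failing line when pyspark is not installed. A minimal sketch of how such an optional import can be guarded, keeping the environment-variable bookkeeping shown in the code section below, looks like this (a sketch, not the library's released code):

```python
import os

is_spark_local_ip_dirty = False
is_pyarrow_ignore_timezone_dirty = False

try:
    if os.getenv("SPARK_LOCAL_IP") is None:
        is_spark_local_ip_dirty = True
        os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
    if os.getenv("PYARROW_IGNORE_TIMEZONE") is None:
        is_pyarrow_ignore_timezone_dirty = True
        os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"

    import pyspark.pandas  # noqa: F401  # optional dependency
except ImportError:
    # Without pyspark the pandas-on-Spark type-hint patching is simply skipped.
    pass
finally:
    if is_spark_local_ip_dirty:
        os.environ.pop("SPARK_LOCAL_IP")
    if is_pyarrow_ignore_timezone_dirty:
        os.environ.pop("PYARROW_IGNORE_TIMEZONE")
```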
<code>
[start of pandera/external_config.py]
1 """Configuration for external packages."""
2
3 import os
4
5 is_spark_local_ip_dirty = False
6 is_pyarrow_ignore_timezone_dirty = False
7
8 try:
9 # try importing pyspark to see if it exists. This is important because the
10 # pandera.typing module defines a Series type that inherits from
11 # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas
12 # Series and DataFrames to support type hinting:
13 # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names
14 # pylint: disable=unused-import
15 if os.getenv("SPARK_LOCAL_IP") is None:
16 is_spark_local_ip_dirty = True
17 os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
18 if os.getenv("PYARROW_IGNORE_TIMEZONE") is None:
19 is_pyarrow_ignore_timezone_dirty = True
20 # This can be overriden by the user
21 os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
22
23 import pyspark.pandas
24 finally:
25 if is_spark_local_ip_dirty:
26 os.environ.pop("SPARK_LOCAL_IP")
27 if is_pyarrow_ignore_timezone_dirty:
28 os.environ.pop("PYARROW_IGNORE_TIMEZONE")
29
[end of pandera/external_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pandera/external_config.py b/pandera/external_config.py
--- a/pandera/external_config.py
+++ b/pandera/external_config.py
@@ -21,6 +21,8 @@
os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
import pyspark.pandas
+except (ImportError, ModuleNotFoundError):
+ pass
finally:
if is_spark_local_ip_dirty:
os.environ.pop("SPARK_LOCAL_IP")
|
{"golden_diff": "diff --git a/pandera/external_config.py b/pandera/external_config.py\n--- a/pandera/external_config.py\n+++ b/pandera/external_config.py\n@@ -21,6 +21,8 @@\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n \n import pyspark.pandas\n+except (ImportError, ModuleNotFoundError):\n+ pass\n finally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n", "issue": "Error Importing Pandera with Polars extra\n**Describe the bug**\r\nI get an error when importing pandera after installing the latest 0.19.0b2 version with the polars extra in a clean environment. I can import it successfully if I install without the polars extra.\r\n\r\n- [x] I have checked that this issue has not already been reported.\r\n- [x] I have confirmed this bug exists on the latest version of pandera.\r\n- [ ] (optional) I have confirmed this bug exists on the main branch of pandera.\r\n\r\n#### Code Sample, a copy-pastable example\r\n\r\nI installed pandera 0.19.0b2 in a clean virtual environment using `pip install pandera[polars]==0.19.0b2` and attempted to import pandera:\r\n\r\n```python\r\nimport pandera as pa\r\n```\r\n\r\nI got the following error message:\r\n```\r\n>>> import pandera as pa\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \".venv/lib/python3.11/site-packages/pandera/__init__.py\", line 6, in <module>\r\n from pandera import errors, external_config, typing\r\n File \".venv/lib/python3.11/site-packages/pandera/external_config.py\", line 23, in <module>\r\n import pyspark.pandas\r\nModuleNotFoundError: No module named 'pyspark'\r\n```\r\n\r\n#### Versions:\r\n\r\n - Pandera: 0.19.0b2\r\n - Python: 3.11.7\r\n - Ubuntu: 22.04\r\n\n", "before_files": [{"content": "\"\"\"Configuration for external packages.\"\"\"\n\nimport os\n\nis_spark_local_ip_dirty = False\nis_pyarrow_ignore_timezone_dirty = False\n\ntry:\n # try importing pyspark to see if it exists. This is important because the\n # pandera.typing module defines a Series type that inherits from\n # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas\n # Series and DataFrames to support type hinting:\n # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names\n # pylint: disable=unused-import\n if os.getenv(\"SPARK_LOCAL_IP\") is None:\n is_spark_local_ip_dirty = True\n os.environ[\"SPARK_LOCAL_IP\"] = \"127.0.0.1\"\n if os.getenv(\"PYARROW_IGNORE_TIMEZONE\") is None:\n is_pyarrow_ignore_timezone_dirty = True\n # This can be overriden by the user\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n\n import pyspark.pandas\nfinally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n if is_pyarrow_ignore_timezone_dirty:\n os.environ.pop(\"PYARROW_IGNORE_TIMEZONE\")\n", "path": "pandera/external_config.py"}]}
| 1,221 | 109 |
gh_patches_debug_33756
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-1126
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
errno command is throwing errors
```
pwndbg> errno
Traceback (most recent call last):
File "/home/gsgx/code/pwndbg/pwndbg/commands/__init__.py", line 145, in __call__
return self.function(*args, **kwargs)
File "/home/gsgx/code/pwndbg/pwndbg/commands/__init__.py", line 239, in _OnlyWhenRunning
return function(*a, **kw)
File "/home/gsgx/code/pwndbg/pwndbg/commands/misc.py", line 32, in errno
errno_location = pwndbg.symbol.get("__errno_location")
AttributeError: '_ArgparsedCommand' object has no attribute 'symbol'
```
I don't know why the issue is occurring, but it can be fixed with this:
```diff
- errno_location = pwndbg.symbol.get("__errno_location")
- err = pwndbg.memory.int(errno_location)
+ errno_location = _pwndbg.symbol.address("__errno_location")
+ print(errno_location)
+ err = _pwndbg.memory.s32(errno_location)
```
But even after fixing that, there's an issue with the following lines:
```python
err = _pwndbg.memory.s32(errno_location)
#err = int(gdb.parse_and_eval('*((int *(*) (void)) __errno_location) ()'))
```
The commented out line is what actually works for me, while the existing code doesn't. I wonder if this was originally due to a change in glibc? If so, we should figure out what version and make this work on both versions.
</issue>
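A small sketch of the lookup order the report implies, trying the `errno` symbol first and only then calling `__errno_location()` through `gdb.parse_and_eval`, is shown below. It only runs inside a GDB Python session (the `gdb` module is provided by GDB itself), and the function name is an illustrative assumption:

```python
import gdb  # only available inside GDB's embedded Python


def read_errno():
    """Return the inferior's errno, or None if it cannot be resolved yet."""
    try:
        # Works when the TLS 'errno' symbol is visible to GDB.
        return int(gdb.parse_and_eval("errno"))
    except gdb.error:
        pass
    try:
        # Fall back to calling __errno_location() in the inferior,
        # as in the commented-out line quoted above.
        return int(gdb.parse_and_eval("*((int *(*) (void)) __errno_location) ()"))
    except gdb.error:
        return None
```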
<code>
[start of pwndbg/lib/tips.py]
1 from random import choice
2
3 TIPS = [
4 # GDB hints
5 "GDB's `apropos <topic>` command displays all registered commands that are related to the given <topic>",
6 "GDB's `follow-fork-mode` parameter can be used to set whether to trace parent or child after fork() calls",
7 'Use GDB\'s `dprintf` command to print all calls to given function. E.g. `dprintf malloc, "malloc(%p)\\n", (void*)$rdi` will print all malloc calls',
8 "Use GDB's `pi` command to run an interactive Python console where you can use Pwndbg APIs like `pwndbg.gdb.memory.read(addr, len)`, `pwndbg.gdb.memory.write(addr, data)`, `pwndbg.gdb.vmmap.get()` and so on!",
9 "GDB's `set directories <path>` parameter can be used to debug e.g. glibc sources like the malloc/free functions!",
10 # Pwndbg hints
11 "GDB and Pwndbg parameters can be shown or set with `show <param>` and `set <param> <value>` GDB commands",
12 "Use Pwndbg's `config` and `theme` commands to tune its configuration and theme colors!",
13 "Pwndbg mirrors some of Windbg commands like `eq`, `ew`, `ed`, `eb`, `es`, `dq`, `dw`, `dd`, `db`, `ds` for writing and reading memory",
14 "Pwndbg resolves kernel memory maps by parsing page tables (default) or via `monitor info mem` QEMU gdbstub command (use `set kernel-vmmap-via-page-tables off` for that)",
15 "Use the `vmmap` instruction for a better & colored memory maps display (than the GDB's `info proc mappings`)",
16 "Use the `telescope` command to dereference a given address/pointer multiple times (if the dereferenced value is a valid ptr; see `config telescope` to configure its behavior)",
17 "Use the `context` (or `ctx`) command to display the context once again. You can reconfigure the context layout with `set context-section <sections>` or forward the output to a file/tty via `set context-output <file>`. See also `config context` to configure it further!",
18 "Disable Pwndbg context information display with `set context-sections ''`",
19 "Pwndbg context displays where the program branches to thanks to emulating few instructions into the future. You can disable this with `set emulate off` which may also speed up debugging",
20 "Use the `canary` command to see all stack canary/cookie values on the stack (based on the *usual* stack canary value initialized by glibc)",
21 "Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)",
22 "Want to display each context panel in a separate tmux window? See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context",
23 "The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command",
24 ]
25
26
27 def get_tip_of_the_day() -> str:
28 return choice(TIPS)
29
[end of pwndbg/lib/tips.py]
[start of pwndbg/commands/misc.py]
1 import argparse
2 import errno
3
4 import gdb
5
6 import pwndbg.auxv
7 import pwndbg.commands
8 import pwndbg.gdblib.arch as _arch
9 import pwndbg.regs
10 import pwndbg.symbol
11
12 errno.errorcode[0] = "OK"
13
14 parser = argparse.ArgumentParser(
15 description="""
16 Converts errno (or argument) to its string representation.
17 """
18 )
19 parser.add_argument(
20 "err",
21 type=int,
22 nargs="?",
23 default=None,
24 help="Errno; if not passed, it is retrieved from __errno_location",
25 )
26
27
28 @pwndbg.commands.ArgparsedCommand(parser, command_name="errno")
29 @pwndbg.commands.OnlyWhenRunning
30 def errno_(err):
31 if err is None:
32 # Try to get the `errno` variable value
33 # if it does not exist, get the errno variable from its location
34 try:
35 err = int(gdb.parse_and_eval("errno"))
36 except gdb.error:
37 try:
38 err = int(gdb.parse_and_eval("*((int *(*) (void)) __errno_location) ()"))
39 except gdb.error:
40 print(
41 "Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (was libc.so loaded already?)"
42 )
43 return
44
45 msg = errno.errorcode.get(int(err), "Unknown error code")
46 print("Errno %s: %s" % (err, msg))
47
48
49 parser = argparse.ArgumentParser(
50 description="""
51 Prints out a list of all pwndbg commands. The list can be optionally filtered if filter_pattern is passed.
52 """
53 )
54 parser.add_argument(
55 "filter_pattern",
56 type=str,
57 nargs="?",
58 default=None,
59 help="Filter to apply to commands names/docs",
60 )
61
62
63 @pwndbg.commands.ArgparsedCommand(parser, command_name="pwndbg")
64 def pwndbg_(filter_pattern):
65 for name, docs in list_and_filter_commands(filter_pattern):
66 print("%-20s %s" % (name, docs))
67
68
69 parser = argparse.ArgumentParser(description="""Print the distance between the two arguments.""")
70 parser.add_argument("a", type=int, help="The first address.")
71 parser.add_argument("b", type=int, help="The second address.")
72
73
74 @pwndbg.commands.ArgparsedCommand(parser)
75 def distance(a, b):
76 """Print the distance between the two arguments"""
77 a = int(a) & pwndbg.arch.ptrmask
78 b = int(b) & pwndbg.arch.ptrmask
79
80 distance = b - a
81
82 print("%#x->%#x is %#x bytes (%#x words)" % (a, b, distance, distance // pwndbg.arch.ptrsize))
83
84
85 def list_and_filter_commands(filter_str):
86 sorted_commands = list(pwndbg.commands.commands)
87 sorted_commands.sort(key=lambda x: x.__name__)
88
89 if filter_str:
90 filter_str = filter_str.lower()
91
92 results = []
93
94 for c in sorted_commands:
95 name = c.__name__
96 docs = c.__doc__
97
98 if docs:
99 docs = docs.strip()
100 if docs:
101 docs = docs.splitlines()[0]
102
103 if not filter_str or filter_str in name.lower() or (docs and filter_str in docs.lower()):
104 results.append((name, docs))
105
106 return results
107
[end of pwndbg/commands/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/commands/misc.py b/pwndbg/commands/misc.py
--- a/pwndbg/commands/misc.py
+++ b/pwndbg/commands/misc.py
@@ -35,10 +35,24 @@
err = int(gdb.parse_and_eval("errno"))
except gdb.error:
try:
- err = int(gdb.parse_and_eval("*((int *(*) (void)) __errno_location) ()"))
+ # We can't simply call __errno_location because its .plt.got entry may be uninitialized
+ # (e.g. if the binary was just started with `starti` command)
+ # So we have to check the got.plt entry first before calling it
+ errno_loc_gotplt = pwndbg.symbol.address("__errno_location@got.plt")
+
+ # If the got.plt entry is not there (is None), it means the symbol is not used by the binary
+ if errno_loc_gotplt is None or pwndbg.vmmap.find(
+ pwndbg.memory.pvoid(errno_loc_gotplt)
+ ):
+ err = int(gdb.parse_and_eval("*((int *(*) (void)) __errno_location) ()"))
+ else:
+ print(
+ "Could not determine error code automatically: the __errno_location@got.plt has no valid address yet (perhaps libc.so hasn't been loaded yet?)"
+ )
+ return
except gdb.error:
print(
- "Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (was libc.so loaded already?)"
+ "Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (perhaps libc.so hasn't been not loaded yet?)"
)
return
diff --git a/pwndbg/lib/tips.py b/pwndbg/lib/tips.py
--- a/pwndbg/lib/tips.py
+++ b/pwndbg/lib/tips.py
@@ -21,6 +21,7 @@
"Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)",
"Want to display each context panel in a separate tmux window? See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context",
"The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command",
+ "Use the `errno` (or `errno <number>`) command to see the name of the last or provided (libc) error",
]
|
{"golden_diff": "diff --git a/pwndbg/commands/misc.py b/pwndbg/commands/misc.py\n--- a/pwndbg/commands/misc.py\n+++ b/pwndbg/commands/misc.py\n@@ -35,10 +35,24 @@\n err = int(gdb.parse_and_eval(\"errno\"))\n except gdb.error:\n try:\n- err = int(gdb.parse_and_eval(\"*((int *(*) (void)) __errno_location) ()\"))\n+ # We can't simply call __errno_location because its .plt.got entry may be uninitialized\n+ # (e.g. if the binary was just started with `starti` command)\n+ # So we have to check the got.plt entry first before calling it\n+ errno_loc_gotplt = pwndbg.symbol.address(\"__errno_location@got.plt\")\n+\n+ # If the got.plt entry is not there (is None), it means the symbol is not used by the binary\n+ if errno_loc_gotplt is None or pwndbg.vmmap.find(\n+ pwndbg.memory.pvoid(errno_loc_gotplt)\n+ ):\n+ err = int(gdb.parse_and_eval(\"*((int *(*) (void)) __errno_location) ()\"))\n+ else:\n+ print(\n+ \"Could not determine error code automatically: the __errno_location@got.plt has no valid address yet (perhaps libc.so hasn't been loaded yet?)\"\n+ )\n+ return\n except gdb.error:\n print(\n- \"Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (was libc.so loaded already?)\"\n+ \"Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (perhaps libc.so hasn't been not loaded yet?)\"\n )\n return\n \ndiff --git a/pwndbg/lib/tips.py b/pwndbg/lib/tips.py\n--- a/pwndbg/lib/tips.py\n+++ b/pwndbg/lib/tips.py\n@@ -21,6 +21,7 @@\n \"Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)\",\n \"Want to display each context panel in a separate tmux window? See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context\",\n \"The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command\",\n+ \"Use the `errno` (or `errno <number>`) command to see the name of the last or provided (libc) error\",\n ]\n", "issue": "errno command is throwing errors\n```\r\npwndbg> errno\r\nTraceback (most recent call last):\r\n File \"/home/gsgx/code/pwndbg/pwndbg/commands/__init__.py\", line 145, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/gsgx/code/pwndbg/pwndbg/commands/__init__.py\", line 239, in _OnlyWhenRunning\r\n return function(*a, **kw)\r\n File \"/home/gsgx/code/pwndbg/pwndbg/commands/misc.py\", line 32, in errno\r\n errno_location = pwndbg.symbol.get(\"__errno_location\")\r\nAttributeError: '_ArgparsedCommand' object has no attribute 'symbol'\r\n```\r\n\r\nI don't know why the issue is occurring, but it can be fixed with this:\r\n```diff\r\n- errno_location = pwndbg.symbol.get(\"__errno_location\")\r\n- err = pwndbg.memory.int(errno_location)\r\n+ errno_location = _pwndbg.symbol.address(\"__errno_location\")\r\n+ print(errno_location)\r\n+ err = _pwndbg.memory.s32(errno_location)\r\n```\r\n\r\nBut even after fixing that, there's an issue with the following lines:\r\n```python\r\nerr = _pwndbg.memory.s32(errno_location)\r\n#err = int(gdb.parse_and_eval('*((int *(*) (void)) __errno_location) ()'))\r\n```\r\n\r\nThe commented out line is what actually works for me, while the existing code doesn't. I wonder if this was originally due to a change in glibc? 
If so, we should figure out what version and make this work on both versions.\r\n\r\n\n", "before_files": [{"content": "from random import choice\n\nTIPS = [\n # GDB hints\n \"GDB's `apropos <topic>` command displays all registered commands that are related to the given <topic>\",\n \"GDB's `follow-fork-mode` parameter can be used to set whether to trace parent or child after fork() calls\",\n 'Use GDB\\'s `dprintf` command to print all calls to given function. E.g. `dprintf malloc, \"malloc(%p)\\\\n\", (void*)$rdi` will print all malloc calls',\n \"Use GDB's `pi` command to run an interactive Python console where you can use Pwndbg APIs like `pwndbg.gdb.memory.read(addr, len)`, `pwndbg.gdb.memory.write(addr, data)`, `pwndbg.gdb.vmmap.get()` and so on!\",\n \"GDB's `set directories <path>` parameter can be used to debug e.g. glibc sources like the malloc/free functions!\",\n # Pwndbg hints\n \"GDB and Pwndbg parameters can be shown or set with `show <param>` and `set <param> <value>` GDB commands\",\n \"Use Pwndbg's `config` and `theme` commands to tune its configuration and theme colors!\",\n \"Pwndbg mirrors some of Windbg commands like `eq`, `ew`, `ed`, `eb`, `es`, `dq`, `dw`, `dd`, `db`, `ds` for writing and reading memory\",\n \"Pwndbg resolves kernel memory maps by parsing page tables (default) or via `monitor info mem` QEMU gdbstub command (use `set kernel-vmmap-via-page-tables off` for that)\",\n \"Use the `vmmap` instruction for a better & colored memory maps display (than the GDB's `info proc mappings`)\",\n \"Use the `telescope` command to dereference a given address/pointer multiple times (if the dereferenced value is a valid ptr; see `config telescope` to configure its behavior)\",\n \"Use the `context` (or `ctx`) command to display the context once again. You can reconfigure the context layout with `set context-section <sections>` or forward the output to a file/tty via `set context-output <file>`. See also `config context` to configure it further!\",\n \"Disable Pwndbg context information display with `set context-sections ''`\",\n \"Pwndbg context displays where the program branches to thanks to emulating few instructions into the future. You can disable this with `set emulate off` which may also speed up debugging\",\n \"Use the `canary` command to see all stack canary/cookie values on the stack (based on the *usual* stack canary value initialized by glibc)\",\n \"Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)\",\n \"Want to display each context panel in a separate tmux window? 
See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context\",\n \"The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command\",\n]\n\n\ndef get_tip_of_the_day() -> str:\n return choice(TIPS)\n", "path": "pwndbg/lib/tips.py"}, {"content": "import argparse\nimport errno\n\nimport gdb\n\nimport pwndbg.auxv\nimport pwndbg.commands\nimport pwndbg.gdblib.arch as _arch\nimport pwndbg.regs\nimport pwndbg.symbol\n\nerrno.errorcode[0] = \"OK\"\n\nparser = argparse.ArgumentParser(\n description=\"\"\"\nConverts errno (or argument) to its string representation.\n\"\"\"\n)\nparser.add_argument(\n \"err\",\n type=int,\n nargs=\"?\",\n default=None,\n help=\"Errno; if not passed, it is retrieved from __errno_location\",\n)\n\n\n@pwndbg.commands.ArgparsedCommand(parser, command_name=\"errno\")\n@pwndbg.commands.OnlyWhenRunning\ndef errno_(err):\n if err is None:\n # Try to get the `errno` variable value\n # if it does not exist, get the errno variable from its location\n try:\n err = int(gdb.parse_and_eval(\"errno\"))\n except gdb.error:\n try:\n err = int(gdb.parse_and_eval(\"*((int *(*) (void)) __errno_location) ()\"))\n except gdb.error:\n print(\n \"Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (was libc.so loaded already?)\"\n )\n return\n\n msg = errno.errorcode.get(int(err), \"Unknown error code\")\n print(\"Errno %s: %s\" % (err, msg))\n\n\nparser = argparse.ArgumentParser(\n description=\"\"\"\nPrints out a list of all pwndbg commands. The list can be optionally filtered if filter_pattern is passed.\n\"\"\"\n)\nparser.add_argument(\n \"filter_pattern\",\n type=str,\n nargs=\"?\",\n default=None,\n help=\"Filter to apply to commands names/docs\",\n)\n\n\n@pwndbg.commands.ArgparsedCommand(parser, command_name=\"pwndbg\")\ndef pwndbg_(filter_pattern):\n for name, docs in list_and_filter_commands(filter_pattern):\n print(\"%-20s %s\" % (name, docs))\n\n\nparser = argparse.ArgumentParser(description=\"\"\"Print the distance between the two arguments.\"\"\")\nparser.add_argument(\"a\", type=int, help=\"The first address.\")\nparser.add_argument(\"b\", type=int, help=\"The second address.\")\n\n\n@pwndbg.commands.ArgparsedCommand(parser)\ndef distance(a, b):\n \"\"\"Print the distance between the two arguments\"\"\"\n a = int(a) & pwndbg.arch.ptrmask\n b = int(b) & pwndbg.arch.ptrmask\n\n distance = b - a\n\n print(\"%#x->%#x is %#x bytes (%#x words)\" % (a, b, distance, distance // pwndbg.arch.ptrsize))\n\n\ndef list_and_filter_commands(filter_str):\n sorted_commands = list(pwndbg.commands.commands)\n sorted_commands.sort(key=lambda x: x.__name__)\n\n if filter_str:\n filter_str = filter_str.lower()\n\n results = []\n\n for c in sorted_commands:\n name = c.__name__\n docs = c.__doc__\n\n if docs:\n docs = docs.strip()\n if docs:\n docs = docs.splitlines()[0]\n\n if not filter_str or filter_str in name.lower() or (docs and filter_str in docs.lower()):\n results.append((name, docs))\n\n return results\n", "path": "pwndbg/commands/misc.py"}]}
| 2,604 | 573 |
gh_patches_debug_22260
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1392
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E0002 bug when using parameters for DynamoDB AttributeDefinitions
*cfn-lint version: 0.28.2*
*Description of issue.*
Rule E3039 (added in 0.28.0) doesn't support Refs and results in an E0002 error for the template. 
Repeatable with this template snippet:
```
AWSTemplateFormatVersion: '2010-09-09'
Parameters:
HashKeyName:
Description: Primary Key Name
Type: String
AllowedPattern: '[a-zA-Z0-9]*'
MinLength: '1'
MaxLength: '2048'
    ConstraintDescription: must contain only alphanumeric characters
HashKeyType:
Description: Primary Key Type
Type: String
Default: S
AllowedPattern: '[S|N]'
MinLength: '1'
MaxLength: '1'
ConstraintDescription: must be either S or N
RangeKeyName:
Description: Sort Key Name
Type: String
Default: 'NA'
AllowedPattern: '[a-zA-Z0-9]*'
MinLength: '0'
MaxLength: '2048'
    ConstraintDescription: must contain only alphanumeric characters
RangeKeyType:
Description: Sort Key Type
Type: String
Default: S
AllowedPattern: '[S|N]'
MinLength: '0'
MaxLength: '1'
    ConstraintDescription: must be either S or N
Conditions:
isRangeKeyAvailable: !Not [ !Equals [ !Ref RangeKeyName, 'NA' ] ]
Resources:
DynamoDBTable:
DeletionPolicy: Delete
UpdateReplacePolicy: Delete
Type: AWS::DynamoDB::Table
Properties:
AttributeDefinitions: !If
- isRangeKeyAvailable
- - AttributeName: !Ref HashKeyName
AttributeType: !Ref HashKeyType
- AttributeName: !Ref RangeKeyName
AttributeType: !Ref RangeKeyType
- - AttributeName: !Ref HashKeyName
AttributeType: !Ref HashKeyType
KeySchema: !If
- isRangeKeyAvailable
- - AttributeName: !Ref HashKeyName
KeyType: HASH
- AttributeName: !Ref RangeKeyName
KeyType: RANGE
- - AttributeName: !Ref HashKeyName
KeyType: HASH
```
</issue>
<code>
[start of src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import six
6 from cfnlint.decode.node import list_node
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9
10
11 class AttributeMismatch(CloudFormationLintRule):
12 """Check DynamoDB Attributes"""
13 id = 'E3039'
14 shortdesc = 'AttributeDefinitions / KeySchemas mismatch'
15 description = 'Verify the set of Attributes in AttributeDefinitions and KeySchemas match'
16 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html'
17 tags = ['resources', 'dynamodb']
18
19 def __init__(self):
20 """Init"""
21 super(AttributeMismatch, self).__init__()
22 self.resource_property_types = ['AWS::DynamoDB::Table']
23
24 def _get_key_schema_attributes(self, key_schemas_sets):
25 """ Get Key Schema attributes """
26 keys = set()
27
28 for properties, _ in key_schemas_sets:
29 for key in properties:
30 attribute_name = key.get_safe('AttributeName', type_t=six.string_types)
31 if attribute_name:
32 keys.add(key.get('AttributeName'))
33 return keys
34
35 def _get_attribute_secondary(self, property_sets):
36 """ Get the key schemas from secondary indexes """
37 keys = set()
38
39 for properties, _ in property_sets:
40 for index in properties:
41 keys = keys.union(
42 self._get_key_schema_attributes(
43 index.get_safe('KeySchema', list_node([], None, None), [], list)
44 )
45 )
46
47 return keys
48
49 def check_property_set(self, property_set, path):
50 """ Check a property set """
51 matches = []
52 properties = property_set.get('Object')
53
54 keys = set()
55 attributes = set()
56
57 for attribute in properties.get('AttributeDefinitions', []):
58 attribute_name = attribute.get('AttributeName')
59 if isinstance(attribute_name, six.string_types):
60 attributes.add(attribute.get('AttributeName'))
61 else:
62 self.logger.info('attribute definitions is not using just strings')
63 return matches
64 keys = keys.union(
65 self._get_key_schema_attributes(
66 properties.get_safe('KeySchema', list_node([], None, None), [], list)
67 )
68 )
69 keys = keys.union(self._get_attribute_secondary(
70 properties.get_safe('GlobalSecondaryIndexes', list_node([], None, None), path, list
71 ))) # pylint: disable=bad-continuation
72 keys = keys.union(self._get_attribute_secondary(
73 properties.get_safe('LocalSecondaryIndexes', list_node([], None, None), path, list
74 ))) # pylint: disable=bad-continuation
75
76 if attributes != keys:
77 message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'
78 matches.append(RuleMatch(
79 path,
80 message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))
81 ))
82
83 return matches
84
85 def check(self, properties, path, cfn):
86 """Check itself"""
87 matches = []
88
89 property_sets = cfn.get_object_without_conditions(properties, path)
90 for property_set in property_sets:
91 matches.extend(self.check_property_set(property_set, path))
92 return matches
93
94 def match_resource_properties(self, properties, _, path, cfn):
95 """Match for sub properties"""
96 matches = []
97 matches.extend(self.check(properties, path, cfn))
98 return matches
99
[end of src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
--- a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
+++ b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
@@ -77,7 +77,8 @@
message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'
matches.append(RuleMatch(
path,
- message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))
+ message.format(sorted(list(attributes)), sorted(
+ list(keys)), '/'.join(map(str, path)))
))
return matches
@@ -86,7 +87,8 @@
"""Check itself"""
matches = []
- property_sets = cfn.get_object_without_conditions(properties, path)
+ property_sets = cfn.get_object_without_conditions(
+ properties, ['AttributeDefinitions', 'KeySchema', 'GlobalSecondaryIndexes', 'LocalSecondaryIndexes'])
for property_set in property_sets:
matches.extend(self.check_property_set(property_set, path))
return matches
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n--- a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n+++ b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n@@ -77,7 +77,8 @@\n message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'\n matches.append(RuleMatch(\n path,\n- message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))\n+ message.format(sorted(list(attributes)), sorted(\n+ list(keys)), '/'.join(map(str, path)))\n ))\n \n return matches\n@@ -86,7 +87,8 @@\n \"\"\"Check itself\"\"\"\n matches = []\n \n- property_sets = cfn.get_object_without_conditions(properties, path)\n+ property_sets = cfn.get_object_without_conditions(\n+ properties, ['AttributeDefinitions', 'KeySchema', 'GlobalSecondaryIndexes', 'LocalSecondaryIndexes'])\n for property_set in property_sets:\n matches.extend(self.check_property_set(property_set, path))\n return matches\n", "issue": "E0002 bug when using parameters for DynamoDB AttributeDefinitions\n*cfn-lint version: 0.28.2*\r\n\r\n*Description of issue.*\r\n\r\nRule E3039 (added in 0.28.0) doesn't support Refs and results in a E0002 error for the template. \r\n\r\nRepeatable with this template snippet:\r\n\r\n```\r\nAWSTemplateFormatVersion: '2010-09-09'\r\n\r\nParameters:\r\n HashKeyName:\r\n Description: Primary Key Name\r\n Type: String\r\n AllowedPattern: '[a-zA-Z0-9]*'\r\n MinLength: '1'\r\n MaxLength: '2048'\r\n ConstraintDescription: must contain only alphanumberic characters\r\n\r\n HashKeyType:\r\n Description: Primary Key Type\r\n Type: String\r\n Default: S\r\n AllowedPattern: '[S|N]'\r\n MinLength: '1'\r\n MaxLength: '1'\r\n ConstraintDescription: must be either S or N\r\n\r\n RangeKeyName:\r\n Description: Sort Key Name\r\n Type: String\r\n Default: 'NA'\r\n AllowedPattern: '[a-zA-Z0-9]*'\r\n MinLength: '0'\r\n MaxLength: '2048'\r\n ConstraintDescription: must contain only alphanumberic characters\r\n\r\n RangeKeyType:\r\n Description: Sort Key Type\r\n Type: String\r\n Default: S\r\n AllowedPattern: '[S|N]'\r\n MinLength: '0'\r\n MaxLength: '1'\r\n ConstraintDescription: must be either S or Ns\r\n\r\nConditions:\r\n isRangeKeyAvailable: !Not [ !Equals [ !Ref RangeKeyName, 'NA' ] ]\r\n\r\nResources:\r\n DynamoDBTable:\r\n DeletionPolicy: Delete\r\n UpdateReplacePolicy: Delete\r\n Type: AWS::DynamoDB::Table\r\n Properties:\r\n AttributeDefinitions: !If\r\n - isRangeKeyAvailable\r\n - - AttributeName: !Ref HashKeyName\r\n AttributeType: !Ref HashKeyType\r\n - AttributeName: !Ref RangeKeyName\r\n AttributeType: !Ref RangeKeyType\r\n - - AttributeName: !Ref HashKeyName\r\n AttributeType: !Ref HashKeyType\r\n KeySchema: !If\r\n - isRangeKeyAvailable\r\n - - AttributeName: !Ref HashKeyName\r\n KeyType: HASH\r\n - AttributeName: !Ref RangeKeyName\r\n KeyType: RANGE\r\n - - AttributeName: !Ref HashKeyName\r\n KeyType: HASH\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.decode.node import list_node\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass AttributeMismatch(CloudFormationLintRule):\n \"\"\"Check DynamoDB Attributes\"\"\"\n id = 'E3039'\n shortdesc = 'AttributeDefinitions / KeySchemas mismatch'\n description = 'Verify the set of Attributes in AttributeDefinitions and KeySchemas match'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html'\n tags = ['resources', 'dynamodb']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(AttributeMismatch, self).__init__()\n self.resource_property_types = ['AWS::DynamoDB::Table']\n\n def _get_key_schema_attributes(self, key_schemas_sets):\n \"\"\" Get Key Schema attributes \"\"\"\n keys = set()\n\n for properties, _ in key_schemas_sets:\n for key in properties:\n attribute_name = key.get_safe('AttributeName', type_t=six.string_types)\n if attribute_name:\n keys.add(key.get('AttributeName'))\n return keys\n\n def _get_attribute_secondary(self, property_sets):\n \"\"\" Get the key schemas from secondary indexes \"\"\"\n keys = set()\n\n for properties, _ in property_sets:\n for index in properties:\n keys = keys.union(\n self._get_key_schema_attributes(\n index.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n\n return keys\n\n def check_property_set(self, property_set, path):\n \"\"\" Check a property set \"\"\"\n matches = []\n properties = property_set.get('Object')\n\n keys = set()\n attributes = set()\n\n for attribute in properties.get('AttributeDefinitions', []):\n attribute_name = attribute.get('AttributeName')\n if isinstance(attribute_name, six.string_types):\n attributes.add(attribute.get('AttributeName'))\n else:\n self.logger.info('attribute definitions is not using just strings')\n return matches\n keys = keys.union(\n self._get_key_schema_attributes(\n properties.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('GlobalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('LocalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n\n if attributes != keys:\n message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'\n matches.append(RuleMatch(\n path,\n message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))\n ))\n\n return matches\n\n def check(self, properties, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n\n property_sets = cfn.get_object_without_conditions(properties, path)\n for property_set in property_sets:\n matches.extend(self.check_property_set(property_set, path))\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n matches.extend(self.check(properties, path, cfn))\n return matches\n", "path": "src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py"}]}
| 2,036 | 256 |
gh_patches_debug_29965
|
rasdani/github-patches
|
git_diff
|
openai__gym-1549
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VectorEnv seems much slower than SubprocVecEnv
The following test script shows a large performance difference between SubprocVecEnv and VectorEnv:
```
elapsed_vectorenv 24.20034408569336
elapsed_subprocvecenv 0.813650369644165
```
```
from gym.vector import make
from baselines.common.vec_env import SubprocVecEnv
import numpy as np
import time
import gym
NUM_STEPS = 1000
def run_vectorenv():
venv = make("PongNoFrameskip-v4", num_envs=3)
venv.reset()
start = time.time()
for _ in range(NUM_STEPS):
obs, rews, dones, infos = venv.step(venv.action_space.sample())
elapsed = time.time() - start
venv.close()
return elapsed
def run_subprocvecenv():
def make_env():
return gym.make("PongNoFrameskip-v4")
venv = SubprocVecEnv([make_env] * 3)
venv.reset()
start = time.time()
for _ in range(NUM_STEPS):
obs, rews, dones, infos = venv.step(np.array([venv.action_space.sample() for _ in range(venv.num_envs)]))
elapsed = time.time() - start
venv.close()
return elapsed
def main():
elapsed_vectorenv = run_vectorenv()
elapsed_subprocvecenv = run_subprocvecenv()
print("elapsed_vectorenv", elapsed_vectorenv)
print("elapsed_subprocvecenv", elapsed_subprocvecenv)
```
</issue>
<code>
[start of gym/vector/utils/shared_memory.py]
1 import numpy as np
2 from multiprocessing import Array
3 from ctypes import c_bool
4 from collections import OrderedDict
5
6 from gym import logger
7 from gym.spaces import Tuple, Dict
8 from gym.vector.utils.spaces import _BaseGymSpaces
9
10 __all__ = [
11 'create_shared_memory',
12 'read_from_shared_memory',
13 'write_to_shared_memory'
14 ]
15
16 def create_shared_memory(space, n=1):
17 """Create a shared memory object, to be shared across processes. This
18 eventually contains the observations from the vectorized environment.
19
20 Parameters
21 ----------
22 space : `gym.spaces.Space` instance
23 Observation space of a single environment in the vectorized environment.
24
25 n : int
26 Number of environments in the vectorized environment (i.e. the number
27 of processes).
28
29 Returns
30 -------
31 shared_memory : dict, tuple, or `multiprocessing.Array` instance
32 Shared object across processes.
33 """
34 if isinstance(space, _BaseGymSpaces):
35 return create_base_shared_memory(space, n=n)
36 elif isinstance(space, Tuple):
37 return create_tuple_shared_memory(space, n=n)
38 elif isinstance(space, Dict):
39 return create_dict_shared_memory(space, n=n)
40 else:
41 raise NotImplementedError()
42
43 def create_base_shared_memory(space, n=1):
44 dtype = space.dtype.char
45 if dtype in '?':
46 dtype = c_bool
47 return Array(dtype, n * int(np.prod(space.shape)))
48
49 def create_tuple_shared_memory(space, n=1):
50 return tuple(create_shared_memory(subspace, n=n)
51 for subspace in space.spaces)
52
53 def create_dict_shared_memory(space, n=1):
54 return OrderedDict([(key, create_shared_memory(subspace, n=n))
55 for (key, subspace) in space.spaces.items()])
56
57
58 def read_from_shared_memory(shared_memory, space, n=1):
59 """Read the batch of observations from shared memory as a numpy array.
60
61 Parameters
62 ----------
63 shared_memory : dict, tuple, or `multiprocessing.Array` instance
64 Shared object across processes. This contains the observations from the
65 vectorized environment. This object is created with `create_shared_memory`.
66
67 space : `gym.spaces.Space` instance
68 Observation space of a single environment in the vectorized environment.
69
70 n : int
71 Number of environments in the vectorized environment (i.e. the number
72 of processes).
73
74 Returns
75 -------
76 observations : dict, tuple or `np.ndarray` instance
77 Batch of observations as a (possibly nested) numpy array.
78
79 Notes
80 -----
81 The numpy array objects returned by `read_from_shared_memory` shares the
82 memory of `shared_memory`. Any changes to `shared_memory` are forwarded
83 to `observations`, and vice-versa. To avoid any side-effect, use `np.copy`.
84 """
85 if isinstance(space, _BaseGymSpaces):
86 return read_base_from_shared_memory(shared_memory, space, n=n)
87 elif isinstance(space, Tuple):
88 return read_tuple_from_shared_memory(shared_memory, space, n=n)
89 elif isinstance(space, Dict):
90 return read_dict_from_shared_memory(shared_memory, space, n=n)
91 else:
92 raise NotImplementedError()
93
94 def read_base_from_shared_memory(shared_memory, space, n=1):
95 return np.frombuffer(shared_memory.get_obj(),
96 dtype=space.dtype).reshape((n,) + space.shape)
97
98 def read_tuple_from_shared_memory(shared_memory, space, n=1):
99 return tuple(read_from_shared_memory(memory, subspace, n=n)
100 for (memory, subspace) in zip(shared_memory, space.spaces))
101
102 def read_dict_from_shared_memory(shared_memory, space, n=1):
103 return OrderedDict([(key, read_from_shared_memory(memory, subspace, n=n))
104 for ((key, memory), subspace) in zip(shared_memory.items(),
105 space.spaces.values())])
106
107
108 def write_to_shared_memory(index, value, shared_memory, space):
109 """Write the observation of a single environment into shared memory.
110
111 Parameters
112 ----------
113 index : int
114 Index of the environment (must be in `[0, num_envs)`).
115
116 value : sample from `space`
117 Observation of the single environment to write to shared memory.
118
119 shared_memory : dict, tuple, or `multiprocessing.Array` instance
120 Shared object across processes. This contains the observations from the
121 vectorized environment. This object is created with `create_shared_memory`.
122
123 space : `gym.spaces.Space` instance
124 Observation space of a single environment in the vectorized environment.
125
126 Returns
127 -------
128 `None`
129 """
130 if isinstance(space, _BaseGymSpaces):
131 write_base_to_shared_memory(index, value, shared_memory, space)
132 elif isinstance(space, Tuple):
133 write_tuple_to_shared_memory(index, value, shared_memory, space)
134 elif isinstance(space, Dict):
135 write_dict_to_shared_memory(index, value, shared_memory, space)
136 else:
137 raise NotImplementedError()
138
139 def write_base_to_shared_memory(index, value, shared_memory, space):
140 size = int(np.prod(space.shape))
141 shared_memory[index * size:(index + 1) * size] = np.asarray(value,
142 dtype=space.dtype).flatten()
143
144 def write_tuple_to_shared_memory(index, values, shared_memory, space):
145 for value, memory, subspace in zip(values, shared_memory, space.spaces):
146 write_to_shared_memory(index, value, memory, subspace)
147
148 def write_dict_to_shared_memory(index, values, shared_memory, space):
149 for key, value in values.items():
150 write_to_shared_memory(index, value, shared_memory[key], space.spaces[key])
151
[end of gym/vector/utils/shared_memory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gym/vector/utils/shared_memory.py b/gym/vector/utils/shared_memory.py
--- a/gym/vector/utils/shared_memory.py
+++ b/gym/vector/utils/shared_memory.py
@@ -100,9 +100,8 @@
for (memory, subspace) in zip(shared_memory, space.spaces))
def read_dict_from_shared_memory(shared_memory, space, n=1):
- return OrderedDict([(key, read_from_shared_memory(memory, subspace, n=n))
- for ((key, memory), subspace) in zip(shared_memory.items(),
- space.spaces.values())])
+ return OrderedDict([(key, read_from_shared_memory(shared_memory[key],
+ subspace, n=n)) for (key, subspace) in space.spaces.items()])
def write_to_shared_memory(index, value, shared_memory, space):
@@ -138,13 +137,14 @@
def write_base_to_shared_memory(index, value, shared_memory, space):
size = int(np.prod(space.shape))
- shared_memory[index * size:(index + 1) * size] = np.asarray(value,
- dtype=space.dtype).flatten()
+ destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)
+ np.copyto(destination[index * size:(index + 1) * size], np.asarray(
+ value, dtype=space.dtype).flatten())
def write_tuple_to_shared_memory(index, values, shared_memory, space):
for value, memory, subspace in zip(values, shared_memory, space.spaces):
write_to_shared_memory(index, value, memory, subspace)
def write_dict_to_shared_memory(index, values, shared_memory, space):
- for key, value in values.items():
- write_to_shared_memory(index, value, shared_memory[key], space.spaces[key])
+ for key, subspace in space.spaces.items():
+ write_to_shared_memory(index, values[key], shared_memory[key], subspace)
|
{"golden_diff": "diff --git a/gym/vector/utils/shared_memory.py b/gym/vector/utils/shared_memory.py\n--- a/gym/vector/utils/shared_memory.py\n+++ b/gym/vector/utils/shared_memory.py\n@@ -100,9 +100,8 @@\n for (memory, subspace) in zip(shared_memory, space.spaces))\n \n def read_dict_from_shared_memory(shared_memory, space, n=1):\n- return OrderedDict([(key, read_from_shared_memory(memory, subspace, n=n))\n- for ((key, memory), subspace) in zip(shared_memory.items(), \n- space.spaces.values())])\n+ return OrderedDict([(key, read_from_shared_memory(shared_memory[key],\n+ subspace, n=n)) for (key, subspace) in space.spaces.items()])\n \n \n def write_to_shared_memory(index, value, shared_memory, space):\n@@ -138,13 +137,14 @@\n \n def write_base_to_shared_memory(index, value, shared_memory, space):\n size = int(np.prod(space.shape))\n- shared_memory[index * size:(index + 1) * size] = np.asarray(value,\n- dtype=space.dtype).flatten()\n+ destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)\n+ np.copyto(destination[index * size:(index + 1) * size], np.asarray(\n+ value, dtype=space.dtype).flatten())\n \n def write_tuple_to_shared_memory(index, values, shared_memory, space):\n for value, memory, subspace in zip(values, shared_memory, space.spaces):\n write_to_shared_memory(index, value, memory, subspace)\n \n def write_dict_to_shared_memory(index, values, shared_memory, space):\n- for key, value in values.items():\n- write_to_shared_memory(index, value, shared_memory[key], space.spaces[key])\n+ for key, subspace in space.spaces.items():\n+ write_to_shared_memory(index, values[key], shared_memory[key], subspace)\n", "issue": "VectorEnv seems much slower than SubprocVecEnv\nThe following test script shows a large performance difference between SubprocVecEnv and VectorEnv:\r\n\r\n```\r\nelapsed_vectorenv 24.20034408569336\r\nelapsed_subprocvecenv 0.813650369644165\r\n```\r\n\r\n```\r\nfrom gym.vector import make\r\nfrom baselines.common.vec_env import SubprocVecEnv\r\nimport numpy as np\r\nimport time\r\nimport gym\r\n\r\nNUM_STEPS = 1000\r\n\r\ndef run_vectorenv():\r\n venv = make(\"PongNoFrameskip-v4\", num_envs=3)\r\n venv.reset()\r\n start = time.time()\r\n for _ in range(NUM_STEPS):\r\n obs, rews, dones, infos = venv.step(venv.action_space.sample())\r\n elapsed = time.time() - start\r\n venv.close()\r\n return elapsed\r\n\r\n\r\ndef run_subprocvecenv():\r\n def make_env():\r\n return gym.make(\"PongNoFrameskip-v4\")\r\n\r\n venv = SubprocVecEnv([make_env] * 3)\r\n venv.reset()\r\n start = time.time()\r\n for _ in range(NUM_STEPS):\r\n obs, rews, dones, infos = venv.step(np.array([venv.action_space.sample() for _ in range(venv.num_envs)]))\r\n elapsed = time.time() - start\r\n venv.close()\r\n return elapsed\r\n\r\n\r\ndef main():\r\n elapsed_vectorenv = run_vectorenv()\r\n elapsed_subprocvecenv = run_subprocvecenv()\r\n print(\"elapsed_vectorenv\", elapsed_vectorenv)\r\n print(\"elapsed_subprocvecenv\", elapsed_subprocvecenv)\r\n```\n", "before_files": [{"content": "import numpy as np\nfrom multiprocessing import Array\nfrom ctypes import c_bool\nfrom collections import OrderedDict\n\nfrom gym import logger\nfrom gym.spaces import Tuple, Dict\nfrom gym.vector.utils.spaces import _BaseGymSpaces\n\n__all__ = [\n 'create_shared_memory',\n 'read_from_shared_memory',\n 'write_to_shared_memory'\n]\n\ndef create_shared_memory(space, n=1):\n \"\"\"Create a shared memory object, to be shared across processes. 
This\n eventually contains the observations from the vectorized environment.\n\n Parameters\n ----------\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n n : int\n Number of environments in the vectorized environment (i.e. the number\n of processes).\n\n Returns\n -------\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes.\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n return create_base_shared_memory(space, n=n)\n elif isinstance(space, Tuple):\n return create_tuple_shared_memory(space, n=n)\n elif isinstance(space, Dict):\n return create_dict_shared_memory(space, n=n)\n else:\n raise NotImplementedError()\n\ndef create_base_shared_memory(space, n=1):\n dtype = space.dtype.char\n if dtype in '?':\n dtype = c_bool\n return Array(dtype, n * int(np.prod(space.shape)))\n\ndef create_tuple_shared_memory(space, n=1):\n return tuple(create_shared_memory(subspace, n=n)\n for subspace in space.spaces)\n\ndef create_dict_shared_memory(space, n=1):\n return OrderedDict([(key, create_shared_memory(subspace, n=n))\n for (key, subspace) in space.spaces.items()])\n\n\ndef read_from_shared_memory(shared_memory, space, n=1):\n \"\"\"Read the batch of observations from shared memory as a numpy array.\n\n Parameters\n ----------\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes. This contains the observations from the\n vectorized environment. This object is created with `create_shared_memory`.\n\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n n : int\n Number of environments in the vectorized environment (i.e. the number\n of processes).\n\n Returns\n -------\n observations : dict, tuple or `np.ndarray` instance\n Batch of observations as a (possibly nested) numpy array.\n\n Notes\n -----\n The numpy array objects returned by `read_from_shared_memory` shares the\n memory of `shared_memory`. Any changes to `shared_memory` are forwarded\n to `observations`, and vice-versa. To avoid any side-effect, use `np.copy`.\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n return read_base_from_shared_memory(shared_memory, space, n=n)\n elif isinstance(space, Tuple):\n return read_tuple_from_shared_memory(shared_memory, space, n=n)\n elif isinstance(space, Dict):\n return read_dict_from_shared_memory(shared_memory, space, n=n)\n else:\n raise NotImplementedError()\n\ndef read_base_from_shared_memory(shared_memory, space, n=1):\n return np.frombuffer(shared_memory.get_obj(),\n dtype=space.dtype).reshape((n,) + space.shape)\n\ndef read_tuple_from_shared_memory(shared_memory, space, n=1):\n return tuple(read_from_shared_memory(memory, subspace, n=n)\n for (memory, subspace) in zip(shared_memory, space.spaces))\n\ndef read_dict_from_shared_memory(shared_memory, space, n=1):\n return OrderedDict([(key, read_from_shared_memory(memory, subspace, n=n))\n for ((key, memory), subspace) in zip(shared_memory.items(), \n space.spaces.values())])\n\n\ndef write_to_shared_memory(index, value, shared_memory, space):\n \"\"\"Write the observation of a single environment into shared memory.\n\n Parameters\n ----------\n index : int\n Index of the environment (must be in `[0, num_envs)`).\n\n value : sample from `space`\n Observation of the single environment to write to shared memory.\n\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes. 
This contains the observations from the\n vectorized environment. This object is created with `create_shared_memory`.\n\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n Returns\n -------\n `None`\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n write_base_to_shared_memory(index, value, shared_memory, space)\n elif isinstance(space, Tuple):\n write_tuple_to_shared_memory(index, value, shared_memory, space)\n elif isinstance(space, Dict):\n write_dict_to_shared_memory(index, value, shared_memory, space)\n else:\n raise NotImplementedError()\n\ndef write_base_to_shared_memory(index, value, shared_memory, space):\n size = int(np.prod(space.shape))\n shared_memory[index * size:(index + 1) * size] = np.asarray(value,\n dtype=space.dtype).flatten()\n\ndef write_tuple_to_shared_memory(index, values, shared_memory, space):\n for value, memory, subspace in zip(values, shared_memory, space.spaces):\n write_to_shared_memory(index, value, memory, subspace)\n\ndef write_dict_to_shared_memory(index, values, shared_memory, space):\n for key, value in values.items():\n write_to_shared_memory(index, value, shared_memory[key], space.spaces[key])\n", "path": "gym/vector/utils/shared_memory.py"}]}
| 2,476 | 428 |
gh_patches_debug_37156
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-3949
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.svtplay: Cannot resolve playable stream
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
When trying to play for example https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD
It does not seem to parse the information correctly anymore to resolve a playable stream. This plugin compared to the one for tv4play is parsing the HTML page in order to resolve where to go if you are on the program page instead of the episode page.
This lookup resolves to nothing anymore:
```
latest_episode_url_re = re.compile(r'''
class="play_titlepage__latest-video"\s+href="(?P<url>[^"]+)"
''', re.VERBOSE)
```
When debugging I find that this https://api.svt.se/video/KA2BmZD resolves the JSON.
```
{
"svtId": "KA2BmZD",
"programVersionId": "1400537-002A",
"contentDuration": 3506,
"blockedForChildren": false,
"live": false,
"programTitle": "Draknästet",
"episodeTitle": "Avsnitt 2",
...
}
```
With the following changes it resolves the video_id correctly from the HTML:
```
latest_episode_vod_id = re.compile(r'''
data-rt="top-area-play-button"\s+href=".*(?:id=)(?P<video_id>[^"]+)"
''', re.VERBOSE)
```
If you are directly on the play page of the site you get the vod_id in the URL as a parameter. So I have refactored to support both.
Now it finds the vod_id and initiates the dash stream worker but it still doesn't run.
I get Exception in thread Thread-DASHStreamWorker and this one seems a little more tricky to figure out.
```
[cli][info] Opening stream: 1080p (dash)
[stream.dash][debug] Opening DASH reader for: 0 (video/mp4)
[stream.dash][debug] Opening DASH reader for: 5 (audio/mp4)
[stream.dash_manifest][debug] Generating segment timeline for static playlist (id=0))
[stream.dash_manifest][debug] Generating segment timeline for static playlist (id=5))
[cli][error] Try 1/1: Could not open stream <Stream()> (Could not open stream: cannot use FFMPEG)
[stream.ffmpegmux][debug] Closing ffmpeg thread
error: Could not open stream <Stream()>, tried 1 times, exiting
[stream.dash][debug] Download of segment: https://svt-vod-8r.akamaized.net/d0/se/20210723/841d135d-fb92-4acb-9dff-898c1db4af30/cmaf-video-avc-1920x1080p25-3089/cmaf-video-avc-1920x1080p25-3089-init.mp4 complete
Exception in thread Thread-DASHStreamWorker:
Traceback (most recent call last):
File "/usr/lib64/python3.9/threading.py", line 973, in _bootstrap_inner
self.run()
File "/usr/local/lib/python3.9/site-packages/streamlink-2.1.2+79.g2b9ca5d.dirty-py3.9.egg/streamlink/stream/segmented.py", line 87, in run
self.writer.put(segment)
File "/usr/local/lib/python3.9/site-packages/streamlink-2.1.2+79.g2b9ca5d.dirty-py3.9.egg/streamlink/stream/segmented.py", line 140, in put
future = self.executor.submit(self.fetch, segment, retries=self.retries)
File "/usr/lib64/python3.9/concurrent/futures/thread.py", line 163, in submit
raise RuntimeError('cannot schedule new futures after '
RuntimeError: cannot schedule new futures after interpreter shutdown
```
### Debug log
```text
[cli][debug] OS: Linux-5.13.12-200.fc34.x86_64-x86_64-with-glibc2.33
[cli][debug] Python: 3.9.6
[cli][debug] Streamlink: 2.3.0
[cli][debug] Requests(2.25.1), Socks(1.7.1), Websocket(0.57.0)
[cli][debug] Arguments:
[cli][debug] url=https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --player=mpv
[cli][debug] --verbose-player=True
[cli][debug] --player-passthrough=['hls']
[cli][debug] --hls-segment-threads=2
[cli][info] Found matching plugin svtplay for URL https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD
[plugins.svtplay][debug] Path=/
error: No playable streams found on this URL: https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD
```
</issue>
<code>
[start of src/streamlink/plugins/svtplay.py]
1 import logging
2 import re
3 from urllib.parse import urljoin
4
5 from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
6 from streamlink.plugin.api import validate
7 from streamlink.stream import DASHStream, HTTPStream
8 from streamlink.stream.ffmpegmux import MuxedStream
9
10 log = logging.getLogger(__name__)
11
12
13 @pluginmatcher(re.compile(
14 r'https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se(/(kanaler/)?)'
15 ))
16 class SVTPlay(Plugin):
17 api_url = 'https://api.svt.se/videoplayer-api/video/{0}'
18
19 author = None
20 category = None
21 title = None
22
23 latest_episode_url_re = re.compile(r'''
24 class="play_titlepage__latest-video"\s+href="(?P<url>[^"]+)"
25 ''', re.VERBOSE)
26
27 live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')
28
29 vod_id_re = re.compile(r'''
30 (?:DATA_LAKE\s+=\s+{"content":{"id":|"svtId":|data-video-id=)
31 "(?P<vod_id>[^"]+)"
32 ''', re.VERBOSE)
33
34 _video_schema = validate.Schema({
35 validate.optional('programTitle'): validate.text,
36 validate.optional('episodeTitle'): validate.text,
37 'videoReferences': [{
38 'url': validate.url(),
39 'format': validate.text,
40 }],
41 validate.optional('subtitleReferences'): [{
42 'url': validate.url(),
43 'format': validate.text,
44 }],
45 })
46
47 arguments = PluginArguments(
48 PluginArgument("mux-subtitles", is_global=True)
49 )
50
51 def get_author(self):
52 if self.author is not None:
53 return self.author
54
55 def get_category(self):
56 if self.category is not None:
57 return self.category
58
59 def get_title(self):
60 if self.title is not None:
61 return self.title
62
63 def _set_metadata(self, data, category):
64 if 'programTitle' in data:
65 self.author = data['programTitle']
66
67 self.category = category
68
69 if 'episodeTitle' in data:
70 self.title = data['episodeTitle']
71
72 def _get_live(self, path):
73 match = self.live_id_re.search(path)
74 if match is None:
75 return
76
77 live_id = "ch-{0}".format(match.group('live_id'))
78 log.debug("Live ID={0}".format(live_id))
79
80 res = self.session.http.get(self.api_url.format(live_id))
81 api_data = self.session.http.json(res, schema=self._video_schema)
82
83 self._set_metadata(api_data, 'Live')
84
85 for playlist in api_data['videoReferences']:
86 if playlist['format'] == 'dashhbbtv':
87 yield from DASHStream.parse_manifest(self.session, playlist['url']).items()
88
89 def _get_vod(self):
90 res = self.session.http.get(self.url)
91 match = self.latest_episode_url_re.search(res.text)
92 if match:
93 res = self.session.http.get(
94 urljoin(self.url, match.group('url')),
95 )
96
97 match = self.vod_id_re.search(res.text)
98 if match is None:
99 return
100
101 vod_id = match.group('vod_id')
102 log.debug("VOD ID={0}".format(vod_id))
103
104 res = self.session.http.get(self.api_url.format(vod_id))
105 api_data = self.session.http.json(res, schema=self._video_schema)
106
107 self._set_metadata(api_data, 'VOD')
108
109 substreams = {}
110 if 'subtitleReferences' in api_data:
111 for subtitle in api_data['subtitleReferences']:
112 if subtitle['format'] == 'webvtt':
113 log.debug("Subtitle={0}".format(subtitle['url']))
114 substreams[subtitle['format']] = HTTPStream(
115 self.session,
116 subtitle['url'],
117 )
118
119 for manifest in api_data['videoReferences']:
120 if manifest['format'] == 'dashhbbtv':
121 for q, s in DASHStream.parse_manifest(self.session, manifest['url']).items():
122 if self.get_option('mux_subtitles') and substreams:
123 yield q, MuxedStream(self.session, s, subtitles=substreams)
124 else:
125 yield q, s
126
127 def _get_streams(self):
128 path, live = self.match.groups()
129 log.debug("Path={0}".format(path))
130
131 if live:
132 return self._get_live(path)
133 else:
134 return self._get_vod()
135
136
137 __plugin__ = SVTPlay
138
[end of src/streamlink/plugins/svtplay.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/svtplay.py b/src/streamlink/plugins/svtplay.py
--- a/src/streamlink/plugins/svtplay.py
+++ b/src/streamlink/plugins/svtplay.py
@@ -1,6 +1,6 @@
import logging
import re
-from urllib.parse import urljoin
+from urllib.parse import parse_qsl, urlparse
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
@@ -21,16 +21,11 @@
title = None
latest_episode_url_re = re.compile(r'''
- class="play_titlepage__latest-video"\s+href="(?P<url>[^"]+)"
+ data-rt="top-area-play-button"\s+href="(?P<url>[^"]+)"
''', re.VERBOSE)
live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')
- vod_id_re = re.compile(r'''
- (?:DATA_LAKE\s+=\s+{"content":{"id":|"svtId":|data-video-id=)
- "(?P<vod_id>[^"]+)"
- ''', re.VERBOSE)
-
_video_schema = validate.Schema({
validate.optional('programTitle'): validate.text,
validate.optional('episodeTitle'): validate.text,
@@ -87,18 +82,18 @@
yield from DASHStream.parse_manifest(self.session, playlist['url']).items()
def _get_vod(self):
- res = self.session.http.get(self.url)
- match = self.latest_episode_url_re.search(res.text)
- if match:
- res = self.session.http.get(
- urljoin(self.url, match.group('url')),
- )
-
- match = self.vod_id_re.search(res.text)
- if match is None:
+ vod_id = self._get_vod_id(self.url)
+
+ if vod_id is None:
+ res = self.session.http.get(self.url)
+ match = self.latest_episode_url_re.search(res.text)
+ if match is None:
+ return
+ vod_id = self._get_vod_id(match.group("url"))
+
+ if vod_id is None:
return
- vod_id = match.group('vod_id')
log.debug("VOD ID={0}".format(vod_id))
res = self.session.http.get(self.api_url.format(vod_id))
@@ -124,6 +119,10 @@
else:
yield q, s
+ def _get_vod_id(self, url):
+ qs = dict(parse_qsl(urlparse(url).query))
+ return qs.get("id")
+
def _get_streams(self):
path, live = self.match.groups()
log.debug("Path={0}".format(path))
|
{"golden_diff": "diff --git a/src/streamlink/plugins/svtplay.py b/src/streamlink/plugins/svtplay.py\n--- a/src/streamlink/plugins/svtplay.py\n+++ b/src/streamlink/plugins/svtplay.py\n@@ -1,6 +1,6 @@\n import logging\n import re\n-from urllib.parse import urljoin\n+from urllib.parse import parse_qsl, urlparse\n \n from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher\n from streamlink.plugin.api import validate\n@@ -21,16 +21,11 @@\n title = None\n \n latest_episode_url_re = re.compile(r'''\n- class=\"play_titlepage__latest-video\"\\s+href=\"(?P<url>[^\"]+)\"\n+ data-rt=\"top-area-play-button\"\\s+href=\"(?P<url>[^\"]+)\"\n ''', re.VERBOSE)\n \n live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')\n \n- vod_id_re = re.compile(r'''\n- (?:DATA_LAKE\\s+=\\s+{\"content\":{\"id\":|\"svtId\":|data-video-id=)\n- \"(?P<vod_id>[^\"]+)\"\n- ''', re.VERBOSE)\n-\n _video_schema = validate.Schema({\n validate.optional('programTitle'): validate.text,\n validate.optional('episodeTitle'): validate.text,\n@@ -87,18 +82,18 @@\n yield from DASHStream.parse_manifest(self.session, playlist['url']).items()\n \n def _get_vod(self):\n- res = self.session.http.get(self.url)\n- match = self.latest_episode_url_re.search(res.text)\n- if match:\n- res = self.session.http.get(\n- urljoin(self.url, match.group('url')),\n- )\n-\n- match = self.vod_id_re.search(res.text)\n- if match is None:\n+ vod_id = self._get_vod_id(self.url)\n+\n+ if vod_id is None:\n+ res = self.session.http.get(self.url)\n+ match = self.latest_episode_url_re.search(res.text)\n+ if match is None:\n+ return\n+ vod_id = self._get_vod_id(match.group(\"url\"))\n+\n+ if vod_id is None:\n return\n \n- vod_id = match.group('vod_id')\n log.debug(\"VOD ID={0}\".format(vod_id))\n \n res = self.session.http.get(self.api_url.format(vod_id))\n@@ -124,6 +119,10 @@\n else:\n yield q, s\n \n+ def _get_vod_id(self, url):\n+ qs = dict(parse_qsl(urlparse(url).query))\n+ return qs.get(\"id\")\n+\n def _get_streams(self):\n path, live = self.match.groups()\n log.debug(\"Path={0}\".format(path))\n", "issue": "plugins.svtplay: Cannot resolve playable stream\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nWhen trying to play for example https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD\r\n\r\nIt does not seem to parse the information correctly anymore to resolve a playable stream. 
This plugin compared to the one for tv4play is parsing the HTML page in order to resolve where to go if you are on the program page instead of the episode page.\r\n\r\nThis lookup resolves to nothing anymore:\r\n\r\n```\r\nlatest_episode_url_re = re.compile(r'''\r\n class=\"play_titlepage__latest-video\"\\s+href=\"(?P<url>[^\"]+)\"\r\n ''', re.VERBOSE)\r\n```\r\n\r\nWhen debugging I find that this https://api.svt.se/video/KA2BmZD resolves the JSON.\r\n\r\n```\r\n{\r\n \"svtId\": \"KA2BmZD\",\r\n \"programVersionId\": \"1400537-002A\",\r\n \"contentDuration\": 3506,\r\n \"blockedForChildren\": false,\r\n \"live\": false,\r\n \"programTitle\": \"Drakn\u00e4stet\",\r\n \"episodeTitle\": \"Avsnitt 2\",\r\n ...\r\n }\r\n```\r\n\r\nWith the following changes it resolves the video_id correctly from the HTML:\r\n```\r\nlatest_episode_vod_id = re.compile(r'''\r\n data-rt=\"top-area-play-button\"\\s+href=\".*(?:id=)(?P<video_id>[^\"]+)\"\r\n ''', re.VERBOSE)\r\n```\r\n\r\nIf you are directly on the play page of the site you get the vod_id in the URL as a parameter. So I have refactored to support both.\r\n\r\nNow it finds the vod_id and initiates the dash stream worker but it still doesn't run.\r\n\r\nI get Exception in thread Thread-DASHStreamWorker and this one seems a little more tricky to figure out.\r\n\r\n```\r\n[cli][info] Opening stream: 1080p (dash)\r\n[stream.dash][debug] Opening DASH reader for: 0 (video/mp4)\r\n[stream.dash][debug] Opening DASH reader for: 5 (audio/mp4)\r\n[stream.dash_manifest][debug] Generating segment timeline for static playlist (id=0))\r\n[stream.dash_manifest][debug] Generating segment timeline for static playlist (id=5))\r\n[cli][error] Try 1/1: Could not open stream <Stream()> (Could not open stream: cannot use FFMPEG)\r\n[stream.ffmpegmux][debug] Closing ffmpeg thread\r\nerror: Could not open stream <Stream()>, tried 1 times, exiting\r\n[stream.dash][debug] Download of segment: https://svt-vod-8r.akamaized.net/d0/se/20210723/841d135d-fb92-4acb-9dff-898c1db4af30/cmaf-video-avc-1920x1080p25-3089/cmaf-video-avc-1920x1080p25-3089-init.mp4 complete\r\nException in thread Thread-DASHStreamWorker:\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.9/threading.py\", line 973, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/local/lib/python3.9/site-packages/streamlink-2.1.2+79.g2b9ca5d.dirty-py3.9.egg/streamlink/stream/segmented.py\", line 87, in run\r\n self.writer.put(segment)\r\n File \"/usr/local/lib/python3.9/site-packages/streamlink-2.1.2+79.g2b9ca5d.dirty-py3.9.egg/streamlink/stream/segmented.py\", line 140, in put\r\n future = self.executor.submit(self.fetch, segment, retries=self.retries)\r\n File \"/usr/lib64/python3.9/concurrent/futures/thread.py\", line 163, in submit\r\n raise RuntimeError('cannot schedule new futures after '\r\nRuntimeError: cannot schedule new futures after interpreter shutdown\r\n```\r\n\n\n### Debug log\n\n```text\n[cli][debug] OS: Linux-5.13.12-200.fc34.x86_64-x86_64-with-glibc2.33\r\n[cli][debug] Python: 3.9.6\r\n[cli][debug] Streamlink: 2.3.0\r\n[cli][debug] Requests(2.25.1), Socks(1.7.1), Websocket(0.57.0)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=mpv\r\n[cli][debug] --verbose-player=True\r\n[cli][debug] --player-passthrough=['hls']\r\n[cli][debug] --hls-segment-threads=2\r\n[cli][info] Found matching plugin svtplay for URL 
https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD\r\n[plugins.svtplay][debug] Path=/\r\nerror: No playable streams found on this URL: https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD\n```\n\n", "before_files": [{"content": "import logging\nimport re\nfrom urllib.parse import urljoin\n\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import DASHStream, HTTPStream\nfrom streamlink.stream.ffmpegmux import MuxedStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r'https?://(?:www\\.)?(?:svtplay|oppetarkiv)\\.se(/(kanaler/)?)'\n))\nclass SVTPlay(Plugin):\n api_url = 'https://api.svt.se/videoplayer-api/video/{0}'\n\n author = None\n category = None\n title = None\n\n latest_episode_url_re = re.compile(r'''\n class=\"play_titlepage__latest-video\"\\s+href=\"(?P<url>[^\"]+)\"\n ''', re.VERBOSE)\n\n live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')\n\n vod_id_re = re.compile(r'''\n (?:DATA_LAKE\\s+=\\s+{\"content\":{\"id\":|\"svtId\":|data-video-id=)\n \"(?P<vod_id>[^\"]+)\"\n ''', re.VERBOSE)\n\n _video_schema = validate.Schema({\n validate.optional('programTitle'): validate.text,\n validate.optional('episodeTitle'): validate.text,\n 'videoReferences': [{\n 'url': validate.url(),\n 'format': validate.text,\n }],\n validate.optional('subtitleReferences'): [{\n 'url': validate.url(),\n 'format': validate.text,\n }],\n })\n\n arguments = PluginArguments(\n PluginArgument(\"mux-subtitles\", is_global=True)\n )\n\n def get_author(self):\n if self.author is not None:\n return self.author\n\n def get_category(self):\n if self.category is not None:\n return self.category\n\n def get_title(self):\n if self.title is not None:\n return self.title\n\n def _set_metadata(self, data, category):\n if 'programTitle' in data:\n self.author = data['programTitle']\n\n self.category = category\n\n if 'episodeTitle' in data:\n self.title = data['episodeTitle']\n\n def _get_live(self, path):\n match = self.live_id_re.search(path)\n if match is None:\n return\n\n live_id = \"ch-{0}\".format(match.group('live_id'))\n log.debug(\"Live ID={0}\".format(live_id))\n\n res = self.session.http.get(self.api_url.format(live_id))\n api_data = self.session.http.json(res, schema=self._video_schema)\n\n self._set_metadata(api_data, 'Live')\n\n for playlist in api_data['videoReferences']:\n if playlist['format'] == 'dashhbbtv':\n yield from DASHStream.parse_manifest(self.session, playlist['url']).items()\n\n def _get_vod(self):\n res = self.session.http.get(self.url)\n match = self.latest_episode_url_re.search(res.text)\n if match:\n res = self.session.http.get(\n urljoin(self.url, match.group('url')),\n )\n\n match = self.vod_id_re.search(res.text)\n if match is None:\n return\n\n vod_id = match.group('vod_id')\n log.debug(\"VOD ID={0}\".format(vod_id))\n\n res = self.session.http.get(self.api_url.format(vod_id))\n api_data = self.session.http.json(res, schema=self._video_schema)\n\n self._set_metadata(api_data, 'VOD')\n\n substreams = {}\n if 'subtitleReferences' in api_data:\n for subtitle in api_data['subtitleReferences']:\n if subtitle['format'] == 'webvtt':\n log.debug(\"Subtitle={0}\".format(subtitle['url']))\n substreams[subtitle['format']] = HTTPStream(\n self.session,\n subtitle['url'],\n )\n\n for manifest in api_data['videoReferences']:\n if manifest['format'] == 'dashhbbtv':\n for q, s in 
DASHStream.parse_manifest(self.session, manifest['url']).items():\n if self.get_option('mux_subtitles') and substreams:\n yield q, MuxedStream(self.session, s, subtitles=substreams)\n else:\n yield q, s\n\n def _get_streams(self):\n path, live = self.match.groups()\n log.debug(\"Path={0}\".format(path))\n\n if live:\n return self._get_live(path)\n else:\n return self._get_vod()\n\n\n__plugin__ = SVTPlay\n", "path": "src/streamlink/plugins/svtplay.py"}]}
| 3,298 | 630 |
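The heart of the svtplay patch above is swapping fragile HTML scraping for reading the VOD id straight from the URL's query string; a minimal standalone sketch of that lookup, using the episode URL from the issue's debug log:

```python
from urllib.parse import parse_qsl, urlparse

def get_vod_id(url):
    # The episode URL carries the VOD id as an "id" query parameter.
    qs = dict(parse_qsl(urlparse(url).query))
    return qs.get("id")

print(get_vod_id(
    "https://www.svtplay.se/video/32279075/draknastet/"
    "draknastet-sasong-1-avsnitt-2?id=KA2BmZD"
))  # -> KA2BmZD
```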
gh_patches_debug_3324
|
rasdani/github-patches
|
git_diff
|
pandas-dev__pandas-4991
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: Unexpected behaviour when reading large text files with mixed datatypes
read_csv gives unexpected behaviour with large files if a column contains both strings and integers. eg
``` python
>>> df=DataFrame({'colA':range(500000-1)+['apple', 'pear']+range(500000-1)})
len(set(df.colA))
500001
>>> df.to_csv('testpandas2.txt')
>>> df2=read_csv('testpandas2.txt')
>>> len(set(df2.colA))
762143
>>> pandas.__version__
'0.11.0'
```
It seems some of the integers are parsed as integers and others as strings.
``` python
>>> list(set(df2.colA))[-10:]
['282248', '282249', '282240', '282241', '282242', '15679', '282244', '282245', '282246', '282247']
>>> list(set(df2.colA))[:10]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
```
BUG: Unexpected behaviour when reading large text files with mixed datatypes
read_csv gives unexpected behaviour with large files if a column contains both strings and integers. eg
``` python
>>> df=DataFrame({'colA':range(500000-1)+['apple', 'pear']+range(500000-1)})
len(set(df.colA))
500001
>>> df.to_csv('testpandas2.txt')
>>> df2=read_csv('testpandas2.txt')
>>> len(set(df2.colA))
762143
>>> pandas.__version__
'0.11.0'
```
It seems some of the integers are parsed as integers and others as strings.
``` python
>>> list(set(df2.colA))[-10:]
['282248', '282249', '282240', '282241', '282242', '15679', '282244', '282245', '282246', '282247']
>>> list(set(df2.colA))[:10]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
```
</issue>
<code>
[start of pandas/io/common.py]
1 """Common IO api utilities"""
2
3 import sys
4 import zipfile
5 from contextlib import contextmanager, closing
6
7 from pandas.compat import StringIO
8 from pandas import compat
9
10
11 if compat.PY3:
12 from urllib.request import urlopen
13 _urlopen = urlopen
14 from urllib.parse import urlparse as parse_url
15 import urllib.parse as compat_parse
16 from urllib.parse import uses_relative, uses_netloc, uses_params, urlencode
17 from urllib.error import URLError
18 from http.client import HTTPException
19 else:
20 from urllib2 import urlopen as _urlopen
21 from urllib import urlencode
22 from urlparse import urlparse as parse_url
23 from urlparse import uses_relative, uses_netloc, uses_params
24 from urllib2 import URLError
25 from httplib import HTTPException
26 from contextlib import contextmanager, closing
27 from functools import wraps
28
29 # @wraps(_urlopen)
30 @contextmanager
31 def urlopen(*args, **kwargs):
32 with closing(_urlopen(*args, **kwargs)) as f:
33 yield f
34
35
36 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
37 _VALID_URLS.discard('')
38
39 class PerformanceWarning(Warning):
40 pass
41
42
43 def _is_url(url):
44 """Check to see if a URL has a valid protocol.
45
46 Parameters
47 ----------
48 url : str or unicode
49
50 Returns
51 -------
52 isurl : bool
53 If `url` has a valid protocol return True otherwise False.
54 """
55 try:
56 return parse_url(url).scheme in _VALID_URLS
57 except:
58 return False
59
60
61 def _is_s3_url(url):
62 """Check for an s3 url"""
63 try:
64 return parse_url(url).scheme == 's3'
65 except:
66 return False
67
68
69 def maybe_read_encoded_stream(reader, encoding=None):
70 """ read an encoded stream from the reader and transform the bytes to unicode
71 if required based on the encoding
72
73 Parameters
74 ----------
75 reader : a streamable file-like object
76 encoding : optional, the encoding to attempt to read
77
78 Returns
79 -------
80 a tuple of (a stream of decoded bytes, the encoding which was used)
81
82 """
83
84 if compat.PY3 or encoding is not None: # pragma: no cover
85 if encoding:
86 errors = 'strict'
87 else:
88 errors = 'replace'
89 encoding = 'utf-8'
90 reader = StringIO(reader.read().decode(encoding, errors))
91 else:
92 encoding = None
93 return reader, encoding
94
95 def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
96 """
97 If the filepath_or_buffer is a url, translate and return the buffer
98 passthru otherwise.
99
100 Parameters
101 ----------
102 filepath_or_buffer : a url, filepath, or buffer
103 encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
104
105 Returns
106 -------
107 a filepath_or_buffer, the encoding
108 """
109
110 if _is_url(filepath_or_buffer):
111 req = _urlopen(str(filepath_or_buffer))
112 return maybe_read_encoded_stream(req,encoding)
113
114 if _is_s3_url(filepath_or_buffer):
115 try:
116 import boto
117 except:
118 raise ImportError("boto is required to handle s3 files")
119 # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
120 # are environment variables
121 parsed_url = parse_url(filepath_or_buffer)
122 conn = boto.connect_s3()
123 b = conn.get_bucket(parsed_url.netloc)
124 k = boto.s3.key.Key(b)
125 k.key = parsed_url.path
126 filepath_or_buffer = StringIO(k.get_contents_as_string())
127 return filepath_or_buffer, None
128
129 return filepath_or_buffer, None
130
131
132 # ZipFile is not a context manager for <= 2.6
133 # must be tuple index here since 2.6 doesn't use namedtuple for version_info
134 if sys.version_info[1] <= 6:
135 @contextmanager
136 def ZipFile(*args, **kwargs):
137 with closing(zipfile.ZipFile(*args, **kwargs)) as zf:
138 yield zf
139 else:
140 ZipFile = zipfile.ZipFile
141
[end of pandas/io/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pandas/io/common.py b/pandas/io/common.py
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -36,10 +36,15 @@
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
+
class PerformanceWarning(Warning):
pass
+class DtypeWarning(Warning):
+ pass
+
+
def _is_url(url):
"""Check to see if a URL has a valid protocol.
|
{"golden_diff": "diff --git a/pandas/io/common.py b/pandas/io/common.py\n--- a/pandas/io/common.py\n+++ b/pandas/io/common.py\n@@ -36,10 +36,15 @@\n _VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n _VALID_URLS.discard('')\n \n+\n class PerformanceWarning(Warning):\n pass\n \n \n+class DtypeWarning(Warning):\n+ pass\n+\n+\n def _is_url(url):\n \"\"\"Check to see if a URL has a valid protocol.\n", "issue": "BUG: Unexpected behaviour when reading large text files with mixed datatypes\nread_csv gives unexpected behaviour with large files if a column contains both strings and integers. eg\n\n``` python\n\n>>> df=DataFrame({'colA':range(500000-1)+['apple', 'pear']+range(500000-1)})\nlen(set(df.colA))\n500001\n\n>>> df.to_csv('testpandas2.txt')\n>>> df2=read_csv('testpandas2.txt')\n>>> len(set(df2.colA))\n762143\n\n >>> pandas.__version__\n'0.11.0'\n```\n\nIt seems some of the integers are parsed as integers and others as strings.\n\n``` python\n>>> list(set(df2.colA))[-10:]\n['282248', '282249', '282240', '282241', '282242', '15679', '282244', '282245', '282246', '282247']\n>>> list(set(df2.colA))[:10]\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n```\n\nBUG: Unexpected behaviour when reading large text files with mixed datatypes\nread_csv gives unexpected behaviour with large files if a column contains both strings and integers. eg\n\n``` python\n\n>>> df=DataFrame({'colA':range(500000-1)+['apple', 'pear']+range(500000-1)})\nlen(set(df.colA))\n500001\n\n>>> df.to_csv('testpandas2.txt')\n>>> df2=read_csv('testpandas2.txt')\n>>> len(set(df2.colA))\n762143\n\n >>> pandas.__version__\n'0.11.0'\n```\n\nIt seems some of the integers are parsed as integers and others as strings.\n\n``` python\n>>> list(set(df2.colA))[-10:]\n['282248', '282249', '282240', '282241', '282242', '15679', '282244', '282245', '282246', '282247']\n>>> list(set(df2.colA))[:10]\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n```\n\n", "before_files": [{"content": "\"\"\"Common IO api utilities\"\"\"\n\nimport sys\nimport zipfile\nfrom contextlib import contextmanager, closing\n\nfrom pandas.compat import StringIO\nfrom pandas import compat\n\n\nif compat.PY3:\n from urllib.request import urlopen\n _urlopen = urlopen\n from urllib.parse import urlparse as parse_url\n import urllib.parse as compat_parse\n from urllib.parse import uses_relative, uses_netloc, uses_params, urlencode\n from urllib.error import URLError\n from http.client import HTTPException\nelse:\n from urllib2 import urlopen as _urlopen\n from urllib import urlencode\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n from urllib2 import URLError\n from httplib import HTTPException\n from contextlib import contextmanager, closing\n from functools import wraps\n\n # @wraps(_urlopen)\n @contextmanager\n def urlopen(*args, **kwargs):\n with closing(_urlopen(*args, **kwargs)) as f:\n yield f\n\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\nclass PerformanceWarning(Warning):\n pass\n\n\ndef _is_url(url):\n \"\"\"Check to see if a URL has a valid protocol.\n\n Parameters\n ----------\n url : str or unicode\n\n Returns\n -------\n isurl : bool\n If `url` has a valid protocol return True otherwise False.\n \"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef _is_s3_url(url):\n \"\"\"Check for an s3 url\"\"\"\n try:\n return parse_url(url).scheme == 's3'\n except:\n return False\n\n\ndef maybe_read_encoded_stream(reader, encoding=None):\n \"\"\" 
read an encoded stream from the reader and transform the bytes to unicode\n if required based on the encoding\n\n Parameters\n ----------\n reader : a streamable file-like object\n encoding : optional, the encoding to attempt to read\n\n Returns\n -------\n a tuple of (a stream of decoded bytes, the encoding which was used)\n\n \"\"\"\n\n if compat.PY3 or encoding is not None: # pragma: no cover\n if encoding:\n errors = 'strict'\n else:\n errors = 'replace'\n encoding = 'utf-8'\n reader = StringIO(reader.read().decode(encoding, errors))\n else:\n encoding = None\n return reader, encoding\n\ndef get_filepath_or_buffer(filepath_or_buffer, encoding=None):\n \"\"\"\n If the filepath_or_buffer is a url, translate and return the buffer\n passthru otherwise.\n\n Parameters\n ----------\n filepath_or_buffer : a url, filepath, or buffer\n encoding : the encoding to use to decode py3 bytes, default is 'utf-8'\n\n Returns\n -------\n a filepath_or_buffer, the encoding\n \"\"\"\n\n if _is_url(filepath_or_buffer):\n req = _urlopen(str(filepath_or_buffer))\n return maybe_read_encoded_stream(req,encoding)\n\n if _is_s3_url(filepath_or_buffer):\n try:\n import boto\n except:\n raise ImportError(\"boto is required to handle s3 files\")\n # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY\n # are environment variables\n parsed_url = parse_url(filepath_or_buffer)\n conn = boto.connect_s3()\n b = conn.get_bucket(parsed_url.netloc)\n k = boto.s3.key.Key(b)\n k.key = parsed_url.path\n filepath_or_buffer = StringIO(k.get_contents_as_string())\n return filepath_or_buffer, None\n\n return filepath_or_buffer, None\n\n\n# ZipFile is not a context manager for <= 2.6\n# must be tuple index here since 2.6 doesn't use namedtuple for version_info\nif sys.version_info[1] <= 6:\n @contextmanager\n def ZipFile(*args, **kwargs):\n with closing(zipfile.ZipFile(*args, **kwargs)) as zf:\n yield zf\nelse:\n ZipFile = zipfile.ZipFile\n", "path": "pandas/io/common.py"}]}
| 2,330 | 114 |
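The pandas patch above only introduces the `DtypeWarning` class used by the parser for the mixed-dtype situation described in the issue; on the caller side, a common workaround (an assumption on my part, not part of this record's fix) is to pin the column dtype so chunked type inference cannot disagree with itself, assuming a reasonably recent pandas:

```python
import pandas as pd

# Reproduction from the issue, then a dtype pin on read-back.
df = pd.DataFrame({"colA": list(range(499999)) + ["apple", "pear"] + list(range(499999))})
df.to_csv("testpandas2.txt", index=False)

df2 = pd.read_csv("testpandas2.txt", dtype={"colA": str})
print(len(set(df2.colA)))  # 500001, matching the original frame
```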
gh_patches_debug_5432
|
rasdani/github-patches
|
git_diff
|
lhotse-speech__lhotse-240
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cut concatenate doesn't consider the first sample in each batch
Found in #234
</issue>
<code>
[start of lhotse/dataset/cut_transforms/concatenate.py]
1 from typing import Optional, Sequence
2
3 from lhotse import CutSet
4 from lhotse.cut import AnyCut
5 from lhotse.utils import Seconds
6
7
8 class CutConcatenate:
9 """
10 A transform on batch of cuts (``CutSet``) that concatenates the cuts to minimize the total amount of padding;
11 e.g. instead of creating a batch with 40 examples, we will merge some of the examples together
12 adding some silence between them to avoid a large number of padding frames that waste the computation.
13 """
14
15 def __init__(
16 self,
17 gap: Seconds = 1.0,
18 duration_factor: float = 1.0
19 ) -> None:
20 """
21 CutConcatenate's constructor.
22
23 :param gap: The duration of silence in seconds that is inserted between the cuts;
24 it's goal is to let the model "know" that there are separate utterances in a single example.
25 :param duration_factor: Determines the maximum duration of the concatenated cuts;
26 by default it's 1, setting the limit at the duration of the longest cut in the batch.
27 """
28 self.gap = gap
29 self.duration_factor = duration_factor
30
31 def __call__(self, cuts: CutSet) -> CutSet:
32 cuts = cuts.sort_by_duration(ascending=False)
33 return concat_cuts(
34 cuts,
35 gap=self.gap,
36 max_duration=cuts[0].duration * self.duration_factor
37 )
38
39
40 def concat_cuts(
41 cuts: Sequence[AnyCut],
42 gap: Seconds = 1.0,
43 max_duration: Optional[Seconds] = None
44 ) -> CutSet:
45 """
46 We're going to concatenate the cuts to minimize the amount of total padding frames used.
47 This means that some samples in the batch will be merged together into one sample,
48 separated by an interval of silence.
49 This is actually solving a knapsack problem.
50 In this initial implementation we're using a greedy approach:
51 going from the back (i.e. the shortest cuts) we'll try to concat them to the longest cut
52 that still has some "space" at the end.
53
54 :param cuts: a list of cuts to pack.
55 :param gap: the duration of silence inserted between concatenated cuts.
56 :param max_duration: the maximum duration for the concatenated cuts
57 (by default set to the duration of the first cut).
58 :return a list of packed cuts.
59 """
60 if len(cuts) <= 1:
61 # Nothing to do.
62 return CutSet.from_cuts(cuts)
63 cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)
64 max_duration = cuts[0].duration if max_duration is None else max_duration
65 current_idx = 1
66 while True:
67 can_fit = False
68 shortest = cuts[-1]
69 for idx in range(current_idx, len(cuts) - 1):
70 cut = cuts[current_idx]
71 can_fit = cut.duration + gap + shortest.duration <= max_duration
72 if can_fit:
73 cuts[current_idx] = cut.pad(cut.duration + gap).append(shortest)
74 cuts = cuts[:-1]
75 break
76 current_idx += 1
77 if not can_fit:
78 break
79 return CutSet.from_cuts(cuts)
80
[end of lhotse/dataset/cut_transforms/concatenate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lhotse/dataset/cut_transforms/concatenate.py b/lhotse/dataset/cut_transforms/concatenate.py
--- a/lhotse/dataset/cut_transforms/concatenate.py
+++ b/lhotse/dataset/cut_transforms/concatenate.py
@@ -62,7 +62,7 @@
return CutSet.from_cuts(cuts)
cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)
max_duration = cuts[0].duration if max_duration is None else max_duration
- current_idx = 1
+ current_idx = 0
while True:
can_fit = False
shortest = cuts[-1]
|
{"golden_diff": "diff --git a/lhotse/dataset/cut_transforms/concatenate.py b/lhotse/dataset/cut_transforms/concatenate.py\n--- a/lhotse/dataset/cut_transforms/concatenate.py\n+++ b/lhotse/dataset/cut_transforms/concatenate.py\n@@ -62,7 +62,7 @@\n return CutSet.from_cuts(cuts)\n cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)\n max_duration = cuts[0].duration if max_duration is None else max_duration\n- current_idx = 1\n+ current_idx = 0\n while True:\n can_fit = False\n shortest = cuts[-1]\n", "issue": "Cut concatenate doesn't consider the first sample in each batch\nFound in #234 \n", "before_files": [{"content": "from typing import Optional, Sequence\n\nfrom lhotse import CutSet\nfrom lhotse.cut import AnyCut\nfrom lhotse.utils import Seconds\n\n\nclass CutConcatenate:\n \"\"\"\n A transform on batch of cuts (``CutSet``) that concatenates the cuts to minimize the total amount of padding;\n e.g. instead of creating a batch with 40 examples, we will merge some of the examples together\n adding some silence between them to avoid a large number of padding frames that waste the computation.\n \"\"\"\n\n def __init__(\n self,\n gap: Seconds = 1.0,\n duration_factor: float = 1.0\n ) -> None:\n \"\"\"\n CutConcatenate's constructor.\n\n :param gap: The duration of silence in seconds that is inserted between the cuts;\n it's goal is to let the model \"know\" that there are separate utterances in a single example.\n :param duration_factor: Determines the maximum duration of the concatenated cuts;\n by default it's 1, setting the limit at the duration of the longest cut in the batch.\n \"\"\"\n self.gap = gap\n self.duration_factor = duration_factor\n\n def __call__(self, cuts: CutSet) -> CutSet:\n cuts = cuts.sort_by_duration(ascending=False)\n return concat_cuts(\n cuts,\n gap=self.gap,\n max_duration=cuts[0].duration * self.duration_factor\n )\n\n\ndef concat_cuts(\n cuts: Sequence[AnyCut],\n gap: Seconds = 1.0,\n max_duration: Optional[Seconds] = None\n) -> CutSet:\n \"\"\"\n We're going to concatenate the cuts to minimize the amount of total padding frames used.\n This means that some samples in the batch will be merged together into one sample,\n separated by an interval of silence.\n This is actually solving a knapsack problem.\n In this initial implementation we're using a greedy approach:\n going from the back (i.e. the shortest cuts) we'll try to concat them to the longest cut\n that still has some \"space\" at the end.\n\n :param cuts: a list of cuts to pack.\n :param gap: the duration of silence inserted between concatenated cuts.\n :param max_duration: the maximum duration for the concatenated cuts\n (by default set to the duration of the first cut).\n :return a list of packed cuts.\n \"\"\"\n if len(cuts) <= 1:\n # Nothing to do.\n return CutSet.from_cuts(cuts)\n cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)\n max_duration = cuts[0].duration if max_duration is None else max_duration\n current_idx = 1\n while True:\n can_fit = False\n shortest = cuts[-1]\n for idx in range(current_idx, len(cuts) - 1):\n cut = cuts[current_idx]\n can_fit = cut.duration + gap + shortest.duration <= max_duration\n if can_fit:\n cuts[current_idx] = cut.pad(cut.duration + gap).append(shortest)\n cuts = cuts[:-1]\n break\n current_idx += 1\n if not can_fit:\n break\n return CutSet.from_cuts(cuts)\n", "path": "lhotse/dataset/cut_transforms/concatenate.py"}]}
| 1,424 | 154 |
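The lhotse fix above is a one-character off-by-one: starting the scan at index 1 meant the longest cut was never considered as a packing target. A dependency-free sketch of the same greedy idea, with plain durations standing in for cuts:

```python
def pack(durations, gap=1.0, max_duration=None):
    # Greedy packing: keep merging the shortest item into the first
    # (longest-first) item it still fits behind, gap included.
    packed = sorted(durations, reverse=True)
    if max_duration is None:
        max_duration = packed[0]
    merged = True
    while merged and len(packed) > 1:
        merged = False
        shortest = packed[-1]
        for i in range(len(packed) - 1):  # starts at 0 -- the index the patch fixes
            if packed[i] + gap + shortest <= max_duration:
                packed[i] += gap + shortest
                packed.pop()
                merged = True
                break
    return packed

print(pack([10.0, 4.0, 3.0, 2.0], gap=1.0, max_duration=12.0))  # [10.0, 11.0]
```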
gh_patches_debug_34601
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-7316
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Resampling Maps in the example gallery gives a confusing example for the superpixel method
### Provide a general description of the issue or problem.
That's a minor thing perhaps but checking this [page](https://docs.sunpy.org/en/stable/generated/gallery/map/map_resampling_and_superpixels.html) I got confused by the example for the superpixel method.
It says:
`new_dimensions = u.Quantity(aia_map.dimensions) / 16`
`aia_superpixel_map = aia_map.superpixel([new_dimensions]`
The first line should be instead e.g.:
`new_dimensions=[16,16]*u.pixel `
</issue>
<code>
[start of examples/map/map_resampling_and_superpixels.py]
1 """
2 ===============
3 Resampling Maps
4 ===============
5
6 How to resample a map using the resample method, which implements interpolation, or
7 using superpixels, which combines pixels.
8 """
9 import matplotlib.pyplot as plt
10
11 import astropy.units as u
12
13 import sunpy.data.sample
14 import sunpy.map
15
16 ###############################################################################
17 # We start with the sample data.
18
19 aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
20
21 ##############################################################################
22 # To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,
23 # specifying the new dimensions in pixels. By default, this method uses linear interpolation
24 # but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').
25
26 new_dimensions = [40, 40] * u.pixel
27 aia_resampled_map = aia_map.resample(new_dimensions)
28
29 ##############################################################################
30 # Let's plot the result.
31
32 fig = plt.figure()
33 ax = fig.add_subplot(projection=aia_resampled_map)
34 aia_resampled_map.plot(axes=ax)
35 plt.show()
36
37 ##############################################################################
38 # Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.
39 # This can be used to increase the signal to noise ratio by reducing the
40 # resolution of the image by combining pixels. This means that the new dimension
41 # must divide the original size exactly.
42 # For example you can reduce the AIA map resolution by a factor of 16.
43
44 new_dimensions = u.Quantity(aia_map.dimensions) / 16
45 aia_superpixel_map = aia_map.superpixel(new_dimensions)
46
47 ##############################################################################
48 # Let's plot the result.
49
50 fig = plt.figure()
51 ax = fig.add_subplot(projection=aia_superpixel_map)
52 aia_superpixel_map.plot(axes=ax)
53 plt.show()
54
[end of examples/map/map_resampling_and_superpixels.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/map/map_resampling_and_superpixels.py b/examples/map/map_resampling_and_superpixels.py
--- a/examples/map/map_resampling_and_superpixels.py
+++ b/examples/map/map_resampling_and_superpixels.py
@@ -13,15 +13,16 @@
import sunpy.data.sample
import sunpy.map
-###############################################################################
+##############################################################################
# We start with the sample data.
aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
##############################################################################
-# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,
-# specifying the new dimensions in pixels. By default, this method uses linear interpolation
-# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').
+# To reduce the angular resolution of the map, you can use the
+# :meth:`~sunpy.map.GenericMap.resample` method, specifying the new dimensions
+# in pixels. By default, this method uses linear interpolation but this can be
+# changed with the ``method`` argument ('nearest', 'linear' or 'spline').
new_dimensions = [40, 40] * u.pixel
aia_resampled_map = aia_map.resample(new_dimensions)
@@ -35,14 +36,15 @@
plt.show()
##############################################################################
-# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.
-# This can be used to increase the signal to noise ratio by reducing the
-# resolution of the image by combining pixels. This means that the new dimension
-# must divide the original size exactly.
-# For example you can reduce the AIA map resolution by a factor of 16.
-
-new_dimensions = u.Quantity(aia_map.dimensions) / 16
-aia_superpixel_map = aia_map.superpixel(new_dimensions)
+# Another way to reduce the angular resolution of the map is by using the
+# :meth:`~sunpy.map.GenericMap.superpixel` method, which combines pixels.
+# The superpixel dimensions do not need to be square, and the intensity of
+# each superpixel defaults to the sum of the constituent pixels. For example,
+# you can reduce the AIA map resolution by a factor of 16 by specifying 16x16
+# superpixels.
+
+superpixel_size = [16, 16] * u.pixel
+aia_superpixel_map = aia_map.superpixel(superpixel_size)
##############################################################################
# Let's plot the result.
|
{"golden_diff": "diff --git a/examples/map/map_resampling_and_superpixels.py b/examples/map/map_resampling_and_superpixels.py\n--- a/examples/map/map_resampling_and_superpixels.py\n+++ b/examples/map/map_resampling_and_superpixels.py\n@@ -13,15 +13,16 @@\n import sunpy.data.sample\n import sunpy.map\n \n-###############################################################################\n+##############################################################################\n # We start with the sample data.\n \n aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n \n ##############################################################################\n-# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,\n-# specifying the new dimensions in pixels. By default, this method uses linear interpolation\n-# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n+# To reduce the angular resolution of the map, you can use the\n+# :meth:`~sunpy.map.GenericMap.resample` method, specifying the new dimensions\n+# in pixels. By default, this method uses linear interpolation but this can be\n+# changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n \n new_dimensions = [40, 40] * u.pixel\n aia_resampled_map = aia_map.resample(new_dimensions)\n@@ -35,14 +36,15 @@\n plt.show()\n \n ##############################################################################\n-# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.\n-# This can be used to increase the signal to noise ratio by reducing the\n-# resolution of the image by combining pixels. This means that the new dimension\n-# must divide the original size exactly.\n-# For example you can reduce the AIA map resolution by a factor of 16.\n-\n-new_dimensions = u.Quantity(aia_map.dimensions) / 16\n-aia_superpixel_map = aia_map.superpixel(new_dimensions)\n+# Another way to reduce the angular resolution of the map is by using the\n+# :meth:`~sunpy.map.GenericMap.superpixel` method, which combines pixels.\n+# The superpixel dimensions do not need to be square, and the intensity of\n+# each superpixel defaults to the sum of the constituent pixels. For example,\n+# you can reduce the AIA map resolution by a factor of 16 by specifying 16x16\n+# superpixels.\n+\n+superpixel_size = [16, 16] * u.pixel\n+aia_superpixel_map = aia_map.superpixel(superpixel_size)\n \n ##############################################################################\n # Let's plot the result.\n", "issue": "Resampling Maps in the example gallery gives a confusing example for the superpixel method\n### Provide a general description of the issue or problem.\n\nThat's a minor thing perhaps but checking this [page](https://docs.sunpy.org/en/stable/generated/gallery/map/map_resampling_and_superpixels.html) I got confused by the example for the superpixel method. 
\r\nIt says:\r\n`new_dimensions = u.Quantity(aia_map.dimensions) / 16`\r\n`aia_superpixel_map = aia_map.superpixel([new_dimensions]`\r\n\r\nThe first line should be instead e.g.:\r\n`new_dimensions=[16,16]*u.pixel `\n", "before_files": [{"content": "\"\"\"\n===============\nResampling Maps\n===============\n\nHow to resample a map using the resample method, which implements interpolation, or\nusing superpixels, which combines pixels.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n###############################################################################\n# We start with the sample data.\n\naia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n\n##############################################################################\n# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,\n# specifying the new dimensions in pixels. By default, this method uses linear interpolation\n# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n\nnew_dimensions = [40, 40] * u.pixel\naia_resampled_map = aia_map.resample(new_dimensions)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_resampled_map)\naia_resampled_map.plot(axes=ax)\nplt.show()\n\n##############################################################################\n# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.\n# This can be used to increase the signal to noise ratio by reducing the\n# resolution of the image by combining pixels. This means that the new dimension\n# must divide the original size exactly.\n# For example you can reduce the AIA map resolution by a factor of 16.\n\nnew_dimensions = u.Quantity(aia_map.dimensions) / 16\naia_superpixel_map = aia_map.superpixel(new_dimensions)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_superpixel_map)\naia_superpixel_map.plot(axes=ax)\nplt.show()\n", "path": "examples/map/map_resampling_and_superpixels.py"}]}
| 1,168 | 554 |
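The sunpy record boils down to `superpixel()` taking the size of each superpixel rather than the target map shape; a short sketch of the corrected call, assuming the sample data is available locally:

```python
import astropy.units as u

import sunpy.data.sample
import sunpy.map

aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)

# 16x16-pixel superpixels shrink each axis by a factor of 16.
superpixel_size = [16, 16] * u.pixel
aia_superpixel_map = aia_map.superpixel(superpixel_size)
print(aia_map.dimensions, "->", aia_superpixel_map.dimensions)
```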
gh_patches_debug_11324
|
rasdani/github-patches
|
git_diff
|
spack__spack-5006
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue while building mpiP
Trying to build this on ubuntu desktop gives (config.log):
```
configure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5
conftest.c:11:17: fatal error: mpi.h: No such file or directory
compilation terminated.
configure:4289: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME "mpiP"
| #define PACKAGE_TARNAME "mpip"
| #define PACKAGE_VERSION "3.3"
| #define PACKAGE_STRING "mpiP 3.3"
| #define PACKAGE_BUGREPORT "mpip-help@lists.sourceforge.net"
| #define PACKAGE_URL ""
| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose
| #define HAVE_LIBM 1
| /* end confdefs.h. */
| #include "mpi.h"
|
| int
| main ()
| {
| #ifndef MPI_Init
| #ifdef __cplusplus
| (void) MPI_Init;
| #else
| (void) MPI_Init;
| #endif
| #endif
|
| ;
| return 0;
| }
configure:4289: result: no
configure:4294: error: "Failed to find declaration for MPI_Init!"
```
I am creating PR with additional `--with-cc` option to configure but then seeing next error:
```
File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader
olist = StandardFileHeader(sname)
File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader
olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n")
File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__
olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n")
raise KeyError(key)
File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__
KeyError: 'LOGNAME' raise KeyError(key)
```
Issue while building mpiP
Trying to build this on ubuntu desktop gives (config.log):
```
configure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5
conftest.c:11:17: fatal error: mpi.h: No such file or directory
compilation terminated.
configure:4289: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME "mpiP"
| #define PACKAGE_TARNAME "mpip"
| #define PACKAGE_VERSION "3.3"
| #define PACKAGE_STRING "mpiP 3.3"
| #define PACKAGE_BUGREPORT "mpip-help@lists.sourceforge.net"
| #define PACKAGE_URL ""
| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose
| #define HAVE_LIBM 1
| /* end confdefs.h. */
| #include "mpi.h"
|
| int
| main ()
| {
| #ifndef MPI_Init
| #ifdef __cplusplus
| (void) MPI_Init;
| #else
| (void) MPI_Init;
| #endif
| #endif
|
| ;
| return 0;
| }
configure:4289: result: no
configure:4294: error: "Failed to find declaration for MPI_Init!"
```
I am creating PR with additional `--with-cc` option to configure but then seeing next error:
```
File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader
olist = StandardFileHeader(sname)
File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader
olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n")
File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__
olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n")
raise KeyError(key)
File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__
KeyError: 'LOGNAME' raise KeyError(key)
```
</issue>
<code>
[start of var/spack/repos/builtin/packages/mpip/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26 import os
27
28
29 class Mpip(AutotoolsPackage):
30 """mpiP: Lightweight, Scalable MPI Profiling"""
31 homepage = "http://mpip.sourceforge.net/"
32 url = "http://downloads.sourceforge.net/project/mpip/mpiP/mpiP-3.4.1/mpiP-3.4.1.tar.gz"
33
34 version("3.4.1", "1168adc83777ac31d6ebd385823aabbd")
35
36 depends_on("libelf", type="build")
37 depends_on("libdwarf", type="build")
38 depends_on('libunwind', when=os.uname()[4] == "x86_64", type="build")
39 depends_on("mpi", type="build")
40
41 def configure_args(self):
42 return ['--without-f77']
43
[end of var/spack/repos/builtin/packages/mpip/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/var/spack/repos/builtin/packages/mpip/package.py b/var/spack/repos/builtin/packages/mpip/package.py
--- a/var/spack/repos/builtin/packages/mpip/package.py
+++ b/var/spack/repos/builtin/packages/mpip/package.py
@@ -33,10 +33,14 @@
version("3.4.1", "1168adc83777ac31d6ebd385823aabbd")
- depends_on("libelf", type="build")
- depends_on("libdwarf", type="build")
- depends_on('libunwind', when=os.uname()[4] == "x86_64", type="build")
- depends_on("mpi", type="build")
+ depends_on("libelf")
+ depends_on("libdwarf")
+ depends_on('libunwind', when=os.uname()[4] == "x86_64")
+ depends_on("mpi")
def configure_args(self):
- return ['--without-f77']
+ config_args = ['--without-f77']
+ config_args.append("--with-cc=%s" % self.spec['mpi'].mpicc)
+ config_args.append("--with-cxx=%s" % self.spec['mpi'].mpicxx)
+
+ return config_args
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/mpip/package.py b/var/spack/repos/builtin/packages/mpip/package.py\n--- a/var/spack/repos/builtin/packages/mpip/package.py\n+++ b/var/spack/repos/builtin/packages/mpip/package.py\n@@ -33,10 +33,14 @@\n \n version(\"3.4.1\", \"1168adc83777ac31d6ebd385823aabbd\")\n \n- depends_on(\"libelf\", type=\"build\")\n- depends_on(\"libdwarf\", type=\"build\")\n- depends_on('libunwind', when=os.uname()[4] == \"x86_64\", type=\"build\")\n- depends_on(\"mpi\", type=\"build\")\n+ depends_on(\"libelf\")\n+ depends_on(\"libdwarf\")\n+ depends_on('libunwind', when=os.uname()[4] == \"x86_64\")\n+ depends_on(\"mpi\")\n \n def configure_args(self):\n- return ['--without-f77']\n+ config_args = ['--without-f77']\n+ config_args.append(\"--with-cc=%s\" % self.spec['mpi'].mpicc)\n+ config_args.append(\"--with-cxx=%s\" % self.spec['mpi'].mpicxx)\n+\n+ return config_args\n", "issue": "Issue while building mpiP\nTrying to build this on ubuntu desktop gives (config.log):\r\n\r\n```\r\nconfigure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5\r\nconftest.c:11:17: fatal error: mpi.h: No such file or directory\r\ncompilation terminated.\r\nconfigure:4289: $? = 1\r\nconfigure: failed program was:\r\n| /* confdefs.h */\r\n| #define PACKAGE_NAME \"mpiP\"\r\n| #define PACKAGE_TARNAME \"mpip\"\r\n| #define PACKAGE_VERSION \"3.3\"\r\n| #define PACKAGE_STRING \"mpiP 3.3\"\r\n| #define PACKAGE_BUGREPORT \"mpip-help@lists.sourceforge.net\"\r\n| #define PACKAGE_URL \"\"\r\n| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose\r\n| #define HAVE_LIBM 1\r\n| /* end confdefs.h. */\r\n| #include \"mpi.h\"\r\n|\r\n| int\r\n| main ()\r\n| {\r\n| #ifndef MPI_Init\r\n| #ifdef __cplusplus\r\n| (void) MPI_Init;\r\n| #else\r\n| (void) MPI_Init;\r\n| #endif\r\n| #endif\r\n|\r\n| ;\r\n| return 0;\r\n| }\r\nconfigure:4289: result: no\r\nconfigure:4294: error: \"Failed to find declaration for MPI_Init!\"\r\n```\r\n\r\nI am creating PR with additional `--with-cc` option to configure but then seeing next error:\r\n\r\n```\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist = StandardFileHeader(sname)\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n raise KeyError(key)\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\nKeyError: 'LOGNAME' raise KeyError(key)\r\n```\r\n\nIssue while building mpiP\nTrying to build this on ubuntu desktop gives (config.log):\r\n\r\n```\r\nconfigure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5\r\nconftest.c:11:17: fatal error: mpi.h: No such file or directory\r\ncompilation terminated.\r\nconfigure:4289: $? 
= 1\r\nconfigure: failed program was:\r\n| /* confdefs.h */\r\n| #define PACKAGE_NAME \"mpiP\"\r\n| #define PACKAGE_TARNAME \"mpip\"\r\n| #define PACKAGE_VERSION \"3.3\"\r\n| #define PACKAGE_STRING \"mpiP 3.3\"\r\n| #define PACKAGE_BUGREPORT \"mpip-help@lists.sourceforge.net\"\r\n| #define PACKAGE_URL \"\"\r\n| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose\r\n| #define HAVE_LIBM 1\r\n| /* end confdefs.h. */\r\n| #include \"mpi.h\"\r\n|\r\n| int\r\n| main ()\r\n| {\r\n| #ifndef MPI_Init\r\n| #ifdef __cplusplus\r\n| (void) MPI_Init;\r\n| #else\r\n| (void) MPI_Init;\r\n| #endif\r\n| #endif\r\n|\r\n| ;\r\n| return 0;\r\n| }\r\nconfigure:4289: result: no\r\nconfigure:4294: error: \"Failed to find declaration for MPI_Init!\"\r\n```\r\n\r\nI am creating PR with additional `--with-cc` option to configure but then seeing next error:\r\n\r\n```\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist = StandardFileHeader(sname)\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n raise KeyError(key)\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\nKeyError: 'LOGNAME' raise KeyError(key)\r\n```\r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\nimport os\n\n\nclass Mpip(AutotoolsPackage):\n \"\"\"mpiP: Lightweight, Scalable MPI Profiling\"\"\"\n homepage = \"http://mpip.sourceforge.net/\"\n url = \"http://downloads.sourceforge.net/project/mpip/mpiP/mpiP-3.4.1/mpiP-3.4.1.tar.gz\"\n\n version(\"3.4.1\", \"1168adc83777ac31d6ebd385823aabbd\")\n\n depends_on(\"libelf\", type=\"build\")\n depends_on(\"libdwarf\", type=\"build\")\n depends_on('libunwind', when=os.uname()[4] == \"x86_64\", type=\"build\")\n depends_on(\"mpi\", type=\"build\")\n\n def configure_args(self):\n return ['--without-f77']\n", "path": "var/spack/repos/builtin/packages/mpip/package.py"}]}
| 2,290 | 301 |
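The mpiP recipe fix above works by handing configure the MPI compiler wrappers so its `MPI_Init`/`mpi.h` probe succeeds; a standalone sketch of the argument list it builds, with hypothetical wrapper paths standing in for `self.spec['mpi'].mpicc` and `.mpicxx`:

```python
def mpip_configure_args(mpicc, mpicxx):
    # Mirrors the patched recipe: --with-cc/--with-cxx point configure at the
    # MPI wrappers so the conftest.c include of "mpi.h" compiles.
    return [
        "--without-f77",
        "--with-cc=%s" % mpicc,
        "--with-cxx=%s" % mpicxx,
    ]

print(mpip_configure_args("/opt/mpich/bin/mpicc", "/opt/mpich/bin/mpicxx"))
```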
gh_patches_debug_33523
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-1945
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not properly outputting in MINGW
System: Windows 7 64bit
steps to reproduce:
1. Open mingw64 (git bash)
2. Run `aws configure`
3. Observe no output
4. Hit enter 4 times
5. Observe output
It looks as if AWS CLI is thinking it is outputting text however it actually isn't.

</issue>
<code>
[start of awscli/compat.py]
1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6
7 # http://aws.amazon.com/apache2.0/
8
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import sys
14 import os
15 import zipfile
16
17 from botocore.compat import six
18 #import botocore.compat
19
20 # If you ever want to import from the vendored six. Add it here and then
21 # import from awscli.compat. Also try to keep it in alphabetical order.
22 # This may get large.
23 advance_iterator = six.advance_iterator
24 PY3 = six.PY3
25 queue = six.moves.queue
26 shlex_quote = six.moves.shlex_quote
27 StringIO = six.StringIO
28 urlopen = six.moves.urllib.request.urlopen
29
30 # Most, but not all, python installations will have zlib. This is required to
31 # compress any files we send via a push. If we can't compress, we can still
32 # package the files in a zip container.
33 try:
34 import zlib
35 ZIP_COMPRESSION_MODE = zipfile.ZIP_DEFLATED
36 except ImportError:
37 ZIP_COMPRESSION_MODE = zipfile.ZIP_STORED
38
39
40 class BinaryStdout(object):
41 def __enter__(self):
42 if sys.platform == "win32":
43 import msvcrt
44 self.previous_mode = msvcrt.setmode(sys.stdout.fileno(),
45 os.O_BINARY)
46 return sys.stdout
47
48 def __exit__(self, type, value, traceback):
49 if sys.platform == "win32":
50 import msvcrt
51 msvcrt.setmode(sys.stdout.fileno(), self.previous_mode)
52
53
54 if six.PY3:
55 import locale
56 import urllib.parse as urlparse
57
58 from urllib.error import URLError
59
60 raw_input = input
61
62 def get_stdout_text_writer():
63 return sys.stdout
64
65 def compat_open(filename, mode='r', encoding=None):
66 """Back-port open() that accepts an encoding argument.
67
68 In python3 this uses the built in open() and in python2 this
69 uses the io.open() function.
70
71 If the file is not being opened in binary mode, then we'll
72 use locale.getpreferredencoding() to find the preferred
73 encoding.
74
75 """
76 if 'b' not in mode:
77 encoding = locale.getpreferredencoding()
78 return open(filename, mode, encoding=encoding)
79
80 else:
81 import codecs
82 import locale
83 import io
84 import urlparse
85
86 from urllib2 import URLError
87
88 raw_input = raw_input
89
90 def get_stdout_text_writer():
91 # In python3, all the sys.stdout/sys.stderr streams are in text
92 # mode. This means they expect unicode, and will encode the
93 # unicode automatically before actually writing to stdout/stderr.
94 # In python2, that's not the case. In order to provide a consistent
95 # interface, we can create a wrapper around sys.stdout that will take
96 # unicode, and automatically encode it to the preferred encoding.
97 # That way consumers can just call get_stdout_text_writer() and write
98 # unicode to the returned stream. Note that get_stdout_text_writer
99 # just returns sys.stdout in the PY3 section above because python3
100 # handles this.
101 return codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
102
103 def compat_open(filename, mode='r', encoding=None):
104 # See docstring for compat_open in the PY3 section above.
105 if 'b' not in mode:
106 encoding = locale.getpreferredencoding()
107 return io.open(filename, mode, encoding=encoding)
108
[end of awscli/compat.py]
[start of awscli/customizations/configure/configure.py]
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import os
14 import logging
15
16 from botocore.exceptions import ProfileNotFound
17
18 from awscli.compat import raw_input
19 from awscli.customizations.commands import BasicCommand
20 from awscli.customizations.configure.addmodel import AddModelCommand
21 from awscli.customizations.configure.set import ConfigureSetCommand
22 from awscli.customizations.configure.get import ConfigureGetCommand
23 from awscli.customizations.configure.list import ConfigureListCommand
24 from awscli.customizations.configure.writer import ConfigFileWriter
25
26 from . import mask_value
27
28
29 logger = logging.getLogger(__name__)
30
31
32 def register_configure_cmd(cli):
33 cli.register('building-command-table.main',
34 ConfigureCommand.add_command)
35
36
37 class InteractivePrompter(object):
38
39 def get_value(self, current_value, config_name, prompt_text=''):
40 if config_name in ('aws_access_key_id', 'aws_secret_access_key'):
41 current_value = mask_value(current_value)
42 response = raw_input("%s [%s]: " % (prompt_text, current_value))
43 if not response:
44 # If the user hits enter, we return a value of None
45 # instead of an empty string. That way we can determine
46 # whether or not a value has changed.
47 response = None
48 return response
49
50
51 class ConfigureCommand(BasicCommand):
52 NAME = 'configure'
53 DESCRIPTION = BasicCommand.FROM_FILE()
54 SYNOPSIS = ('aws configure [--profile profile-name]')
55 EXAMPLES = (
56 'To create a new configuration::\n'
57 '\n'
58 ' $ aws configure\n'
59 ' AWS Access Key ID [None]: accesskey\n'
60 ' AWS Secret Access Key [None]: secretkey\n'
61 ' Default region name [None]: us-west-2\n'
62 ' Default output format [None]:\n'
63 '\n'
64 'To update just the region name::\n'
65 '\n'
66 ' $ aws configure\n'
67 ' AWS Access Key ID [****]:\n'
68 ' AWS Secret Access Key [****]:\n'
69 ' Default region name [us-west-1]: us-west-2\n'
70 ' Default output format [None]:\n'
71 )
72 SUBCOMMANDS = [
73 {'name': 'list', 'command_class': ConfigureListCommand},
74 {'name': 'get', 'command_class': ConfigureGetCommand},
75 {'name': 'set', 'command_class': ConfigureSetCommand},
76 {'name': 'add-model', 'command_class': AddModelCommand}
77 ]
78
79 # If you want to add new values to prompt, update this list here.
80 VALUES_TO_PROMPT = [
81 # (logical_name, config_name, prompt_text)
82 ('aws_access_key_id', "AWS Access Key ID"),
83 ('aws_secret_access_key', "AWS Secret Access Key"),
84 ('region', "Default region name"),
85 ('output', "Default output format"),
86 ]
87
88 def __init__(self, session, prompter=None, config_writer=None):
89 super(ConfigureCommand, self).__init__(session)
90 if prompter is None:
91 prompter = InteractivePrompter()
92 self._prompter = prompter
93 if config_writer is None:
94 config_writer = ConfigFileWriter()
95 self._config_writer = config_writer
96
97 def _run_main(self, parsed_args, parsed_globals):
98 # Called when invoked with no args "aws configure"
99 new_values = {}
100 # This is the config from the config file scoped to a specific
101 # profile.
102 try:
103 config = self._session.get_scoped_config()
104 except ProfileNotFound:
105 config = {}
106 for config_name, prompt_text in self.VALUES_TO_PROMPT:
107 current_value = config.get(config_name)
108 new_value = self._prompter.get_value(current_value, config_name,
109 prompt_text)
110 if new_value is not None and new_value != current_value:
111 new_values[config_name] = new_value
112 config_filename = os.path.expanduser(
113 self._session.get_config_variable('config_file'))
114 if new_values:
115 self._write_out_creds_file_values(new_values,
116 parsed_globals.profile)
117 if parsed_globals.profile is not None:
118 new_values['__section__'] = (
119 'profile %s' % parsed_globals.profile)
120 self._config_writer.update_config(new_values, config_filename)
121
122 def _write_out_creds_file_values(self, new_values, profile_name):
123 # The access_key/secret_key are now *always* written to the shared
124 # credentials file (~/.aws/credentials), see aws/aws-cli#847.
125 # post-conditions: ~/.aws/credentials will have the updated credential
126 # file values and new_values will have the cred vars removed.
127 credential_file_values = {}
128 if 'aws_access_key_id' in new_values:
129 credential_file_values['aws_access_key_id'] = new_values.pop(
130 'aws_access_key_id')
131 if 'aws_secret_access_key' in new_values:
132 credential_file_values['aws_secret_access_key'] = new_values.pop(
133 'aws_secret_access_key')
134 if credential_file_values:
135 if profile_name is not None:
136 credential_file_values['__section__'] = profile_name
137 shared_credentials_filename = os.path.expanduser(
138 self._session.get_config_variable('credentials_file'))
139 self._config_writer.update_config(
140 credential_file_values,
141 shared_credentials_filename)
142
[end of awscli/customizations/configure/configure.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awscli/compat.py b/awscli/compat.py
--- a/awscli/compat.py
+++ b/awscli/compat.py
@@ -105,3 +105,20 @@
if 'b' not in mode:
encoding = locale.getpreferredencoding()
return io.open(filename, mode, encoding=encoding)
+
+
+def compat_input(prompt):
+ """
+ Cygwin's pty's are based on pipes. Therefore, when it interacts with a Win32
+ program (such as Win32 python), what that program sees is a pipe instead of
+ a console. This is important because python buffers pipes, and so on a
+ pty-based terminal, text will not necessarily appear immediately. In most
+ cases, this isn't a big deal. But when we're doing an interactive prompt,
+ the result is that the prompts won't display until we fill the buffer. Since
+ raw_input does not flush the prompt, we need to manually write and flush it.
+
+ See https://github.com/mintty/mintty/issues/56 for more details.
+ """
+ sys.stdout.write(prompt)
+ sys.stdout.flush()
+ return raw_input()
diff --git a/awscli/customizations/configure/configure.py b/awscli/customizations/configure/configure.py
--- a/awscli/customizations/configure/configure.py
+++ b/awscli/customizations/configure/configure.py
@@ -15,7 +15,7 @@
from botocore.exceptions import ProfileNotFound
-from awscli.compat import raw_input
+from awscli.compat import compat_input
from awscli.customizations.commands import BasicCommand
from awscli.customizations.configure.addmodel import AddModelCommand
from awscli.customizations.configure.set import ConfigureSetCommand
@@ -39,7 +39,7 @@
def get_value(self, current_value, config_name, prompt_text=''):
if config_name in ('aws_access_key_id', 'aws_secret_access_key'):
current_value = mask_value(current_value)
- response = raw_input("%s [%s]: " % (prompt_text, current_value))
+ response = compat_input("%s [%s]: " % (prompt_text, current_value))
if not response:
# If the user hits enter, we return a value of None
# instead of an empty string. That way we can determine
|
{"golden_diff": "diff --git a/awscli/compat.py b/awscli/compat.py\n--- a/awscli/compat.py\n+++ b/awscli/compat.py\n@@ -105,3 +105,20 @@\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return io.open(filename, mode, encoding=encoding)\n+\n+\n+def compat_input(prompt):\n+ \"\"\"\n+ Cygwin's pty's are based on pipes. Therefore, when it interacts with a Win32\n+ program (such as Win32 python), what that program sees is a pipe instead of\n+ a console. This is important because python buffers pipes, and so on a\n+ pty-based terminal, text will not necessarily appear immediately. In most\n+ cases, this isn't a big deal. But when we're doing an interactive prompt,\n+ the result is that the prompts won't display until we fill the buffer. Since\n+ raw_input does not flush the prompt, we need to manually write and flush it.\n+\n+ See https://github.com/mintty/mintty/issues/56 for more details.\n+ \"\"\"\n+ sys.stdout.write(prompt)\n+ sys.stdout.flush()\n+ return raw_input()\ndiff --git a/awscli/customizations/configure/configure.py b/awscli/customizations/configure/configure.py\n--- a/awscli/customizations/configure/configure.py\n+++ b/awscli/customizations/configure/configure.py\n@@ -15,7 +15,7 @@\n \n from botocore.exceptions import ProfileNotFound\n \n-from awscli.compat import raw_input\n+from awscli.compat import compat_input\n from awscli.customizations.commands import BasicCommand\n from awscli.customizations.configure.addmodel import AddModelCommand\n from awscli.customizations.configure.set import ConfigureSetCommand\n@@ -39,7 +39,7 @@\n def get_value(self, current_value, config_name, prompt_text=''):\n if config_name in ('aws_access_key_id', 'aws_secret_access_key'):\n current_value = mask_value(current_value)\n- response = raw_input(\"%s [%s]: \" % (prompt_text, current_value))\n+ response = compat_input(\"%s [%s]: \" % (prompt_text, current_value))\n if not response:\n # If the user hits enter, we return a value of None\n # instead of an empty string. That way we can determine\n", "issue": "Not properly outputting in MINGW\nSystem: Windows 7 64bit\n\nsteps to reproduce:\n1. Open mingw64 (git bash) \n2. Run `aws configure`\n3. Observe no output\n4. Hit enter 4 times\n5. Observe output\n\nIt looks as if AWS CLI is thinking it is outputting text however it actually isn't.\n\n\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n\n# http://aws.amazon.com/apache2.0/\n\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport os\nimport zipfile\n\nfrom botocore.compat import six\n#import botocore.compat\n\n# If you ever want to import from the vendored six. Add it here and then\n# import from awscli.compat. Also try to keep it in alphabetical order.\n# This may get large.\nadvance_iterator = six.advance_iterator\nPY3 = six.PY3\nqueue = six.moves.queue\nshlex_quote = six.moves.shlex_quote\nStringIO = six.StringIO\nurlopen = six.moves.urllib.request.urlopen\n\n# Most, but not all, python installations will have zlib. This is required to\n# compress any files we send via a push. 
If we can't compress, we can still\n# package the files in a zip container.\ntry:\n import zlib\n ZIP_COMPRESSION_MODE = zipfile.ZIP_DEFLATED\nexcept ImportError:\n ZIP_COMPRESSION_MODE = zipfile.ZIP_STORED\n\n\nclass BinaryStdout(object):\n def __enter__(self):\n if sys.platform == \"win32\":\n import msvcrt\n self.previous_mode = msvcrt.setmode(sys.stdout.fileno(),\n os.O_BINARY)\n return sys.stdout\n\n def __exit__(self, type, value, traceback):\n if sys.platform == \"win32\":\n import msvcrt\n msvcrt.setmode(sys.stdout.fileno(), self.previous_mode) \n\n\nif six.PY3:\n import locale\n import urllib.parse as urlparse\n\n from urllib.error import URLError\n\n raw_input = input\n\n def get_stdout_text_writer():\n return sys.stdout\n\n def compat_open(filename, mode='r', encoding=None):\n \"\"\"Back-port open() that accepts an encoding argument.\n\n In python3 this uses the built in open() and in python2 this\n uses the io.open() function.\n\n If the file is not being opened in binary mode, then we'll\n use locale.getpreferredencoding() to find the preferred\n encoding.\n\n \"\"\"\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return open(filename, mode, encoding=encoding)\n\nelse:\n import codecs\n import locale\n import io\n import urlparse\n\n from urllib2 import URLError\n\n raw_input = raw_input\n\n def get_stdout_text_writer():\n # In python3, all the sys.stdout/sys.stderr streams are in text\n # mode. This means they expect unicode, and will encode the\n # unicode automatically before actually writing to stdout/stderr.\n # In python2, that's not the case. In order to provide a consistent\n # interface, we can create a wrapper around sys.stdout that will take\n # unicode, and automatically encode it to the preferred encoding.\n # That way consumers can just call get_stdout_text_writer() and write\n # unicode to the returned stream. Note that get_stdout_text_writer\n # just returns sys.stdout in the PY3 section above because python3\n # handles this.\n return codecs.getwriter(locale.getpreferredencoding())(sys.stdout)\n\n def compat_open(filename, mode='r', encoding=None):\n # See docstring for compat_open in the PY3 section above.\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return io.open(filename, mode, encoding=encoding)\n", "path": "awscli/compat.py"}, {"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\nimport logging\n\nfrom botocore.exceptions import ProfileNotFound\n\nfrom awscli.compat import raw_input\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.addmodel import AddModelCommand\nfrom awscli.customizations.configure.set import ConfigureSetCommand\nfrom awscli.customizations.configure.get import ConfigureGetCommand\nfrom awscli.customizations.configure.list import ConfigureListCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . 
import mask_value\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_configure_cmd(cli):\n cli.register('building-command-table.main',\n ConfigureCommand.add_command)\n\n\nclass InteractivePrompter(object):\n\n def get_value(self, current_value, config_name, prompt_text=''):\n if config_name in ('aws_access_key_id', 'aws_secret_access_key'):\n current_value = mask_value(current_value)\n response = raw_input(\"%s [%s]: \" % (prompt_text, current_value))\n if not response:\n # If the user hits enter, we return a value of None\n # instead of an empty string. That way we can determine\n # whether or not a value has changed.\n response = None\n return response\n\n\nclass ConfigureCommand(BasicCommand):\n NAME = 'configure'\n DESCRIPTION = BasicCommand.FROM_FILE()\n SYNOPSIS = ('aws configure [--profile profile-name]')\n EXAMPLES = (\n 'To create a new configuration::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [None]: accesskey\\n'\n ' AWS Secret Access Key [None]: secretkey\\n'\n ' Default region name [None]: us-west-2\\n'\n ' Default output format [None]:\\n'\n '\\n'\n 'To update just the region name::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [****]:\\n'\n ' AWS Secret Access Key [****]:\\n'\n ' Default region name [us-west-1]: us-west-2\\n'\n ' Default output format [None]:\\n'\n )\n SUBCOMMANDS = [\n {'name': 'list', 'command_class': ConfigureListCommand},\n {'name': 'get', 'command_class': ConfigureGetCommand},\n {'name': 'set', 'command_class': ConfigureSetCommand},\n {'name': 'add-model', 'command_class': AddModelCommand}\n ]\n\n # If you want to add new values to prompt, update this list here.\n VALUES_TO_PROMPT = [\n # (logical_name, config_name, prompt_text)\n ('aws_access_key_id', \"AWS Access Key ID\"),\n ('aws_secret_access_key', \"AWS Secret Access Key\"),\n ('region', \"Default region name\"),\n ('output', \"Default output format\"),\n ]\n\n def __init__(self, session, prompter=None, config_writer=None):\n super(ConfigureCommand, self).__init__(session)\n if prompter is None:\n prompter = InteractivePrompter()\n self._prompter = prompter\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, parsed_args, parsed_globals):\n # Called when invoked with no args \"aws configure\"\n new_values = {}\n # This is the config from the config file scoped to a specific\n # profile.\n try:\n config = self._session.get_scoped_config()\n except ProfileNotFound:\n config = {}\n for config_name, prompt_text in self.VALUES_TO_PROMPT:\n current_value = config.get(config_name)\n new_value = self._prompter.get_value(current_value, config_name,\n prompt_text)\n if new_value is not None and new_value != current_value:\n new_values[config_name] = new_value\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n if new_values:\n self._write_out_creds_file_values(new_values,\n parsed_globals.profile)\n if parsed_globals.profile is not None:\n new_values['__section__'] = (\n 'profile %s' % parsed_globals.profile)\n self._config_writer.update_config(new_values, config_filename)\n\n def _write_out_creds_file_values(self, new_values, profile_name):\n # The access_key/secret_key are now *always* written to the shared\n # credentials file (~/.aws/credentials), see aws/aws-cli#847.\n # post-conditions: ~/.aws/credentials will have the updated credential\n # file values and new_values will have the cred vars removed.\n credential_file_values = {}\n if 
'aws_access_key_id' in new_values:\n credential_file_values['aws_access_key_id'] = new_values.pop(\n 'aws_access_key_id')\n if 'aws_secret_access_key' in new_values:\n credential_file_values['aws_secret_access_key'] = new_values.pop(\n 'aws_secret_access_key')\n if credential_file_values:\n if profile_name is not None:\n credential_file_values['__section__'] = profile_name\n shared_credentials_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n self._config_writer.update_config(\n credential_file_values,\n shared_credentials_filename)\n", "path": "awscli/customizations/configure/configure.py"}]}
| 3,380 | 520 |
gh_patches_debug_8530 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-2509 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DBT 0.17.0rc3 - Compilation failure regression
### Describe the bug
When trying to compile a project having some schema tests using `dbt_utils.equality` the compilation fails because the model does not exists.
However calling DBT run works.
The compilation failure happens when using DBT 0.17.0rc3, however compiling the same project using DBT 0.16.1 succeeds.
### Steps To Reproduce
The following ZIP file includes a dummy project to reproduce the issue.
[dummy.zip](https://github.com/fishtown-analytics/dbt/files/4705434/dummy.zip)
You will need to configure a profile for it though.
Extract the project and run:
```
dbt deps
dbt compile
```
### Expected behavior
The compilation succeeds on both DBT 0.16.1 and 0.17.0rc3
### Actual behavior
The compilation succeeds for DBT 0.16.1 and fails for 0.17.0rc3 with the following error:
```
Encountered an error:
Runtime Error
Database Error in test dbt_utils_equality_my_second_dbt_model_id__ref_my_first_dbt_model_ (models/example/schema.yml)
002003 (42S02): SQL compilation error:
Table 'DB.SCHEMA.MY_SECOND_DBT_MODEL' does not exist or not authorized.
```
### System information
**Which database are you using dbt with?**
- [ ] postgres
- [ ] redshift
- [ ] bigquery
- [x] snowflake
- [ ] other (specify: ____________)
**The output of `dbt --version`:**
```
dbt --version
installed version: 0.17.0-rc3
latest version: 0.16.1
Your version of dbt is ahead of the latest release!
Plugins:
- snowflake: 0.17.0rc3
- postgres: 0.17.0rc3
- redshift: 0.17.0rc3
- bigquery: 0.17.0rc3
```
**The operating system you're using:** Linux Mint 19.3
**The output of `python --version`:** Python 3.6.9
</issue>
<code>
[start of plugins/snowflake/dbt/adapters/snowflake/impl.py]
1 from dataclasses import dataclass
2 from typing import Mapping, Any, Optional, List, Union
3
4 import agate
5
6 from dbt.adapters.base.impl import AdapterConfig
7 from dbt.adapters.sql import SQLAdapter
8 from dbt.adapters.sql.impl import (
9 LIST_SCHEMAS_MACRO_NAME,
10 LIST_RELATIONS_MACRO_NAME,
11 )
12 from dbt.adapters.snowflake import SnowflakeConnectionManager
13 from dbt.adapters.snowflake import SnowflakeRelation
14 from dbt.adapters.snowflake import SnowflakeColumn
15 from dbt.contracts.graph.manifest import Manifest
16 from dbt.exceptions import RuntimeException, DatabaseException
17 from dbt.utils import filter_null_values
18
19
20 @dataclass
21 class SnowflakeConfig(AdapterConfig):
22 transient: Optional[bool] = None
23 cluster_by: Optional[Union[str, List[str]]] = None
24 automatic_clustering: Optional[bool] = None
25 secure: Optional[bool] = None
26 copy_grants: Optional[bool] = None
27 snowflake_warehouse: Optional[str] = None
28
29
30 class SnowflakeAdapter(SQLAdapter):
31 Relation = SnowflakeRelation
32 Column = SnowflakeColumn
33 ConnectionManager = SnowflakeConnectionManager
34
35 AdapterSpecificConfigs = SnowflakeConfig
36
37 @classmethod
38 def date_function(cls):
39 return "CURRENT_TIMESTAMP()"
40
41 @classmethod
42 def _catalog_filter_table(
43 cls, table: agate.Table, manifest: Manifest
44 ) -> agate.Table:
45 # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force
46 # the column names to their lowercased forms.
47 lowered = table.rename(
48 column_names=[c.lower() for c in table.column_names]
49 )
50 return super()._catalog_filter_table(lowered, manifest)
51
52 def _make_match_kwargs(self, database, schema, identifier):
53 quoting = self.config.quoting
54 if identifier is not None and quoting["identifier"] is False:
55 identifier = identifier.upper()
56
57 if schema is not None and quoting["schema"] is False:
58 schema = schema.upper()
59
60 if database is not None and quoting["database"] is False:
61 database = database.upper()
62
63 return filter_null_values(
64 {"identifier": identifier, "schema": schema, "database": database}
65 )
66
67 def _get_warehouse(self) -> str:
68 _, table = self.execute(
69 'select current_warehouse() as warehouse',
70 fetch=True
71 )
72 if len(table) == 0 or len(table[0]) == 0:
73 # can this happen?
74 raise RuntimeException(
75 'Could not get current warehouse: no results'
76 )
77 return str(table[0][0])
78
79 def _use_warehouse(self, warehouse: str):
80 """Use the given warehouse. Quotes are never applied."""
81 self.execute('use warehouse {}'.format(warehouse))
82
83 def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:
84 default_warehouse = self.config.credentials.warehouse
85 warehouse = config.get('snowflake_warehouse', default_warehouse)
86 if warehouse == default_warehouse or warehouse is None:
87 return None
88 previous = self._get_warehouse()
89 self._use_warehouse(warehouse)
90 return previous
91
92 def post_model_hook(
93 self, config: Mapping[str, Any], context: Optional[str]
94 ) -> None:
95 if context is not None:
96 self._use_warehouse(context)
97
98 def list_schemas(self, database: str) -> List[str]:
99 try:
100 results = self.execute_macro(
101 LIST_SCHEMAS_MACRO_NAME,
102 kwargs={'database': database}
103 )
104 except DatabaseException as exc:
105 msg = (
106 f'Database error while listing schemas in database '
107 f'"{database}"\n{exc}'
108 )
109 raise RuntimeException(msg)
110 # this uses 'show terse schemas in database', and the column name we
111 # want is 'name'
112
113 return [row['name'] for row in results]
114
115 def list_relations_without_caching(
116 self, schema_relation: SnowflakeRelation
117 ) -> List[SnowflakeRelation]:
118 kwargs = {'schema_relation': schema_relation}
119 try:
120 results = self.execute_macro(
121 LIST_RELATIONS_MACRO_NAME,
122 kwargs=kwargs
123 )
124 except DatabaseException as exc:
125 # if the schema doesn't exist, we just want to return.
126 # Alternatively, we could query the list of schemas before we start
127 # and skip listing the missing ones, which sounds expensive.
128 if 'Object does not exist' in str(exc):
129 return []
130 raise
131
132 relations = []
133 quote_policy = {
134 'database': True,
135 'schema': True,
136 'identifier': True
137 }
138
139 columns = ['database_name', 'schema_name', 'name', 'kind']
140 for _database, _schema, _identifier, _type in results.select(columns):
141 try:
142 _type = self.Relation.get_relation_type(_type.lower())
143 except ValueError:
144 _type = self.Relation.External
145 relations.append(self.Relation.create(
146 database=_database,
147 schema=_schema,
148 identifier=_identifier,
149 quote_policy=quote_policy,
150 type=_type
151 ))
152
153 return relations
154
[end of plugins/snowflake/dbt/adapters/snowflake/impl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py
--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py
+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py
@@ -112,6 +112,15 @@
return [row['name'] for row in results]
+ def get_columns_in_relation(self, relation):
+ try:
+ return super().get_columns_in_relation(relation)
+ except DatabaseException as exc:
+ if 'does not exist or not authorized' in str(exc):
+ return []
+ else:
+ raise
+
def list_relations_without_caching(
self, schema_relation: SnowflakeRelation
) -> List[SnowflakeRelation]:
|
{"golden_diff": "diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py\n+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n@@ -112,6 +112,15 @@\n \n return [row['name'] for row in results]\n \n+ def get_columns_in_relation(self, relation):\n+ try:\n+ return super().get_columns_in_relation(relation)\n+ except DatabaseException as exc:\n+ if 'does not exist or not authorized' in str(exc):\n+ return []\n+ else:\n+ raise\n+\n def list_relations_without_caching(\n self, schema_relation: SnowflakeRelation\n ) -> List[SnowflakeRelation]:\n", "issue": "DBT 0.17.0rc3 - Compilation failure regression\n### Describe the bug\r\n\r\nWhen trying to compile a project having some schema tests using `dbt_utils.equality` the compilation fails because the model does not exists.\r\nHowever calling DBT run works.\r\n\r\nThe compilation failure happens when using DBT 0.17.0rc3, however compiling the same project using DBT 0.16.1 succeeds.\r\n\r\n### Steps To Reproduce\r\n\r\nThe following ZIP file includes a dummy project to reproduce the issue.\r\n[dummy.zip](https://github.com/fishtown-analytics/dbt/files/4705434/dummy.zip)\r\n\r\nYou will need to configure a profile for it though.\r\n\r\nExtract the project and run:\r\n```\r\ndbt deps\r\ndbt compile\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nThe compilation succeeds on both DBT 0.16.1 and 0.17.0rc3\r\n\r\n### Actual behavior\r\n\r\nThe compilation succeeds for DBT 0.16.1 and fails for 0.17.0rc3 with the following error:\r\n\r\n```\r\nEncountered an error:\r\nRuntime Error\r\n Database Error in test dbt_utils_equality_my_second_dbt_model_id__ref_my_first_dbt_model_ (models/example/schema.yml)\r\n 002003 (42S02): SQL compilation error:\r\n Table 'DB.SCHEMA.MY_SECOND_DBT_MODEL' does not exist or not authorized.\r\n```\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [ ] redshift\r\n- [ ] bigquery\r\n- [x] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\n```\r\ndbt --version\r\ninstalled version: 0.17.0-rc3\r\n latest version: 0.16.1\r\n\r\nYour version of dbt is ahead of the latest release!\r\n\r\nPlugins:\r\n - snowflake: 0.17.0rc3\r\n - postgres: 0.17.0rc3\r\n - redshift: 0.17.0rc3\r\n - bigquery: 0.17.0rc3\r\n```\r\n\r\n**The operating system you're using:** Linux Mint 19.3\r\n\r\n\r\n**The output of `python --version`:** Python 3.6.9\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import Mapping, Any, Optional, List, Union\n\nimport agate\n\nfrom dbt.adapters.base.impl import AdapterConfig\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import (\n LIST_SCHEMAS_MACRO_NAME,\n LIST_RELATIONS_MACRO_NAME,\n)\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\n@dataclass\nclass SnowflakeConfig(AdapterConfig):\n transient: Optional[bool] = None\n cluster_by: Optional[Union[str, List[str]]] = None\n automatic_clustering: Optional[bool] = None\n secure: Optional[bool] = None\n copy_grants: Optional[bool] = None\n snowflake_warehouse: Optional[str] = None\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = 
SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = SnowflakeConfig\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n\n def list_relations_without_caching(\n self, schema_relation: SnowflakeRelation\n ) -> List[SnowflakeRelation]:\n kwargs = {'schema_relation': schema_relation}\n try:\n results = self.execute_macro(\n LIST_RELATIONS_MACRO_NAME,\n kwargs=kwargs\n )\n except DatabaseException as exc:\n # if the schema doesn't exist, we just want to return.\n # Alternatively, we could query the list of schemas before we start\n # and skip listing the missing ones, which sounds expensive.\n if 'Object does not exist' in str(exc):\n return []\n raise\n\n relations = []\n quote_policy = {\n 'database': True,\n 'schema': True,\n 'identifier': True\n }\n\n columns = ['database_name', 'schema_name', 'name', 'kind']\n for _database, _schema, _identifier, _type in results.select(columns):\n try:\n _type = self.Relation.get_relation_type(_type.lower())\n except ValueError:\n _type = self.Relation.External\n relations.append(self.Relation.create(\n database=_database,\n schema=_schema,\n identifier=_identifier,\n quote_policy=quote_policy,\n type=_type\n ))\n\n 
return relations\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}]}
| 2,548 | 187 |
gh_patches_debug_12235 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2303 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_OTEL_METRICS_EXPORTER env var should be OTEL_METRICS_EXPORTER
The environment variable `_OTEL_METRICS_EXPORTER` is prefixed with an underscore, but there's no need for it as that environment variable is marked as stable in the specification https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/environment_variables.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 OTEL_PROPAGATORS = "OTEL_PROPAGATORS"
16 """
17 .. envvar:: OTEL_PROPAGATORS
18 """
19
20 OTEL_PYTHON_CONTEXT = "OTEL_PYTHON_CONTEXT"
21 """
22 .. envvar:: OTEL_PYTHON_CONTEXT
23 """
24
25 OTEL_PYTHON_ID_GENERATOR = "OTEL_PYTHON_ID_GENERATOR"
26 """
27 .. envvar:: OTEL_PYTHON_ID_GENERATOR
28 """
29
30 OTEL_TRACES_EXPORTER = "OTEL_TRACES_EXPORTER"
31 """
32 .. envvar:: OTEL_TRACES_EXPORTER
33 """
34
35 OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER"
36 """
37 .. envvar:: OTEL_PYTHON_TRACER_PROVIDER
38 """
39
40 _OTEL_PYTHON_METER_PROVIDER = "OTEL_PYTHON_METER_PROVIDER"
41 """
42 .. envvar:: OTEL_PYTHON_METER_PROVIDER
43 """
44
45 _OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
46 """
47 .. envvar:: OTEL_METRICS_EXPORTER
48
49 """
50
[end of opentelemetry-api/src/opentelemetry/environment_variables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-api/src/opentelemetry/environment_variables.py b/opentelemetry-api/src/opentelemetry/environment_variables.py
--- a/opentelemetry-api/src/opentelemetry/environment_variables.py
+++ b/opentelemetry-api/src/opentelemetry/environment_variables.py
@@ -12,6 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
+"""
+.. envvar:: OTEL_METRICS_EXPORTER
+
+"""
+
OTEL_PROPAGATORS = "OTEL_PROPAGATORS"
"""
.. envvar:: OTEL_PROPAGATORS
@@ -41,9 +47,3 @@
"""
.. envvar:: OTEL_PYTHON_METER_PROVIDER
"""
-
-_OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
-"""
-.. envvar:: OTEL_METRICS_EXPORTER
-
-"""
|
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/environment_variables.py b/opentelemetry-api/src/opentelemetry/environment_variables.py\n--- a/opentelemetry-api/src/opentelemetry/environment_variables.py\n+++ b/opentelemetry-api/src/opentelemetry/environment_variables.py\n@@ -12,6 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n+\"\"\"\n+.. envvar:: OTEL_METRICS_EXPORTER\n+\n+\"\"\"\n+\n OTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\n \"\"\"\n .. envvar:: OTEL_PROPAGATORS\n@@ -41,9 +47,3 @@\n \"\"\"\n .. envvar:: OTEL_PYTHON_METER_PROVIDER\n \"\"\"\n-\n-_OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n-\"\"\"\n-.. envvar:: OTEL_METRICS_EXPORTER\n-\n-\"\"\"\n", "issue": "_OTEL_METRICS_EXPORTER env var should be OTEL_METRICS_EXPORTER\nThe environment variable `_OTEL_METRICS_EXPORTER` is prefixed with an underscore, but there's no need for it as that environment variable is marked as stable in the specification https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nOTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\n\"\"\"\n.. envvar:: OTEL_PROPAGATORS\n\"\"\"\n\nOTEL_PYTHON_CONTEXT = \"OTEL_PYTHON_CONTEXT\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_CONTEXT\n\"\"\"\n\nOTEL_PYTHON_ID_GENERATOR = \"OTEL_PYTHON_ID_GENERATOR\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_ID_GENERATOR\n\"\"\"\n\nOTEL_TRACES_EXPORTER = \"OTEL_TRACES_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_TRACES_EXPORTER\n\"\"\"\n\nOTEL_PYTHON_TRACER_PROVIDER = \"OTEL_PYTHON_TRACER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_TRACER_PROVIDER\n\"\"\"\n\n_OTEL_PYTHON_METER_PROVIDER = \"OTEL_PYTHON_METER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_METER_PROVIDER\n\"\"\"\n\n_OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_METRICS_EXPORTER\n\n\"\"\"\n", "path": "opentelemetry-api/src/opentelemetry/environment_variables.py"}]}
| 1,059 | 206 |
gh_patches_debug_42145 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1140 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Walmart Spider Error
Something with the Walmart spider appears to be failing. When importing the geojson file from alltheplaces.xyz to qgis or geojson.io, there are a large number of locations missing in the western US.

</issue>
<code>
[start of locations/spiders/walmart.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6
7
8 class WalmartSpider(scrapy.Spider):
9 name = "walmart"
10 allowed_domains = ["walmart.com"]
11 start_urls = (
12 'https://www.walmart.com/sitemap_store_main.xml',
13 )
14
15 def store_hours(self, store_hours):
16 if store_hours == 'Mo-Su':
17 return u'24/7'
18 elif store_hours is None:
19 return None
20 else:
21 return store_hours
22
23 def parse(self, response):
24 response.selector.remove_namespaces()
25 for u in response.xpath('//loc/text()').extract():
26 if u.endswith('/details'):
27 yield scrapy.Request(u.strip(), callback=self.parse_store)
28
29 def parse_store(self, response):
30 addr = response.xpath('//div[@itemprop="address"]')[0]
31 yield GeojsonPointItem(
32 lat=response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
33 lon=response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
34 ref=response.url.split('/')[4],
35 phone=response.xpath('//meta[@itemprop="telephone"]/@content').extract_first(),
36 name=response.xpath('//meta[@itemprop="name"]/@content').extract_first(),
37 opening_hours=self.store_hours(response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first()),
38 addr_full=addr.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
39 city=addr.xpath('//span[@itemprop="locality"]/text()').extract_first(),
40 state=addr.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
41 postcode=addr.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
42 )
43
[end of locations/spiders/walmart.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/walmart.py b/locations/spiders/walmart.py
--- a/locations/spiders/walmart.py
+++ b/locations/spiders/walmart.py
@@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
import scrapy
import json
+import re
+from collections import defaultdict
from locations.items import GeojsonPointItem
@@ -11,14 +13,39 @@
start_urls = (
'https://www.walmart.com/sitemap_store_main.xml',
)
+ retries = defaultdict(int)
def store_hours(self, store_hours):
- if store_hours == 'Mo-Su':
+ if store_hours.get('operationalHours').get('open24Hours') is True:
return u'24/7'
- elif store_hours is None:
+ elif not store_hours.get('operationalHoursCombined'):
return None
else:
- return store_hours
+ op_hours = store_hours.get('operationalHoursCombined')
+ open_hours = []
+ for op_hour in op_hours:
+ if op_hour.get('dailyHours').get('closed') is True:
+ continue
+
+ if op_hour.get('dailyHours').get('openFullDay') is True:
+ start_hr = '00:00'
+ end_hr = '24:00'
+ else:
+ start_hr = op_hour.get('dailyHours').get('startHr')
+ end_hr = op_hour.get('dailyHours').get('endHr')
+
+ start_day = op_hour.get('startDayName')
+ end_day = op_hour.get('endDayName')
+
+ if end_day is None:
+ end_day = ''
+
+ hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr
+ open_hours.append(hours)
+
+ hours_combined = '; '.join(open_hours)
+
+ return hours_combined
def parse(self, response):
response.selector.remove_namespaces()
@@ -27,16 +54,30 @@
yield scrapy.Request(u.strip(), callback=self.parse_store)
def parse_store(self, response):
- addr = response.xpath('//div[@itemprop="address"]')[0]
+ script = response.xpath("//script[contains(.,'WML_REDUX_INITIAL_STATE')]").extract_first()
+ # In rare cases will hit page before script tag loads with content
+ if script is None:
+ if self.retries.get(response.url, 0) <= 2:
+ self.retries[response.url] += 1
+ yield scrapy.Request(response.url, callback=self.parse_store) # Try again
+ else:
+ raise Exception('Retried too many times')
+
+ script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script,
+ flags=re.IGNORECASE | re.DOTALL).group(1)
+
+ store_data = json.loads(script_content).get('store')
+
yield GeojsonPointItem(
- lat=response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
- lon=response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
- ref=response.url.split('/')[4],
- phone=response.xpath('//meta[@itemprop="telephone"]/@content').extract_first(),
- name=response.xpath('//meta[@itemprop="name"]/@content').extract_first(),
- opening_hours=self.store_hours(response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first()),
- addr_full=addr.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
- city=addr.xpath('//span[@itemprop="locality"]/text()').extract_first(),
- state=addr.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
- postcode=addr.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
+ lat=store_data.get('geoPoint').get('latitude'),
+ lon=store_data.get('geoPoint').get('longitude'),
+ ref=store_data.get('id'),
+ phone=store_data.get('phone'),
+ name=store_data.get('displayName'),
+ opening_hours=self.store_hours(store_data),
+ addr_full=store_data.get('address').get('streetAddress'),
+ city=store_data.get('address').get('city'),
+ state=store_data.get('address').get('state'),
+ postcode=store_data.get('address').get('postalCode'),
+ website=store_data.get('detailsPageURL'),
)
|
{"golden_diff": "diff --git a/locations/spiders/walmart.py b/locations/spiders/walmart.py\n--- a/locations/spiders/walmart.py\n+++ b/locations/spiders/walmart.py\n@@ -1,7 +1,9 @@\n # -*- coding: utf-8 -*-\n import scrapy\n import json\n+import re\n \n+from collections import defaultdict\n from locations.items import GeojsonPointItem\n \n \n@@ -11,14 +13,39 @@\n start_urls = (\n 'https://www.walmart.com/sitemap_store_main.xml',\n )\n+ retries = defaultdict(int)\n \n def store_hours(self, store_hours):\n- if store_hours == 'Mo-Su':\n+ if store_hours.get('operationalHours').get('open24Hours') is True:\n return u'24/7'\n- elif store_hours is None:\n+ elif not store_hours.get('operationalHoursCombined'):\n return None\n else:\n- return store_hours\n+ op_hours = store_hours.get('operationalHoursCombined')\n+ open_hours = []\n+ for op_hour in op_hours:\n+ if op_hour.get('dailyHours').get('closed') is True:\n+ continue\n+\n+ if op_hour.get('dailyHours').get('openFullDay') is True:\n+ start_hr = '00:00'\n+ end_hr = '24:00'\n+ else:\n+ start_hr = op_hour.get('dailyHours').get('startHr')\n+ end_hr = op_hour.get('dailyHours').get('endHr')\n+\n+ start_day = op_hour.get('startDayName')\n+ end_day = op_hour.get('endDayName')\n+\n+ if end_day is None:\n+ end_day = ''\n+\n+ hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr\n+ open_hours.append(hours)\n+\n+ hours_combined = '; '.join(open_hours)\n+\n+ return hours_combined\n \n def parse(self, response):\n response.selector.remove_namespaces()\n@@ -27,16 +54,30 @@\n yield scrapy.Request(u.strip(), callback=self.parse_store)\n \n def parse_store(self, response):\n- addr = response.xpath('//div[@itemprop=\"address\"]')[0]\n+ script = response.xpath(\"//script[contains(.,'WML_REDUX_INITIAL_STATE')]\").extract_first()\n+ # In rare cases will hit page before script tag loads with content\n+ if script is None:\n+ if self.retries.get(response.url, 0) <= 2:\n+ self.retries[response.url] += 1\n+ yield scrapy.Request(response.url, callback=self.parse_store) # Try again\n+ else:\n+ raise Exception('Retried too many times')\n+\n+ script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script,\n+ flags=re.IGNORECASE | re.DOTALL).group(1)\n+\n+ store_data = json.loads(script_content).get('store')\n+\n yield GeojsonPointItem(\n- lat=response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n- lon=response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n- ref=response.url.split('/')[4],\n- phone=response.xpath('//meta[@itemprop=\"telephone\"]/@content').extract_first(),\n- name=response.xpath('//meta[@itemprop=\"name\"]/@content').extract_first(),\n- opening_hours=self.store_hours(response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()),\n- addr_full=addr.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n- city=addr.xpath('//span[@itemprop=\"locality\"]/text()').extract_first(),\n- state=addr.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n- postcode=addr.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n+ lat=store_data.get('geoPoint').get('latitude'),\n+ lon=store_data.get('geoPoint').get('longitude'),\n+ ref=store_data.get('id'),\n+ phone=store_data.get('phone'),\n+ name=store_data.get('displayName'),\n+ opening_hours=self.store_hours(store_data),\n+ addr_full=store_data.get('address').get('streetAddress'),\n+ city=store_data.get('address').get('city'),\n+ state=store_data.get('address').get('state'),\n+ 
postcode=store_data.get('address').get('postalCode'),\n+ website=store_data.get('detailsPageURL'),\n )\n", "issue": "Walmart Spider Error\nSomething with the Walmart spider appears to be failing. When importing the geojson file from alltheplaces.xyz to qgis or geojson.io, there are a large number of locations missing in the western US.\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\n\nclass WalmartSpider(scrapy.Spider):\n name = \"walmart\"\n allowed_domains = [\"walmart.com\"]\n start_urls = (\n 'https://www.walmart.com/sitemap_store_main.xml',\n )\n\n def store_hours(self, store_hours):\n if store_hours == 'Mo-Su':\n return u'24/7'\n elif store_hours is None:\n return None\n else:\n return store_hours\n\n def parse(self, response):\n response.selector.remove_namespaces()\n for u in response.xpath('//loc/text()').extract():\n if u.endswith('/details'):\n yield scrapy.Request(u.strip(), callback=self.parse_store)\n\n def parse_store(self, response):\n addr = response.xpath('//div[@itemprop=\"address\"]')[0]\n yield GeojsonPointItem(\n lat=response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n lon=response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n ref=response.url.split('/')[4],\n phone=response.xpath('//meta[@itemprop=\"telephone\"]/@content').extract_first(),\n name=response.xpath('//meta[@itemprop=\"name\"]/@content').extract_first(),\n opening_hours=self.store_hours(response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()),\n addr_full=addr.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n city=addr.xpath('//span[@itemprop=\"locality\"]/text()').extract_first(),\n state=addr.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n postcode=addr.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n )\n", "path": "locations/spiders/walmart.py"}]}
| 1,111 | 1,012 |
gh_patches_debug_40967 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-873 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove download_image function.
# Bug report
This is not a kind of problem mimesis solves. So it's pretty annoying when tests fail because of it all the time. It's better just remove it.
</issue>
<code>
[start of mimesis/shortcuts.py]
1 # -*- coding: utf-8 -*-
2
3 """This module is provide internal util functions."""
4
5 import ssl
6 from os import path
7 from typing import Union
8 from urllib import request
9 from uuid import uuid4
10
11 __all__ = ['download_image', 'luhn_checksum']
12
13
14 def luhn_checksum(num: str) -> str:
15 """Calculate a checksum for num using the Luhn algorithm.
16
17 :param num: The number to calculate a checksum for as a string.
18 :return: Checksum for number.
19 """
20 check = 0
21 for i, s in enumerate(reversed(num)):
22 sx = int(s)
23 sx = sx * 2 if i % 2 == 0 else sx
24 sx = sx - 9 if sx > 9 else sx
25 check += sx
26 return str(check * 9 % 10)
27
28
29 def download_image(url: str = '', save_path: str = '',
30 unverified_ctx: bool = False) -> Union[None, str]:
31 """Download image and save in current directory on local machine.
32
33 :param url: URL to image.
34 :param save_path: Saving path.
35 :param unverified_ctx: Create unverified context.
36 :return: Path to downloaded image.
37 :rtype: str or None
38 """
39 if unverified_ctx:
40 ssl._create_default_https_context = ssl._create_unverified_context
41
42 if url:
43 image_name = url.rsplit('/')[-1]
44
45 splitted_name = image_name.rsplit('.')
46 if len(splitted_name) < 2:
47 image_name = '{}.jpg'.format(uuid4())
48 else:
49 image_name = '{}.{}'.format(uuid4(), splitted_name[-1])
50 full_image_path = path.join(save_path, image_name)
51 request.urlretrieve(url, full_image_path)
52 return full_image_path
53 return None
54
[end of mimesis/shortcuts.py]
[start of mimesis/providers/internet.py]
1 # -*- coding: utf-8 -*-
2
3 """Provides data related to internet."""
4
5 import urllib.error
6 import urllib.request
7 from ipaddress import IPv4Address, IPv6Address
8 from typing import List, Optional, Union
9
10 from mimesis.data import (
11 EMOJI,
12 HASHTAGS,
13 HTTP_METHODS,
14 HTTP_STATUS_CODES,
15 HTTP_STATUS_MSGS,
16 NETWORK_PROTOCOLS,
17 TLD,
18 USER_AGENTS,
19 USERNAMES,
20 )
21 from mimesis.enums import Layer, MimeType, PortRange, TLDType
22 from mimesis.exceptions import NonEnumerableError
23 from mimesis.providers.base import BaseProvider
24 from mimesis.providers.file import File
25
26 __all__ = ['Internet']
27
28
29 class Internet(BaseProvider):
30 """Class for generating data related to the internet."""
31
32 def __init__(self, *args, **kwargs):
33 """Initialize attributes.
34
35 :param args: Arguments.
36 :param kwargs: Keyword arguments.
37 """
38 super().__init__(*args, **kwargs)
39 self.__file = File(seed=self.seed)
40 self._MAX_IPV4 = (2 ** 32) - 1
41 self._MAX_IPV6 = (2 ** 128) - 1
42
43 class Meta:
44 """Class for metadata."""
45
46 name = 'internet'
47
48 def content_type(self, mime_type: Optional[MimeType] = None) -> str:
49 """Get a random HTTP content type.
50
51 :return: Content type.
52
53 :Example:
54 Content-Type: application/json
55 """
56 fmt = self.__file.mime_type(type_=mime_type)
57 return 'Content-Type: {}'.format(fmt)
58
59 def http_status_message(self) -> str:
60 """Get a random HTTP status message.
61
62 :return: HTTP status message.
63
64 :Example:
65 200 OK
66 """
67 return self.random.choice(HTTP_STATUS_MSGS)
68
69 def http_status_code(self) -> int:
70 """Get a random HTTP status code.
71
72 :return: HTTP status.
73
74 :Example:
75 200
76 """
77 return self.random.choice(HTTP_STATUS_CODES)
78
79 def http_method(self) -> str:
80 """Get a random HTTP method.
81
82 :return: HTTP method.
83
84 :Example:
85 POST
86 """
87 return self.random.choice(HTTP_METHODS)
88
89 def ip_v4_object(self) -> IPv4Address:
90 """Generate random IPv4Address object.
91
92 See documentation for module ipaddress:
93 https://docs.python.org/3.7/library/ipaddress.html
94
95 :return: IPv4Address object.
96 """
97 return IPv4Address(
98 self.random.randint(0, self._MAX_IPV4),
99 )
100
101 def ip_v4(self, with_port: bool = False,
102 port_range: PortRange = PortRange.ALL) -> str:
103 """Generate a random IPv4 address as string.
104
105 :param port_range: PortRange enum object.
106 :param with_port: Add port from PortRange to IP.
107 :return: IPv4 address as string.
108
109 :Example:
110 19.121.223.58 or 19.121.223.58:8000
111 """
112 ip = str(self.ip_v4_object())
113
114 if with_port:
115 port = self.port(port_range=port_range)
116 return '{}:{}'.format(ip, port)
117
118 return ip
119
120 def ip_v6_object(self) -> IPv6Address:
121 """Generate random IPv6Address object.
122
123 See documentation for module ipaddress:
124 https://docs.python.org/3.7/library/ipaddress.html
125
126 :return: IPv6Address object.
127 """
128 return IPv6Address(
129 self.random.randint(
130 0, self._MAX_IPV6,
131 ),
132 )
133
134 def ip_v6(self) -> str:
135 """Generate a random IPv6 address as string.
136
137 :return: IPv6 address string.
138
139 :Example:
140 2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3
141 """
142 return str(self.ip_v6_object())
143
144 def mac_address(self) -> str:
145 """Generate a random MAC address.
146
147 :return: Random MAC address.
148
149 :Example:
150 00:16:3e:25:e7:b1
151 """
152 mac_hex = [
153 0x00, 0x16, 0x3e,
154 self.random.randint(0x00, 0x7f),
155 self.random.randint(0x00, 0xff),
156 self.random.randint(0x00, 0xff),
157 ]
158 mac = map(lambda x: '{:02x}'.format(x), mac_hex)
159 return ':'.join(mac)
160
161 def emoji(self) -> str:
162 """Get a random emoji shortcut code.
163
164 :return: Emoji code.
165
166 :Example:
167 :kissing:
168 """
169 return self.random.choice(EMOJI)
170
171 @staticmethod
172 def image_placeholder(width: Union[int, str] = 1920,
173 height: Union[int, str] = 1080) -> str:
174 """Generate a link to the image placeholder.
175
176 :param width: Width of image.
177 :param height: Height of image.
178 :return: URL to image placeholder.
179 """
180 url = 'http://placehold.it/{width}x{height}'
181 return url.format(width=width, height=height)
182
183 @staticmethod
184 def stock_image(width: Union[int, str] = 1920,
185 height: Union[int, str] = 1080,
186 keywords: Optional[List[str]] = None,
187 writable: bool = False) -> Union[str, bytes]:
188 """Generate random stock image (JPEG) hosted on Unsplash.
189
190 See «Random search term» on https://source.unsplash.com/
191 for more details.
192
193 .. note:: This method required an active HTTP connection.
194
195 :param width: Width of the image.
196 :param height: Height of the image.
197 :param keywords: List of search keywords.
198 :param writable: Return image as sequence ob bytes.
199 :return: Link to the image.
200 """
201 api = 'https://source.unsplash.com/{}x{}?{}'
202
203 if keywords is not None:
204 keywords_str = ','.join(keywords)
205 else:
206 keywords_str = ''
207
208 url = api.format(width, height, keywords_str)
209
210 try:
211 response = urllib.request.urlopen(url)
212 if writable:
213 return response.read()
214 url = response.geturl()
215 return url
216 except urllib.error.URLError:
217 raise urllib.error.URLError(
218 'Required an active HTTP connection')
219
220 def hashtags(self, quantity: int = 4) -> Union[str, list]:
221 """Generate a list of hashtags.
222
223 :param quantity: The quantity of hashtags.
224 :return: The list of hashtags.
225 :raises NonEnumerableError: if category is not in Hashtag.
226
227 :Example:
228 ['#love', '#sky', '#nice']
229 """
230 tags = ['#' + self.random.choice(HASHTAGS)
231 for _ in range(quantity)]
232
233 if int(quantity) == 1:
234 return tags[0]
235
236 return tags
237
238 def home_page(self, tld_type: Optional[TLDType] = None) -> str:
239 """Generate a random home page.
240
241 :param tld_type: TLD type.
242 :return: Random home page.
243
244 :Example:
245 https://fontir.info
246 """
247 resource = self.random.choice(USERNAMES)
248 domain = self.top_level_domain(
249 tld_type=tld_type,
250 )
251
252 return 'https://{}{}'.format(
253 resource, domain)
254
255 def top_level_domain(self, tld_type: Optional[TLDType] = None) -> str:
256 """Return random top level domain.
257
258 :param tld_type: Enum object DomainType
259 :return: Top level domain.
260 :raises NonEnumerableError: if tld_type not in DomainType.
261 """
262 key = self._validate_enum(item=tld_type, enum=TLDType)
263 return self.random.choice(TLD[key])
264
265 def user_agent(self) -> str:
266 """Get a random user agent.
267
268 :return: User agent.
269
270 :Example:
271 Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0)
272 Gecko/20100101 Firefox/15.0.1
273 """
274 return self.random.choice(USER_AGENTS)
275
276 def network_protocol(self, layer: Optional[Layer] = None) -> str:
277 """Get a random network protocol form OSI model.
278
279 :param layer: Enum object Layer.
280 :return: Protocol name.
281
282 :Example:
283 AMQP
284 """
285 key = self._validate_enum(item=layer, enum=Layer)
286 protocols = NETWORK_PROTOCOLS[key]
287 return self.random.choice(protocols)
288
289 def port(self, port_range: PortRange = PortRange.ALL) -> int:
290 """Generate random port.
291
292 :param port_range: PortRange enum object.
293 :return: Port number.
294 :raises NonEnumerableError: if port_range is not in PortRange.
295
296 :Example:
297 8080
298 """
299 if isinstance(port_range, PortRange):
300 return self.random.randint(*port_range.value)
301 else:
302 raise NonEnumerableError(PortRange)
303
[end of mimesis/providers/internet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mimesis/providers/internet.py b/mimesis/providers/internet.py
--- a/mimesis/providers/internet.py
+++ b/mimesis/providers/internet.py
@@ -185,12 +185,13 @@
height: Union[int, str] = 1080,
keywords: Optional[List[str]] = None,
writable: bool = False) -> Union[str, bytes]:
- """Generate random stock image (JPEG) hosted on Unsplash.
+ """Generate random stock image (JPG/JPEG) hosted on Unsplash.
See «Random search term» on https://source.unsplash.com/
for more details.
- .. note:: This method required an active HTTP connection.
+ .. note:: This method required an active HTTP connection
+ if you want to get writable object
:param width: Width of the image.
:param height: Height of the image.
@@ -198,24 +199,23 @@
:param writable: Return image as sequence ob bytes.
:return: Link to the image.
"""
- api = 'https://source.unsplash.com/{}x{}?{}'
+ api_url = 'https://source.unsplash.com/{}x{}?{}'
if keywords is not None:
keywords_str = ','.join(keywords)
else:
keywords_str = ''
- url = api.format(width, height, keywords_str)
+ url = api_url.format(width, height, keywords_str)
- try:
- response = urllib.request.urlopen(url)
- if writable:
+ if writable:
+ try:
+ response = urllib.request.urlopen(url)
return response.read()
- url = response.geturl()
- return url
- except urllib.error.URLError:
- raise urllib.error.URLError(
- 'Required an active HTTP connection')
+ except urllib.error.URLError:
+ raise urllib.error.URLError(
+ 'Required an active HTTP connection')
+ return url
def hashtags(self, quantity: int = 4) -> Union[str, list]:
"""Generate a list of hashtags.
diff --git a/mimesis/shortcuts.py b/mimesis/shortcuts.py
--- a/mimesis/shortcuts.py
+++ b/mimesis/shortcuts.py
@@ -8,7 +8,7 @@
from urllib import request
from uuid import uuid4
-__all__ = ['download_image', 'luhn_checksum']
+__all__ = ['luhn_checksum']
def luhn_checksum(num: str) -> str:
@@ -24,30 +24,3 @@
sx = sx - 9 if sx > 9 else sx
check += sx
return str(check * 9 % 10)
-
-
-def download_image(url: str = '', save_path: str = '',
- unverified_ctx: bool = False) -> Union[None, str]:
- """Download image and save in current directory on local machine.
-
- :param url: URL to image.
- :param save_path: Saving path.
- :param unverified_ctx: Create unverified context.
- :return: Path to downloaded image.
- :rtype: str or None
- """
- if unverified_ctx:
- ssl._create_default_https_context = ssl._create_unverified_context
-
- if url:
- image_name = url.rsplit('/')[-1]
-
- splitted_name = image_name.rsplit('.')
- if len(splitted_name) < 2:
- image_name = '{}.jpg'.format(uuid4())
- else:
- image_name = '{}.{}'.format(uuid4(), splitted_name[-1])
- full_image_path = path.join(save_path, image_name)
- request.urlretrieve(url, full_image_path)
- return full_image_path
- return None
|
{"golden_diff": "diff --git a/mimesis/providers/internet.py b/mimesis/providers/internet.py\n--- a/mimesis/providers/internet.py\n+++ b/mimesis/providers/internet.py\n@@ -185,12 +185,13 @@\n height: Union[int, str] = 1080,\n keywords: Optional[List[str]] = None,\n writable: bool = False) -> Union[str, bytes]:\n- \"\"\"Generate random stock image (JPEG) hosted on Unsplash.\n+ \"\"\"Generate random stock image (JPG/JPEG) hosted on Unsplash.\n \n See \u00abRandom search term\u00bb on https://source.unsplash.com/\n for more details.\n \n- .. note:: This method required an active HTTP connection.\n+ .. note:: This method required an active HTTP connection\n+ if you want to get writable object\n \n :param width: Width of the image.\n :param height: Height of the image.\n@@ -198,24 +199,23 @@\n :param writable: Return image as sequence ob bytes.\n :return: Link to the image.\n \"\"\"\n- api = 'https://source.unsplash.com/{}x{}?{}'\n+ api_url = 'https://source.unsplash.com/{}x{}?{}'\n \n if keywords is not None:\n keywords_str = ','.join(keywords)\n else:\n keywords_str = ''\n \n- url = api.format(width, height, keywords_str)\n+ url = api_url.format(width, height, keywords_str)\n \n- try:\n- response = urllib.request.urlopen(url)\n- if writable:\n+ if writable:\n+ try:\n+ response = urllib.request.urlopen(url)\n return response.read()\n- url = response.geturl()\n- return url\n- except urllib.error.URLError:\n- raise urllib.error.URLError(\n- 'Required an active HTTP connection')\n+ except urllib.error.URLError:\n+ raise urllib.error.URLError(\n+ 'Required an active HTTP connection')\n+ return url\n \n def hashtags(self, quantity: int = 4) -> Union[str, list]:\n \"\"\"Generate a list of hashtags.\ndiff --git a/mimesis/shortcuts.py b/mimesis/shortcuts.py\n--- a/mimesis/shortcuts.py\n+++ b/mimesis/shortcuts.py\n@@ -8,7 +8,7 @@\n from urllib import request\n from uuid import uuid4\n \n-__all__ = ['download_image', 'luhn_checksum']\n+__all__ = ['luhn_checksum']\n \n \n def luhn_checksum(num: str) -> str:\n@@ -24,30 +24,3 @@\n sx = sx - 9 if sx > 9 else sx\n check += sx\n return str(check * 9 % 10)\n-\n-\n-def download_image(url: str = '', save_path: str = '',\n- unverified_ctx: bool = False) -> Union[None, str]:\n- \"\"\"Download image and save in current directory on local machine.\n-\n- :param url: URL to image.\n- :param save_path: Saving path.\n- :param unverified_ctx: Create unverified context.\n- :return: Path to downloaded image.\n- :rtype: str or None\n- \"\"\"\n- if unverified_ctx:\n- ssl._create_default_https_context = ssl._create_unverified_context\n-\n- if url:\n- image_name = url.rsplit('/')[-1]\n-\n- splitted_name = image_name.rsplit('.')\n- if len(splitted_name) < 2:\n- image_name = '{}.jpg'.format(uuid4())\n- else:\n- image_name = '{}.{}'.format(uuid4(), splitted_name[-1])\n- full_image_path = path.join(save_path, image_name)\n- request.urlretrieve(url, full_image_path)\n- return full_image_path\n- return None\n", "issue": "Remove download_image function.\n# Bug report\r\n\r\nThis is not a kind of problem mimesis solves. So it's pretty annoying when tests fail because of it all the time. 
It's better just remove it.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module is provide internal util functions.\"\"\"\n\nimport ssl\nfrom os import path\nfrom typing import Union\nfrom urllib import request\nfrom uuid import uuid4\n\n__all__ = ['download_image', 'luhn_checksum']\n\n\ndef luhn_checksum(num: str) -> str:\n \"\"\"Calculate a checksum for num using the Luhn algorithm.\n\n :param num: The number to calculate a checksum for as a string.\n :return: Checksum for number.\n \"\"\"\n check = 0\n for i, s in enumerate(reversed(num)):\n sx = int(s)\n sx = sx * 2 if i % 2 == 0 else sx\n sx = sx - 9 if sx > 9 else sx\n check += sx\n return str(check * 9 % 10)\n\n\ndef download_image(url: str = '', save_path: str = '',\n unverified_ctx: bool = False) -> Union[None, str]:\n \"\"\"Download image and save in current directory on local machine.\n\n :param url: URL to image.\n :param save_path: Saving path.\n :param unverified_ctx: Create unverified context.\n :return: Path to downloaded image.\n :rtype: str or None\n \"\"\"\n if unverified_ctx:\n ssl._create_default_https_context = ssl._create_unverified_context\n\n if url:\n image_name = url.rsplit('/')[-1]\n\n splitted_name = image_name.rsplit('.')\n if len(splitted_name) < 2:\n image_name = '{}.jpg'.format(uuid4())\n else:\n image_name = '{}.{}'.format(uuid4(), splitted_name[-1])\n full_image_path = path.join(save_path, image_name)\n request.urlretrieve(url, full_image_path)\n return full_image_path\n return None\n", "path": "mimesis/shortcuts.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides data related to internet.\"\"\"\n\nimport urllib.error\nimport urllib.request\nfrom ipaddress import IPv4Address, IPv6Address\nfrom typing import List, Optional, Union\n\nfrom mimesis.data import (\n EMOJI,\n HASHTAGS,\n HTTP_METHODS,\n HTTP_STATUS_CODES,\n HTTP_STATUS_MSGS,\n NETWORK_PROTOCOLS,\n TLD,\n USER_AGENTS,\n USERNAMES,\n)\nfrom mimesis.enums import Layer, MimeType, PortRange, TLDType\nfrom mimesis.exceptions import NonEnumerableError\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.file import File\n\n__all__ = ['Internet']\n\n\nclass Internet(BaseProvider):\n \"\"\"Class for generating data related to the internet.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize attributes.\n\n :param args: Arguments.\n :param kwargs: Keyword arguments.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__file = File(seed=self.seed)\n self._MAX_IPV4 = (2 ** 32) - 1\n self._MAX_IPV6 = (2 ** 128) - 1\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'internet'\n\n def content_type(self, mime_type: Optional[MimeType] = None) -> str:\n \"\"\"Get a random HTTP content type.\n\n :return: Content type.\n\n :Example:\n Content-Type: application/json\n \"\"\"\n fmt = self.__file.mime_type(type_=mime_type)\n return 'Content-Type: {}'.format(fmt)\n\n def http_status_message(self) -> str:\n \"\"\"Get a random HTTP status message.\n\n :return: HTTP status message.\n\n :Example:\n 200 OK\n \"\"\"\n return self.random.choice(HTTP_STATUS_MSGS)\n\n def http_status_code(self) -> int:\n \"\"\"Get a random HTTP status code.\n\n :return: HTTP status.\n\n :Example:\n 200\n \"\"\"\n return self.random.choice(HTTP_STATUS_CODES)\n\n def http_method(self) -> str:\n \"\"\"Get a random HTTP method.\n\n :return: HTTP method.\n\n :Example:\n POST\n \"\"\"\n return self.random.choice(HTTP_METHODS)\n\n def ip_v4_object(self) -> IPv4Address:\n \"\"\"Generate random IPv4Address object.\n\n See 
documentation for module ipaddress:\n https://docs.python.org/3.7/library/ipaddress.html\n\n :return: IPv4Address object.\n \"\"\"\n return IPv4Address(\n self.random.randint(0, self._MAX_IPV4),\n )\n\n def ip_v4(self, with_port: bool = False,\n port_range: PortRange = PortRange.ALL) -> str:\n \"\"\"Generate a random IPv4 address as string.\n\n :param port_range: PortRange enum object.\n :param with_port: Add port from PortRange to IP.\n :return: IPv4 address as string.\n\n :Example:\n 19.121.223.58 or 19.121.223.58:8000\n \"\"\"\n ip = str(self.ip_v4_object())\n\n if with_port:\n port = self.port(port_range=port_range)\n return '{}:{}'.format(ip, port)\n\n return ip\n\n def ip_v6_object(self) -> IPv6Address:\n \"\"\"Generate random IPv6Address object.\n\n See documentation for module ipaddress:\n https://docs.python.org/3.7/library/ipaddress.html\n\n :return: IPv6Address object.\n \"\"\"\n return IPv6Address(\n self.random.randint(\n 0, self._MAX_IPV6,\n ),\n )\n\n def ip_v6(self) -> str:\n \"\"\"Generate a random IPv6 address as string.\n\n :return: IPv6 address string.\n\n :Example:\n 2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3\n \"\"\"\n return str(self.ip_v6_object())\n\n def mac_address(self) -> str:\n \"\"\"Generate a random MAC address.\n\n :return: Random MAC address.\n\n :Example:\n 00:16:3e:25:e7:b1\n \"\"\"\n mac_hex = [\n 0x00, 0x16, 0x3e,\n self.random.randint(0x00, 0x7f),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n ]\n mac = map(lambda x: '{:02x}'.format(x), mac_hex)\n return ':'.join(mac)\n\n def emoji(self) -> str:\n \"\"\"Get a random emoji shortcut code.\n\n :return: Emoji code.\n\n :Example:\n :kissing:\n \"\"\"\n return self.random.choice(EMOJI)\n\n @staticmethod\n def image_placeholder(width: Union[int, str] = 1920,\n height: Union[int, str] = 1080) -> str:\n \"\"\"Generate a link to the image placeholder.\n\n :param width: Width of image.\n :param height: Height of image.\n :return: URL to image placeholder.\n \"\"\"\n url = 'http://placehold.it/{width}x{height}'\n return url.format(width=width, height=height)\n\n @staticmethod\n def stock_image(width: Union[int, str] = 1920,\n height: Union[int, str] = 1080,\n keywords: Optional[List[str]] = None,\n writable: bool = False) -> Union[str, bytes]:\n \"\"\"Generate random stock image (JPEG) hosted on Unsplash.\n\n See \u00abRandom search term\u00bb on https://source.unsplash.com/\n for more details.\n\n .. 
note:: This method required an active HTTP connection.\n\n :param width: Width of the image.\n :param height: Height of the image.\n :param keywords: List of search keywords.\n :param writable: Return image as sequence ob bytes.\n :return: Link to the image.\n \"\"\"\n api = 'https://source.unsplash.com/{}x{}?{}'\n\n if keywords is not None:\n keywords_str = ','.join(keywords)\n else:\n keywords_str = ''\n\n url = api.format(width, height, keywords_str)\n\n try:\n response = urllib.request.urlopen(url)\n if writable:\n return response.read()\n url = response.geturl()\n return url\n except urllib.error.URLError:\n raise urllib.error.URLError(\n 'Required an active HTTP connection')\n\n def hashtags(self, quantity: int = 4) -> Union[str, list]:\n \"\"\"Generate a list of hashtags.\n\n :param quantity: The quantity of hashtags.\n :return: The list of hashtags.\n :raises NonEnumerableError: if category is not in Hashtag.\n\n :Example:\n ['#love', '#sky', '#nice']\n \"\"\"\n tags = ['#' + self.random.choice(HASHTAGS)\n for _ in range(quantity)]\n\n if int(quantity) == 1:\n return tags[0]\n\n return tags\n\n def home_page(self, tld_type: Optional[TLDType] = None) -> str:\n \"\"\"Generate a random home page.\n\n :param tld_type: TLD type.\n :return: Random home page.\n\n :Example:\n https://fontir.info\n \"\"\"\n resource = self.random.choice(USERNAMES)\n domain = self.top_level_domain(\n tld_type=tld_type,\n )\n\n return 'https://{}{}'.format(\n resource, domain)\n\n def top_level_domain(self, tld_type: Optional[TLDType] = None) -> str:\n \"\"\"Return random top level domain.\n\n :param tld_type: Enum object DomainType\n :return: Top level domain.\n :raises NonEnumerableError: if tld_type not in DomainType.\n \"\"\"\n key = self._validate_enum(item=tld_type, enum=TLDType)\n return self.random.choice(TLD[key])\n\n def user_agent(self) -> str:\n \"\"\"Get a random user agent.\n\n :return: User agent.\n\n :Example:\n Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0)\n Gecko/20100101 Firefox/15.0.1\n \"\"\"\n return self.random.choice(USER_AGENTS)\n\n def network_protocol(self, layer: Optional[Layer] = None) -> str:\n \"\"\"Get a random network protocol form OSI model.\n\n :param layer: Enum object Layer.\n :return: Protocol name.\n\n :Example:\n AMQP\n \"\"\"\n key = self._validate_enum(item=layer, enum=Layer)\n protocols = NETWORK_PROTOCOLS[key]\n return self.random.choice(protocols)\n\n def port(self, port_range: PortRange = PortRange.ALL) -> int:\n \"\"\"Generate random port.\n\n :param port_range: PortRange enum object.\n :return: Port number.\n :raises NonEnumerableError: if port_range is not in PortRange.\n\n :Example:\n 8080\n \"\"\"\n if isinstance(port_range, PortRange):\n return self.random.randint(*port_range.value)\n else:\n raise NonEnumerableError(PortRange)\n", "path": "mimesis/providers/internet.py"}]}
| 4,006 | 847 |
gh_patches_debug_12789
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-1463
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error validating return types when using a Generic in Union
There seems to be an issue with types.
Generics worked fine and dandy until I tried to use them in a Union.
```python
@strawberry.type
class FaultyType:
code_sent: bool
result: Union[Pet, ErrorNode[Codes]]
```
Here, returning an ErrorNode in the "result" field fails the type check.
It fails with strawberry.exceptions.UnallowedReturnTypeForUnion: The type "<class 'strawberry_sample.ErrorNode'>" of the field "result" is not in the list of the types of the union: "['CodesErrorNode', 'Pet']"
Returning the same generic without unions in the type declaration works fine.
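For context, a minimal self-contained schema that triggers the same failure looks roughly like this; the concrete Pet, Codes and ErrorNode definitions below are assumptions reconstructed from the fragment above and the linked gist, not the reporter's exact code:
```python
from enum import Enum
from typing import Generic, TypeVar, Union

import strawberry

T = TypeVar("T")


@strawberry.enum
class Codes(Enum):
    NOT_FOUND = "not_found"


@strawberry.type
class Pet:
    name: str


@strawberry.type
class ErrorNode(Generic[T]):
    code: T


@strawberry.type
class Query:
    @strawberry.field
    def result(self) -> Union[Pet, ErrorNode[Codes]]:
        # Resolving this field with the concrete generic is what raises
        # UnallowedReturnTypeForUnion when a query selects it.
        return ErrorNode(code=Codes.NOT_FOUND)


schema = strawberry.Schema(query=Query)
# e.g. schema.execute_sync("{ result { __typename } }") surfaces the error.
```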
Full sample:
https://gist.github.com/MeRuslan/5bd179f28fc5ae05e815429ee02ebdf6
</issue>
<code>
[start of strawberry/types/types.py]
1 from __future__ import annotations
2
3 import dataclasses
4 from typing import (
5 TYPE_CHECKING,
6 List,
7 Mapping,
8 Optional,
9 Sequence,
10 Type,
11 TypeVar,
12 Union,
13 )
14
15 from strawberry.type import StrawberryType, StrawberryTypeVar
16 from strawberry.utils.typing import is_generic as is_type_generic
17
18
19 if TYPE_CHECKING:
20 from strawberry.field import StrawberryField
21 from strawberry.schema_directive import StrawberrySchemaDirective
22
23
24 @dataclasses.dataclass(eq=False)
25 class TypeDefinition(StrawberryType):
26 name: str
27 is_input: bool
28 is_interface: bool
29 origin: Type
30 description: Optional[str]
31 interfaces: List["TypeDefinition"]
32 extend: bool
33 directives: Optional[Sequence[StrawberrySchemaDirective]]
34
35 _fields: List["StrawberryField"]
36
37 concrete_of: Optional["TypeDefinition"] = None
38 """Concrete implementations of Generic TypeDefinitions fill this in"""
39 type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field(
40 default_factory=dict
41 )
42
43 # TODO: remove wrapped cls when we "merge" this with `StrawberryObject`
44 def resolve_generic(self, wrapped_cls: type) -> type:
45 from strawberry.annotation import StrawberryAnnotation
46
47 passed_types = wrapped_cls.__args__ # type: ignore
48 params = wrapped_cls.__origin__.__parameters__ # type: ignore
49
50 # Make sure all passed_types are turned into StrawberryTypes
51 resolved_types = []
52 for passed_type in passed_types:
53 resolved_type = StrawberryAnnotation(passed_type).resolve()
54 resolved_types.append(resolved_type)
55
56 type_var_map = dict(zip(params, resolved_types))
57
58 return self.copy_with(type_var_map)
59
60 # TODO: Return a StrawberryObject
61 def copy_with(
62 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
63 ) -> type:
64 fields = []
65 for field in self.fields:
66 # TODO: Logic unnecessary with StrawberryObject
67 field_type = field.type
68 if hasattr(field_type, "_type_definition"):
69 field_type = field_type._type_definition # type: ignore
70
71 # TODO: All types should end up being StrawberryTypes
72 # The first check is here as a symptom of strawberry.ID being a
73 # Scalar, but not a StrawberryType
74 if isinstance(field_type, StrawberryType) and field_type.is_generic:
75 field = field.copy_with(type_var_map)
76
77 fields.append(field)
78
79 new_type_definition = TypeDefinition(
80 name=self.name,
81 is_input=self.is_input,
82 origin=self.origin,
83 is_interface=self.is_interface,
84 directives=self.directives,
85 interfaces=self.interfaces,
86 description=self.description,
87 extend=self.extend,
88 _fields=fields,
89 concrete_of=self,
90 type_var_map=type_var_map,
91 )
92
93 new_type = type(
94 new_type_definition.name,
95 (self.origin,),
96 {"_type_definition": new_type_definition},
97 )
98
99 new_type_definition.origin = new_type
100
101 return new_type
102
103 def get_field(self, python_name: str) -> Optional["StrawberryField"]:
104 return next(
105 (field for field in self.fields if field.python_name == python_name), None
106 )
107
108 @property
109 def fields(self) -> List["StrawberryField"]:
110 # TODO: rename _fields to fields and remove this property
111 return self._fields
112
113 @property
114 def is_generic(self) -> bool:
115 return is_type_generic(self.origin)
116
117 @property
118 def type_params(self) -> List[TypeVar]:
119 type_params: List[TypeVar] = []
120 for field in self.fields:
121 type_params.extend(field.type_params)
122
123 return type_params
124
125 def is_implemented_by(self, root: Union[type, dict]) -> bool:
126 # TODO: Accept StrawberryObject instead
127 # TODO: Support dicts
128 if isinstance(root, dict):
129 raise NotImplementedError()
130
131 type_definition = root._type_definition # type: ignore
132
133 if type_definition is self:
134 # No generics involved. Exact type match
135 return True
136
137 if type_definition is not self.concrete_of:
138 # Either completely different type, or concrete type of a different generic
139 return False
140
141 # Check the mapping of all fields' TypeVars
142 for generic_field in type_definition.fields:
143 generic_field_type = generic_field.type
144 if not isinstance(generic_field_type, StrawberryTypeVar):
145 continue
146
147 # For each TypeVar found, get the expected type from the copy's type map
148 expected_concrete_type = self.type_var_map.get(generic_field_type.type_var)
149 if expected_concrete_type is None:
150 # TODO: Should this return False?
151 continue
152
153 # Check if the expected type matches the type found on the type_map
154 real_concrete_type = type(getattr(root, generic_field.name))
155 if real_concrete_type is not expected_concrete_type:
156 return False
157
158 # All field mappings succeeded. This is a match
159 return True
160
[end of strawberry/types/types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/strawberry/types/types.py b/strawberry/types/types.py
--- a/strawberry/types/types.py
+++ b/strawberry/types/types.py
@@ -152,6 +152,13 @@
# Check if the expected type matches the type found on the type_map
real_concrete_type = type(getattr(root, generic_field.name))
+
+ # TODO: uniform type var map, at the moment we map object types
+ # to their class (not to TypeDefinition) while we map enum to
+ # the EnumDefinition class. This is why we do this check here:
+ if hasattr(real_concrete_type, "_enum_definition"):
+ real_concrete_type = real_concrete_type._enum_definition
+
if real_concrete_type is not expected_concrete_type:
return False
|
{"golden_diff": "diff --git a/strawberry/types/types.py b/strawberry/types/types.py\n--- a/strawberry/types/types.py\n+++ b/strawberry/types/types.py\n@@ -152,6 +152,13 @@\n \n # Check if the expected type matches the type found on the type_map\n real_concrete_type = type(getattr(root, generic_field.name))\n+\n+ # TODO: uniform type var map, at the moment we map object types\n+ # to their class (not to TypeDefinition) while we map enum to\n+ # the EnumDefinition class. This is why we do this check here:\n+ if hasattr(real_concrete_type, \"_enum_definition\"):\n+ real_concrete_type = real_concrete_type._enum_definition\n+\n if real_concrete_type is not expected_concrete_type:\n return False\n", "issue": "Error validating return types when using a Generic in Union\nThere seems to be an issue with types.\r\nGenerics worked fine and dandy until I've tried to use them in a Union.\r\n\r\n```python\r\n@strawberry.type\r\nclass FaultyType:\r\n code_sent: bool\r\n result: Union[Pet, ErrorNode[Codes]]\r\n```\r\nHere returning an ErrorNode in \"result\" field fails type check.\r\nFails with strawberry.exceptions.UnallowedReturnTypeForUnion: The type \"<class 'strawberry_sample.ErrorNode'>\" of the field \"result\" is not in the list of the types of the union: \"['CodesErrorNode', 'Pet']\"\r\n\r\nReturning the same generic without unions in type declaration works fine.\r\n\r\nFull sample:\r\nhttps://gist.github.com/MeRuslan/5bd179f28fc5ae05e815429ee02ebdf6\n", "before_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nfrom typing import (\n TYPE_CHECKING,\n List,\n Mapping,\n Optional,\n Sequence,\n Type,\n TypeVar,\n Union,\n)\n\nfrom strawberry.type import StrawberryType, StrawberryTypeVar\nfrom strawberry.utils.typing import is_generic as is_type_generic\n\n\nif TYPE_CHECKING:\n from strawberry.field import StrawberryField\n from strawberry.schema_directive import StrawberrySchemaDirective\n\n\n@dataclasses.dataclass(eq=False)\nclass TypeDefinition(StrawberryType):\n name: str\n is_input: bool\n is_interface: bool\n origin: Type\n description: Optional[str]\n interfaces: List[\"TypeDefinition\"]\n extend: bool\n directives: Optional[Sequence[StrawberrySchemaDirective]]\n\n _fields: List[\"StrawberryField\"]\n\n concrete_of: Optional[\"TypeDefinition\"] = None\n \"\"\"Concrete implementations of Generic TypeDefinitions fill this in\"\"\"\n type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field(\n default_factory=dict\n )\n\n # TODO: remove wrapped cls when we \"merge\" this with `StrawberryObject`\n def resolve_generic(self, wrapped_cls: type) -> type:\n from strawberry.annotation import StrawberryAnnotation\n\n passed_types = wrapped_cls.__args__ # type: ignore\n params = wrapped_cls.__origin__.__parameters__ # type: ignore\n\n # Make sure all passed_types are turned into StrawberryTypes\n resolved_types = []\n for passed_type in passed_types:\n resolved_type = StrawberryAnnotation(passed_type).resolve()\n resolved_types.append(resolved_type)\n\n type_var_map = dict(zip(params, resolved_types))\n\n return self.copy_with(type_var_map)\n\n # TODO: Return a StrawberryObject\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> type:\n fields = []\n for field in self.fields:\n # TODO: Logic unnecessary with StrawberryObject\n field_type = field.type\n if hasattr(field_type, \"_type_definition\"):\n field_type = field_type._type_definition # type: ignore\n\n # TODO: All types should end up being StrawberryTypes\n 
# The first check is here as a symptom of strawberry.ID being a\n # Scalar, but not a StrawberryType\n if isinstance(field_type, StrawberryType) and field_type.is_generic:\n field = field.copy_with(type_var_map)\n\n fields.append(field)\n\n new_type_definition = TypeDefinition(\n name=self.name,\n is_input=self.is_input,\n origin=self.origin,\n is_interface=self.is_interface,\n directives=self.directives,\n interfaces=self.interfaces,\n description=self.description,\n extend=self.extend,\n _fields=fields,\n concrete_of=self,\n type_var_map=type_var_map,\n )\n\n new_type = type(\n new_type_definition.name,\n (self.origin,),\n {\"_type_definition\": new_type_definition},\n )\n\n new_type_definition.origin = new_type\n\n return new_type\n\n def get_field(self, python_name: str) -> Optional[\"StrawberryField\"]:\n return next(\n (field for field in self.fields if field.python_name == python_name), None\n )\n\n @property\n def fields(self) -> List[\"StrawberryField\"]:\n # TODO: rename _fields to fields and remove this property\n return self._fields\n\n @property\n def is_generic(self) -> bool:\n return is_type_generic(self.origin)\n\n @property\n def type_params(self) -> List[TypeVar]:\n type_params: List[TypeVar] = []\n for field in self.fields:\n type_params.extend(field.type_params)\n\n return type_params\n\n def is_implemented_by(self, root: Union[type, dict]) -> bool:\n # TODO: Accept StrawberryObject instead\n # TODO: Support dicts\n if isinstance(root, dict):\n raise NotImplementedError()\n\n type_definition = root._type_definition # type: ignore\n\n if type_definition is self:\n # No generics involved. Exact type match\n return True\n\n if type_definition is not self.concrete_of:\n # Either completely different type, or concrete type of a different generic\n return False\n\n # Check the mapping of all fields' TypeVars\n for generic_field in type_definition.fields:\n generic_field_type = generic_field.type\n if not isinstance(generic_field_type, StrawberryTypeVar):\n continue\n\n # For each TypeVar found, get the expected type from the copy's type map\n expected_concrete_type = self.type_var_map.get(generic_field_type.type_var)\n if expected_concrete_type is None:\n # TODO: Should this return False?\n continue\n\n # Check if the expected type matches the type found on the type_map\n real_concrete_type = type(getattr(root, generic_field.name))\n if real_concrete_type is not expected_concrete_type:\n return False\n\n # All field mappings succeeded. This is a match\n return True\n", "path": "strawberry/types/types.py"}]}
| 2,203 | 187 |
gh_patches_debug_15273
|
rasdani/github-patches
|
git_diff
|
nextcloud__appstore-110
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
App title and description must be available in English
Both can be localized; however, we don't expect everything to be translated. Because we use English as the fallback if a language is not present, we should require the English contents in the info.xml.
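As a rough sketch of what requiring the English contents could mean once info.xml has been parsed into a dict (the nested language-keyed shape used here is an assumption for illustration only):
```python
def check_english_present(info: dict) -> None:
    """Reject parsed info.xml data lacking English name/summary/description.

    Sketch only: assumes translated elements are mapped to dicts keyed by
    language code, e.g. {'app': {'name': {'en': 'News', 'de': 'Nachrichten'}}}.
    """
    app = info['app']
    for field in ('name', 'summary', 'description'):
        translations = app.get(field) or {}
        if 'en' not in translations:
            raise ValueError(
                'info.xml must contain an English "%s" element' % field)
```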
</issue>
<code>
[start of nextcloudappstore/core/api/v1/release/parser.py]
1 import re
2 import tarfile # type: ignore
3 import lxml.etree # type: ignore
4 from typing import Dict, Any, Tuple, List, Set
5
6 from nextcloudappstore.core.api.v1.release import ReleaseConfig
7 from nextcloudappstore.core.versioning import pad_max_version, pad_min_version
8 from rest_framework.exceptions import APIException # type: ignore
9
10
11 class MaxSizeAppMetadataXmlException(APIException):
12 pass
13
14
15 class InvalidAppMetadataXmlException(APIException):
16 pass
17
18
19 class UnsupportedAppArchiveException(APIException):
20 pass
21
22
23 class InvalidAppPackageStructureException(APIException):
24 pass
25
26
27 class XMLSyntaxError(APIException):
28 pass
29
30
31 class GunZipAppMetadataExtractor:
32 def __init__(self, config: ReleaseConfig) -> None:
33 """
34 :argument config the config
35 """
36 self.config = config
37 self.app_folder_regex = re.compile(r'^[a-z]+[a-z_]*(?:/.*)*$')
38
39 def extract_app_metadata(self, archive_path: str) -> Tuple[str, str]:
40 """
41 Extracts the info.xml from an tar.gz archive
42 :argument archive_path the path to the tar.gz archive
43 :raises InvalidAppPackageStructureException if the first level folder
44 does not equal the app_id or no info.xml file could be found in the
45 appinfo folder
46 :return the info.xml as string
47 """
48 if not tarfile.is_tarfile(archive_path):
49 msg = '%s is not a valid tar.gz archive ' % archive_path
50 raise UnsupportedAppArchiveException(msg)
51
52 with tarfile.open(archive_path, 'r:gz') as tar:
53 result = self._parse_archive(tar)
54 return result
55
56 def _parse_archive(self, tar: Any) -> Tuple[str, str]:
57 folders = self._find_app_folders(tar.getnames())
58 if len(folders) > 1:
59 msg = 'More than one possible app folder found'
60 raise InvalidAppPackageStructureException(msg)
61 elif len(folders) == 0:
62 msg = 'No possible app folder found. App folder must contain ' \
63 'only lowercase ASCII characters or underscores'
64 raise InvalidAppPackageStructureException(msg)
65
66 app_id = folders.pop()
67 info_path = '%s/appinfo/info.xml' % app_id
68 try:
69 info_member = tar.getmember(info_path)
70 possible_links = [info_member]
71 # its complicated, sometimes there are single members, sometimes
72 # there aren't
73 try:
74 possible_links.append(tar.getmember(app_id))
75 except KeyError:
76 pass
77 try:
78 possible_links.append(tar.getmember('%s/appinfo' % app_id))
79 except KeyError:
80 pass
81
82 for possible_link in possible_links:
83 if possible_link.issym() or possible_link.islnk():
84 msg = 'Symlinks and hard links can not be used for %s' % \
85 possible_link
86 raise InvalidAppPackageStructureException(msg)
87 info_file = tar.extractfile(info_member)
88 contents = self._stream_read_file(info_file,
89 self.config.max_info_size)
90 return contents, app_id
91 except KeyError:
92 msg = 'Could not find %s file inside the archive' % info_path
93 raise InvalidAppPackageStructureException(msg)
94
95 def _stream_read_file(self, info_file: Any, max_info_size: int) -> str:
96 """
97 Instead of reading everything in one go which is vulnerable to
98 zip bombs, stream and accumulate the bytes
99 :argument info_file: buffered io reader
100 :argument max_info_size: maximum file size in bytes
101 :raises MaxSizeAppMetadataXmlException if the maximum size was reached
102 :return: the parsed info.xml
103 """
104 # FIXME: If someone finds a less ugly version, please feel free to
105 # improve it
106 size = 0
107 result = b''
108 while True:
109 size += 1024
110 if size > max_info_size:
111 msg = 'info.xml was bigger than allowed %i bytes' % \
112 max_info_size
113 raise MaxSizeAppMetadataXmlException(msg)
114
115 chunk = info_file.read(1024)
116 if not chunk:
117 break
118 result += chunk
119
120 return result.decode('utf-8')
121
122 def _find_app_folders(self, members: List[str]) -> Set[str]:
123 regex = self.app_folder_regex
124 matching_members = filter(lambda f: re.match(regex, f), members)
125 folders = map(lambda m: m.split('/')[0], matching_members)
126 return set(folders)
127
128
129 def element_to_dict(element: Any) -> Dict:
130 type = element.get('type')
131 key = element.tag.replace('-', '_')
132 if type == 'int':
133 return {key: int(element.text)}
134 elif type == 'list':
135 return {key: list(map(element_to_dict, element.iterchildren()))}
136 elif type == 'min-version':
137 return {key: pad_min_version(element.text)}
138 elif type == 'max-version':
139 return {key: pad_max_version(element.text)}
140 elif len(list(element)) > 0:
141 contents = {}
142 for child in element.iterchildren():
143 contents.update(element_to_dict(child))
144 return {key: contents}
145 else:
146 return {key: element.text}
147
148
149 def parse_app_metadata(xml: str, schema: str, pre_xslt: str,
150 xslt: str) -> Dict:
151 """
152 Parses, validates and maps the xml onto a dict
153 :argument xml the info.xml string to parse
154 :argument schema the schema xml as string
155 :argument pre_xslt xslt which is run before validation to ensure that
156 everything is in the correct order and that unknown elements are excluded
157 :argument xslt the xslt to transform it to a matching structure
158 :raises InvalidAppMetadataXmlException if the schema does not validate
159 :return the parsed xml as dict
160 """
161 parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True,
162 remove_comments=True, load_dtd=False,
163 remove_blank_text=True, dtd_validation=False
164 )
165 try:
166 doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)
167 except lxml.etree.XMLSyntaxError as e:
168 msg = 'info.xml contains malformed xml: %s' % e
169 raise XMLSyntaxError(msg)
170 for _ in doc.iter(lxml.etree.Entity):
171 raise InvalidAppMetadataXmlException('Must not contain entities')
172 pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt))
173 pre_transformed_doc = pre_transform(doc)
174 schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)
175 schema = lxml.etree.XMLSchema(schema_doc)
176 try:
177 schema.assertValid(pre_transformed_doc) # type: ignore
178 except lxml.etree.DocumentInvalid as e:
179 msg = 'info.xml did not validate: %s' % e
180 raise InvalidAppMetadataXmlException(msg)
181 transform = lxml.etree.XSLT(lxml.etree.XML(xslt))
182 transformed_doc = transform(pre_transformed_doc)
183 mapped = element_to_dict(transformed_doc.getroot())
184 return mapped
185
[end of nextcloudappstore/core/api/v1/release/parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nextcloudappstore/core/api/v1/release/parser.py b/nextcloudappstore/core/api/v1/release/parser.py
--- a/nextcloudappstore/core/api/v1/release/parser.py
+++ b/nextcloudappstore/core/api/v1/release/parser.py
@@ -181,4 +181,20 @@
transform = lxml.etree.XSLT(lxml.etree.XML(xslt))
transformed_doc = transform(pre_transformed_doc)
mapped = element_to_dict(transformed_doc.getroot())
+ validate_english_present(mapped)
return mapped
+
+
+def validate_english_present(info: Dict) -> None:
+ """
+ Validates that name, summary and description are present in english
+ :param info: the parsed xml
+ :raises: InvalidAppMetadataXmlException if at least one of the required
+ fields is not present in english
+ """
+ app = info['app']
+ translated_fields = ['name', 'summary', 'description']
+ for field in translated_fields:
+ if 'en' not in app[field]:
+ msg = 'At least one element "%s" with lang "en" required' % field
+ raise InvalidAppMetadataXmlException(msg)
|
{"golden_diff": "diff --git a/nextcloudappstore/core/api/v1/release/parser.py b/nextcloudappstore/core/api/v1/release/parser.py\n--- a/nextcloudappstore/core/api/v1/release/parser.py\n+++ b/nextcloudappstore/core/api/v1/release/parser.py\n@@ -181,4 +181,20 @@\n transform = lxml.etree.XSLT(lxml.etree.XML(xslt))\n transformed_doc = transform(pre_transformed_doc)\n mapped = element_to_dict(transformed_doc.getroot())\n+ validate_english_present(mapped)\n return mapped\n+\n+\n+def validate_english_present(info: Dict) -> None:\n+ \"\"\"\n+ Validates that name, summary and description are present in english\n+ :param info: the parsed xml\n+ :raises: InvalidAppMetadataXmlException if at least one of the required\n+ fields is not present in english\n+ \"\"\"\n+ app = info['app']\n+ translated_fields = ['name', 'summary', 'description']\n+ for field in translated_fields:\n+ if 'en' not in app[field]:\n+ msg = 'At least one element \"%s\" with lang \"en\" required' % field\n+ raise InvalidAppMetadataXmlException(msg)\n", "issue": "App title and description must be available in English\nBoth can be localized however we don't expect everything to be translated. Because we use English as fallback if a language is not present we should require the English contents in the info.xml\n\n", "before_files": [{"content": "import re\nimport tarfile # type: ignore\nimport lxml.etree # type: ignore\nfrom typing import Dict, Any, Tuple, List, Set\n\nfrom nextcloudappstore.core.api.v1.release import ReleaseConfig\nfrom nextcloudappstore.core.versioning import pad_max_version, pad_min_version\nfrom rest_framework.exceptions import APIException # type: ignore\n\n\nclass MaxSizeAppMetadataXmlException(APIException):\n pass\n\n\nclass InvalidAppMetadataXmlException(APIException):\n pass\n\n\nclass UnsupportedAppArchiveException(APIException):\n pass\n\n\nclass InvalidAppPackageStructureException(APIException):\n pass\n\n\nclass XMLSyntaxError(APIException):\n pass\n\n\nclass GunZipAppMetadataExtractor:\n def __init__(self, config: ReleaseConfig) -> None:\n \"\"\"\n :argument config the config\n \"\"\"\n self.config = config\n self.app_folder_regex = re.compile(r'^[a-z]+[a-z_]*(?:/.*)*$')\n\n def extract_app_metadata(self, archive_path: str) -> Tuple[str, str]:\n \"\"\"\n Extracts the info.xml from an tar.gz archive\n :argument archive_path the path to the tar.gz archive\n :raises InvalidAppPackageStructureException if the first level folder\n does not equal the app_id or no info.xml file could be found in the\n appinfo folder\n :return the info.xml as string\n \"\"\"\n if not tarfile.is_tarfile(archive_path):\n msg = '%s is not a valid tar.gz archive ' % archive_path\n raise UnsupportedAppArchiveException(msg)\n\n with tarfile.open(archive_path, 'r:gz') as tar:\n result = self._parse_archive(tar)\n return result\n\n def _parse_archive(self, tar: Any) -> Tuple[str, str]:\n folders = self._find_app_folders(tar.getnames())\n if len(folders) > 1:\n msg = 'More than one possible app folder found'\n raise InvalidAppPackageStructureException(msg)\n elif len(folders) == 0:\n msg = 'No possible app folder found. 
App folder must contain ' \\\n 'only lowercase ASCII characters or underscores'\n raise InvalidAppPackageStructureException(msg)\n\n app_id = folders.pop()\n info_path = '%s/appinfo/info.xml' % app_id\n try:\n info_member = tar.getmember(info_path)\n possible_links = [info_member]\n # its complicated, sometimes there are single members, sometimes\n # there aren't\n try:\n possible_links.append(tar.getmember(app_id))\n except KeyError:\n pass\n try:\n possible_links.append(tar.getmember('%s/appinfo' % app_id))\n except KeyError:\n pass\n\n for possible_link in possible_links:\n if possible_link.issym() or possible_link.islnk():\n msg = 'Symlinks and hard links can not be used for %s' % \\\n possible_link\n raise InvalidAppPackageStructureException(msg)\n info_file = tar.extractfile(info_member)\n contents = self._stream_read_file(info_file,\n self.config.max_info_size)\n return contents, app_id\n except KeyError:\n msg = 'Could not find %s file inside the archive' % info_path\n raise InvalidAppPackageStructureException(msg)\n\n def _stream_read_file(self, info_file: Any, max_info_size: int) -> str:\n \"\"\"\n Instead of reading everything in one go which is vulnerable to\n zip bombs, stream and accumulate the bytes\n :argument info_file: buffered io reader\n :argument max_info_size: maximum file size in bytes\n :raises MaxSizeAppMetadataXmlException if the maximum size was reached\n :return: the parsed info.xml\n \"\"\"\n # FIXME: If someone finds a less ugly version, please feel free to\n # improve it\n size = 0\n result = b''\n while True:\n size += 1024\n if size > max_info_size:\n msg = 'info.xml was bigger than allowed %i bytes' % \\\n max_info_size\n raise MaxSizeAppMetadataXmlException(msg)\n\n chunk = info_file.read(1024)\n if not chunk:\n break\n result += chunk\n\n return result.decode('utf-8')\n\n def _find_app_folders(self, members: List[str]) -> Set[str]:\n regex = self.app_folder_regex\n matching_members = filter(lambda f: re.match(regex, f), members)\n folders = map(lambda m: m.split('/')[0], matching_members)\n return set(folders)\n\n\ndef element_to_dict(element: Any) -> Dict:\n type = element.get('type')\n key = element.tag.replace('-', '_')\n if type == 'int':\n return {key: int(element.text)}\n elif type == 'list':\n return {key: list(map(element_to_dict, element.iterchildren()))}\n elif type == 'min-version':\n return {key: pad_min_version(element.text)}\n elif type == 'max-version':\n return {key: pad_max_version(element.text)}\n elif len(list(element)) > 0:\n contents = {}\n for child in element.iterchildren():\n contents.update(element_to_dict(child))\n return {key: contents}\n else:\n return {key: element.text}\n\n\ndef parse_app_metadata(xml: str, schema: str, pre_xslt: str,\n xslt: str) -> Dict:\n \"\"\"\n Parses, validates and maps the xml onto a dict\n :argument xml the info.xml string to parse\n :argument schema the schema xml as string\n :argument pre_xslt xslt which is run before validation to ensure that\n everything is in the correct order and that unknown elements are excluded\n :argument xslt the xslt to transform it to a matching structure\n :raises InvalidAppMetadataXmlException if the schema does not validate\n :return the parsed xml as dict\n \"\"\"\n parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True,\n remove_comments=True, load_dtd=False,\n remove_blank_text=True, dtd_validation=False\n )\n try:\n doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)\n except lxml.etree.XMLSyntaxError as e:\n msg = 'info.xml contains 
malformed xml: %s' % e\n raise XMLSyntaxError(msg)\n for _ in doc.iter(lxml.etree.Entity):\n raise InvalidAppMetadataXmlException('Must not contain entities')\n pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt))\n pre_transformed_doc = pre_transform(doc)\n schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)\n schema = lxml.etree.XMLSchema(schema_doc)\n try:\n schema.assertValid(pre_transformed_doc) # type: ignore\n except lxml.etree.DocumentInvalid as e:\n msg = 'info.xml did not validate: %s' % e\n raise InvalidAppMetadataXmlException(msg)\n transform = lxml.etree.XSLT(lxml.etree.XML(xslt))\n transformed_doc = transform(pre_transformed_doc)\n mapped = element_to_dict(transformed_doc.getroot())\n return mapped\n", "path": "nextcloudappstore/core/api/v1/release/parser.py"}]}
| 2,608 | 271 |
gh_patches_debug_18543
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-python-9055
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use bibtex in multi_comp.py
convert references in `mne/stats/multi_comp.py` to use footcite / footbibliography
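For reference, the target convention in a docstring looks roughly like the sketch below; `GenoveseEtAl2002` is assumed to be the matching key in MNE's `doc/references.bib`, and the function body is elided since only the docstring markup changes:
```python
def fdr_correction(pvals, alpha=0.05, method='indep'):
    """P-value correction with False Discovery Rate (FDR).

    Correction for multiple comparison using FDR
    :footcite:`GenoveseEtAl2002`.

    References
    ----------
    .. footbibliography::
    """
    ...  # body unchanged; only the citation markup is the point here
```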
</issue>
<code>
[start of mne/stats/multi_comp.py]
1 # Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis
2 # Alexandre Gramfort <alexandre.gramfort@inria.fr>
3 #
4 # Code borrowed from statsmodels
5 #
6 # License: BSD (3-clause)
7
8 import numpy as np
9
10
11 def _ecdf(x):
12 """No frills empirical cdf used in fdrcorrection."""
13 nobs = len(x)
14 return np.arange(1, nobs + 1) / float(nobs)
15
16
17 def fdr_correction(pvals, alpha=0.05, method='indep'):
18 """P-value correction with False Discovery Rate (FDR).
19
20 Correction for multiple comparison using FDR [1]_.
21
22 This covers Benjamini/Hochberg for independent or positively correlated and
23 Benjamini/Yekutieli for general or negatively correlated tests.
24
25 Parameters
26 ----------
27 pvals : array_like
28 Set of p-values of the individual tests.
29 alpha : float
30 Error rate.
31 method : 'indep' | 'negcorr'
32 If 'indep' it implements Benjamini/Hochberg for independent or if
33 'negcorr' it corresponds to Benjamini/Yekutieli.
34
35 Returns
36 -------
37 reject : array, bool
38 True if a hypothesis is rejected, False if not.
39 pval_corrected : array
40 P-values adjusted for multiple hypothesis testing to limit FDR.
41
42 References
43 ----------
44 .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps
45 in functional neuroimaging using the false discovery rate.
46 Neuroimage. 2002 Apr;15(4):870-8.
47 """
48 pvals = np.asarray(pvals)
49 shape_init = pvals.shape
50 pvals = pvals.ravel()
51
52 pvals_sortind = np.argsort(pvals)
53 pvals_sorted = pvals[pvals_sortind]
54 sortrevind = pvals_sortind.argsort()
55
56 if method in ['i', 'indep', 'p', 'poscorr']:
57 ecdffactor = _ecdf(pvals_sorted)
58 elif method in ['n', 'negcorr']:
59 cm = np.sum(1. / np.arange(1, len(pvals_sorted) + 1))
60 ecdffactor = _ecdf(pvals_sorted) / cm
61 else:
62 raise ValueError("Method should be 'indep' and 'negcorr'")
63
64 reject = pvals_sorted < (ecdffactor * alpha)
65 if reject.any():
66 rejectmax = max(np.nonzero(reject)[0])
67 else:
68 rejectmax = 0
69 reject[:rejectmax] = True
70
71 pvals_corrected_raw = pvals_sorted / ecdffactor
72 pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
73 pvals_corrected[pvals_corrected > 1.0] = 1.0
74 pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)
75 reject = reject[sortrevind].reshape(shape_init)
76 return reject, pvals_corrected
77
78
79 def bonferroni_correction(pval, alpha=0.05):
80 """P-value correction with Bonferroni method.
81
82 Parameters
83 ----------
84 pval : array_like
85 Set of p-values of the individual tests.
86 alpha : float
87 Error rate.
88
89 Returns
90 -------
91 reject : array, bool
92 True if a hypothesis is rejected, False if not.
93 pval_corrected : array
94 P-values adjusted for multiple hypothesis testing to limit FDR.
95 """
96 pval = np.asarray(pval)
97 pval_corrected = pval * float(pval.size)
98 # p-values must not be larger than 1.
99 pval_corrected = pval_corrected.clip(max=1.)
100 reject = pval_corrected < alpha
101 return reject, pval_corrected
102
[end of mne/stats/multi_comp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py
--- a/mne/stats/multi_comp.py
+++ b/mne/stats/multi_comp.py
@@ -17,7 +17,7 @@
def fdr_correction(pvals, alpha=0.05, method='indep'):
"""P-value correction with False Discovery Rate (FDR).
- Correction for multiple comparison using FDR [1]_.
+ Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`.
This covers Benjamini/Hochberg for independent or positively correlated and
Benjamini/Yekutieli for general or negatively correlated tests.
@@ -41,9 +41,7 @@
References
----------
- .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps
- in functional neuroimaging using the false discovery rate.
- Neuroimage. 2002 Apr;15(4):870-8.
+ .. footbibliography::
"""
pvals = np.asarray(pvals)
shape_init = pvals.shape
|
{"golden_diff": "diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py\n--- a/mne/stats/multi_comp.py\n+++ b/mne/stats/multi_comp.py\n@@ -17,7 +17,7 @@\n def fdr_correction(pvals, alpha=0.05, method='indep'):\n \"\"\"P-value correction with False Discovery Rate (FDR).\n \n- Correction for multiple comparison using FDR [1]_.\n+ Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`.\n \n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests.\n@@ -41,9 +41,7 @@\n \n References\n ----------\n- .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps\n- in functional neuroimaging using the false discovery rate.\n- Neuroimage. 2002 Apr;15(4):870-8.\n+ .. footbibliography::\n \"\"\"\n pvals = np.asarray(pvals)\n shape_init = pvals.shape\n", "issue": "use bibtex in multi_comp.py\nconvert references in `mne/stats/multi_comp.py` to use footcite / footbibliography\r\n\n", "before_files": [{"content": "# Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis\n# Alexandre Gramfort <alexandre.gramfort@inria.fr>\n#\n# Code borrowed from statsmodels\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\n\ndef _ecdf(x):\n \"\"\"No frills empirical cdf used in fdrcorrection.\"\"\"\n nobs = len(x)\n return np.arange(1, nobs + 1) / float(nobs)\n\n\ndef fdr_correction(pvals, alpha=0.05, method='indep'):\n \"\"\"P-value correction with False Discovery Rate (FDR).\n\n Correction for multiple comparison using FDR [1]_.\n\n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests.\n\n Parameters\n ----------\n pvals : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n method : 'indep' | 'negcorr'\n If 'indep' it implements Benjamini/Hochberg for independent or if\n 'negcorr' it corresponds to Benjamini/Yekutieli.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n\n References\n ----------\n .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps\n in functional neuroimaging using the false discovery rate.\n Neuroimage. 2002 Apr;15(4):870-8.\n \"\"\"\n pvals = np.asarray(pvals)\n shape_init = pvals.shape\n pvals = pvals.ravel()\n\n pvals_sortind = np.argsort(pvals)\n pvals_sorted = pvals[pvals_sortind]\n sortrevind = pvals_sortind.argsort()\n\n if method in ['i', 'indep', 'p', 'poscorr']:\n ecdffactor = _ecdf(pvals_sorted)\n elif method in ['n', 'negcorr']:\n cm = np.sum(1. 
/ np.arange(1, len(pvals_sorted) + 1))\n ecdffactor = _ecdf(pvals_sorted) / cm\n else:\n raise ValueError(\"Method should be 'indep' and 'negcorr'\")\n\n reject = pvals_sorted < (ecdffactor * alpha)\n if reject.any():\n rejectmax = max(np.nonzero(reject)[0])\n else:\n rejectmax = 0\n reject[:rejectmax] = True\n\n pvals_corrected_raw = pvals_sorted / ecdffactor\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n pvals_corrected[pvals_corrected > 1.0] = 1.0\n pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)\n reject = reject[sortrevind].reshape(shape_init)\n return reject, pvals_corrected\n\n\ndef bonferroni_correction(pval, alpha=0.05):\n \"\"\"P-value correction with Bonferroni method.\n\n Parameters\n ----------\n pval : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n \"\"\"\n pval = np.asarray(pval)\n pval_corrected = pval * float(pval.size)\n # p-values must not be larger than 1.\n pval_corrected = pval_corrected.clip(max=1.)\n reject = pval_corrected < alpha\n return reject, pval_corrected\n", "path": "mne/stats/multi_comp.py"}]}
| 1,637 | 258 |
gh_patches_debug_30754
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-441
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add logging for OAuth flows
We merged the basic implementation in #414, but neglected to include any additional logging around the new flows/logic.
Some ideas of what we should log:
- [x] The `OAUTH_CLIENT_NAME` used
- [x] The `redirect_uri` sent to the authorization server with the `authorize_redirect` request
- [x] If an access token fails to be authorized
</issue>
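A minimal sketch of the logging pattern the checklist describes, kept independent of the actual `benefits` codebase; every name below is illustrative, and the project's real change is the diff shown further down.

```python
import logging

logger = logging.getLogger(__name__)

OAUTH_CLIENT_NAME = "example-client"  # stand-in for the configured client name


def log_login(redirect_uri):
    # Record which client configuration is in use and the redirect_uri that
    # will be sent to the authorization server.
    logger.debug("Using OAuth client configuration: %s", OAUTH_CLIENT_NAME)
    logger.debug("OAuth authorize_redirect with redirect_uri: %s", redirect_uri)


def log_token_result(token):
    # Surface a warning when the access token could not be authorized.
    if token is None:
        logger.warning("Could not authorize OAuth access token")
    else:
        logger.debug("OAuth access token authorized")
```

Using `logging.getLogger(__name__)` keeps the records attributable to the originating module once Django's `LOGGING` setting routes them somewhere visible.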
<code>
[start of benefits/oauth/views.py]
1 from django.shortcuts import redirect
2 from django.urls import reverse
3
4 from authlib.integrations.django_client import OAuth
5
6 from benefits.core import session
7 from benefits.settings import OAUTH_CLIENT_NAME
8
9
10 if OAUTH_CLIENT_NAME:
11 _oauth = OAuth()
12 _oauth.register(OAUTH_CLIENT_NAME)
13 oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)
14
15
16 ROUTE_AUTH = "oauth:authorize"
17 ROUTE_START = "eligibility:start"
18 ROUTE_CONFIRM = "eligibility:confirm"
19
20
21 def login(request):
22 if not oauth_client:
23 raise Exception("No OAuth client")
24
25 route = reverse(ROUTE_AUTH)
26 redirect_uri = request.build_absolute_uri(route)
27
28 return oauth_client.authorize_redirect(request, redirect_uri)
29
30
31 def authorize(request):
32 if not oauth_client:
33 raise Exception("No OAuth client")
34
35 token = oauth_client.authorize_access_token(request)
36
37 if token is None:
38 return redirect(ROUTE_START)
39 else:
40 # we are intentionally not storing anything about the user, including their token
41 session.update(request, auth=True)
42 return redirect(ROUTE_CONFIRM)
43
[end of benefits/oauth/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py
--- a/benefits/oauth/views.py
+++ b/benefits/oauth/views.py
@@ -1,3 +1,5 @@
+import logging
+
from django.shortcuts import redirect
from django.urls import reverse
@@ -7,7 +9,12 @@
from benefits.settings import OAUTH_CLIENT_NAME
+logger = logging.getLogger(__name__)
+
+
if OAUTH_CLIENT_NAME:
+ logger.debug(f"Using OAuth client configuration: {OAUTH_CLIENT_NAME}")
+
_oauth = OAuth()
_oauth.register(OAUTH_CLIENT_NAME)
oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)
@@ -25,6 +32,8 @@
route = reverse(ROUTE_AUTH)
redirect_uri = request.build_absolute_uri(route)
+ logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}")
+
return oauth_client.authorize_redirect(request, redirect_uri)
@@ -32,11 +41,14 @@
if not oauth_client:
raise Exception("No OAuth client")
+ logger.debug("Attempting to authorize OAuth access token")
token = oauth_client.authorize_access_token(request)
if token is None:
+ logger.warning("Could not authorize OAuth access token")
return redirect(ROUTE_START)
else:
# we are intentionally not storing anything about the user, including their token
+ logger.debug("OAuth access token authorized")
session.update(request, auth=True)
return redirect(ROUTE_CONFIRM)
|
{"golden_diff": "diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -1,3 +1,5 @@\n+import logging\n+\n from django.shortcuts import redirect\n from django.urls import reverse\n \n@@ -7,7 +9,12 @@\n from benefits.settings import OAUTH_CLIENT_NAME\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n if OAUTH_CLIENT_NAME:\n+ logger.debug(f\"Using OAuth client configuration: {OAUTH_CLIENT_NAME}\")\n+\n _oauth = OAuth()\n _oauth.register(OAUTH_CLIENT_NAME)\n oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)\n@@ -25,6 +32,8 @@\n route = reverse(ROUTE_AUTH)\n redirect_uri = request.build_absolute_uri(route)\n \n+ logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n+\n return oauth_client.authorize_redirect(request, redirect_uri)\n \n \n@@ -32,11 +41,14 @@\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n \n+ logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n \n if token is None:\n+ logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n else:\n # we are intentionally not storing anything about the user, including their token\n+ logger.debug(\"OAuth access token authorized\")\n session.update(request, auth=True)\n return redirect(ROUTE_CONFIRM)\n", "issue": "Add logging for OAuth flows\nWe merged the basic implementation in #414, but neglected to include any additional logging around the new flows/logic.\r\n\r\nSome ideas of what we should log:\r\n\r\n- [x] The `OAUTH_CLIENT_NAME` used\r\n- [x] The `redirect_uri` sent to the authorization server with the `authorize_redirect` request\r\n- [x] If an access token fails to be authorized\n", "before_files": [{"content": "from django.shortcuts import redirect\nfrom django.urls import reverse\n\nfrom authlib.integrations.django_client import OAuth\n\nfrom benefits.core import session\nfrom benefits.settings import OAUTH_CLIENT_NAME\n\n\nif OAUTH_CLIENT_NAME:\n _oauth = OAuth()\n _oauth.register(OAUTH_CLIENT_NAME)\n oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\n\n\ndef login(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = request.build_absolute_uri(route)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\ndef authorize(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n return redirect(ROUTE_START)\n else:\n # we are intentionally not storing anything about the user, including their token\n session.update(request, auth=True)\n return redirect(ROUTE_CONFIRM)\n", "path": "benefits/oauth/views.py"}]}
| 936 | 330 |
gh_patches_debug_13770
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-1435
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RI: state legislator first names contain middle initials
For example, https://openstates.org/api/v1/legislators/RIL000179/ has first_name of "Moira J." and nothing in middle_name for Moira J. Walsh.
</issue>
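The report boils down to "First M. Last" strings being stored whole in `first_name`. As a standalone sketch of the intended split (the committed fix may differ in detail), using the same pattern that appears in the diff below:

```python
import re


def split_first_middle_last(full_name):
    """Split names shaped like 'First M. Last' into (first, middle, last).

    Anything that does not match the pattern comes back empty so callers can
    keep their existing single-field behaviour.
    """
    if re.match(r'^\S+\s[A-Z]\.\s\S+$', full_name):
        first, middle, last = full_name.split()
        return first, middle, last
    return '', '', ''


print(split_first_middle_last("Moira J. Walsh"))  # ('Moira', 'J.', 'Walsh')
print(split_first_middle_last("Jane Doe"))        # ('', '', '')
```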
<code>
[start of openstates/ri/legislators.py]
1 import re
2 import string
3
4 from billy.scrape import NoDataForPeriod
5 from billy.scrape.legislators import LegislatorScraper, Legislator
6 from openstates.utils import LXMLMixin
7
8 import lxml.html
9 import xlrd
10
11 excel_mapping = {
12 'district': 0,
13 'town_represented': 1,
14 'full_name': 2,
15 'party': 3,
16 'address': 4,
17 'email': 5,
18 }
19
20 class RILegislatorScraper(LegislatorScraper, LXMLMixin):
21 jurisdiction = 'ri'
22 latest_only = True
23
24 def scrape(self, chamber, term):
25 if chamber == 'upper':
26 url = ('http://webserver.rilin.state.ri.us/Documents/Senators.xls')
27 rep_type = 'Senator'
28 source_url = 'http://www.rilin.state.ri.us/senators/default.aspx'
29 source_url_title_replacement = rep_type
30 contact_url = 'http://webserver.rilin.state.ri.us/Email/SenEmailListDistrict.asp'
31 elif chamber == 'lower':
32 url = ('http://webserver.rilin.state.ri.us/Documents/Representatives.xls')
33 rep_type = 'Representative'
34 source_url = 'http://www.rilin.state.ri.us/representatives/default.aspx'
35 source_url_title_replacement = 'Rep. '
36 contact_url = 'http://webserver.rilin.state.ri.us/Email/RepEmailListDistrict.asp'
37
38 self.urlretrieve(url, 'ri_leg.xls')
39
40 wb = xlrd.open_workbook('ri_leg.xls')
41 sh = wb.sheet_by_index(0)
42
43 # This isn't perfect but it's cheap and better than using the
44 # XLS doc as the source URL for all legislators.
45 # 374: RI: legislator url
46 leg_source_url_map = {}
47 leg_page = self.lxmlize(source_url)
48
49 for link in leg_page.xpath('//td[@class="ms-vb2"]'):
50 leg_name = link.text_content().replace(source_url_title_replacement,'')
51 leg_url = link.xpath("..//a")[0].attrib['href']
52 leg_source_url_map[leg_name] = leg_url
53
54 for rownum in xrange(1, sh.nrows):
55 d = {}
56 for field, col_num in excel_mapping.iteritems():
57 d[field] = sh.cell(rownum, col_num).value
58
59 if d['full_name'].upper() == "VACANT":
60 self.warning(
61 "District {}'s seat is vacant".format(int(d['district'])))
62 continue
63
64 slug = re.match(
65 "(?P<class>sen|rep)-(?P<slug>.*)@(rilin\.state\.ri\.us|rilegislature\.gov)", d['email']
66 )
67
68 if 'asp' in d['email']:
69 d['email'] = None
70
71 if d['email'] is not None:
72 info = slug.groupdict()
73 info['chamber'] = "senators" if info['class'] == 'sen' else "representatives"
74
75 url = ("http://www.rilin.state.ri.us/{chamber}/"
76 "{slug}/Pages/Biography.aspx".format(**info))
77
78 dist = str(int(d['district']))
79 district_name = dist
80
81 assert d['full_name'].startswith(rep_type), "Improper name found"
82 full_name = re.sub(r"^{}(?=\s?[A-Z].*$)".format(rep_type), '', d['full_name']).strip()
83 translate = {
84 "Democrat" : "Democratic",
85 "Republican" : "Republican",
86 "Independent" : "Independent"
87 }
88
89 homepage_url = None
90 url_names = lxml.html.fromstring(self.get(source_url).text)
91 url_names = url_names.xpath('//td[@class="ms-vb2"]/a/@href')
92 modified_name = re.sub(r'[^\w\s]', '', full_name)
93 modified_name = modified_name.replace(' ', '').strip('').lower()
94
95 for el in url_names:
96 if 'default.aspx' in el:
97 el = el.replace('default.aspx', '')
98 el = el.strip('')
99 if el[-1] == '/':
100 el = el[:-1]
101 el = el.lower()
102 url_name_array = el.split('/')
103 if url_name_array[-1] in modified_name:
104 #remove '/default.aspx' and add last name
105 homepage_url = source_url[:-12] + url_name_array[-1]
106
107 kwargs = {
108 "town_represented": d['town_represented'],
109 }
110
111 contact = self.lxmlize(contact_url)
112 contact_phone = contact.xpath('//tr[@valign="TOP"]//td[@class="bodyCopy"]/text() | //td[@class="bodyCopy"]//center/text()')
113
114 phone = None
115 for el in contact_phone:
116 if len(el) <= 2 and dist == el:
117 number = contact_phone.index(el)
118 phone = contact_phone[number + 2]
119 phone = phone.strip()
120
121 email = None
122 if d['email'] is not None:
123 email = d['email']
124
125 if homepage_url is not None:
126 kwargs['url'] = homepage_url
127
128 if d['address'] is '':
129 d['address'] = 'No Address Found'
130
131 leg = Legislator(term, chamber, district_name, full_name,
132 '', '', '',
133 translate[d['party']],
134 **kwargs)
135
136 leg.add_office('district', 'Dictrict Office', address=d['address'], phone=phone, email=email)
137 leg.add_source(source_url)
138 leg.add_source(contact_url)
139 if homepage_url:
140 leg.add_source(homepage_url)
141 self.save_legislator(leg)
142
[end of openstates/ri/legislators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openstates/ri/legislators.py b/openstates/ri/legislators.py
--- a/openstates/ri/legislators.py
+++ b/openstates/ri/legislators.py
@@ -128,8 +128,14 @@
if d['address'] is '':
d['address'] = 'No Address Found'
+ # RI is very fond of First M. Last name formats and
+ # they're being misparsed upstream, so fix here
+ (first, middle, last) = ('','','')
+ if re.match(r'^\S+\s[A-Z]\.\s\S+$', full_name):
+ (first, middle, last) = full_name.split()
+
leg = Legislator(term, chamber, district_name, full_name,
- '', '', '',
+ first, last, middle,
translate[d['party']],
**kwargs)
|
{"golden_diff": "diff --git a/openstates/ri/legislators.py b/openstates/ri/legislators.py\n--- a/openstates/ri/legislators.py\n+++ b/openstates/ri/legislators.py\n@@ -128,8 +128,14 @@\n if d['address'] is '':\n d['address'] = 'No Address Found'\n \n+ # RI is very fond of First M. Last name formats and\n+ # they're being misparsed upstream, so fix here\n+ (first, middle, last) = ('','','')\n+ if re.match(r'^\\S+\\s[A-Z]\\.\\s\\S+$', full_name):\n+ (first, middle, last) = full_name.split()\n+ \n leg = Legislator(term, chamber, district_name, full_name,\n- '', '', '',\n+ first, last, middle,\n translate[d['party']],\n **kwargs)\n", "issue": "RI: state legislator first names contain middle initials\nFor example, https://openstates.org/api/v1/legislators/RIL000179/ has first_name of \"Moira J.\" and nothing in middle_name for Moira J. Walsh.\n", "before_files": [{"content": "import re\nimport string\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\nimport lxml.html\nimport xlrd\n\nexcel_mapping = {\n 'district': 0,\n 'town_represented': 1,\n 'full_name': 2,\n 'party': 3,\n 'address': 4,\n 'email': 5,\n}\n\nclass RILegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'ri'\n latest_only = True\n\n def scrape(self, chamber, term):\n if chamber == 'upper':\n url = ('http://webserver.rilin.state.ri.us/Documents/Senators.xls')\n rep_type = 'Senator'\n source_url = 'http://www.rilin.state.ri.us/senators/default.aspx'\n source_url_title_replacement = rep_type\n contact_url = 'http://webserver.rilin.state.ri.us/Email/SenEmailListDistrict.asp'\n elif chamber == 'lower':\n url = ('http://webserver.rilin.state.ri.us/Documents/Representatives.xls')\n rep_type = 'Representative'\n source_url = 'http://www.rilin.state.ri.us/representatives/default.aspx'\n source_url_title_replacement = 'Rep. 
'\n contact_url = 'http://webserver.rilin.state.ri.us/Email/RepEmailListDistrict.asp'\n\n self.urlretrieve(url, 'ri_leg.xls')\n\n wb = xlrd.open_workbook('ri_leg.xls')\n sh = wb.sheet_by_index(0)\n\n # This isn't perfect but it's cheap and better than using the\n # XLS doc as the source URL for all legislators.\n # 374: RI: legislator url\n leg_source_url_map = {}\n leg_page = self.lxmlize(source_url)\n\n for link in leg_page.xpath('//td[@class=\"ms-vb2\"]'):\n leg_name = link.text_content().replace(source_url_title_replacement,'')\n leg_url = link.xpath(\"..//a\")[0].attrib['href']\n leg_source_url_map[leg_name] = leg_url\n\n for rownum in xrange(1, sh.nrows):\n d = {}\n for field, col_num in excel_mapping.iteritems():\n d[field] = sh.cell(rownum, col_num).value\n\n if d['full_name'].upper() == \"VACANT\":\n self.warning(\n \"District {}'s seat is vacant\".format(int(d['district'])))\n continue\n\n slug = re.match(\n \"(?P<class>sen|rep)-(?P<slug>.*)@(rilin\\.state\\.ri\\.us|rilegislature\\.gov)\", d['email']\n )\n \n if 'asp' in d['email']:\n d['email'] = None\n\n if d['email'] is not None:\n info = slug.groupdict()\n info['chamber'] = \"senators\" if info['class'] == 'sen' else \"representatives\"\n\n url = (\"http://www.rilin.state.ri.us/{chamber}/\"\n \"{slug}/Pages/Biography.aspx\".format(**info))\n\n dist = str(int(d['district']))\n district_name = dist\n\n assert d['full_name'].startswith(rep_type), \"Improper name found\"\n full_name = re.sub(r\"^{}(?=\\s?[A-Z].*$)\".format(rep_type), '', d['full_name']).strip()\n translate = {\n \"Democrat\" : \"Democratic\",\n \"Republican\" : \"Republican\",\n \"Independent\" : \"Independent\"\n }\n\n homepage_url = None\n url_names = lxml.html.fromstring(self.get(source_url).text)\n url_names = url_names.xpath('//td[@class=\"ms-vb2\"]/a/@href')\n modified_name = re.sub(r'[^\\w\\s]', '', full_name)\n modified_name = modified_name.replace(' ', '').strip('').lower()\n\n for el in url_names:\n if 'default.aspx' in el:\n el = el.replace('default.aspx', '')\n el = el.strip('')\n if el[-1] == '/':\n el = el[:-1]\n el = el.lower()\n url_name_array = el.split('/')\n if url_name_array[-1] in modified_name:\n #remove '/default.aspx' and add last name\n homepage_url = source_url[:-12] + url_name_array[-1]\n\n kwargs = {\n \"town_represented\": d['town_represented'],\n }\n\n contact = self.lxmlize(contact_url)\n contact_phone = contact.xpath('//tr[@valign=\"TOP\"]//td[@class=\"bodyCopy\"]/text() | //td[@class=\"bodyCopy\"]//center/text()')\n\n phone = None\n for el in contact_phone:\n if len(el) <= 2 and dist == el:\n number = contact_phone.index(el)\n phone = contact_phone[number + 2]\n phone = phone.strip()\n\n email = None\n if d['email'] is not None:\n email = d['email']\n\n if homepage_url is not None:\n kwargs['url'] = homepage_url\n\n if d['address'] is '':\n d['address'] = 'No Address Found'\n\n leg = Legislator(term, chamber, district_name, full_name,\n '', '', '',\n translate[d['party']],\n **kwargs)\n\n leg.add_office('district', 'Dictrict Office', address=d['address'], phone=phone, email=email)\n leg.add_source(source_url)\n leg.add_source(contact_url)\n if homepage_url:\n leg.add_source(homepage_url)\n self.save_legislator(leg)\n", "path": "openstates/ri/legislators.py"}]}
| 2,184 | 203 |
gh_patches_debug_1787
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-9068
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CT-3377] [Regression] `dbt deps` fails on tarball dependencies
### Is this a regression in a recent version of dbt-core?
- [X] I believe this is a regression in dbt-core functionality
- [X] I have searched the existing issues, and I could not find an existing issue for this regression
### Current Behavior
When `dependencies.yml` includes a tarball dependency, I get an error message from `dbt deps`:
```
11:18:06 Running with dbt=1.7.1
11:18:06 Updating lock file in file path: /workspace/dbt-deps-tarball-failure/asdf/package-lock.yml
11:18:06 Encountered an error:
Runtime Error
The packages.yml file in this project is malformed. Please double check
the contents of this file and fix any errors before retrying.
You can find more information on the syntax for this file here:
https://docs.getdbt.com/docs/package-management
Validator Error:
dbt_utils was not found in the package index. Packages on the index require a namespace, e.g dbt-labs/dbt_utils
```
### Expected/Previous Behavior
Expected output:
```
11:27:03 Running with dbt=1.6.8
11:27:03 Installing dbt_utils
11:27:03 Installed from tarball (url: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6)
```
The validator should
- not check the index for tarball dependencies
- not validate the `namespace/package-name` for tarball dependencies
- mention the correct filename (this is a minor thing)
### Steps To Reproduce
1. In a new dbt project
2. With the following `dependencies.yml`:
```yaml
packages:
- tarball: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6
name: 'dbt_utils'
```
3. Run `dbt deps`
4. See error message above
### Relevant log output
_No response_
### Environment
```markdown
- OS: Ubuntu 22.04.3
- Python: 3.11.1
- dbt-core (latest working version): 1.6.8
- dbt-core (earliest regression version): 1.7.0
- dbt-core (latest version): 1.7.1
```
### Which database adapter are you using with dbt?
_No response_
### Additional Context
_No response_
</issue>
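Reading the traceback together with the fix below suggests the regression lives in how a tarball dependency is serialized into `package-lock.yml`: the 1.7 lock entry carried `package`/`version` keys, which the package-index validator then tried to resolve. A hedged sketch of the serialization shape the fix settles on (key names taken from the diff; everything else is illustrative):

```python
# Illustrative only: mirrors the to_dict() shape in the fix, not dbt's full
# implementation.

def tarball_lock_entry(tarball_url, package_name):
    """Shape of a tarball dependency as written to package-lock.yml."""
    return {
        "tarball": tarball_url,
        "name": package_name,
    }


entry = tarball_lock_entry(
    "https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6",
    "dbt_utils",
)
print(entry)
# {'tarball': 'https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6', 'name': 'dbt_utils'}
```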
<code>
[start of core/dbt/deps/tarball.py]
1 from typing import Dict
2
3 from dbt.contracts.project import RegistryPackageMetadata, TarballPackage
4 from dbt.deps.base import PinnedPackage, UnpinnedPackage
5
6
7 class TarballPackageMixin:
8 def __init__(self, tarball: str) -> None:
9 super().__init__()
10 self.tarball = tarball
11
12 @property
13 def name(self):
14 return self.tarball
15
16 def source_type(self) -> str:
17 return "tarball"
18
19
20 class TarballPinnedPackage(TarballPackageMixin, PinnedPackage):
21 def __init__(self, tarball: str, package: str) -> None:
22 super().__init__(tarball)
23 # setup to recycle RegistryPinnedPackage fns
24 self.package = package
25 self.version = "tarball"
26
27 @property
28 def name(self):
29 return self.package
30
31 def to_dict(self) -> Dict[str, str]:
32 return {
33 "tarball": self.tarball,
34 "version": self.version,
35 "package": self.package,
36 }
37
38 def get_version(self):
39 return self.version
40
41 def nice_version_name(self):
42 return f"tarball (url: {self.tarball})"
43
44 def _fetch_metadata(self, project, renderer):
45 """
46 recycle RegistryPackageMetadata so that we can use the install and
47 download_and_untar from RegistryPinnedPackage next.
48 build RegistryPackageMetadata from info passed via packages.yml since no
49 'metadata' service exists in this case.
50 """
51
52 dct = {
53 "name": self.package,
54 "packages": [], # note: required by RegistryPackageMetadata
55 "downloads": {"tarball": self.tarball},
56 }
57
58 return RegistryPackageMetadata.from_dict(dct)
59
60 def install(self, project, renderer):
61 self._install(project, renderer)
62
63
64 class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):
65 def __init__(
66 self,
67 tarball: str,
68 package: str,
69 ) -> None:
70 super().__init__(tarball)
71 # setup to recycle RegistryPinnedPackage fns
72 self.package = package
73 self.version = "tarball"
74
75 @classmethod
76 def from_contract(cls, contract: TarballPackage) -> "TarballUnpinnedPackage":
77 return cls(tarball=contract.tarball, package=contract.name)
78
79 def incorporate(self, other: "TarballUnpinnedPackage") -> "TarballUnpinnedPackage":
80 return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)
81
82 def resolved(self) -> TarballPinnedPackage:
83 return TarballPinnedPackage(tarball=self.tarball, package=self.package)
84
[end of core/dbt/deps/tarball.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py
--- a/core/dbt/deps/tarball.py
+++ b/core/dbt/deps/tarball.py
@@ -31,8 +31,7 @@
def to_dict(self) -> Dict[str, str]:
return {
"tarball": self.tarball,
- "version": self.version,
- "package": self.package,
+ "name": self.package,
}
def get_version(self):
|
{"golden_diff": "diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py\n--- a/core/dbt/deps/tarball.py\n+++ b/core/dbt/deps/tarball.py\n@@ -31,8 +31,7 @@\n def to_dict(self) -> Dict[str, str]:\n return {\n \"tarball\": self.tarball,\n- \"version\": self.version,\n- \"package\": self.package,\n+ \"name\": self.package,\n }\n \n def get_version(self):\n", "issue": "[CT-3377] [Regression] `dbt deps` fails on tarball dependencies\n### Is this a regression in a recent version of dbt-core?\n\n- [X] I believe this is a regression in dbt-core functionality\n- [X] I have searched the existing issues, and I could not find an existing issue for this regression\n\n### Current Behavior\n\nWhen `dependencies.yml` includes a tarball dependency, I get an error message from `dbt deps`:\r\n\r\n```\r\n11:18:06 Running with dbt=1.7.1\r\n11:18:06 Updating lock file in file path: /workspace/dbt-deps-tarball-failure/asdf/package-lock.yml\r\n11:18:06 Encountered an error:\r\nRuntime Error\r\n The packages.yml file in this project is malformed. Please double check\r\n the contents of this file and fix any errors before retrying.\r\n \r\n You can find more information on the syntax for this file here:\r\n https://docs.getdbt.com/docs/package-management\r\n \r\n Validator Error:\r\n dbt_utils was not found in the package index. Packages on the index require a namespace, e.g dbt-labs/dbt_utils\r\n```\n\n### Expected/Previous Behavior\n\nExpected output:\r\n```\r\n11:27:03 Running with dbt=1.6.8\r\n11:27:03 Installing dbt_utils\r\n11:27:03 Installed from tarball (url: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6)\r\n```\r\n\r\nThe validator should \r\n- not check the index for tarball dependencies\r\n- not validate the `namespace/package-name` for tarball dependencies\r\n- mention the correct filename (this is a minor thing)\n\n### Steps To Reproduce\n\n1. In a new dbt project\r\n2. With the following `dependencies.yml`:\r\n```yaml\r\npackages:\r\n - tarball: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6\r\n name: 'dbt_utils'\r\n```\r\n3. Run `dbt deps`\r\n4. 
See error message above\n\n### Relevant log output\n\n_No response_\n\n### Environment\n\n```markdown\n- OS: Ubuntu 22.04.3\r\n- Python: 3.11.1\r\n- dbt-core (latest working version): 1.6.8\r\n- dbt-core (earliest regression version): 1.7.0\r\n- dbt-core (latest version): 1.7.1\n```\n\n\n### Which database adapter are you using with dbt?\n\n_No response_\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "from typing import Dict\n\nfrom dbt.contracts.project import RegistryPackageMetadata, TarballPackage\nfrom dbt.deps.base import PinnedPackage, UnpinnedPackage\n\n\nclass TarballPackageMixin:\n def __init__(self, tarball: str) -> None:\n super().__init__()\n self.tarball = tarball\n\n @property\n def name(self):\n return self.tarball\n\n def source_type(self) -> str:\n return \"tarball\"\n\n\nclass TarballPinnedPackage(TarballPackageMixin, PinnedPackage):\n def __init__(self, tarball: str, package: str) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @property\n def name(self):\n return self.package\n\n def to_dict(self) -> Dict[str, str]:\n return {\n \"tarball\": self.tarball,\n \"version\": self.version,\n \"package\": self.package,\n }\n\n def get_version(self):\n return self.version\n\n def nice_version_name(self):\n return f\"tarball (url: {self.tarball})\"\n\n def _fetch_metadata(self, project, renderer):\n \"\"\"\n recycle RegistryPackageMetadata so that we can use the install and\n download_and_untar from RegistryPinnedPackage next.\n build RegistryPackageMetadata from info passed via packages.yml since no\n 'metadata' service exists in this case.\n \"\"\"\n\n dct = {\n \"name\": self.package,\n \"packages\": [], # note: required by RegistryPackageMetadata\n \"downloads\": {\"tarball\": self.tarball},\n }\n\n return RegistryPackageMetadata.from_dict(dct)\n\n def install(self, project, renderer):\n self._install(project, renderer)\n\n\nclass TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):\n def __init__(\n self,\n tarball: str,\n package: str,\n ) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @classmethod\n def from_contract(cls, contract: TarballPackage) -> \"TarballUnpinnedPackage\":\n return cls(tarball=contract.tarball, package=contract.name)\n\n def incorporate(self, other: \"TarballUnpinnedPackage\") -> \"TarballUnpinnedPackage\":\n return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)\n\n def resolved(self) -> TarballPinnedPackage:\n return TarballPinnedPackage(tarball=self.tarball, package=self.package)\n", "path": "core/dbt/deps/tarball.py"}]}
| 1,894 | 117 |
gh_patches_debug_38546
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-1129
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
zero: Permit stripping album art
It would be nice to have the option of automatically clearing embedded art when an item is imported. Whether or not a media item actually contains embedded art, beets should ensure the resulting media item has no embedded art after being imported. There are two plugins that would be a good place to implement this feature: the EmbedArt and the Zero plugins.
The EmbedArt plugin already supports a command called `clearart` which allows manual stripping of embedded art from items that match a query. Since the `clearart` operation is not automatic and there is no option for automation, an extra step is required when importing media.
What probably makes more sense is implementing support for the art field in the Zero plugin. It can reasonably be assumed that people who would use such a feature already have the Zero plugin deployed for clearing other fields. That said, it would require less configuration, as all a user would need to do is add the art field to their Zero plugin configuration. Moreover, the EmbedArt plugin embeds art into media items by default; that behavior would need to be disabled in the configuration as well.
</issue>
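If this lands in the Zero plugin, the user-visible part would presumably be one more entry in the plugin's `fields` list, while the write hook blanks the art the same way it already blanks scalar tags. A rough sketch of that idea only; the field name (`images`) and the None-means-delete convention are assumptions drawn from the plugin code shown below, not a finished implementation.

```python
# Rough sketch of the requested behaviour, not beets' actual code.

FIELDS_TO_ZERO = ["comments", "images"]  # hypothetical user configuration


def zero_tags(tags, fields_to_zero=FIELDS_TO_ZERO):
    """Return a copy of ``tags`` with the configured fields blanked.

    Setting a value to None is how the existing plugin tells the writer to
    drop a tag; the same convention is assumed here to cover embedded art.
    """
    cleaned = dict(tags)
    for field in fields_to_zero:
        if field in cleaned:
            cleaned[field] = None
    return cleaned


print(zero_tags({"title": "Song", "images": [b"...art..."], "comments": "rip"}))
# {'title': 'Song', 'images': None, 'comments': None}
```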
<code>
[start of beetsplug/zero.py]
1 # This file is part of beets.
2 # Copyright 2013, Blemjhoo Tezoulbr <baobab@heresiarch.info>.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """ Clears tag fields in media files."""
16
17 import re
18 import logging
19 from beets.plugins import BeetsPlugin
20 from beets.library import Item
21 from beets.importer import action
22 from beets.util import confit
23
24 __author__ = 'baobab@heresiarch.info'
25 __version__ = '0.10'
26
27 log = logging.getLogger('beets')
28
29
30 class ZeroPlugin(BeetsPlugin):
31
32 _instance = None
33
34 def __init__(self):
35 super(ZeroPlugin, self).__init__()
36
37 # Listeners.
38 self.register_listener('write', self.write_event)
39 self.register_listener('import_task_choice',
40 self.import_task_choice_event)
41
42 self.config.add({
43 'fields': [],
44 })
45
46 self.patterns = {}
47 self.warned = False
48
49 for field in self.config['fields'].as_str_seq():
50 if field in ('id', 'path', 'album_id'):
51 log.warn(u'[zero] field \'{0}\' ignored, zeroing '
52 u'it would be dangerous'.format(field))
53 continue
54 if field not in Item._fields.keys():
55 log.error(u'[zero] invalid field: {0}'.format(field))
56 continue
57
58 try:
59 self.patterns[field] = self.config[field].as_str_seq()
60 except confit.NotFoundError:
61 # Matches everything
62 self.patterns[field] = [u'']
63
64 def import_task_choice_event(self, session, task):
65 """Listen for import_task_choice event."""
66 if task.choice_flag == action.ASIS and not self.warned:
67 log.warn(u'[zero] cannot zero in \"as-is\" mode')
68 self.warned = True
69 # TODO request write in as-is mode
70
71 @classmethod
72 def match_patterns(cls, field, patterns):
73 """Check if field (as string) is matching any of the patterns in
74 the list.
75 """
76 for p in patterns:
77 if re.search(p, unicode(field), flags=re.IGNORECASE):
78 return True
79 return False
80
81 def write_event(self, item, path, tags):
82 """Listen for write event."""
83 if not self.patterns:
84 log.warn(u'[zero] no fields, nothing to do')
85 return
86
87 for field, patterns in self.patterns.items():
88 if field not in tags:
89 log.error(u'[zero] no such field: {0}'.format(field))
90 continue
91
92 value = tags[field]
93 if self.match_patterns(value, patterns):
94 log.debug(u'[zero] {0}: {1} -> None'.format(field, value))
95 tags[field] = None
96
[end of beetsplug/zero.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/beetsplug/zero.py b/beetsplug/zero.py
--- a/beetsplug/zero.py
+++ b/beetsplug/zero.py
@@ -17,7 +17,7 @@
import re
import logging
from beets.plugins import BeetsPlugin
-from beets.library import Item
+from beets.mediafile import MediaFile
from beets.importer import action
from beets.util import confit
@@ -51,7 +51,7 @@
log.warn(u'[zero] field \'{0}\' ignored, zeroing '
u'it would be dangerous'.format(field))
continue
- if field not in Item._fields.keys():
+ if field not in MediaFile.fields():
log.error(u'[zero] invalid field: {0}'.format(field))
continue
@@ -59,7 +59,7 @@
self.patterns[field] = self.config[field].as_str_seq()
except confit.NotFoundError:
# Matches everything
- self.patterns[field] = [u'']
+ self.patterns[field] = True
def import_task_choice_event(self, session, task):
"""Listen for import_task_choice event."""
@@ -73,23 +73,29 @@
"""Check if field (as string) is matching any of the patterns in
the list.
"""
+ if patterns is True:
+ return True
for p in patterns:
if re.search(p, unicode(field), flags=re.IGNORECASE):
return True
return False
def write_event(self, item, path, tags):
- """Listen for write event."""
+ """Set values in tags to `None` if the key and value are matched
+ by `self.patterns`.
+ """
if not self.patterns:
log.warn(u'[zero] no fields, nothing to do')
return
for field, patterns in self.patterns.items():
- if field not in tags:
- log.error(u'[zero] no such field: {0}'.format(field))
- continue
-
- value = tags[field]
- if self.match_patterns(value, patterns):
+ if field in tags:
+ value = tags[field]
+ match = self.match_patterns(tags[field], patterns)
+ else:
+ value = ''
+ match = patterns is True
+
+ if match:
log.debug(u'[zero] {0}: {1} -> None'.format(field, value))
tags[field] = None
|
{"golden_diff": "diff --git a/beetsplug/zero.py b/beetsplug/zero.py\n--- a/beetsplug/zero.py\n+++ b/beetsplug/zero.py\n@@ -17,7 +17,7 @@\n import re\n import logging\n from beets.plugins import BeetsPlugin\n-from beets.library import Item\n+from beets.mediafile import MediaFile\n from beets.importer import action\n from beets.util import confit\n \n@@ -51,7 +51,7 @@\n log.warn(u'[zero] field \\'{0}\\' ignored, zeroing '\n u'it would be dangerous'.format(field))\n continue\n- if field not in Item._fields.keys():\n+ if field not in MediaFile.fields():\n log.error(u'[zero] invalid field: {0}'.format(field))\n continue\n \n@@ -59,7 +59,7 @@\n self.patterns[field] = self.config[field].as_str_seq()\n except confit.NotFoundError:\n # Matches everything\n- self.patterns[field] = [u'']\n+ self.patterns[field] = True\n \n def import_task_choice_event(self, session, task):\n \"\"\"Listen for import_task_choice event.\"\"\"\n@@ -73,23 +73,29 @@\n \"\"\"Check if field (as string) is matching any of the patterns in\n the list.\n \"\"\"\n+ if patterns is True:\n+ return True\n for p in patterns:\n if re.search(p, unicode(field), flags=re.IGNORECASE):\n return True\n return False\n \n def write_event(self, item, path, tags):\n- \"\"\"Listen for write event.\"\"\"\n+ \"\"\"Set values in tags to `None` if the key and value are matched\n+ by `self.patterns`.\n+ \"\"\"\n if not self.patterns:\n log.warn(u'[zero] no fields, nothing to do')\n return\n \n for field, patterns in self.patterns.items():\n- if field not in tags:\n- log.error(u'[zero] no such field: {0}'.format(field))\n- continue\n-\n- value = tags[field]\n- if self.match_patterns(value, patterns):\n+ if field in tags:\n+ value = tags[field]\n+ match = self.match_patterns(tags[field], patterns)\n+ else:\n+ value = ''\n+ match = patterns is True\n+\n+ if match:\n log.debug(u'[zero] {0}: {1} -> None'.format(field, value))\n tags[field] = None\n", "issue": "zero: Permit stripping album art\nIt would be nice to have the option of automatically clearing embedded art when an item is imported. Whether or not a media item actually contains embedded art, beets should ensure the resulting media item has no embedded art after being import. There are two plugins which would offer a good place of implementation for this feature: the EmbedArt and the Zero plugins.\n\nThe EmbedArt plugin already supports a command called `clearart` which allows for the manual stripping of embedded art from items which match a query. Since the the `clearart` operation is not automatic and there is no option for automation, an extra step is required on the importation of media.\n\nWhat probably makes more sense is implementing support for the art field in the Zero plugin. It can only be assumed that people who would use such a feature already have the Zero plugin deployed for clearing other fields. That said, it would require less configuration as all a user would need to do is drop the art field in their configuration for the Zero plugin. Moreover, with the EmbedArt plugin, it embeds art into media items by default. 
This feature would need to be disabled in the configuration as well.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Blemjhoo Tezoulbr <baobab@heresiarch.info>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\" Clears tag fields in media files.\"\"\"\n\nimport re\nimport logging\nfrom beets.plugins import BeetsPlugin\nfrom beets.library import Item\nfrom beets.importer import action\nfrom beets.util import confit\n\n__author__ = 'baobab@heresiarch.info'\n__version__ = '0.10'\n\nlog = logging.getLogger('beets')\n\n\nclass ZeroPlugin(BeetsPlugin):\n\n _instance = None\n\n def __init__(self):\n super(ZeroPlugin, self).__init__()\n\n # Listeners.\n self.register_listener('write', self.write_event)\n self.register_listener('import_task_choice',\n self.import_task_choice_event)\n\n self.config.add({\n 'fields': [],\n })\n\n self.patterns = {}\n self.warned = False\n\n for field in self.config['fields'].as_str_seq():\n if field in ('id', 'path', 'album_id'):\n log.warn(u'[zero] field \\'{0}\\' ignored, zeroing '\n u'it would be dangerous'.format(field))\n continue\n if field not in Item._fields.keys():\n log.error(u'[zero] invalid field: {0}'.format(field))\n continue\n\n try:\n self.patterns[field] = self.config[field].as_str_seq()\n except confit.NotFoundError:\n # Matches everything\n self.patterns[field] = [u'']\n\n def import_task_choice_event(self, session, task):\n \"\"\"Listen for import_task_choice event.\"\"\"\n if task.choice_flag == action.ASIS and not self.warned:\n log.warn(u'[zero] cannot zero in \\\"as-is\\\" mode')\n self.warned = True\n # TODO request write in as-is mode\n\n @classmethod\n def match_patterns(cls, field, patterns):\n \"\"\"Check if field (as string) is matching any of the patterns in\n the list.\n \"\"\"\n for p in patterns:\n if re.search(p, unicode(field), flags=re.IGNORECASE):\n return True\n return False\n\n def write_event(self, item, path, tags):\n \"\"\"Listen for write event.\"\"\"\n if not self.patterns:\n log.warn(u'[zero] no fields, nothing to do')\n return\n\n for field, patterns in self.patterns.items():\n if field not in tags:\n log.error(u'[zero] no such field: {0}'.format(field))\n continue\n\n value = tags[field]\n if self.match_patterns(value, patterns):\n log.debug(u'[zero] {0}: {1} -> None'.format(field, value))\n tags[field] = None\n", "path": "beetsplug/zero.py"}]}
| 1,692 | 549 |
gh_patches_debug_32722
|
rasdani/github-patches
|
git_diff
|
pypa__pip-3443
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Protect a few more requests imports
In Debian, we unbundle requests, and further, we unbundled all vendored packages from requests. This causes pip's vendoring algorithm to fail. I had to add this patch to the Debian packaging.
```
From 144ba146cde273b815a80859537b09c068fd47e6 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Fri, 29 Jan 2016 16:56:43 -0500
Subject: Debian already unbundles things from requests.
Patch-Name: handle-unbundled-requests.patch
---
pip/_vendor/__init__.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py
index 1cb35a1..c64896a 100644
--- a/pip/_vendor/__init__.py
+++ b/pip/_vendor/__init__.py
@@ -82,8 +82,12 @@ if DEBUNDLED:
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
vendored("requests.packages.urllib3.packages")
- vendored("requests.packages.urllib3.packages.ordered_dict")
- vendored("requests.packages.urllib3.packages.six")
+ try:
+ vendored("requests.packages.urllib3.packages.ordered_dict")
+ vendored("requests.packages.urllib3.packages.six")
+ except ImportError:
+ # Debian already unbundles these from requests.
+ pass
vendored("requests.packages.urllib3.packages.ssl_match_hostname")
vendored("requests.packages.urllib3.packages.ssl_match_hostname."
"_implementation")
```
</issue>
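The Debian patch quoted above guards two specific imports; the same tolerance can be expressed once inside the aliasing helper so that any module a distributor has removed is simply skipped. A sketch of that generalization, mirroring the quoted patch's intent (the project's actual resolution appears further down):

```python
import sys


def vendored(modulename, prefix="pip._vendor"):
    # Sketch only: alias the vendored module to the system copy, and quietly
    # skip modules that a downstream distributor has debundled and removed.
    vendored_name = "{0}.{1}".format(prefix, modulename)
    try:
        __import__(vendored_name, globals(), locals(), level=0)
    except ImportError:
        try:
            __import__(modulename, globals(), locals(), level=0)
        except ImportError:
            # Neither copy exists; let the error surface later, if and when
            # the module is actually used.
            return
        sys.modules[vendored_name] = sys.modules[modulename]
        base, head = vendored_name.rsplit(".", 1)
        setattr(sys.modules[base], head, sys.modules[modulename])
```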
<code>
[start of pip/_vendor/__init__.py]
1 """
2 pip._vendor is for vendoring dependencies of pip to prevent needing pip to
3 depend on something external.
4
5 Files inside of pip._vendor should be considered immutable and should only be
6 updated to versions from upstream.
7 """
8 from __future__ import absolute_import
9
10 import glob
11 import os.path
12 import sys
13
14 # Downstream redistributors which have debundled our dependencies should also
15 # patch this value to be true. This will trigger the additional patching
16 # to cause things like "six" to be available as pip.
17 DEBUNDLED = False
18
19 # By default, look in this directory for a bunch of .whl files which we will
20 # add to the beginning of sys.path before attempting to import anything. This
21 # is done to support downstream re-distributors like Debian and Fedora who
22 # wish to create their own Wheels for our dependencies to aid in debundling.
23 WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
24
25
26 # Define a small helper function to alias our vendored modules to the real ones
27 # if the vendored ones do not exist. This idea of this was taken from
28 # https://github.com/kennethreitz/requests/pull/2567.
29 def vendored(modulename):
30 vendored_name = "{0}.{1}".format(__name__, modulename)
31
32 try:
33 __import__(vendored_name, globals(), locals(), level=0)
34 except ImportError:
35 __import__(modulename, globals(), locals(), level=0)
36 sys.modules[vendored_name] = sys.modules[modulename]
37 base, head = vendored_name.rsplit(".", 1)
38 setattr(sys.modules[base], head, sys.modules[modulename])
39
40
41 # If we're operating in a debundled setup, then we want to go ahead and trigger
42 # the aliasing of our vendored libraries as well as looking for wheels to add
43 # to our sys.path. This will cause all of this code to be a no-op typically
44 # however downstream redistributors can enable it in a consistent way across
45 # all platforms.
46 if DEBUNDLED:
47 # Actually look inside of WHEEL_DIR to find .whl files and add them to the
48 # front of our sys.path.
49 sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
50
51 # Actually alias all of our vendored dependencies.
52 vendored("cachecontrol")
53 vendored("colorama")
54 vendored("distlib")
55 vendored("html5lib")
56 vendored("lockfile")
57 vendored("six")
58 vendored("six.moves")
59 vendored("six.moves.urllib")
60 vendored("packaging")
61 vendored("packaging.version")
62 vendored("packaging.specifiers")
63 vendored("pkg_resources")
64 vendored("progress")
65 vendored("retrying")
66 vendored("requests")
67 vendored("requests.packages")
68 vendored("requests.packages.urllib3")
69 vendored("requests.packages.urllib3._collections")
70 vendored("requests.packages.urllib3.connection")
71 vendored("requests.packages.urllib3.connectionpool")
72 vendored("requests.packages.urllib3.contrib")
73 try:
74 vendored("requests.packages.urllib3.contrib.ntlmpool")
75 except ImportError:
76 pass
77 try:
78 vendored("requests.packages.urllib3.contrib.pyopenssl")
79 except ImportError:
80 pass
81 vendored("requests.packages.urllib3.exceptions")
82 vendored("requests.packages.urllib3.fields")
83 vendored("requests.packages.urllib3.filepost")
84 vendored("requests.packages.urllib3.packages")
85 vendored("requests.packages.urllib3.packages.ordered_dict")
86 vendored("requests.packages.urllib3.packages.six")
87 vendored("requests.packages.urllib3.packages.ssl_match_hostname")
88 vendored("requests.packages.urllib3.packages.ssl_match_hostname."
89 "_implementation")
90 vendored("requests.packages.urllib3.poolmanager")
91 vendored("requests.packages.urllib3.request")
92 vendored("requests.packages.urllib3.response")
93 vendored("requests.packages.urllib3.util")
94 vendored("requests.packages.urllib3.util.connection")
95 vendored("requests.packages.urllib3.util.request")
96 vendored("requests.packages.urllib3.util.response")
97 vendored("requests.packages.urllib3.util.retry")
98 vendored("requests.packages.urllib3.util.ssl_")
99 vendored("requests.packages.urllib3.util.timeout")
100 vendored("requests.packages.urllib3.util.url")
101
[end of pip/_vendor/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py
--- a/pip/_vendor/__init__.py
+++ b/pip/_vendor/__init__.py
@@ -32,10 +32,22 @@
try:
__import__(vendored_name, globals(), locals(), level=0)
except ImportError:
- __import__(modulename, globals(), locals(), level=0)
- sys.modules[vendored_name] = sys.modules[modulename]
- base, head = vendored_name.rsplit(".", 1)
- setattr(sys.modules[base], head, sys.modules[modulename])
+ try:
+ __import__(modulename, globals(), locals(), level=0)
+ except ImportError:
+ # We can just silently allow import failures to pass here. If we
+ # got to this point it means that ``import pip._vendor.whatever``
+ # failed and so did ``import whatever``. Since we're importing this
+ # upfront in an attempt to alias imports, not erroring here will
+ # just mean we get a regular import error whenever pip *actually*
+ # tries to import one of these modules to use it, which actually
+ # gives us a better error message than we would have otherwise
+ # gotten.
+ pass
+ else:
+ sys.modules[vendored_name] = sys.modules[modulename]
+ base, head = vendored_name.rsplit(".", 1)
+ setattr(sys.modules[base], head, sys.modules[modulename])
# If we're operating in a debundled setup, then we want to go ahead and trigger
@@ -70,14 +82,8 @@
vendored("requests.packages.urllib3.connection")
vendored("requests.packages.urllib3.connectionpool")
vendored("requests.packages.urllib3.contrib")
- try:
- vendored("requests.packages.urllib3.contrib.ntlmpool")
- except ImportError:
- pass
- try:
- vendored("requests.packages.urllib3.contrib.pyopenssl")
- except ImportError:
- pass
+ vendored("requests.packages.urllib3.contrib.ntlmpool")
+ vendored("requests.packages.urllib3.contrib.pyopenssl")
vendored("requests.packages.urllib3.exceptions")
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
|
{"golden_diff": "diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py\n--- a/pip/_vendor/__init__.py\n+++ b/pip/_vendor/__init__.py\n@@ -32,10 +32,22 @@\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n- __import__(modulename, globals(), locals(), level=0)\n- sys.modules[vendored_name] = sys.modules[modulename]\n- base, head = vendored_name.rsplit(\".\", 1)\n- setattr(sys.modules[base], head, sys.modules[modulename])\n+ try:\n+ __import__(modulename, globals(), locals(), level=0)\n+ except ImportError:\n+ # We can just silently allow import failures to pass here. If we\n+ # got to this point it means that ``import pip._vendor.whatever``\n+ # failed and so did ``import whatever``. Since we're importing this\n+ # upfront in an attempt to alias imports, not erroring here will\n+ # just mean we get a regular import error whenever pip *actually*\n+ # tries to import one of these modules to use it, which actually\n+ # gives us a better error message than we would have otherwise\n+ # gotten.\n+ pass\n+ else:\n+ sys.modules[vendored_name] = sys.modules[modulename]\n+ base, head = vendored_name.rsplit(\".\", 1)\n+ setattr(sys.modules[base], head, sys.modules[modulename])\n \n \n # If we're operating in a debundled setup, then we want to go ahead and trigger\n@@ -70,14 +82,8 @@\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n- try:\n- vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n- except ImportError:\n- pass\n- try:\n- vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n- except ImportError:\n- pass\n+ vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n+ vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n", "issue": "Protect a few more requests imports\nIn Debian, we unbundle requests, and further, we unbundled all vendored packages from requests. This causes pip's vendoring algorithm to fail. 
I had to add this patch to the Debian packaging.\n\n```\nFrom 144ba146cde273b815a80859537b09c068fd47e6 Mon Sep 17 00:00:00 2001\nFrom: Barry Warsaw <barry@python.org>\nDate: Fri, 29 Jan 2016 16:56:43 -0500\nSubject: Debian already unbundles things from requests.\n\nPatch-Name: handle-unbundled-requests.patch\n\n---\n pip/_vendor/__init__.py | 8 ++++++--\n 1 file changed, 6 insertions(+), 2 deletions(-)\n\ndiff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py\nindex 1cb35a1..c64896a 100644\n--- a/pip/_vendor/__init__.py\n+++ b/pip/_vendor/__init__.py\n@@ -82,8 +82,12 @@ if DEBUNDLED:\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n- vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n- vendored(\"requests.packages.urllib3.packages.six\")\n+ try:\n+ vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n+ vendored(\"requests.packages.urllib3.packages.six\")\n+ except ImportError:\n+ # Debian already unbundles these from requests.\n+ pass\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n```\n\n", "before_files": [{"content": "\"\"\"\npip._vendor is for vendoring dependencies of pip to prevent needing pip to\ndepend on something external.\n\nFiles inside of pip._vendor should be considered immutable and should only be\nupdated to versions from upstream.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport glob\nimport os.path\nimport sys\n\n# Downstream redistributors which have debundled our dependencies should also\n# patch this value to be true. This will trigger the additional patching\n# to cause things like \"six\" to be available as pip.\nDEBUNDLED = False\n\n# By default, look in this directory for a bunch of .whl files which we will\n# add to the beginning of sys.path before attempting to import anything. This\n# is done to support downstream re-distributors like Debian and Fedora who\n# wish to create their own Wheels for our dependencies to aid in debundling.\nWHEEL_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\n# Define a small helper function to alias our vendored modules to the real ones\n# if the vendored ones do not exist. This idea of this was taken from\n# https://github.com/kennethreitz/requests/pull/2567.\ndef vendored(modulename):\n vendored_name = \"{0}.{1}\".format(__name__, modulename)\n\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n __import__(modulename, globals(), locals(), level=0)\n sys.modules[vendored_name] = sys.modules[modulename]\n base, head = vendored_name.rsplit(\".\", 1)\n setattr(sys.modules[base], head, sys.modules[modulename])\n\n\n# If we're operating in a debundled setup, then we want to go ahead and trigger\n# the aliasing of our vendored libraries as well as looking for wheels to add\n# to our sys.path. 
This will cause all of this code to be a no-op typically\n# however downstream redistributors can enable it in a consistent way across\n# all platforms.\nif DEBUNDLED:\n # Actually look inside of WHEEL_DIR to find .whl files and add them to the\n # front of our sys.path.\n sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, \"*.whl\")) + sys.path\n\n # Actually alias all of our vendored dependencies.\n vendored(\"cachecontrol\")\n vendored(\"colorama\")\n vendored(\"distlib\")\n vendored(\"html5lib\")\n vendored(\"lockfile\")\n vendored(\"six\")\n vendored(\"six.moves\")\n vendored(\"six.moves.urllib\")\n vendored(\"packaging\")\n vendored(\"packaging.version\")\n vendored(\"packaging.specifiers\")\n vendored(\"pkg_resources\")\n vendored(\"progress\")\n vendored(\"retrying\")\n vendored(\"requests\")\n vendored(\"requests.packages\")\n vendored(\"requests.packages.urllib3\")\n vendored(\"requests.packages.urllib3._collections\")\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n try:\n vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n except ImportError:\n pass\n try:\n vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n except ImportError:\n pass\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n vendored(\"requests.packages.urllib3.packages.six\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n vendored(\"requests.packages.urllib3.poolmanager\")\n vendored(\"requests.packages.urllib3.request\")\n vendored(\"requests.packages.urllib3.response\")\n vendored(\"requests.packages.urllib3.util\")\n vendored(\"requests.packages.urllib3.util.connection\")\n vendored(\"requests.packages.urllib3.util.request\")\n vendored(\"requests.packages.urllib3.util.response\")\n vendored(\"requests.packages.urllib3.util.retry\")\n vendored(\"requests.packages.urllib3.util.ssl_\")\n vendored(\"requests.packages.urllib3.util.timeout\")\n vendored(\"requests.packages.urllib3.util.url\")\n", "path": "pip/_vendor/__init__.py"}]}
| 2,196 | 553 |
gh_patches_debug_14511
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-1437
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Firefox VPN target
It seems the Firefox VPN uses `e6eb0d1e856335fc`, previously named `Firefox FPN`.
</issue>
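The pipe-separated strings in `JEXLFirefoxServicesField` (listed below) are FxA OAuth client IDs, so giving the VPN ID its own entry yields a VPN-only targeting expression. As a rough illustration only — this helper is not part of the repository, it just mirrors the string building done by `to_jexl` in the listing — the JEXL produced for a single-ID entry would look like this:

```python
# Illustration only: mirrors JEXLFirefoxServicesField.to_jexl from the listing
# below for a single-ID service entry such as the VPN client ID in the issue.
def service_check_jexl(check, ids):
    jexl = '('
    for client_id in ids:
        jexl += f'("{client_id}" in attachedFxAOAuthClients|mapToProperty("id")) || '
    jexl = jexl[:-4]  # drop the trailing ' || '
    return jexl + (') == false' if check == 'no_account' else ') == true')

print(service_check_jexl('has_account', ['e6eb0d1e856335fc']))
# (("e6eb0d1e856335fc" in attachedFxAOAuthClients|mapToProperty("id"))) == true
```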
<code>
[start of snippets/base/admin/fields.py]
1 from django.core.exceptions import ValidationError
2 from django.forms import (ChoiceField, ModelChoiceField, ModelMultipleChoiceField,
3 MultiValueField, MultipleChoiceField)
4
5 from snippets.base.models import Addon, TargetedCountry
6
7 from .widgets import JEXLMultiWidget
8
9
10 class MultipleChoiceFieldCSV(MultipleChoiceField):
11 # To be used with in snippets.base.forms.SnippetAdminForm and in
12 # combination with DynamicField. We don't directly save() this field in the
13 # database so get_prep_value has not been implemented.
14
15 def prepare_value(self, value):
16 value = super(MultipleChoiceFieldCSV, self).prepare_value(value)
17 if not isinstance(value, list):
18 value = value.split(';')
19 return value
20
21 def clean(self, value):
22 value = super(MultipleChoiceFieldCSV, self).clean(value)
23 return ';'.join(value)
24
25
26 class JEXLBaseField():
27 def to_jexl(self, value):
28 if value:
29 return self.jexl.format(attr_name=self.attr_name, value=value)
30
31 return None
32
33
34 class JEXLChoiceField(JEXLBaseField, ChoiceField):
35 def __init__(self, attr_name, *args, **kwargs):
36 self.attr_name = attr_name
37 self.jexl = '{attr_name} == {value}'
38 self.jexl = kwargs.pop('jexl', self.jexl)
39 return super().__init__(*args, **kwargs)
40
41 def to_jexl(self, value):
42 if value:
43 return self.jexl.format(attr_name=self.attr_name, value=value)
44
45
46 class JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField):
47 def __init__(self, attr_name, *args, **kwargs):
48 self.attr_name = attr_name
49 self.jexl = '{attr_name} in {value}'
50 self.jexl = kwargs.pop('jexl', self.jexl)
51 return super().__init__(*args, **kwargs)
52
53 def prepare_value(self, value):
54 if isinstance(value, str):
55 value = value.split(';')
56 return super().prepare_value(value)
57
58 def clean(self, value):
59 value = super().clean(value)
60 return ';'.join([str(x.id) for x in value])
61
62
63 class JEXLCountryField(JEXLModelMultipleChoiceField):
64 def to_jexl(self, value):
65 if value:
66 values = TargetedCountry.objects.filter(id__in=value.split(";"))
67 return f'region in {[x.code for x in values]}'
68 return None
69
70
71 class JEXLRangeField(JEXLBaseField, MultiValueField):
72 def __init__(self, attr_name, choices, **kwargs):
73 self.attr_name = attr_name
74 self.jexl = {
75 'minimum': '{value} <= {attr_name}',
76 'maximum': '{attr_name} < {value}'
77 }
78 self.jexl = kwargs.pop('jexl', self.jexl)
79 fields = (
80 ChoiceField(choices=choices),
81 ChoiceField(choices=choices),
82 )
83 super().__init__(fields, **kwargs)
84 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields],
85 template_name='widgets/jexlrange.html')
86
87 def compress(self, data_list):
88 return ','.join(data_list)
89
90 def to_jexl(self, value):
91 final_jexl = []
92 if value:
93 minimum, maximum = value.split(',')
94 if minimum:
95 final_jexl.append(
96 self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum)
97 )
98 if maximum:
99 final_jexl.append(
100 self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum)
101 )
102 return ' && '.join(final_jexl)
103
104 def validate(self, value):
105 minimum, maximum = value.split(',')
106 self.fields[0].validate(minimum)
107 self.fields[1].validate(maximum)
108
109 if minimum and maximum and int(minimum) > int(maximum):
110 raise ValidationError('Minimum value must be lower or equal to maximum value.')
111 return value
112
113
114 class JEXLFirefoxRangeField(JEXLRangeField):
115 def __init__(self, **kwargs):
116 # Include only versions greater than 63, where ASRSnippets exist.
117 min_version = 64
118 # Need to be able to dynamically change this, probably using
119 # product_details. Issue #855
120 max_version = 84
121
122 choices = (
123 [(None, 'No limit')] +
124 [(x, x) for x in reversed(range(min_version, max_version + 1))]
125 )
126 super().__init__('firefoxVersion', choices, **kwargs)
127
128 def validate(self, value):
129 minimum, maximum = value.split(',')
130 self.fields[0].validate(minimum)
131 self.fields[1].validate(maximum)
132
133 if minimum and maximum and minimum > maximum:
134 raise ValidationError('Minimum value must be lower or equal to maximum value.')
135 return value
136
137
138 class JEXLAddonField(MultiValueField):
139 def __init__(self, **kwargs):
140 choices = (
141 (None, "I don't care"),
142 ('not_installed', 'Not Installed'),
143 ('installed', 'Installed'),
144 )
145 fields = (
146 ChoiceField(choices=choices),
147 ModelChoiceField(queryset=Addon.objects.all(), required=False),
148 )
149 super().__init__(fields, **kwargs)
150 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])
151
152 def compress(self, data_list):
153 if data_list:
154 return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', ''))
155 return ''
156
157 def to_jexl(self, value):
158 check, addon_id = value.split(',')
159 if not check or not addon_id:
160 return ''
161
162 addon = Addon.objects.get(id=addon_id)
163 if check == 'not_installed':
164 jexl = '("{}" in addonsInfo.addons|keys) == false'.format(addon.guid)
165 elif check == 'installed':
166 jexl = '("{}" in addonsInfo.addons|keys) == true'.format(addon.guid)
167
168 return jexl
169
170 def validate(self, value):
171 check, addon_id = value.split(',')
172
173 self.fields[0].validate(check)
174 self.fields[1].validate(addon_id)
175
176 if check and not addon_id:
177 raise ValidationError('You must select an add-on')
178
179 if not check and addon_id:
180 raise ValidationError('You must select a check')
181 return value
182
183
184 class JEXLFirefoxServicesField(MultiValueField):
185 def __init__(self, **kwargs):
186 check_choices = (
187 (None, "I don't care"),
188 ('no_account', "User hasn't signed up for"),
189 ('has_account', 'User has signed up for'),
190 )
191 # Verify IDs using
192 # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq .
193 # Incomplete list of IDs
194 # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa
195 service_choices = (
196 (None, '---------'),
197 ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),
198 ('802d56ef2a9af9fa', 'Firefox Monitor'),
199 ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),
200 ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),
201 ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'),
202 ('7ad9917f6c55fb77', 'Firefox Reality'),
203 ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),
204
205 )
206 fields = (
207 ChoiceField(choices=check_choices),
208 ChoiceField(choices=service_choices),
209 )
210 super().__init__(fields, **kwargs)
211 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])
212
213 def compress(self, data_list):
214 if data_list:
215 return f'{data_list[0]},{data_list[1]}'
216 return ''
217
218 def to_jexl(self, value):
219 check, ids = value.split(',')
220 ids = ids.split('|') if ids else ''
221
222 if not check or not ids:
223 return ''
224
225 jexl = '('
226 for id in ids:
227 jexl += f'("{id}" in attachedFxAOAuthClients|mapToProperty("id")) || '
228 jexl = jexl[:-4]
229
230 if check == 'no_account':
231 jexl += ') == false'
232 elif check == 'has_account':
233 jexl += ') == true'
234
235 return jexl
236
237 def validate(self, value):
238 check, service_name = value.split(',')
239
240 self.fields[0].validate(check)
241 self.fields[1].validate(service_name)
242
243 if check and not service_name:
244 raise ValidationError('You must select an Service.')
245
246 if not check and service_name:
247 raise ValidationError('You must select a check.')
248 return value
249
[end of snippets/base/admin/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/snippets/base/admin/fields.py b/snippets/base/admin/fields.py
--- a/snippets/base/admin/fields.py
+++ b/snippets/base/admin/fields.py
@@ -197,7 +197,8 @@
('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),
('802d56ef2a9af9fa', 'Firefox Monitor'),
('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),
- ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),
+ ('a8c528140153d1c6|565585c1745a144d', 'Firefox Private Network'),
+ ('e6eb0d1e856335fc', 'Firefox VPN'),
('9ebfe2c2f9ea3c58', 'Firefox Private Relay'),
('7ad9917f6c55fb77', 'Firefox Reality'),
('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),
|
{"golden_diff": "diff --git a/snippets/base/admin/fields.py b/snippets/base/admin/fields.py\n--- a/snippets/base/admin/fields.py\n+++ b/snippets/base/admin/fields.py\n@@ -197,7 +197,8 @@\n ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),\n ('802d56ef2a9af9fa', 'Firefox Monitor'),\n ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),\n- ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),\n+ ('a8c528140153d1c6|565585c1745a144d', 'Firefox Private Network'),\n+ ('e6eb0d1e856335fc', 'Firefox VPN'),\n ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'),\n ('7ad9917f6c55fb77', 'Firefox Reality'),\n ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),\n", "issue": "Add Firefox VPN target\nIt seems the Firefox VPN uses `e6eb0d1e856335fc`, previously named `Firefox FPN`\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.forms import (ChoiceField, ModelChoiceField, ModelMultipleChoiceField,\n MultiValueField, MultipleChoiceField)\n\nfrom snippets.base.models import Addon, TargetedCountry\n\nfrom .widgets import JEXLMultiWidget\n\n\nclass MultipleChoiceFieldCSV(MultipleChoiceField):\n # To be used with in snippets.base.forms.SnippetAdminForm and in\n # combination with DynamicField. We don't directly save() this field in the\n # database so get_prep_value has not been implemented.\n\n def prepare_value(self, value):\n value = super(MultipleChoiceFieldCSV, self).prepare_value(value)\n if not isinstance(value, list):\n value = value.split(';')\n return value\n\n def clean(self, value):\n value = super(MultipleChoiceFieldCSV, self).clean(value)\n return ';'.join(value)\n\n\nclass JEXLBaseField():\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n return None\n\n\nclass JEXLChoiceField(JEXLBaseField, ChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} == {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n\nclass JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} in {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def prepare_value(self, value):\n if isinstance(value, str):\n value = value.split(';')\n return super().prepare_value(value)\n\n def clean(self, value):\n value = super().clean(value)\n return ';'.join([str(x.id) for x in value])\n\n\nclass JEXLCountryField(JEXLModelMultipleChoiceField):\n def to_jexl(self, value):\n if value:\n values = TargetedCountry.objects.filter(id__in=value.split(\";\"))\n return f'region in {[x.code for x in values]}'\n return None\n\n\nclass JEXLRangeField(JEXLBaseField, MultiValueField):\n def __init__(self, attr_name, choices, **kwargs):\n self.attr_name = attr_name\n self.jexl = {\n 'minimum': '{value} <= {attr_name}',\n 'maximum': '{attr_name} < {value}'\n }\n self.jexl = kwargs.pop('jexl', self.jexl)\n fields = (\n ChoiceField(choices=choices),\n ChoiceField(choices=choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields],\n template_name='widgets/jexlrange.html')\n\n def compress(self, data_list):\n return ','.join(data_list)\n\n def to_jexl(self, value):\n final_jexl = 
[]\n if value:\n minimum, maximum = value.split(',')\n if minimum:\n final_jexl.append(\n self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum)\n )\n if maximum:\n final_jexl.append(\n self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum)\n )\n return ' && '.join(final_jexl)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and int(minimum) > int(maximum):\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLFirefoxRangeField(JEXLRangeField):\n def __init__(self, **kwargs):\n # Include only versions greater than 63, where ASRSnippets exist.\n min_version = 64\n # Need to be able to dynamically change this, probably using\n # product_details. Issue #855\n max_version = 84\n\n choices = (\n [(None, 'No limit')] +\n [(x, x) for x in reversed(range(min_version, max_version + 1))]\n )\n super().__init__('firefoxVersion', choices, **kwargs)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and minimum > maximum:\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLAddonField(MultiValueField):\n def __init__(self, **kwargs):\n choices = (\n (None, \"I don't care\"),\n ('not_installed', 'Not Installed'),\n ('installed', 'Installed'),\n )\n fields = (\n ChoiceField(choices=choices),\n ModelChoiceField(queryset=Addon.objects.all(), required=False),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', ''))\n return ''\n\n def to_jexl(self, value):\n check, addon_id = value.split(',')\n if not check or not addon_id:\n return ''\n\n addon = Addon.objects.get(id=addon_id)\n if check == 'not_installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == false'.format(addon.guid)\n elif check == 'installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == true'.format(addon.guid)\n\n return jexl\n\n def validate(self, value):\n check, addon_id = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(addon_id)\n\n if check and not addon_id:\n raise ValidationError('You must select an add-on')\n\n if not check and addon_id:\n raise ValidationError('You must select a check')\n return value\n\n\nclass JEXLFirefoxServicesField(MultiValueField):\n def __init__(self, **kwargs):\n check_choices = (\n (None, \"I don't care\"),\n ('no_account', \"User hasn't signed up for\"),\n ('has_account', 'User has signed up for'),\n )\n # Verify IDs using\n # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq .\n # Incomplete list of IDs\n # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa\n service_choices = (\n (None, '---------'),\n ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),\n ('802d56ef2a9af9fa', 'Firefox Monitor'),\n ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),\n ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),\n ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'),\n ('7ad9917f6c55fb77', 'Firefox Reality'),\n ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),\n\n )\n fields = (\n ChoiceField(choices=check_choices),\n 
ChoiceField(choices=service_choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return f'{data_list[0]},{data_list[1]}'\n return ''\n\n def to_jexl(self, value):\n check, ids = value.split(',')\n ids = ids.split('|') if ids else ''\n\n if not check or not ids:\n return ''\n\n jexl = '('\n for id in ids:\n jexl += f'(\"{id}\" in attachedFxAOAuthClients|mapToProperty(\"id\")) || '\n jexl = jexl[:-4]\n\n if check == 'no_account':\n jexl += ') == false'\n elif check == 'has_account':\n jexl += ') == true'\n\n return jexl\n\n def validate(self, value):\n check, service_name = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(service_name)\n\n if check and not service_name:\n raise ValidationError('You must select an Service.')\n\n if not check and service_name:\n raise ValidationError('You must select a check.')\n return value\n", "path": "snippets/base/admin/fields.py"}]}
| 3,337 | 351 |
gh_patches_debug_20500
|
rasdani/github-patches
|
git_diff
|
AlexsLemonade__refinebio-3299
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cache Docker Images by Branch
### Context
We want to be able to cache docker image layers that are created locally as testing artifacts, to be used by GitHub Actions.
The current prepare_images.sh does this but there was an issue with the definition for branch_name.
We also don't want to remove support for non-ccdl members developing locally.

### Solution or next step
- After #3285 is merged, we should set sensible defaults that can be overridden for external contributors.
- Get current branch name or tag to be set when pushing images to ccdl(staging) repo.
Determine:
- If they don't have access to the docker repo should we just build locally and not push?
- How long can docker tags be / are they compatible with our longer branch names? (See the note on tag limits below.)
</issue>
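On the tag-length question above: Docker image tags may contain only `[A-Za-z0-9_.-]`, may not start with a period or a dash, and are capped at 128 characters, so long branch names need sanitizing and truncation before they can be used as tags. The snippet below is a hypothetical sketch of that derivation, written in Python purely for illustration — the `GITHUB_HEAD_REF` fallback and the `"local"` default are assumptions, not code from `prepare_images.sh`.

```python
# Hypothetical sketch, not code from the repository: turn the current branch
# name into a Docker-safe tag (charset [A-Za-z0-9_.-], no leading '.'/'-',
# at most 128 characters).
import os
import re
import subprocess

def branch_tag():
    branch = os.getenv("GITHUB_HEAD_REF") or subprocess.check_output(
        ["git", "rev-parse", "--abbrev-ref", "HEAD"], text=True
    ).strip()
    tag = re.sub(r"[^A-Za-z0-9_.-]", "_", branch).lstrip(".-")
    return tag[:128] or "local"

print(branch_tag())  # e.g. 'dev_cache-docker-images' for a feature branch
```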
<code>
[start of common/setup.py]
1 import os
2
3 from setuptools import find_packages, setup
4
5 # allow setup.py to be run from any path
6 os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
7
8 VERSION_FILE = "version"
9 try:
10 with open(VERSION_FILE, "rt") as version_file:
11 version_string = version_file.read().strip().split("-")[0]
12 except OSError:
13 print(
14 "Cannot read version to determine System Version."
15 " Please create a file common/version containing an up to date System Version."
16 )
17 raise
18
19 setup(
20 name="data-refinery-common",
21 version=version_string,
22 packages=find_packages(),
23 include_package_data=True,
24 # These values are based on what is in common/requirements.txt.
25 install_requires=[
26 "boto3>=1.9.16",
27 "coverage>=4.5.1",
28 "daiquiri>=1.5.0",
29 "django>=3.2,<4",
30 "raven>=6.9.0",
31 "requests>=2.10.1",
32 "retrying>=1.3.3",
33 "psycopg2-binary>=2.7.5",
34 ],
35 license="BSD License",
36 description="Common functionality to be shared between Data Refinery sub-projects.",
37 url="https://www.greenelab.com",
38 author="Kurt Wheeler",
39 author_email="team@greenelab.com",
40 classifiers=[
41 "Environment :: Web Environment",
42 "Framework :: Django",
43 "Intended Audience :: Developers",
44 "License :: OSI Approved :: BSD License",
45 "Operating System :: Ubuntu",
46 "Programming Language :: Python",
47 "Programming Language :: Python :: 3.5",
48 "Programming Language :: Python :: 3.6",
49 "Topic :: Internet :: WWW/HTTP",
50 ],
51 )
52
[end of common/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/common/setup.py b/common/setup.py
--- a/common/setup.py
+++ b/common/setup.py
@@ -1,4 +1,6 @@
import os
+import re
+from datetime import datetime
from setuptools import find_packages, setup
@@ -11,11 +13,21 @@
version_string = version_file.read().strip().split("-")[0]
except OSError:
print(
- "Cannot read version to determine System Version."
- " Please create a file common/version containing an up to date System Version."
+ "Cannot read version file to determine system version. "
+ "Please create a file common/version containing an up to date system version."
)
raise
+version_re = re.compile(
+ r"^([1-9][0-9]*!)?(0|[1-9][0-9]*)"
+ "(\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))"
+ "?(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?$"
+)
+if not version_re.match(version_string):
+ # Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560.
+ now = datetime.now()
+ version_string = f"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}"
+
setup(
name="data-refinery-common",
version=version_string,
|
{"golden_diff": "diff --git a/common/setup.py b/common/setup.py\n--- a/common/setup.py\n+++ b/common/setup.py\n@@ -1,4 +1,6 @@\n import os\n+import re\n+from datetime import datetime\n \n from setuptools import find_packages, setup\n \n@@ -11,11 +13,21 @@\n version_string = version_file.read().strip().split(\"-\")[0]\n except OSError:\n print(\n- \"Cannot read version to determine System Version.\"\n- \" Please create a file common/version containing an up to date System Version.\"\n+ \"Cannot read version file to determine system version. \"\n+ \"Please create a file common/version containing an up to date system version.\"\n )\n raise\n \n+version_re = re.compile(\n+ r\"^([1-9][0-9]*!)?(0|[1-9][0-9]*)\"\n+ \"(\\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))\"\n+ \"?(\\.post(0|[1-9][0-9]*))?(\\.dev(0|[1-9][0-9]*))?$\"\n+)\n+if not version_re.match(version_string):\n+ # Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560.\n+ now = datetime.now()\n+ version_string = f\"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}\"\n+\n setup(\n name=\"data-refinery-common\",\n version=version_string,\n", "issue": "Cache Docker Images by Branch\n### Context\r\n\r\nWe want to be able to cache docker image layers that are created locally as testing artfacts locally to be used by github actions.\r\nThe current prepare_images.sh does this but there was an issue with the definition for branch_name.\r\nWe also don't want to remove support non-ccdl members developing locally.\r\n\r\n\r\n\r\n\r\n\r\n### Solution or next step\r\n\r\n- After #3285 is merged, we should set sensible defaults that can be overridden for external contributors.\r\n- Get current branch name or tag to be set when pushing images to ccdl(staging) repo.\r\n\r\nDetermine:\r\n- If they don't have access to the docker repo should we just build locally and not push?\r\n- How long can docker tags be / are they compatible with our longer branch names.\r\n\n", "before_files": [{"content": "import os\n\nfrom setuptools import find_packages, setup\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nVERSION_FILE = \"version\"\ntry:\n with open(VERSION_FILE, \"rt\") as version_file:\n version_string = version_file.read().strip().split(\"-\")[0]\nexcept OSError:\n print(\n \"Cannot read version to determine System Version.\"\n \" Please create a file common/version containing an up to date System Version.\"\n )\n raise\n\nsetup(\n name=\"data-refinery-common\",\n version=version_string,\n packages=find_packages(),\n include_package_data=True,\n # These values are based on what is in common/requirements.txt.\n install_requires=[\n \"boto3>=1.9.16\",\n \"coverage>=4.5.1\",\n \"daiquiri>=1.5.0\",\n \"django>=3.2,<4\",\n \"raven>=6.9.0\",\n \"requests>=2.10.1\",\n \"retrying>=1.3.3\",\n \"psycopg2-binary>=2.7.5\",\n ],\n license=\"BSD License\",\n description=\"Common functionality to be shared between Data Refinery sub-projects.\",\n url=\"https://www.greenelab.com\",\n author=\"Kurt Wheeler\",\n author_email=\"team@greenelab.com\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: Ubuntu\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "common/setup.py"}]}
| 1,246 | 353 |
gh_patches_debug_25112
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-668
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
capture_backtrace raises AttributeError on PEP-420 namespace packages
The new `capture_backtrace` function in `scout_apm.core.backtrace` raises an AttributeError when the stack includes a [PEP-420] namespace package.
This is caused by the [`module_filepath` function](https://github.com/scoutapp/scout_apm_python/blob/v2.21.0/src/scout_apm/core/backtrace.py#L26-L33), specifically line 32:
```python
module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]
```
If `sys.modules[root_module]` is a [PEP-420] namespace package, this will raise
```
AttributeError: 'NoneType' object has no attribute 'rsplit'
```
### Steps to reproduce
Create a namespace package, with some modules inside, e.g.:
```
namespace/
foo/
__init__.py
bar/
__init__.py
```
Then on an interactive Python shell:
```
>>> from scout_apm.core.backtrace import module_filepath
>>> from namespace import foo
>>> module_filepath("namespace.foo", "namespace")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jack/venvs/tmp-a17ac7185189989/lib/python3.8/site-packages/scout_apm/core/backtrace.py", line 32, in module_filepath
module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]
AttributeError: 'NoneType' object has no attribute 'rsplit'
```
### Details
- Tested with version 2.21.0
- Current workaround is to pin version to 2.20.0
[PEP-420]: https://www.python.org/dev/peps/pep-0420/
</issue>
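Under PEP 420, a namespace package has no usable `__file__` (it is `None`) but does expose a `__path__` listing one or more directories, so any guard has to branch on that. The sketch below is a minimal illustration of such a fallback, written against the same inputs as `module_filepath`; the repository's actual change appears in the patch further down.

```python
# Minimal sketch: fall back to __path__ when a PEP-420 namespace package has
# no usable __file__. Assumes the root module has already been imported.
import os
import sys

def root_module_dir(root_module_name):
    root_module = sys.modules[root_module_name]
    if getattr(root_module, "__file__", None):
        return root_module.__file__.rsplit(os.sep, 2)[0]
    if getattr(root_module, "__path__", None):
        # Namespace packages expose one or more directories; use the first.
        return list(root_module.__path__)[0].rsplit(os.sep, 1)[0]
    return None
```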
<code>
[start of src/scout_apm/core/backtrace.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import itertools
5 import os
6 import sys
7 import sysconfig
8 import traceback
9 import warnings
10
11 # Maximum non-Scout frames to target retrieving
12 LIMIT = 50
13 # How many upper frames from inside Scout to ignore
14 IGNORED = 1
15
16
17 def filter_frames(frames):
18 """Filter the stack trace frames down to non-library code."""
19 paths = sysconfig.get_paths()
20 library_paths = {paths["purelib"], paths["platlib"]}
21 for frame in frames:
22 if not any(frame["file"].startswith(exclusion) for exclusion in library_paths):
23 yield frame
24
25
26 def module_filepath(module, filepath):
27 """Get the filepath relative to the base module."""
28 root_module = module.split(".", 1)[0]
29 if root_module == module:
30 return os.path.basename(filepath)
31
32 module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]
33 return filepath.split(module_dir, 1)[-1].lstrip(os.sep)
34
35
36 def filepath(frame):
37 """Get the filepath for frame."""
38 module = frame.f_globals.get("__name__", None)
39 filepath = frame.f_code.co_filename
40
41 if filepath.endswith(".pyc"):
42 filepath = filepath[:-1]
43
44 if not module:
45 return filepath
46 return module_filepath(module, filepath)
47
48
49 if sys.version_info >= (3, 5):
50
51 def stacktrace_walker(tb):
52 """Iterate over each frame of the stack downards for exceptions."""
53 for frame, lineno in traceback.walk_tb(tb):
54 name = frame.f_code.co_name
55 yield {"file": filepath(frame), "line": lineno, "function": name}
56
57 def backtrace_walker():
58 """Iterate over each frame of the stack upwards.
59
60 Taken from python3/traceback.ExtractSummary.extract to support
61 iterating over the entire stack, but without creating a large
62 data structure.
63 """
64 start_frame = sys._getframe().f_back
65 for frame, lineno in traceback.walk_stack(start_frame):
66 name = frame.f_code.co_name
67 yield {"file": filepath(frame), "line": lineno, "function": name}
68
69
70 else:
71
72 def stacktrace_walker(tb):
73 """Iterate over each frame of the stack downards for exceptions."""
74 while tb is not None:
75 lineno = tb.tb_lineno
76 name = tb.tb_frame.f_code.co_name
77 yield {
78 "file": filepath(tb.tb_frame),
79 "line": lineno,
80 "function": name,
81 }
82 tb = tb.tb_next
83
84 def backtrace_walker():
85 """Iterate over each frame of the stack upwards.
86
87 Taken from python2.7/traceback.extract_stack to support iterating
88 over the entire stack, but without creating a large data structure.
89 """
90 try:
91 raise ZeroDivisionError
92 except ZeroDivisionError:
93 # Get the current frame
94 frame = sys.exc_info()[2].tb_frame.f_back
95
96 while frame is not None:
97 lineno = frame.f_lineno
98 name = frame.f_code.co_name
99 yield {"file": filepath(frame), "line": lineno, "function": name}
100 frame = frame.f_back
101
102
103 def capture_backtrace():
104 walker = filter_frames(backtrace_walker())
105 return list(itertools.islice(walker, LIMIT))
106
107
108 def capture_stacktrace(tb):
109 walker = stacktrace_walker(tb)
110 return list(reversed(list(itertools.islice(walker, LIMIT))))
111
112
113 def capture():
114 warnings.warn(
115 "capture is deprecated, instead use capture_backtrace instead.",
116 DeprecationWarning,
117 2,
118 )
119 return capture_backtrace()
120
[end of src/scout_apm/core/backtrace.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/scout_apm/core/backtrace.py b/src/scout_apm/core/backtrace.py
--- a/src/scout_apm/core/backtrace.py
+++ b/src/scout_apm/core/backtrace.py
@@ -7,6 +7,9 @@
import sysconfig
import traceback
import warnings
+from logging import getLogger
+
+logger = getLogger(__name__)
# Maximum non-Scout frames to target retrieving
LIMIT = 50
@@ -25,11 +28,25 @@
def module_filepath(module, filepath):
"""Get the filepath relative to the base module."""
- root_module = module.split(".", 1)[0]
- if root_module == module:
+ root_module_name = module.split(".", 1)[0]
+ if root_module_name == module:
return os.path.basename(filepath)
- module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]
+ root_module = sys.modules[root_module_name]
+ if root_module.__file__:
+ module_dir = root_module.__file__.rsplit(os.sep, 2)[0]
+ elif root_module.__path__:
+ # Default to using the first path specified for the module.
+ module_dir = root_module.__path__[0].rsplit(os.sep, 1)[0]
+ if len(root_module.__path__) > 1:
+ logger.debug(
+ "{} has {} paths. Use the first and ignore the rest.".format(
+ root_module, len(root_module.__path__)
+ )
+ )
+ else:
+ # If the file path don't exist, then return the full path.
+ return filepath
return filepath.split(module_dir, 1)[-1].lstrip(os.sep)
|
{"golden_diff": "diff --git a/src/scout_apm/core/backtrace.py b/src/scout_apm/core/backtrace.py\n--- a/src/scout_apm/core/backtrace.py\n+++ b/src/scout_apm/core/backtrace.py\n@@ -7,6 +7,9 @@\n import sysconfig\n import traceback\n import warnings\n+from logging import getLogger\n+\n+logger = getLogger(__name__)\n \n # Maximum non-Scout frames to target retrieving\n LIMIT = 50\n@@ -25,11 +28,25 @@\n \n def module_filepath(module, filepath):\n \"\"\"Get the filepath relative to the base module.\"\"\"\n- root_module = module.split(\".\", 1)[0]\n- if root_module == module:\n+ root_module_name = module.split(\".\", 1)[0]\n+ if root_module_name == module:\n return os.path.basename(filepath)\n \n- module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\n+ root_module = sys.modules[root_module_name]\n+ if root_module.__file__:\n+ module_dir = root_module.__file__.rsplit(os.sep, 2)[0]\n+ elif root_module.__path__:\n+ # Default to using the first path specified for the module.\n+ module_dir = root_module.__path__[0].rsplit(os.sep, 1)[0]\n+ if len(root_module.__path__) > 1:\n+ logger.debug(\n+ \"{} has {} paths. Use the first and ignore the rest.\".format(\n+ root_module, len(root_module.__path__)\n+ )\n+ )\n+ else:\n+ # If the file path don't exist, then return the full path.\n+ return filepath\n return filepath.split(module_dir, 1)[-1].lstrip(os.sep)\n", "issue": "capture_backtrace raises AttributeError on PEP-420 namespace packages\nThe new `capture_backtrace` function in `scout_apm.core.backtrace` raises an AttributeError when the stack includes a [PEP-420] namespace package.\r\n\r\nThis is caused by the [`module_filepath` function](https://github.com/scoutapp/scout_apm_python/blob/v2.21.0/src/scout_apm/core/backtrace.py#L26-L33), specifically line 32:\r\n\r\n```python\r\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\r\n```\r\n\r\nIf `sys.modules[root_module]` is a [PEP-420] namespace package, this will raise\r\n```\r\nAttributeError: 'NoneType' object has no attribute 'rsplit'\r\n```\r\n\r\n### Steps to reproduce\r\n\r\nCreate a namespace package, with some modules inside, e.g.:\r\n```\r\nnamespace/\r\n foo/\r\n __init__.py\r\n bar/\r\n __init__.py\r\n```\r\n\r\nThen on an interactive Python shell:\r\n\r\n```\r\n>>> from scout_apm.core.backtrace import module_filepath\r\n>>> from namespace import foo\r\n>>> module_filepath(\"namespace.foo\", \"namespace\")\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/jack/venvs/tmp-a17ac7185189989/lib/python3.8/site-packages/scout_apm/core/backtrace.py\", line 32, in module_filepath\r\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\r\nAttributeError: 'NoneType' object has no attribute 'rsplit'\r\n```\r\n\r\n### Details\r\n\r\n- Tested with version 2.21.0\r\n- Current workaround is to pin version to 2.20.0\r\n\r\n[PEP-420]: https://www.python.org/dev/peps/pep-0420/\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport itertools\nimport os\nimport sys\nimport sysconfig\nimport traceback\nimport warnings\n\n# Maximum non-Scout frames to target retrieving\nLIMIT = 50\n# How many upper frames from inside Scout to ignore\nIGNORED = 1\n\n\ndef filter_frames(frames):\n \"\"\"Filter the stack trace frames down to non-library code.\"\"\"\n paths = sysconfig.get_paths()\n library_paths = {paths[\"purelib\"], paths[\"platlib\"]}\n for frame in frames:\n if not 
any(frame[\"file\"].startswith(exclusion) for exclusion in library_paths):\n yield frame\n\n\ndef module_filepath(module, filepath):\n \"\"\"Get the filepath relative to the base module.\"\"\"\n root_module = module.split(\".\", 1)[0]\n if root_module == module:\n return os.path.basename(filepath)\n\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\n return filepath.split(module_dir, 1)[-1].lstrip(os.sep)\n\n\ndef filepath(frame):\n \"\"\"Get the filepath for frame.\"\"\"\n module = frame.f_globals.get(\"__name__\", None)\n filepath = frame.f_code.co_filename\n\n if filepath.endswith(\".pyc\"):\n filepath = filepath[:-1]\n\n if not module:\n return filepath\n return module_filepath(module, filepath)\n\n\nif sys.version_info >= (3, 5):\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n for frame, lineno in traceback.walk_tb(tb):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python3/traceback.ExtractSummary.extract to support\n iterating over the entire stack, but without creating a large\n data structure.\n \"\"\"\n start_frame = sys._getframe().f_back\n for frame, lineno in traceback.walk_stack(start_frame):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n\nelse:\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n while tb is not None:\n lineno = tb.tb_lineno\n name = tb.tb_frame.f_code.co_name\n yield {\n \"file\": filepath(tb.tb_frame),\n \"line\": lineno,\n \"function\": name,\n }\n tb = tb.tb_next\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python2.7/traceback.extract_stack to support iterating\n over the entire stack, but without creating a large data structure.\n \"\"\"\n try:\n raise ZeroDivisionError\n except ZeroDivisionError:\n # Get the current frame\n frame = sys.exc_info()[2].tb_frame.f_back\n\n while frame is not None:\n lineno = frame.f_lineno\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n frame = frame.f_back\n\n\ndef capture_backtrace():\n walker = filter_frames(backtrace_walker())\n return list(itertools.islice(walker, LIMIT))\n\n\ndef capture_stacktrace(tb):\n walker = stacktrace_walker(tb)\n return list(reversed(list(itertools.islice(walker, LIMIT))))\n\n\ndef capture():\n warnings.warn(\n \"capture is deprecated, instead use capture_backtrace instead.\",\n DeprecationWarning,\n 2,\n )\n return capture_backtrace()\n", "path": "src/scout_apm/core/backtrace.py"}]}
| 2,028 | 388 |
gh_patches_debug_14653
|
rasdani/github-patches
|
git_diff
|
conda__conda-4327
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Channels in centrally installed .condarc file are being ignored in conda 4.3.4
Hi, I am testing a centrally installed Anaconda setup with Anaconda installed under `C:\Program Files\Anaconda3`. I have a condarc file under `C:\Program Files\Anaconda3\.condarc`.
When I run `conda info` it tells me that my config file is under the correct location.
config file : C:\Program Files\Anaconda3\.condarc
I have configured a few custom channels in this `.condarc` file, e.g.:
channels:
- http://some.internal/url
I can also use `conda config --system --add channels http://some.internal/url` to set this value and conda tells me that channels already contains this value.
But when I run `conda config --system --show`, the list of channels is always set to:
channels:
- defaults
It seems that the list of channels in the central `.condarc` file is completely ignored and always replaced by `defaults`. I have also tried to set the list of `default_channels` in the central `.condarc` file but without success.
Using conda 4.3.4 on win-64.
</issue>
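conda assembles its `.condarc` search path from several roots, one of which lives under `$CONDA_ROOT`; the symptom above is consistent with that entry not resolving to `C:\Program Files\Anaconda3` for a centrally installed environment. The eventual patch below pins this down by exporting `CONDA_ROOT` at import time; stripped of context, the essence is just the following (a simplified sketch, not the full change):

```python
# Essence of the change shown in the patch below: make sure CONDA_ROOT points
# at the running installation so the system-level .condarc under it is found.
import os
import sys

if os.getenv('CONDA_ROOT') is None:
    os.environ['CONDA_ROOT'] = sys.prefix   # e.g. C:\Program Files\Anaconda3
```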
<code>
[start of conda/__init__.py]
1 # (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6 """OS-agnostic, system-level binary package manager."""
7 from __future__ import absolute_import, division, print_function, unicode_literals
8
9 from os.path import dirname
10
11 from ._vendor.auxlib.packaging import get_version
12 from .common.compat import iteritems, text_type
13
14 __all__ = [
15 "__name__", "__version__", "__author__",
16 "__email__", "__license__", "__copyright__",
17 "__summary__", "__url__",
18 ]
19
20 __name__ = "conda"
21 __version__ = get_version(__file__)
22 __author__ = "Continuum Analytics, Inc."
23 __email__ = "conda@continuum.io"
24 __license__ = "BSD"
25 __summary__ = __doc__
26 __url__ = "https://github.com/conda/conda"
27
28 CONDA_PACKAGE_ROOT = dirname(__file__)
29
30
31 class CondaError(Exception):
32 def __init__(self, message, **kwargs):
33 self.message = message
34 self._kwargs = kwargs
35 super(CondaError, self).__init__(message)
36
37 def __repr__(self):
38 return '%s: %s\n' % (self.__class__.__name__, text_type(self))
39
40 def __str__(self):
41 return text_type(self.message % self._kwargs)
42
43 def dump_map(self):
44 result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))
45 result.update(exception_type=text_type(type(self)),
46 exception_name=self.__class__.__name__,
47 message=text_type(self),
48 error=repr(self),
49 **self._kwargs)
50 return result
51
52
53 class CondaMultiError(CondaError):
54
55 def __init__(self, errors):
56 self.errors = errors
57 super(CondaError, self).__init__(None)
58
59 def __repr__(self):
60 return '\n'.join(repr(e) for e in self.errors) + '\n'
61
62 def __str__(self):
63 return '\n'.join(text_type(e) for e in self.errors) + '\n'
64
65 def dump_map(self):
66 return dict(exception_type=text_type(type(self)),
67 exception_name=self.__class__.__name__,
68 errors=tuple(error.dump_map() for error in self.errors),
69 error="Multiple Errors Encountered.",
70 )
71
72
73 class CondaExitZero(CondaError):
74 pass
75
[end of conda/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/__init__.py b/conda/__init__.py
--- a/conda/__init__.py
+++ b/conda/__init__.py
@@ -6,7 +6,9 @@
"""OS-agnostic, system-level binary package manager."""
from __future__ import absolute_import, division, print_function, unicode_literals
+import os
from os.path import dirname
+import sys
from ._vendor.auxlib.packaging import get_version
from .common.compat import iteritems, text_type
@@ -25,6 +27,10 @@
__summary__ = __doc__
__url__ = "https://github.com/conda/conda"
+
+if os.getenv('CONDA_ROOT') is None:
+ os.environ['CONDA_ROOT'] = sys.prefix
+
CONDA_PACKAGE_ROOT = dirname(__file__)
|
{"golden_diff": "diff --git a/conda/__init__.py b/conda/__init__.py\n--- a/conda/__init__.py\n+++ b/conda/__init__.py\n@@ -6,7 +6,9 @@\n \"\"\"OS-agnostic, system-level binary package manager.\"\"\"\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+import os\n from os.path import dirname\n+import sys\n \n from ._vendor.auxlib.packaging import get_version\n from .common.compat import iteritems, text_type\n@@ -25,6 +27,10 @@\n __summary__ = __doc__\n __url__ = \"https://github.com/conda/conda\"\n \n+\n+if os.getenv('CONDA_ROOT') is None:\n+ os.environ['CONDA_ROOT'] = sys.prefix\n+\n CONDA_PACKAGE_ROOT = dirname(__file__)\n", "issue": "Channels in centrally installed .condarc file are being ignored in conda 4.3.4\nHi, I am testing a centrally installed Anaconda setup with Anaconda installed under `C:\\Program Files\\Anaconda3`. I have a condarc file under `C:\\Program Files\\Anaconda3\\.condarc`.\r\n\r\nWhen I run `conda info` it tells me that my config file is under the correct location.\r\n\r\n config file : C:\\Program Files\\Anaconda3\\.condarc\r\n\r\nI have configured a few custom channels in this `.condarc` file, e.g.:\r\n\r\n channels:\r\n - http://some.internal/url\r\n\r\nI can also use `conda config --system --add channels http://some.internal/url` to set this value and conda tells me that channels already contains this value.\r\n\r\nBut when I run `conda config --system --show`, the list of channels is always set to:\r\n\r\n channels:\r\n - defaults\r\n\r\nIt seems that the list of channels in the central `.condarc` file is completely ignored and always replaced by `defaults`. I have also tried to set the list of `default_channels` in the central `.condarc` file but without success.\r\n\r\nUsing conda 4.3.4 on win-64.\r\n\n", "before_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. 
/ http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\"\"\"OS-agnostic, system-level binary package manager.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom os.path import dirname\n\nfrom ._vendor.auxlib.packaging import get_version\nfrom .common.compat import iteritems, text_type\n\n__all__ = [\n \"__name__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n \"__summary__\", \"__url__\",\n]\n\n__name__ = \"conda\"\n__version__ = get_version(__file__)\n__author__ = \"Continuum Analytics, Inc.\"\n__email__ = \"conda@continuum.io\"\n__license__ = \"BSD\"\n__summary__ = __doc__\n__url__ = \"https://github.com/conda/conda\"\n\nCONDA_PACKAGE_ROOT = dirname(__file__)\n\n\nclass CondaError(Exception):\n def __init__(self, message, **kwargs):\n self.message = message\n self._kwargs = kwargs\n super(CondaError, self).__init__(message)\n\n def __repr__(self):\n return '%s: %s\\n' % (self.__class__.__name__, text_type(self))\n\n def __str__(self):\n return text_type(self.message % self._kwargs)\n\n def dump_map(self):\n result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))\n result.update(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n message=text_type(self),\n error=repr(self),\n **self._kwargs)\n return result\n\n\nclass CondaMultiError(CondaError):\n\n def __init__(self, errors):\n self.errors = errors\n super(CondaError, self).__init__(None)\n\n def __repr__(self):\n return '\\n'.join(repr(e) for e in self.errors) + '\\n'\n\n def __str__(self):\n return '\\n'.join(text_type(e) for e in self.errors) + '\\n'\n\n def dump_map(self):\n return dict(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n errors=tuple(error.dump_map() for error in self.errors),\n error=\"Multiple Errors Encountered.\",\n )\n\n\nclass CondaExitZero(CondaError):\n pass\n", "path": "conda/__init__.py"}]}
| 1,514 | 181 |
gh_patches_debug_19684
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-2985
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The parameter for --administration-members is incorrectly stated as optional
For the function 'az powerbi embedded-capacity create', the parameter for --administration-members is incorrectly stated as optional.
If you leave this parameter out, it will give this error:
**BadRequestError: At least one capacity administrator is required**
---
#### Document Details
⚠ *Do not edit this section. It is required for docs.microsoft.com ➟ GitHub issue linking.*
* ID: edf4a4a9-8ff1-c276-3e51-d5e83c180879
* Version Independent ID: de63a28e-4d16-2270-595f-1a67f5e682bd
* Content: [az powerbi embedded-capacity](https://docs.microsoft.com/en-us/cli/azure/ext/powerbidedicated/powerbi/embedded-capacity?view=azure-cli-latest)
* Content Source: [latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml)
* GitHub Login: @rloutlaw
* Microsoft Alias: **routlaw**
</issue>
<code>
[start of src/powerbidedicated/azext_powerbidedicated/_params.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5 # pylint: disable=line-too-long
6 # pylint: disable=too-many-lines
7 # pylint: disable=too-many-statements
8
9 from knack.arguments import CLIArgumentType
10
11 from azure.cli.core.commands.parameters import (
12 tags_type,
13 get_enum_type,
14 resource_group_name_type,
15 get_location_type
16 )
17
18
19 def load_arguments(self, _):
20 name_type = CLIArgumentType(
21 options_list=['--name', '-n'],
22 help='The name of the Dedicated capacity. It must be at least 3 characters in length, and no more than 63.')
23 sku_name_type = CLIArgumentType(
24 arg_type=get_enum_type(['A1', 'A2', 'A3', 'A4', 'A5', 'A6']),
25 help='Name of the SKU level. For more information, please refer to '
26 'https://azure.microsoft.com/en-us/pricing/details/power-bi-embedded/.'
27 )
28 sku_tier_type = CLIArgumentType(
29 arg_type=get_enum_type(['PBIE_Azure']),
30 help='The name of the Azure pricing tier to which the SKU applies.'
31 )
32 administration_type = CLIArgumentType(
33 help='An array of administrator user identities.', nargs='+'
34 )
35
36 with self.argument_context('powerbi embedded-capacity') as c:
37 c.argument('resource_group_name', resource_group_name_type)
38 c.argument('name', name_type)
39
40 with self.argument_context('powerbi embedded-capacity create') as c:
41 c.argument('sku_name', sku_name_type)
42 c.argument('sku_tier', sku_tier_type)
43 c.argument('tags', tags_type)
44 c.argument('administration_members', administration_type)
45 c.argument('location', get_location_type(self.cli_ctx))
46
47 with self.argument_context('powerbi embedded-capacity update') as c:
48 c.argument('sku_name', sku_name_type)
49 c.argument('sku_tier', sku_tier_type)
50 c.argument('tags', tags_type)
51 c.argument('administration_members', administration_type)
52
[end of src/powerbidedicated/azext_powerbidedicated/_params.py]
[start of src/powerbidedicated/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8
9 from codecs import open
10 from setuptools import setup, find_packages
11 try:
12 from azure_bdist_wheel import cmdclass
13 except ImportError:
14 from distutils import log as logger
15 logger.warn("Wheel is not available, disabling bdist_wheel hook")
16
17 # TODO: Confirm this is the right version number you want and it matches your
18 # HISTORY.rst entry.
19 VERSION = '0.1.1'
20
21 # The full list of classifiers is available at
22 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
23 CLASSIFIERS = [
24 'Development Status :: 4 - Beta',
25 'Intended Audience :: Developers',
26 'Intended Audience :: System Administrators',
27 'Programming Language :: Python',
28 'Programming Language :: Python :: 2',
29 'Programming Language :: Python :: 2.7',
30 'Programming Language :: Python :: 3',
31 'Programming Language :: Python :: 3.4',
32 'Programming Language :: Python :: 3.5',
33 'Programming Language :: Python :: 3.6',
34 'License :: OSI Approved :: MIT License',
35 ]
36
37 # TODO: Add any additional SDK dependencies here
38 DEPENDENCIES = []
39
40 with open('README.md', 'r', encoding='utf-8') as f:
41 README = f.read()
42 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
43 HISTORY = f.read()
44
45 setup(
46 name='powerbidedicated',
47 version=VERSION,
48 description='Microsoft Azure Command-Line Tools PowerBIDedicated Extension',
49 # TODO: Update author and email, if applicable
50 author='Microsoft Corporation',
51 author_email='azpycli@microsoft.com',
52 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/powerbidedicated',
53 long_description=README + '\n\n' + HISTORY,
54 license='MIT',
55 classifiers=CLASSIFIERS,
56 packages=find_packages(),
57 install_requires=DEPENDENCIES,
58 package_data={'azext_powerbidedicated': ['azext_metadata.json']},
59 )
60
[end of src/powerbidedicated/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/powerbidedicated/azext_powerbidedicated/_params.py b/src/powerbidedicated/azext_powerbidedicated/_params.py
--- a/src/powerbidedicated/azext_powerbidedicated/_params.py
+++ b/src/powerbidedicated/azext_powerbidedicated/_params.py
@@ -41,7 +41,7 @@
c.argument('sku_name', sku_name_type)
c.argument('sku_tier', sku_tier_type)
c.argument('tags', tags_type)
- c.argument('administration_members', administration_type)
+ c.argument('administration_members', administration_type, required=True)
c.argument('location', get_location_type(self.cli_ctx))
with self.argument_context('powerbi embedded-capacity update') as c:
diff --git a/src/powerbidedicated/setup.py b/src/powerbidedicated/setup.py
--- a/src/powerbidedicated/setup.py
+++ b/src/powerbidedicated/setup.py
@@ -16,7 +16,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
-VERSION = '0.1.1'
+VERSION = '0.2.0'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
|
{"golden_diff": "diff --git a/src/powerbidedicated/azext_powerbidedicated/_params.py b/src/powerbidedicated/azext_powerbidedicated/_params.py\n--- a/src/powerbidedicated/azext_powerbidedicated/_params.py\n+++ b/src/powerbidedicated/azext_powerbidedicated/_params.py\n@@ -41,7 +41,7 @@\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n- c.argument('administration_members', administration_type)\n+ c.argument('administration_members', administration_type, required=True)\n c.argument('location', get_location_type(self.cli_ctx))\n \n with self.argument_context('powerbi embedded-capacity update') as c:\ndiff --git a/src/powerbidedicated/setup.py b/src/powerbidedicated/setup.py\n--- a/src/powerbidedicated/setup.py\n+++ b/src/powerbidedicated/setup.py\n@@ -16,7 +16,7 @@\n \n # TODO: Confirm this is the right version number you want and it matches your\n # HISTORY.rst entry.\n-VERSION = '0.1.1'\n+VERSION = '0.2.0'\n \n # The full list of classifiers is available at\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "The parameter for --administration-members is incorrectly stated as optional \nFor the function 'az powerbi embedded-capacity create', the parameter for --administration-members is incorrectly stated as optional.\r\nIf you leave this parameter out, it will give this error:\r\n**BadRequestError: At least one capacity administrator is required**\r\n\r\n---\r\n#### Document Details\r\n\r\n\u26a0 *Do not edit this section. It is required for docs.microsoft.com \u279f GitHub issue linking.*\r\n\r\n* ID: edf4a4a9-8ff1-c276-3e51-d5e83c180879\r\n* Version Independent ID: de63a28e-4d16-2270-595f-1a67f5e682bd\r\n* Content: [az powerbi embedded-capacity](https://docs.microsoft.com/en-us/cli/azure/ext/powerbidedicated/powerbi/embedded-capacity?view=azure-cli-latest)\r\n* Content Source: [latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml)\r\n* GitHub Login: @rloutlaw\r\n* Microsoft Alias: **routlaw**\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n\nfrom knack.arguments import CLIArgumentType\n\nfrom azure.cli.core.commands.parameters import (\n tags_type,\n get_enum_type,\n resource_group_name_type,\n get_location_type\n)\n\n\ndef load_arguments(self, _):\n name_type = CLIArgumentType(\n options_list=['--name', '-n'],\n help='The name of the Dedicated capacity. It must be at least 3 characters in length, and no more than 63.')\n sku_name_type = CLIArgumentType(\n arg_type=get_enum_type(['A1', 'A2', 'A3', 'A4', 'A5', 'A6']),\n help='Name of the SKU level. 
For more information, please refer to '\n 'https://azure.microsoft.com/en-us/pricing/details/power-bi-embedded/.'\n )\n sku_tier_type = CLIArgumentType(\n arg_type=get_enum_type(['PBIE_Azure']),\n help='The name of the Azure pricing tier to which the SKU applies.'\n )\n administration_type = CLIArgumentType(\n help='An array of administrator user identities.', nargs='+'\n )\n\n with self.argument_context('powerbi embedded-capacity') as c:\n c.argument('resource_group_name', resource_group_name_type)\n c.argument('name', name_type)\n\n with self.argument_context('powerbi embedded-capacity create') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type)\n c.argument('location', get_location_type(self.cli_ctx))\n\n with self.argument_context('powerbi embedded-capacity update') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type)\n", "path": "src/powerbidedicated/azext_powerbidedicated/_params.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.1'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='powerbidedicated',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools PowerBIDedicated Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='azpycli@microsoft.com',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/powerbidedicated',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_powerbidedicated': ['azext_metadata.json']},\n)\n", "path": "src/powerbidedicated/setup.py"}]}
| 2,016 | 297 |
gh_patches_debug_10979
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-10074
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOCS] Page wise display of documentation search
**Is your feature request related to a problem?**
Yes. I searched for a relatively simple query in the documentation search bar of https://docs.bokeh.org, and it took a very long time to load the results. On my second try, the results did not load at all. The results are displayed in an unordered list that fills the entire page, and it gets frustrating to read through everything to find the answer to the query.
**Describe the solution you'd like**
I would suggest displaying the fetched results page-wise, the way most search engines do it: answers sorted by relevance and shown page by page. Fill only the current page with about 20 to 30 query results, and load the other pages only if the user wants to see them.
**Describe alternatives you've considered**
If not a page-wise result, a folder-wise result would also help, since it leaves the user the option to navigate to where they want. A custom Google search may also help.
**Additional context**

</issue>
<code>
[start of sphinx/docserver.py]
1 import os
2 import sys
3 import threading
4 import time
5 import webbrowser
6
7 import flask
8 import tornado
9 from tornado.httpserver import HTTPServer
10 from tornado.ioloop import IOLoop
11 from tornado.wsgi import WSGIContainer
12
13 _basedir = os.path.join("..", os.path.dirname(__file__))
14
15 app = flask.Flask(__name__, static_folder="/unused")
16 PORT=5009
17 http_server = HTTPServer(WSGIContainer(app))
18
19 @app.route('/')
20 def welcome():
21 return """
22 <h1>Welcome to the Bokeh documentation server</h1>
23 You probably want to go to <a href="/en/latest/index.html"> Index</a>
24 """
25
26 @app.route('/versions.json')
27 def send_versions():
28 return flask.send_from_directory(
29 os.path.join(_basedir, "sphinx"), "test_versions.json")
30
31 @app.route('/alert.html')
32 def send_alert():
33 return os.environ.get("BOKEH_DOCS_ALERT", "")
34
35 @app.route('/en/latest/<path:filename>')
36 def send_docs(filename):
37 return flask.send_from_directory(
38 os.path.join(_basedir, "sphinx/build/html/"), filename)
39
40 def open_browser():
41 # Child process
42 time.sleep(0.5)
43 webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
44
45 data = {}
46
47 def serve_http():
48 data['ioloop'] = IOLoop()
49 http_server.listen(PORT)
50 IOLoop.current().start()
51
52 def shutdown_server():
53 ioloop = data['ioloop']
54 ioloop.add_callback(ioloop.stop)
55 print("Asked Server to shut down.")
56
57 def ui():
58 try:
59 time.sleep(0.5)
60 input("Press <ENTER> to exit...\n") # lgtm [py/use-of-input]
61 except KeyboardInterrupt:
62 pass
63
64 if __name__ == "__main__":
65
66 if tornado.version_info[0] == 4:
67 print('docserver.py script requires tornado 5 or higher')
68 sys.exit(1)
69
70 print("\nStarting Bokeh plot server on port %d..." % PORT)
71 print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
72
73 t_server = threading.Thread(target=serve_http)
74 t_server.start()
75 t_browser = threading.Thread(target=open_browser)
76 t_browser.start()
77
78 ui()
79
80 shutdown_server()
81 t_server.join()
82 t_browser.join()
83 print("Server shut down.")
84
[end of sphinx/docserver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sphinx/docserver.py b/sphinx/docserver.py
--- a/sphinx/docserver.py
+++ b/sphinx/docserver.py
@@ -1,3 +1,4 @@
+import asyncio
import os
import sys
import threading
@@ -10,6 +11,11 @@
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
+# Needed for Windows + Python 3.8 config
+if sys.version_info.major==3 and sys.version_info.minor >= 8 and sys.platform.startswith('win'):
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+
+
_basedir = os.path.join("..", os.path.dirname(__file__))
app = flask.Flask(__name__, static_folder="/unused")
|
{"golden_diff": "diff --git a/sphinx/docserver.py b/sphinx/docserver.py\n--- a/sphinx/docserver.py\n+++ b/sphinx/docserver.py\n@@ -1,3 +1,4 @@\n+import asyncio\n import os\n import sys\n import threading\n@@ -10,6 +11,11 @@\n from tornado.ioloop import IOLoop\n from tornado.wsgi import WSGIContainer\n \n+# Needed for Windows + Python 3.8 config\n+if sys.version_info.major==3 and sys.version_info.minor >= 8 and sys.platform.startswith('win'):\n+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n+\n+\n _basedir = os.path.join(\"..\", os.path.dirname(__file__))\n \n app = flask.Flask(__name__, static_folder=\"/unused\")\n", "issue": "[DOCS] Page wise display of documentation search \n**Is your feature request related to a problem?**\r\nYes. I searched for a relatively simple query in the documentation search bar of https://docs.bokeh.org, and it took very long to load the results. In my second try, the results weren't even loading, I'm afraid. These are displayed in an unordered list which fills the entire page up. It might get frustrating to read through everything to find the answer to the input query. \r\n\r\n**Describe the solution you'd like**\r\nI would suggest displaying the fetched results in a page wise format, the way most search engines do it. Relevance weighted sorted answer, shown page wise. Fill up only the current page of about 20 to 30 odd query results, and depending on whether the user wants to see the other pages, load them.\r\n\r\n**Describe alternatives you've considered**\r\nIf not a page wise result, a folder wise result would also benefit, which leaves the option to the user to navigate where he/she wants to. A custom google search may also help.\r\n\r\n**Additional context**\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\n@app.route('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\n@app.route('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\n@app.route('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\n@app.route('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n print(\"Visit 
http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}]}
| 1,529 | 170 |
gh_patches_debug_31758
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-384
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not support sslv3 (poodle)
In Docker 1.3.1 (coming very soon), only TLS1.0+ will be supported.
Ping @shin-
</issue>
<code>
[start of docker/tls.py]
1 import os
2
3 from . import errors
4 from .ssladapter import ssladapter
5
6
7 class TLSConfig(object):
8 cert = None
9 verify = None
10 ssl_version = None
11
12 def __init__(self, client_cert=None, ca_cert=None, verify=None,
13 ssl_version=None, assert_hostname=None):
14 # Argument compatibility/mapping with
15 # http://docs.docker.com/examples/https/
16 # This diverges from the Docker CLI in that users can specify 'tls'
17 # here, but also disable any public/default CA pool verification by
18 # leaving tls_verify=False
19
20 # urllib3 sets a default ssl_version if ssl_version is None
21 # http://tinyurl.com/kxga8hb
22 self.ssl_version = ssl_version
23 self.assert_hostname = assert_hostname
24
25 # "tls" and "tls_verify" must have both or neither cert/key files
26 # In either case, Alert the user when both are expected, but any are
27 # missing.
28
29 if client_cert:
30 try:
31 tls_cert, tls_key = client_cert
32 except ValueError:
33 raise errors.TLSParameterError(
34 'client_config must be a tuple of'
35 ' (client certificate, key file)'
36 )
37
38 if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
39 not os.path.isfile(tls_key)):
40 raise errors.TLSParameterError(
41 'Path to a certificate and key files must be provided'
42 ' through the client_config param'
43 )
44 self.cert = (tls_cert, tls_key)
45
46 # Either set verify to True (public/default CA checks) or to the
47 # path of a CA Cert file.
48 if verify is not None:
49 if not ca_cert:
50 self.verify = verify
51 elif os.path.isfile(ca_cert):
52 if not verify:
53 raise errors.TLSParameterError(
54 'verify can not be False when a CA cert is'
55 ' provided.'
56 )
57 self.verify = ca_cert
58 else:
59 raise errors.TLSParameterError(
60 'Invalid CA certificate provided for `tls_ca_cert`.'
61 )
62
63 def configure_client(self, client):
64 client.ssl_version = self.ssl_version
65 if self.verify is not None:
66 client.verify = self.verify
67 if self.cert:
68 client.cert = self.cert
69 client.mount('https://', ssladapter.SSLAdapter(
70 ssl_version=self.ssl_version,
71 assert_hostname=self.assert_hostname,
72 ))
73
[end of docker/tls.py]
[start of docker/ssladapter/ssladapter.py]
1 """ Resolves OpenSSL issues in some servers:
2 https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
3 https://github.com/kennethreitz/requests/pull/799
4 """
5 from distutils.version import StrictVersion
6 from requests.adapters import HTTPAdapter
7 try:
8 import requests.packages.urllib3 as urllib3
9 except ImportError:
10 import urllib3
11
12
13 PoolManager = urllib3.poolmanager.PoolManager
14
15
16 class SSLAdapter(HTTPAdapter):
17 '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
18 def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
19 self.ssl_version = ssl_version
20 self.assert_hostname = assert_hostname
21 super(SSLAdapter, self).__init__(**kwargs)
22
23 def init_poolmanager(self, connections, maxsize, block=False):
24 kwargs = {
25 'num_pools': connections,
26 'maxsize': maxsize,
27 'block': block,
28 'assert_hostname': self.assert_hostname,
29 }
30 if self.can_override_ssl_version():
31 kwargs['ssl_version'] = self.ssl_version
32
33 self.poolmanager = PoolManager(**kwargs)
34
35 def can_override_ssl_version(self):
36 urllib_ver = urllib3.__version__.split('-')[0]
37 if urllib_ver is None:
38 return False
39 if urllib_ver == 'dev':
40 return True
41 return StrictVersion(urllib_ver) > StrictVersion('1.5')
42
[end of docker/ssladapter/ssladapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/ssladapter/ssladapter.py
@@ -4,6 +4,7 @@
"""
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
+import ssl
try:
import requests.packages.urllib3 as urllib3
except ImportError:
@@ -13,9 +14,19 @@
PoolManager = urllib3.poolmanager.PoolManager
+def get_max_tls_protocol():
+ protocols = ('PROTOCOL_TLSv1_2',
+ 'PROTOCOL_TLSv1_1',
+ 'PROTOCOL_TLSv1')
+ for proto in protocols:
+ if hasattr(ssl, proto):
+ return proto
+
+
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
+ ssl_version = ssl_version or get_max_tls_protocol()
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
super(SSLAdapter, self).__init__(**kwargs)
diff --git a/docker/tls.py b/docker/tls.py
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -17,8 +17,11 @@
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
- # urllib3 sets a default ssl_version if ssl_version is None
- # http://tinyurl.com/kxga8hb
+ # urllib3 sets a default ssl_version if ssl_version is None,
+ # but that default is the vulnerable PROTOCOL_SSLv23 selection,
+ # so we override the default with the maximum supported in the running
+ # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)
+ ssl_version = ssl_version or ssladapter.get_max_tls_protocol()
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
|
{"golden_diff": "diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py\n--- a/docker/ssladapter/ssladapter.py\n+++ b/docker/ssladapter/ssladapter.py\n@@ -4,6 +4,7 @@\n \"\"\"\n from distutils.version import StrictVersion\n from requests.adapters import HTTPAdapter\n+import ssl\n try:\n import requests.packages.urllib3 as urllib3\n except ImportError:\n@@ -13,9 +14,19 @@\n PoolManager = urllib3.poolmanager.PoolManager\n \n \n+def get_max_tls_protocol():\n+ protocols = ('PROTOCOL_TLSv1_2',\n+ 'PROTOCOL_TLSv1_1',\n+ 'PROTOCOL_TLSv1')\n+ for proto in protocols:\n+ if hasattr(ssl, proto):\n+ return proto\n+\n+\n class SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n+ ssl_version = ssl_version or get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\ndiff --git a/docker/tls.py b/docker/tls.py\n--- a/docker/tls.py\n+++ b/docker/tls.py\n@@ -17,8 +17,11 @@\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n \n- # urllib3 sets a default ssl_version if ssl_version is None\n- # http://tinyurl.com/kxga8hb\n+ # urllib3 sets a default ssl_version if ssl_version is None,\n+ # but that default is the vulnerable PROTOCOL_SSLv23 selection,\n+ # so we override the default with the maximum supported in the running\n+ # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)\n+ ssl_version = ssl_version or ssladapter.get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n", "issue": "Do not support sslv3 (poodle)\nIn Docker 1.3.1 (coming very soon), only TLS1.0+ will be supported.\nPing @shin- \n\n", "before_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(\n ssl_version=self.ssl_version,\n assert_hostname=self.assert_hostname,\n ))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block,\n 'assert_hostname': self.assert_hostname,\n }\n if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n\n def can_override_ssl_version(self):\n urllib_ver = urllib3.__version__.split('-')[0]\n if urllib_ver is None:\n return False\n if urllib_ver == 'dev':\n return True\n return StrictVersion(urllib_ver) > StrictVersion('1.5')\n", "path": "docker/ssladapter/ssladapter.py"}]}
| 1,655 | 458 |
gh_patches_debug_36699
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-2078
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'os' has no attribute 'popen2' when using voip module
Hello, I have installed the latest scapy version. I want to use the voip module, but I get the issue
AttributeError: module 'os' has no attribute 'popen2' when using the voip_play function.
</issue>
<code>
[start of scapy/modules/voip.py]
1 # This file is part of Scapy
2 # See http://www.secdev.org/projects/scapy for more information
3 # Copyright (C) Philippe Biondi <phil@secdev.org>
4 # This program is published under a GPLv2 license
5
6 """
7 VoIP (Voice over IP) related functions
8 """
9
10 from __future__ import absolute_import
11 import os
12 ###################
13 # Listen VoIP #
14 ###################
15
16 from scapy.sendrecv import sniff
17 from scapy.layers.inet import IP, UDP
18 from scapy.layers.rtp import RTP
19 from scapy.consts import WINDOWS
20 from scapy.config import conf
21 from scapy.modules.six.moves import range
22
23
24 sox_base = "sox -t .ul %s - -t ossdsp /dev/dsp"
25
26 if WINDOWS:
27 if conf.prog.sox is None:
28 raise OSError("Sox must be installed to play VoIP packets")
29 sox_base = "\"" + conf.prog.sox + "\" -t .ul %s - -t waveaudio"
30
31
32 def _merge_sound_bytes(x, y, sample_size=2):
33 # TODO: find a better way to merge sound bytes
34 # This will only add them one next to each other:
35 # \xff + \xff ==> \xff\xff
36 m = ""
37 ss = sample_size
38 min_ = 0
39 if len(x) >= len(y):
40 min_ = y
41 elif len(x) < len(y):
42 min_ = x
43 r_ = len(min_)
44 for i in range(r_ / ss):
45 m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)]
46 return x[r_:], y[r_:], m
47
48
49 def voip_play(s1, lst=None, **kargs):
50 """Play VoIP packets with RAW data that
51 are either sniffed either from an IP, or
52 specified as a list.
53
54 It will play only the incoming packets !
55
56 :param s1: The IP of the src of all VoIP packets.
57 :param lst: (optional) A list of packets to load
58 :type s1: string
59 :type lst: list
60
61 :Example:
62
63 >>> voip_play("64.2.142.189")
64 while calling '411@ideasip.com'
65
66 >>> voip_play("64.2.142.189", lst)
67 with list a list of packets with VoIP data
68 in their RAW layer
69
70 .. seealso:: voip_play2
71 to play both the outcoming and incoming packets
72 at the same time.
73
74 .. seealso:: voip_play3
75 to read RTP VoIP packets
76 """
77
78 dsp, rd = os.popen2(sox_base % "")
79
80 def play(pkt):
81 if not pkt:
82 return
83 if not pkt.haslayer(UDP) or not pkt.haslayer(IP):
84 return
85 ip = pkt.getlayer(IP)
86 if s1 == ip.src:
87 dsp.write(pkt.getlayer(conf.raw_layer).load[12:])
88 try:
89 if lst is None:
90 sniff(store=0, prn=play, **kargs)
91 else:
92 for p in lst:
93 play(p)
94 finally:
95 dsp.close()
96 rd.close()
97
98
99 def voip_play1(s1, lst=None, **kargs):
100 """Same than voip_play, backward compatibility
101 """
102 return voip_play(s1, lst, **kargs)
103
104
105 def voip_play2(s1, **kargs):
106 """
107 Same than voip_play, but will play
108 both incoming and outcoming packets.
109 The sound will surely suffer distortion.
110
111 Only supports sniffing.
112
113 .. seealso:: voip_play
114 to play only incoming packets.
115 """
116 dsp, rd = os.popen2(sox_base % "-c 2")
117 global x1, x2
118 x1 = ""
119 x2 = ""
120
121 def play(pkt):
122 global x1, x2
123 if not pkt:
124 return
125 if not pkt.haslayer(UDP) or not pkt.haslayer(IP):
126 return
127 ip = pkt.getlayer(IP)
128 if s1 in [ip.src, ip.dst]:
129 if ip.dst == s1:
130 x1 += pkt.getlayer(conf.raw_layer).load[12:]
131 else:
132 x2 += pkt.getlayer(conf.raw_layer).load[12:]
133 x1, x2, r = _merge_sound_bytes(x1, x2)
134 dsp.write(r)
135
136 sniff(store=0, prn=play, **kargs)
137
138
139 def voip_play3(lst=None, **kargs):
140 """Same than voip_play, but made to
141 read and play VoIP RTP packets, without
142 checking IP.
143
144 .. seealso:: voip_play
145 for basic VoIP packets
146 """
147 dsp, rd = os.popen2(sox_base % "")
148
149 def play(pkt, dsp=dsp):
150 if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):
151 dsp.write(pkt.getlayer(RTP).load)
152 try:
153 if lst is None:
154 sniff(store=0, prn=play, **kargs)
155 else:
156 for p in lst:
157 play(p)
158 finally:
159 try:
160 dsp.close()
161 rd.close()
162 except Exception:
163 pass
164
[end of scapy/modules/voip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scapy/modules/voip.py b/scapy/modules/voip.py
--- a/scapy/modules/voip.py
+++ b/scapy/modules/voip.py
@@ -8,7 +8,7 @@
"""
from __future__ import absolute_import
-import os
+import subprocess
###################
# Listen VoIP #
###################
@@ -21,12 +21,12 @@
from scapy.modules.six.moves import range
-sox_base = "sox -t .ul %s - -t ossdsp /dev/dsp"
+sox_base = (["sox", "-t", ".ul"], ["-", "-t", "ossdsp", "/dev/dsp"])
if WINDOWS:
if conf.prog.sox is None:
raise OSError("Sox must be installed to play VoIP packets")
- sox_base = "\"" + conf.prog.sox + "\" -t .ul %s - -t waveaudio"
+ sox_base = ([conf.prog.sox, "-t", ".ul"], ["-", "-t", "waveaudio"])
def _merge_sound_bytes(x, y, sample_size=2):
@@ -75,7 +75,9 @@
to read RTP VoIP packets
"""
- dsp, rd = os.popen2(sox_base % "")
+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ dsp, rd = proc.stdin, proc.stdout
def play(pkt):
if not pkt:
@@ -113,7 +115,9 @@
.. seealso:: voip_play
to play only incoming packets.
"""
- dsp, rd = os.popen2(sox_base % "-c 2")
+ proc = subprocess.Popen(sox_base[0] + ["-c", "2"] + sox_base[1],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ dsp, rd = proc.stdin, proc.stdout
global x1, x2
x1 = ""
x2 = ""
@@ -133,7 +137,14 @@
x1, x2, r = _merge_sound_bytes(x1, x2)
dsp.write(r)
- sniff(store=0, prn=play, **kargs)
+ try:
+ sniff(store=0, prn=play, **kargs)
+ finally:
+ try:
+ dsp.close()
+ rd.close()
+ except Exception:
+ pass
def voip_play3(lst=None, **kargs):
@@ -144,7 +155,9 @@
.. seealso:: voip_play
for basic VoIP packets
"""
- dsp, rd = os.popen2(sox_base % "")
+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ dsp, rd = proc.stdin, proc.stdout
def play(pkt, dsp=dsp):
if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):
|
{"golden_diff": "diff --git a/scapy/modules/voip.py b/scapy/modules/voip.py\n--- a/scapy/modules/voip.py\n+++ b/scapy/modules/voip.py\n@@ -8,7 +8,7 @@\n \"\"\"\n \n from __future__ import absolute_import\n-import os\n+import subprocess\n ###################\n # Listen VoIP #\n ###################\n@@ -21,12 +21,12 @@\n from scapy.modules.six.moves import range\n \n \n-sox_base = \"sox -t .ul %s - -t ossdsp /dev/dsp\"\n+sox_base = ([\"sox\", \"-t\", \".ul\"], [\"-\", \"-t\", \"ossdsp\", \"/dev/dsp\"])\n \n if WINDOWS:\n if conf.prog.sox is None:\n raise OSError(\"Sox must be installed to play VoIP packets\")\n- sox_base = \"\\\"\" + conf.prog.sox + \"\\\" -t .ul %s - -t waveaudio\"\n+ sox_base = ([conf.prog.sox, \"-t\", \".ul\"], [\"-\", \"-t\", \"waveaudio\"])\n \n \n def _merge_sound_bytes(x, y, sample_size=2):\n@@ -75,7 +75,9 @@\n to read RTP VoIP packets\n \"\"\"\n \n- dsp, rd = os.popen2(sox_base % \"\")\n+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n+ stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n \n def play(pkt):\n if not pkt:\n@@ -113,7 +115,9 @@\n .. seealso:: voip_play\n to play only incoming packets.\n \"\"\"\n- dsp, rd = os.popen2(sox_base % \"-c 2\")\n+ proc = subprocess.Popen(sox_base[0] + [\"-c\", \"2\"] + sox_base[1],\n+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n global x1, x2\n x1 = \"\"\n x2 = \"\"\n@@ -133,7 +137,14 @@\n x1, x2, r = _merge_sound_bytes(x1, x2)\n dsp.write(r)\n \n- sniff(store=0, prn=play, **kargs)\n+ try:\n+ sniff(store=0, prn=play, **kargs)\n+ finally:\n+ try:\n+ dsp.close()\n+ rd.close()\n+ except Exception:\n+ pass\n \n \n def voip_play3(lst=None, **kargs):\n@@ -144,7 +155,9 @@\n .. seealso:: voip_play\n for basic VoIP packets\n \"\"\"\n- dsp, rd = os.popen2(sox_base % \"\")\n+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n+ stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n \n def play(pkt, dsp=dsp):\n if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):\n", "issue": "AttributeErrror: module 'os' has no attribute 'popen2' when using voip module\nHello I have installed the last scapy version, I want to use the module voip but i has the issue \r\nAttributeErrror: module 'os' has no attribute 'popen2' when using voip_play function. 
\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <phil@secdev.org>\n# This program is published under a GPLv2 license\n\n\"\"\"\nVoIP (Voice over IP) related functions\n\"\"\"\n\nfrom __future__ import absolute_import\nimport os\n###################\n# Listen VoIP #\n###################\n\nfrom scapy.sendrecv import sniff\nfrom scapy.layers.inet import IP, UDP\nfrom scapy.layers.rtp import RTP\nfrom scapy.consts import WINDOWS\nfrom scapy.config import conf\nfrom scapy.modules.six.moves import range\n\n\nsox_base = \"sox -t .ul %s - -t ossdsp /dev/dsp\"\n\nif WINDOWS:\n if conf.prog.sox is None:\n raise OSError(\"Sox must be installed to play VoIP packets\")\n sox_base = \"\\\"\" + conf.prog.sox + \"\\\" -t .ul %s - -t waveaudio\"\n\n\ndef _merge_sound_bytes(x, y, sample_size=2):\n # TODO: find a better way to merge sound bytes\n # This will only add them one next to each other:\n # \\xff + \\xff ==> \\xff\\xff\n m = \"\"\n ss = sample_size\n min_ = 0\n if len(x) >= len(y):\n min_ = y\n elif len(x) < len(y):\n min_ = x\n r_ = len(min_)\n for i in range(r_ / ss):\n m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)]\n return x[r_:], y[r_:], m\n\n\ndef voip_play(s1, lst=None, **kargs):\n \"\"\"Play VoIP packets with RAW data that\n are either sniffed either from an IP, or\n specified as a list.\n\n It will play only the incoming packets !\n\n :param s1: The IP of the src of all VoIP packets.\n :param lst: (optional) A list of packets to load\n :type s1: string\n :type lst: list\n\n :Example:\n\n >>> voip_play(\"64.2.142.189\")\n while calling '411@ideasip.com'\n\n >>> voip_play(\"64.2.142.189\", lst)\n with list a list of packets with VoIP data\n in their RAW layer\n\n .. seealso:: voip_play2\n to play both the outcoming and incoming packets\n at the same time.\n\n .. seealso:: voip_play3\n to read RTP VoIP packets\n \"\"\"\n\n dsp, rd = os.popen2(sox_base % \"\")\n\n def play(pkt):\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 == ip.src:\n dsp.write(pkt.getlayer(conf.raw_layer).load[12:])\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n dsp.close()\n rd.close()\n\n\ndef voip_play1(s1, lst=None, **kargs):\n \"\"\"Same than voip_play, backward compatibility\n \"\"\"\n return voip_play(s1, lst, **kargs)\n\n\ndef voip_play2(s1, **kargs):\n \"\"\"\n Same than voip_play, but will play\n both incoming and outcoming packets.\n The sound will surely suffer distortion.\n\n Only supports sniffing.\n\n .. seealso:: voip_play\n to play only incoming packets.\n \"\"\"\n dsp, rd = os.popen2(sox_base % \"-c 2\")\n global x1, x2\n x1 = \"\"\n x2 = \"\"\n\n def play(pkt):\n global x1, x2\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 in [ip.src, ip.dst]:\n if ip.dst == s1:\n x1 += pkt.getlayer(conf.raw_layer).load[12:]\n else:\n x2 += pkt.getlayer(conf.raw_layer).load[12:]\n x1, x2, r = _merge_sound_bytes(x1, x2)\n dsp.write(r)\n\n sniff(store=0, prn=play, **kargs)\n\n\ndef voip_play3(lst=None, **kargs):\n \"\"\"Same than voip_play, but made to\n read and play VoIP RTP packets, without\n checking IP.\n\n .. 
seealso:: voip_play\n for basic VoIP packets\n \"\"\"\n dsp, rd = os.popen2(sox_base % \"\")\n\n def play(pkt, dsp=dsp):\n if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):\n dsp.write(pkt.getlayer(RTP).load)\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n try:\n dsp.close()\n rd.close()\n except Exception:\n pass\n", "path": "scapy/modules/voip.py"}]}
| 2,193 | 692 |
gh_patches_debug_40329
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1943
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a clear cache command to kinto
Mainly for debugging purposes, we want to be able to clear the cache from time to time.
Add a clear cache command to kinto
Mainly for debugging purposes, we want to be able to clear the cache from time to time.
Add a clear cache command to kinto
Mainly for debugging purposes, we want to be able to clear the cache from time to time.
</issue>
<code>
[start of kinto/core/scripts.py]
1 """
2 kinto.core.scripts: utilities to build admin scripts for kinto-based services
3 """
4 import logging
5
6 from pyramid.settings import asbool
7
8
9 logger = logging.getLogger(__name__)
10
11
12 def migrate(env, dry_run=False):
13 """
14 User-friendly frontend to run database migrations.
15 """
16 registry = env["registry"]
17 settings = registry.settings
18 readonly_backends = ("storage", "permission")
19 readonly_mode = asbool(settings.get("readonly", False))
20
21 for backend in ("cache", "storage", "permission"):
22 if hasattr(registry, backend):
23 if readonly_mode and backend in readonly_backends:
24 message = f"Cannot migrate the {backend} backend while in readonly mode."
25 logger.error(message)
26 else:
27 getattr(registry, backend).initialize_schema(dry_run=dry_run)
28
[end of kinto/core/scripts.py]
[start of kinto/__main__.py]
1 import argparse
2 import os
3 import subprocess
4 import sys
5 import logging
6 import logging.config
7
8 from kinto.core import scripts as core_scripts
9 from kinto import scripts as kinto_scripts
10 from kinto.plugins.accounts import scripts as accounts_scripts
11 from pyramid.scripts import pserve
12 from pyramid.paster import bootstrap
13 from kinto import __version__
14 from kinto.config import init
15
16 DEFAULT_CONFIG_FILE = os.getenv("KINTO_INI", "config/kinto.ini")
17 DEFAULT_PORT = 8888
18 DEFAULT_LOG_LEVEL = logging.INFO
19 DEFAULT_LOG_FORMAT = "%(levelname)-5.5s %(message)s"
20
21
22 def main(args=None):
23 """The main routine."""
24 if args is None:
25 args = sys.argv[1:]
26
27 parser = argparse.ArgumentParser(description="Kinto Command-Line " "Interface")
28 commands = (
29 "init",
30 "start",
31 "migrate",
32 "delete-collection",
33 "version",
34 "rebuild-quotas",
35 "create-user",
36 )
37 subparsers = parser.add_subparsers(
38 title="subcommands",
39 description="Main Kinto CLI commands",
40 dest="subcommand",
41 help="Choose and run with --help",
42 )
43 subparsers.required = True
44
45 for command in commands:
46 subparser = subparsers.add_parser(command)
47 subparser.set_defaults(which=command)
48
49 subparser.add_argument(
50 "--ini",
51 help="Application configuration file",
52 dest="ini_file",
53 required=False,
54 default=DEFAULT_CONFIG_FILE,
55 )
56
57 subparser.add_argument(
58 "-q",
59 "--quiet",
60 action="store_const",
61 const=logging.CRITICAL,
62 dest="verbosity",
63 help="Show only critical errors.",
64 )
65
66 subparser.add_argument(
67 "-v",
68 "--debug",
69 action="store_const",
70 const=logging.DEBUG,
71 dest="verbosity",
72 help="Show all messages, including debug messages.",
73 )
74
75 if command == "init":
76 subparser.add_argument(
77 "--backend",
78 help="{memory,redis,postgresql}",
79 dest="backend",
80 required=False,
81 default=None,
82 )
83 subparser.add_argument(
84 "--cache-backend",
85 help="{memory,redis,postgresql,memcached}",
86 dest="cache-backend",
87 required=False,
88 default=None,
89 )
90 subparser.add_argument(
91 "--host",
92 help="Host to listen() on.",
93 dest="host",
94 required=False,
95 default="127.0.0.1",
96 )
97 elif command == "migrate":
98 subparser.add_argument(
99 "--dry-run",
100 action="store_true",
101 help="Simulate the migration operations " "and show information",
102 dest="dry_run",
103 required=False,
104 default=False,
105 )
106 elif command == "delete-collection":
107 subparser.add_argument(
108 "--bucket", help="The bucket where the collection " "belongs to.", required=True
109 )
110 subparser.add_argument("--collection", help="The collection to remove.", required=True)
111
112 elif command == "rebuild-quotas":
113 subparser.add_argument(
114 "--dry-run",
115 action="store_true",
116 help="Simulate the rebuild operation " "and show information",
117 dest="dry_run",
118 required=False,
119 default=False,
120 )
121
122 elif command == "start":
123 subparser.add_argument(
124 "--reload",
125 action="store_true",
126 help="Restart when code or config changes",
127 required=False,
128 default=False,
129 )
130 subparser.add_argument(
131 "--port",
132 type=int,
133 help="Listening port number",
134 required=False,
135 default=DEFAULT_PORT,
136 )
137
138 elif command == "create-user":
139 subparser.add_argument(
140 "-u", "--username", help="Superuser username", required=False, default=None
141 )
142 subparser.add_argument(
143 "-p", "--password", help="Superuser password", required=False, default=None
144 )
145
146 # Parse command-line arguments
147 parsed_args = vars(parser.parse_args(args))
148
149 config_file = parsed_args["ini_file"]
150 which_command = parsed_args["which"]
151
152 # Initialize logging from
153 level = parsed_args.get("verbosity") or DEFAULT_LOG_LEVEL
154 logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)
155
156 if which_command == "init":
157 if os.path.exists(config_file):
158 print(f"{config_file} already exists.", file=sys.stderr)
159 return 1
160
161 backend = parsed_args["backend"]
162 cache_backend = parsed_args["cache-backend"]
163 if not backend:
164 while True:
165 prompt = (
166 "Select the backend you would like to use: "
167 "(1 - postgresql, 2 - redis, default - memory) "
168 )
169 answer = input(prompt).strip()
170 try:
171 backends = {"1": "postgresql", "2": "redis", "": "memory"}
172 backend = backends[answer]
173 break
174 except KeyError:
175 pass
176
177 if not cache_backend:
178 while True:
179 prompt = (
180 "Select the cache backend you would like to use: "
181 "(1 - postgresql, 2 - redis, 3 - memcached, default - memory) "
182 )
183 answer = input(prompt).strip()
184 try:
185 cache_backends = {
186 "1": "postgresql",
187 "2": "redis",
188 "3": "memcached",
189 "": "memory",
190 }
191 cache_backend = cache_backends[answer]
192 break
193 except KeyError:
194 pass
195
196 init(config_file, backend, cache_backend, parsed_args["host"])
197
198 # Install postgresql libraries if necessary
199 if backend == "postgresql" or cache_backend == "postgresql":
200 try:
201 import psycopg2 # NOQA
202 except ImportError:
203 subprocess.check_call(
204 [sys.executable, "-m", "pip", "install", "kinto[postgresql]"]
205 )
206 elif backend == "redis" or cache_backend == "redis":
207 try:
208 import kinto_redis # NOQA
209 except ImportError:
210 subprocess.check_call([sys.executable, "-m", "pip", "install", "kinto[redis]"])
211 elif cache_backend == "memcached":
212 try:
213 import memcache # NOQA
214 except ImportError:
215 subprocess.check_call([sys.executable, "-m", "pip", "install", "kinto[memcached]"])
216
217 elif which_command == "migrate":
218 dry_run = parsed_args["dry_run"]
219 env = bootstrap(config_file, options={"command": "migrate"})
220 core_scripts.migrate(env, dry_run=dry_run)
221
222 elif which_command == "delete-collection":
223 env = bootstrap(config_file, options={"command": "delete-collection"})
224 return kinto_scripts.delete_collection(
225 env, parsed_args["bucket"], parsed_args["collection"]
226 )
227
228 elif which_command == "rebuild-quotas":
229 dry_run = parsed_args["dry_run"]
230 env = bootstrap(config_file, options={"command": "rebuild-quotas"})
231 return kinto_scripts.rebuild_quotas(env, dry_run=dry_run)
232
233 elif which_command == "create-user":
234 username = parsed_args["username"]
235 password = parsed_args["password"]
236 env = bootstrap(config_file, options={"command": "create-user"})
237 return accounts_scripts.create_user(env, username=username, password=password)
238
239 elif which_command == "start":
240 pserve_argv = ["pserve"]
241
242 if parsed_args["reload"]:
243 pserve_argv.append("--reload")
244
245 if level == logging.DEBUG:
246 pserve_argv.append("-v")
247
248 if level == logging.CRITICAL:
249 pserve_argv.append("-q")
250
251 pserve_argv.append(config_file)
252 pserve_argv.append(f"http_port={parsed_args['port']}")
253 pserve.main(argv=pserve_argv)
254
255 else:
256 print(__version__)
257
258 return 0
259
[end of kinto/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/__main__.py b/kinto/__main__.py
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -24,12 +24,13 @@
if args is None:
args = sys.argv[1:]
- parser = argparse.ArgumentParser(description="Kinto Command-Line " "Interface")
+ parser = argparse.ArgumentParser(description="Kinto Command-Line Interface")
commands = (
"init",
"start",
"migrate",
"delete-collection",
+ "flush-cache",
"version",
"rebuild-quotas",
"create-user",
@@ -94,26 +95,34 @@
required=False,
default="127.0.0.1",
)
+
elif command == "migrate":
subparser.add_argument(
"--dry-run",
action="store_true",
- help="Simulate the migration operations " "and show information",
+ help="Simulate the migration operations and show information",
dest="dry_run",
required=False,
default=False,
)
+
elif command == "delete-collection":
subparser.add_argument(
- "--bucket", help="The bucket where the collection " "belongs to.", required=True
+ "--bucket",
+ help="The bucket where the collection belongs to.",
+ required=True
+ )
+ subparser.add_argument(
+ "--collection",
+ help="The collection to remove.",
+ required=True
)
- subparser.add_argument("--collection", help="The collection to remove.", required=True)
elif command == "rebuild-quotas":
subparser.add_argument(
"--dry-run",
action="store_true",
- help="Simulate the rebuild operation " "and show information",
+ help="Simulate the rebuild operation and show information",
dest="dry_run",
required=False,
default=False,
@@ -225,6 +234,10 @@
env, parsed_args["bucket"], parsed_args["collection"]
)
+ elif which_command == "flush-cache":
+ env = bootstrap(config_file, options={"command": "flush-cache"})
+ core_scripts.flush_cache(env)
+
elif which_command == "rebuild-quotas":
dry_run = parsed_args["dry_run"]
env = bootstrap(config_file, options={"command": "rebuild-quotas"})
diff --git a/kinto/core/scripts.py b/kinto/core/scripts.py
--- a/kinto/core/scripts.py
+++ b/kinto/core/scripts.py
@@ -25,3 +25,10 @@
logger.error(message)
else:
getattr(registry, backend).initialize_schema(dry_run=dry_run)
+
+
+def flush_cache(env):
+ registry = env["registry"]
+ registry.cache.flush()
+ logger.info(f"Cache has been cleared.")
+ return 0
|
{"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -24,12 +24,13 @@\n if args is None:\n args = sys.argv[1:]\n \n- parser = argparse.ArgumentParser(description=\"Kinto Command-Line \" \"Interface\")\n+ parser = argparse.ArgumentParser(description=\"Kinto Command-Line Interface\")\n commands = (\n \"init\",\n \"start\",\n \"migrate\",\n \"delete-collection\",\n+ \"flush-cache\",\n \"version\",\n \"rebuild-quotas\",\n \"create-user\",\n@@ -94,26 +95,34 @@\n required=False,\n default=\"127.0.0.1\",\n )\n+ \n elif command == \"migrate\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n- help=\"Simulate the migration operations \" \"and show information\",\n+ help=\"Simulate the migration operations and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n )\n+\n elif command == \"delete-collection\":\n subparser.add_argument(\n- \"--bucket\", help=\"The bucket where the collection \" \"belongs to.\", required=True\n+ \"--bucket\",\n+ help=\"The bucket where the collection belongs to.\",\n+ required=True\n+ )\n+ subparser.add_argument(\n+ \"--collection\",\n+ help=\"The collection to remove.\",\n+ required=True\n )\n- subparser.add_argument(\"--collection\", help=\"The collection to remove.\", required=True)\n \n elif command == \"rebuild-quotas\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n- help=\"Simulate the rebuild operation \" \"and show information\",\n+ help=\"Simulate the rebuild operation and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n@@ -225,6 +234,10 @@\n env, parsed_args[\"bucket\"], parsed_args[\"collection\"]\n )\n \n+ elif which_command == \"flush-cache\":\n+ env = bootstrap(config_file, options={\"command\": \"flush-cache\"})\n+ core_scripts.flush_cache(env)\n+\n elif which_command == \"rebuild-quotas\":\n dry_run = parsed_args[\"dry_run\"]\n env = bootstrap(config_file, options={\"command\": \"rebuild-quotas\"})\ndiff --git a/kinto/core/scripts.py b/kinto/core/scripts.py\n--- a/kinto/core/scripts.py\n+++ b/kinto/core/scripts.py\n@@ -25,3 +25,10 @@\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)\n+\n+\n+def flush_cache(env):\n+ registry = env[\"registry\"]\n+ registry.cache.flush()\n+ logger.info(f\"Cache has been cleared.\")\n+ return 0\n", "issue": "Add a clear cache command to kinto\nFor debugging purposes mainly we want to be able to clear the cache on time to time\nAdd a clear cache command to kinto\nFor debugging purposes mainly we want to be able to clear the cache on time to time\nAdd a clear cache command to kinto\nFor debugging purposes mainly we want to be able to clear the cache on time to time\n", "before_files": [{"content": "\"\"\"\nkinto.core.scripts: utilities to build admin scripts for kinto-based services\n\"\"\"\nimport logging\n\nfrom pyramid.settings import asbool\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef migrate(env, dry_run=False):\n \"\"\"\n User-friendly frontend to run database migrations.\n \"\"\"\n registry = env[\"registry\"]\n settings = registry.settings\n readonly_backends = (\"storage\", \"permission\")\n readonly_mode = asbool(settings.get(\"readonly\", False))\n\n for backend in (\"cache\", \"storage\", \"permission\"):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = f\"Cannot migrate the {backend} backend while in readonly mode.\"\n logger.error(message)\n else:\n 
getattr(registry, backend).initialize_schema(dry_run=dry_run)\n", "path": "kinto/core/scripts.py"}, {"content": "import argparse\nimport os\nimport subprocess\nimport sys\nimport logging\nimport logging.config\n\nfrom kinto.core import scripts as core_scripts\nfrom kinto import scripts as kinto_scripts\nfrom kinto.plugins.accounts import scripts as accounts_scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = os.getenv(\"KINTO_INI\", \"config/kinto.ini\")\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line \" \"Interface\")\n commands = (\n \"init\",\n \"start\",\n \"migrate\",\n \"delete-collection\",\n \"version\",\n \"rebuild-quotas\",\n \"create-user\",\n )\n subparsers = parser.add_subparsers(\n title=\"subcommands\",\n description=\"Main Kinto CLI commands\",\n dest=\"subcommand\",\n help=\"Choose and run with --help\",\n )\n subparsers.required = True\n\n for command in commands:\n subparser = subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n subparser.add_argument(\n \"--ini\",\n help=\"Application configuration file\",\n dest=\"ini_file\",\n required=False,\n default=DEFAULT_CONFIG_FILE,\n )\n\n subparser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_const\",\n const=logging.CRITICAL,\n dest=\"verbosity\",\n help=\"Show only critical errors.\",\n )\n\n subparser.add_argument(\n \"-v\",\n \"--debug\",\n action=\"store_const\",\n const=logging.DEBUG,\n dest=\"verbosity\",\n help=\"Show all messages, including debug messages.\",\n )\n\n if command == \"init\":\n subparser.add_argument(\n \"--backend\",\n help=\"{memory,redis,postgresql}\",\n dest=\"backend\",\n required=False,\n default=None,\n )\n subparser.add_argument(\n \"--cache-backend\",\n help=\"{memory,redis,postgresql,memcached}\",\n dest=\"cache-backend\",\n required=False,\n default=None,\n )\n subparser.add_argument(\n \"--host\",\n help=\"Host to listen() on.\",\n dest=\"host\",\n required=False,\n default=\"127.0.0.1\",\n )\n elif command == \"migrate\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Simulate the migration operations \" \"and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n )\n elif command == \"delete-collection\":\n subparser.add_argument(\n \"--bucket\", help=\"The bucket where the collection \" \"belongs to.\", required=True\n )\n subparser.add_argument(\"--collection\", help=\"The collection to remove.\", required=True)\n\n elif command == \"rebuild-quotas\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Simulate the rebuild operation \" \"and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n )\n\n elif command == \"start\":\n subparser.add_argument(\n \"--reload\",\n action=\"store_true\",\n help=\"Restart when code or config changes\",\n required=False,\n default=False,\n )\n subparser.add_argument(\n \"--port\",\n type=int,\n help=\"Listening port number\",\n required=False,\n default=DEFAULT_PORT,\n )\n\n elif command == \"create-user\":\n subparser.add_argument(\n \"-u\", \"--username\", help=\"Superuser username\", required=False, default=None\n )\n subparser.add_argument(\n \"-p\", \"--password\", 
help=\"Superuser password\", required=False, default=None\n )\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args[\"ini_file\"]\n which_command = parsed_args[\"which\"]\n\n # Initialize logging from\n level = parsed_args.get(\"verbosity\") or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == \"init\":\n if os.path.exists(config_file):\n print(f\"{config_file} already exists.\", file=sys.stderr)\n return 1\n\n backend = parsed_args[\"backend\"]\n cache_backend = parsed_args[\"cache-backend\"]\n if not backend:\n while True:\n prompt = (\n \"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \"\n )\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n if not cache_backend:\n while True:\n prompt = (\n \"Select the cache backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, 3 - memcached, default - memory) \"\n )\n answer = input(prompt).strip()\n try:\n cache_backends = {\n \"1\": \"postgresql\",\n \"2\": \"redis\",\n \"3\": \"memcached\",\n \"\": \"memory\",\n }\n cache_backend = cache_backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend, cache_backend, parsed_args[\"host\"])\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\" or cache_backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"kinto[postgresql]\"]\n )\n elif backend == \"redis\" or cache_backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"kinto[redis]\"])\n elif cache_backend == \"memcached\":\n try:\n import memcache # NOQA\n except ImportError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"kinto[memcached]\"])\n\n elif which_command == \"migrate\":\n dry_run = parsed_args[\"dry_run\"]\n env = bootstrap(config_file, options={\"command\": \"migrate\"})\n core_scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == \"delete-collection\":\n env = bootstrap(config_file, options={\"command\": \"delete-collection\"})\n return kinto_scripts.delete_collection(\n env, parsed_args[\"bucket\"], parsed_args[\"collection\"]\n )\n\n elif which_command == \"rebuild-quotas\":\n dry_run = parsed_args[\"dry_run\"]\n env = bootstrap(config_file, options={\"command\": \"rebuild-quotas\"})\n return kinto_scripts.rebuild_quotas(env, dry_run=dry_run)\n\n elif which_command == \"create-user\":\n username = parsed_args[\"username\"]\n password = parsed_args[\"password\"]\n env = bootstrap(config_file, options={\"command\": \"create-user\"})\n return accounts_scripts.create_user(env, username=username, password=password)\n\n elif which_command == \"start\":\n pserve_argv = [\"pserve\"]\n\n if parsed_args[\"reload\"]:\n pserve_argv.append(\"--reload\")\n\n if level == logging.DEBUG:\n pserve_argv.append(\"-v\")\n\n if level == logging.CRITICAL:\n pserve_argv.append(\"-q\")\n\n pserve_argv.append(config_file)\n pserve_argv.append(f\"http_port={parsed_args['port']}\")\n pserve.main(argv=pserve_argv)\n\n else:\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}]}
| 3,227 | 636 |
gh_patches_debug_9038
|
rasdani/github-patches
|
git_diff
|
beeware__toga-2384
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
iOS Camera take_photo
### Describe the bug
When using the documented:
```python3
if await app.camera.request_permission():
photo = await app.camera.take_photo()
```
### Steps to reproduce
1. request permission to use camera
2. await the take_photo function.
### Expected behavior
Return a toga.Image or None as documented here https://toga.readthedocs.io/en/stable/reference/api/hardware/camera.html#toga.hardware.camera.Camera.take_photo
### Screenshots
Successfully get permission to use camera: <img width="378" alt="Screenshot 2024-02-08 at 22 14 03" src="https://github.com/beeware/toga/assets/25908768/5867a18e-b5c5-4141-bd72-a4b737c0fbd1">
No camera screen comes up.
### Environment
- Operating System: iOS 16.4 & 17.2 (built and development from Macbook Air M1 macOS 14.3.1)
- Python version: 3.10.2
- Software versions:
- Briefcase: 0.3.17
- Toga: 4.0.2
### Logs
Simulator Log
```
Error in async handler: 'NoneType' object has no attribute 'result'
Traceback (most recent call last):
File "/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga/handlers.py", line 38, in handler_with_cleanup
result = await handler(interface, *args, **kwargs)
File "/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app/healthapp/app.py", line 71, in analyse_gait_handler
photo = await self.app.camera.take_photo()
File "/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga/hardware/camera.py", line 122, in take_photo
self._impl.take_photo(photo, device=device, flash=flash)
File "/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga_iOS/hardware/camera.py", line 159, in take_photo
self.native.delegate.result = result
AttributeError: 'NoneType' object has no attribute 'result'
```
Physical iOS device testing log:
```
2024-02-08 22:27:03.097595+0000 Health App[2216:513956] [TraitCollection] Class CKBrowserSwitcherViewController overrides the -traitCollection getter, which is not supported. If you're trying to override traits, you must use the appropriate API.
2024-02-08 22:27:03.179852+0000 Health App[2216:513956] Error in async handler: 'NoneType' object has no attribute 'result'
2024-02-08 22:27:03.183351+0000 Health App[2216:513956] Traceback (most recent call last):
2024-02-08 22:27:03.183705+0000 Health App[2216:513956] File "/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga/handlers.py", line 38, in handler_with_cleanup
2024-02-08 22:27:03.183927+0000 Health App[2216:513956] result = await handler(interface, *args, **kwargs)
2024-02-08 22:27:03.184161+0000 Health App[2216:513956] File "/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app/healthapp/app.py", line 71, in analyse_gait_handler
2024-02-08 22:27:03.184361+0000 Health App[2216:513956] photo = await self.app.camera.take_photo()
2024-02-08 22:27:03.184604+0000 Health App[2216:513956] File "/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga/hardware/camera.py", line 122, in take_photo
2024-02-08 22:27:03.184827+0000 Health App[2216:513956] self._impl.take_photo(photo, device=device, flash=flash)
2024-02-08 22:27:03.185124+0000 Health App[2216:513956] File "/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga_iOS/hardware/camera.py", line 159, in take_photo
2024-02-08 22:27:03.185537+0000 Health App[2216:513956] self.native.delegate.result = result
2024-02-08 22:27:03.185785+0000 Health App[2216:513956] AttributeError: 'NoneType' object has no attribute 'result'
```
### Additional context
Fully works on Android; I believe the same problem occurs on macOS.
</issue>
<code>
[start of iOS/src/toga_iOS/hardware/camera.py]
1 import warnings
2
3 from rubicon.objc import Block, NSObject, objc_method
4
5 import toga
6 from toga.constants import FlashMode
7
8 # for classes that need to be monkeypatched for testing
9 from toga_iOS import libs as iOS
10 from toga_iOS.libs import (
11 AVAuthorizationStatus,
12 AVMediaTypeVideo,
13 NSBundle,
14 UIImagePickerControllerCameraCaptureMode,
15 UIImagePickerControllerCameraDevice,
16 UIImagePickerControllerCameraFlashMode,
17 UIImagePickerControllerSourceTypeCamera,
18 )
19
20
21 class CameraDevice:
22 def __init__(self, id, name, native):
23 self._id = id
24 self._name = name
25 self.native = native
26
27 def id(self):
28 return self._id
29
30 def name(self):
31 return self._name
32
33 def has_flash(self):
34 return iOS.UIImagePickerController.isFlashAvailableForCameraDevice(self.native)
35
36
37 def native_flash_mode(flash):
38 return {
39 FlashMode.ON: UIImagePickerControllerCameraFlashMode.On,
40 FlashMode.OFF: UIImagePickerControllerCameraFlashMode.Off,
41 }.get(flash, UIImagePickerControllerCameraFlashMode.Auto)
42
43
44 # def native_video_quality(quality):
45 # return {
46 # VideoQuality.HIGH: UIImagePickerControllerQualityType.High,
47 # VideoQuality.LOW: UIImagePickerControllerQualityType.Low,
48 # }.get(quality, UIImagePickerControllerQualityType.Medium)
49
50
51 class TogaImagePickerDelegate(NSObject):
52 @objc_method
53 def imagePickerController_didFinishPickingMediaWithInfo_(
54 self, picker, info
55 ) -> None:
56 picker.dismissViewControllerAnimated(True, completion=None)
57
58 image = toga.Image(info["UIImagePickerControllerOriginalImage"])
59 self.result.set_result(image)
60
61 @objc_method
62 def imagePickerControllerDidCancel_(self, picker) -> None:
63 picker.dismissViewControllerAnimated(True, completion=None)
64 self.result.set_result(None)
65
66
67 class Camera:
68 def __init__(self, interface):
69 self.interface = interface
70
71 if NSBundle.mainBundle.objectForInfoDictionaryKey("NSCameraUsageDescription"):
72 if iOS.UIImagePickerController.isSourceTypeAvailable(
73 UIImagePickerControllerSourceTypeCamera
74 ):
75 self.native = iOS.UIImagePickerController.new()
76 self.native.sourceType = UIImagePickerControllerSourceTypeCamera
77 self.native.delegate = TogaImagePickerDelegate.new()
78 else:
79 self.native = None
80 else: # pragma: no cover
81 # The app doesn't have the NSCameraUsageDescription key (e.g., via
82 # `permission.camera` in Briefcase). No-cover because we can't manufacture
83 # this condition in testing.
84 raise RuntimeError(
85 "Application metadata does not declare that the app will use the camera."
86 )
87
88 def has_permission(self, allow_unknown=False):
89 if allow_unknown:
90 valid_values = {
91 AVAuthorizationStatus.Authorized.value,
92 AVAuthorizationStatus.NotDetermined.value,
93 }
94 else:
95 valid_values = {AVAuthorizationStatus.Authorized.value}
96
97 return (
98 iOS.AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeVideo)
99 in valid_values
100 )
101
102 def request_permission(self, future):
103 # This block is invoked when the permission is granted; however, permission is
104 # granted from a different (inaccessible) thread, so it isn't picked up by
105 # coverage.
106 def permission_complete(result) -> None:
107 future.set_result(result)
108
109 iOS.AVCaptureDevice.requestAccessForMediaType(
110 AVMediaTypeVideo, completionHandler=Block(permission_complete, None, bool)
111 )
112
113 def get_devices(self):
114 return (
115 [
116 CameraDevice(
117 id="Rear",
118 name="Rear",
119 native=UIImagePickerControllerCameraDevice.Rear,
120 )
121 ]
122 if iOS.UIImagePickerController.isCameraDeviceAvailable(
123 UIImagePickerControllerCameraDevice.Rear
124 )
125 else []
126 ) + (
127 [
128 CameraDevice(
129 id="Front",
130 name="Front",
131 native=UIImagePickerControllerCameraDevice.Front,
132 )
133 ]
134 if iOS.UIImagePickerController.isCameraDeviceAvailable(
135 UIImagePickerControllerCameraDevice.Front
136 )
137 else []
138 )
139
140 def take_photo(self, result, device, flash):
141 if self.native is None:
142 warnings.warn("No camera is available")
143 result.set_result(None)
144 elif self.has_permission(allow_unknown=True):
145 # Configure the controller to take a photo
146 self.native.cameraCaptureMode = (
147 UIImagePickerControllerCameraCaptureMode.Photo
148 )
149
150 self.native.showsCameraControls = True
151 self.native.cameraDevice = (
152 device._impl.native
153 if device
154 else UIImagePickerControllerCameraDevice.Rear
155 )
156 self.native.cameraFlashMode = native_flash_mode(flash)
157
158 # Attach the result to the delegate
159 self.native.delegate.result = result
160
161 # Show the pane
162 toga.App.app.current_window._impl.native.rootViewController.presentViewController(
163 self.native, animated=True, completion=None
164 )
165 else:
166 raise PermissionError("App does not have permission to take photos")
167
[end of iOS/src/toga_iOS/hardware/camera.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/iOS/src/toga_iOS/hardware/camera.py b/iOS/src/toga_iOS/hardware/camera.py
--- a/iOS/src/toga_iOS/hardware/camera.py
+++ b/iOS/src/toga_iOS/hardware/camera.py
@@ -74,7 +74,8 @@
):
self.native = iOS.UIImagePickerController.new()
self.native.sourceType = UIImagePickerControllerSourceTypeCamera
- self.native.delegate = TogaImagePickerDelegate.new()
+ self.delegate_link = TogaImagePickerDelegate.new()
+ self.native.delegate = self.delegate_link
else:
self.native = None
else: # pragma: no cover
|
{"golden_diff": "diff --git a/iOS/src/toga_iOS/hardware/camera.py b/iOS/src/toga_iOS/hardware/camera.py\n--- a/iOS/src/toga_iOS/hardware/camera.py\n+++ b/iOS/src/toga_iOS/hardware/camera.py\n@@ -74,7 +74,8 @@\n ):\n self.native = iOS.UIImagePickerController.new()\n self.native.sourceType = UIImagePickerControllerSourceTypeCamera\n- self.native.delegate = TogaImagePickerDelegate.new()\n+ self.delegate_link = TogaImagePickerDelegate.new()\n+ self.native.delegate = self.delegate_link\n else:\n self.native = None\n else: # pragma: no cover\n", "issue": "iOS Camera take_photo\n### Describe the bug\r\n\r\nWhen using the documented:\r\n\r\n```python3\r\nif await app.camera.request_permission():\r\n photo = await app.camera.take_photo()\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. request permission to use camera\r\n2. await the take_photo function.\r\n\r\n\r\n### Expected behavior\r\n\r\nReturn a toga.Image or None as documented here https://toga.readthedocs.io/en/stable/reference/api/hardware/camera.html#toga.hardware.camera.Camera.take_photo\r\n\r\n### Screenshots\r\n\r\nSuccessfully get permission to use camera: <img width=\"378\" alt=\"Screenshot 2024-02-08 at 22 14 03\" src=\"https://github.com/beeware/toga/assets/25908768/5867a18e-b5c5-4141-bd72-a4b737c0fbd1\">\r\n\r\nNo camera screen comes up.\r\n\r\n\r\n### Environment\r\n\r\n- Operating System: iOS 16.4 & 17.2 (built and development from Macbook Air M1 macOS 14.3.1)\r\n- Python version: 3.10.2\r\n- Software versions:\r\n - Briefcase: 0.3.17\r\n - Toga: 4.0.2\r\n\r\n\r\n### Logs\r\n\r\nSimulator Log\r\n```\r\nError in async handler: 'NoneType' object has no attribute 'result'\r\nTraceback (most recent call last):\r\n File \"/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga/handlers.py\", line 38, in handler_with_cleanup\r\n result = await handler(interface, *args, **kwargs)\r\n File \"/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app/healthapp/app.py\", line 71, in analyse_gait_handler\r\n photo = await self.app.camera.take_photo()\r\n File \"/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga/hardware/camera.py\", line 122, in take_photo\r\n self._impl.take_photo(photo, device=device, flash=flash)\r\n File \"/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga_iOS/hardware/camera.py\", line 159, in take_photo\r\n self.native.delegate.result = result\r\nAttributeError: 'NoneType' object has no attribute 'result'\r\n```\r\n\r\n\r\nPhysical iOS device testing log:\r\n```\r\n2024-02-08 22:27:03.097595+0000 Health App[2216:513956] [TraitCollection] Class CKBrowserSwitcherViewController overrides the -traitCollection getter, which is not supported. 
If you're trying to override traits, you must use the appropriate API.\r\n2024-02-08 22:27:03.179852+0000 Health App[2216:513956] Error in async handler: 'NoneType' object has no attribute 'result'\r\n\r\n2024-02-08 22:27:03.183351+0000 Health App[2216:513956] Traceback (most recent call last):\r\n\r\n2024-02-08 22:27:03.183705+0000 Health App[2216:513956] File \"/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga/handlers.py\", line 38, in handler_with_cleanup\r\n\r\n2024-02-08 22:27:03.183927+0000 Health App[2216:513956] result = await handler(interface, *args, **kwargs)\r\n\r\n2024-02-08 22:27:03.184161+0000 Health App[2216:513956] File \"/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app/healthapp/app.py\", line 71, in analyse_gait_handler\r\n\r\n2024-02-08 22:27:03.184361+0000 Health App[2216:513956] photo = await self.app.camera.take_photo()\r\n\r\n2024-02-08 22:27:03.184604+0000 Health App[2216:513956] File \"/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga/hardware/camera.py\", line 122, in take_photo\r\n\r\n2024-02-08 22:27:03.184827+0000 Health App[2216:513956] self._impl.take_photo(photo, device=device, flash=flash)\r\n\r\n2024-02-08 22:27:03.185124+0000 Health App[2216:513956] File \"/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga_iOS/hardware/camera.py\", line 159, in take_photo\r\n\r\n2024-02-08 22:27:03.185537+0000 Health App[2216:513956] self.native.delegate.result = result\r\n\r\n2024-02-08 22:27:03.185785+0000 Health App[2216:513956] AttributeError: 'NoneType' object has no attribute 'result'\r\n```\r\n\r\n### Additional context\r\n\r\nFully works on android, same problem on macOS I believe.\n", "before_files": [{"content": "import warnings\n\nfrom rubicon.objc import Block, NSObject, objc_method\n\nimport toga\nfrom toga.constants import FlashMode\n\n# for classes that need to be monkeypatched for testing\nfrom toga_iOS import libs as iOS\nfrom toga_iOS.libs import (\n AVAuthorizationStatus,\n AVMediaTypeVideo,\n NSBundle,\n UIImagePickerControllerCameraCaptureMode,\n UIImagePickerControllerCameraDevice,\n UIImagePickerControllerCameraFlashMode,\n UIImagePickerControllerSourceTypeCamera,\n)\n\n\nclass CameraDevice:\n def __init__(self, id, name, native):\n self._id = id\n self._name = name\n self.native = native\n\n def id(self):\n return self._id\n\n def name(self):\n return self._name\n\n def has_flash(self):\n return iOS.UIImagePickerController.isFlashAvailableForCameraDevice(self.native)\n\n\ndef native_flash_mode(flash):\n return {\n FlashMode.ON: UIImagePickerControllerCameraFlashMode.On,\n FlashMode.OFF: UIImagePickerControllerCameraFlashMode.Off,\n }.get(flash, UIImagePickerControllerCameraFlashMode.Auto)\n\n\n# def native_video_quality(quality):\n# return {\n# VideoQuality.HIGH: UIImagePickerControllerQualityType.High,\n# VideoQuality.LOW: UIImagePickerControllerQualityType.Low,\n# }.get(quality, UIImagePickerControllerQualityType.Medium)\n\n\nclass TogaImagePickerDelegate(NSObject):\n @objc_method\n def imagePickerController_didFinishPickingMediaWithInfo_(\n self, picker, info\n ) -> None:\n picker.dismissViewControllerAnimated(True, completion=None)\n\n image = toga.Image(info[\"UIImagePickerControllerOriginalImage\"])\n self.result.set_result(image)\n\n @objc_method\n def imagePickerControllerDidCancel_(self, picker) -> 
None:\n picker.dismissViewControllerAnimated(True, completion=None)\n self.result.set_result(None)\n\n\nclass Camera:\n def __init__(self, interface):\n self.interface = interface\n\n if NSBundle.mainBundle.objectForInfoDictionaryKey(\"NSCameraUsageDescription\"):\n if iOS.UIImagePickerController.isSourceTypeAvailable(\n UIImagePickerControllerSourceTypeCamera\n ):\n self.native = iOS.UIImagePickerController.new()\n self.native.sourceType = UIImagePickerControllerSourceTypeCamera\n self.native.delegate = TogaImagePickerDelegate.new()\n else:\n self.native = None\n else: # pragma: no cover\n # The app doesn't have the NSCameraUsageDescription key (e.g., via\n # `permission.camera` in Briefcase). No-cover because we can't manufacture\n # this condition in testing.\n raise RuntimeError(\n \"Application metadata does not declare that the app will use the camera.\"\n )\n\n def has_permission(self, allow_unknown=False):\n if allow_unknown:\n valid_values = {\n AVAuthorizationStatus.Authorized.value,\n AVAuthorizationStatus.NotDetermined.value,\n }\n else:\n valid_values = {AVAuthorizationStatus.Authorized.value}\n\n return (\n iOS.AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeVideo)\n in valid_values\n )\n\n def request_permission(self, future):\n # This block is invoked when the permission is granted; however, permission is\n # granted from a different (inaccessible) thread, so it isn't picked up by\n # coverage.\n def permission_complete(result) -> None:\n future.set_result(result)\n\n iOS.AVCaptureDevice.requestAccessForMediaType(\n AVMediaTypeVideo, completionHandler=Block(permission_complete, None, bool)\n )\n\n def get_devices(self):\n return (\n [\n CameraDevice(\n id=\"Rear\",\n name=\"Rear\",\n native=UIImagePickerControllerCameraDevice.Rear,\n )\n ]\n if iOS.UIImagePickerController.isCameraDeviceAvailable(\n UIImagePickerControllerCameraDevice.Rear\n )\n else []\n ) + (\n [\n CameraDevice(\n id=\"Front\",\n name=\"Front\",\n native=UIImagePickerControllerCameraDevice.Front,\n )\n ]\n if iOS.UIImagePickerController.isCameraDeviceAvailable(\n UIImagePickerControllerCameraDevice.Front\n )\n else []\n )\n\n def take_photo(self, result, device, flash):\n if self.native is None:\n warnings.warn(\"No camera is available\")\n result.set_result(None)\n elif self.has_permission(allow_unknown=True):\n # Configure the controller to take a photo\n self.native.cameraCaptureMode = (\n UIImagePickerControllerCameraCaptureMode.Photo\n )\n\n self.native.showsCameraControls = True\n self.native.cameraDevice = (\n device._impl.native\n if device\n else UIImagePickerControllerCameraDevice.Rear\n )\n self.native.cameraFlashMode = native_flash_mode(flash)\n\n # Attach the result to the delegate\n self.native.delegate.result = result\n\n # Show the pane\n toga.App.app.current_window._impl.native.rootViewController.presentViewController(\n self.native, animated=True, completion=None\n )\n else:\n raise PermissionError(\"App does not have permission to take photos\")\n", "path": "iOS/src/toga_iOS/hardware/camera.py"}]}
| 3,737 | 149 |
gh_patches_debug_26215
|
rasdani/github-patches
|
git_diff
|
hedyorg__hedy-1769
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[UI idea] Improve pre-join class UI
**Idea incl level**
Currently the pre-join page of a class is nothing more than plain-text with a link. It would be nice to improve the UI a bit to better align with the rest of the Hedy website. See the screenshot below for the current situation:

</issue>
<code>
[start of website/teacher.py]
1 import json
2
3 from website.auth import requires_login, is_teacher, current_user
4 import utils
5 import uuid
6 from flask import g, request, jsonify, redirect
7 from flask_helpers import render_template
8 import os
9 import hedyweb
10 import hedy_content
11 TRANSLATIONS = hedyweb.Translations ()
12 from config import config
13 cookie_name = config ['session'] ['cookie_name']
14
15
16 def routes (app, database, achievements):
17 global DATABASE
18 global ACHIEVEMENTS
19 DATABASE = database
20 ACHIEVEMENTS = achievements
21
22 @app.route('/classes', methods=['GET'])
23 @requires_login
24 def get_classes (user):
25 if not is_teacher(user):
26 return utils.error_page_403(error=403, ui_message='retrieve_class')
27 return jsonify (DATABASE.get_teacher_classes (user ['username'], True))
28
29 @app.route('/for-teachers/class/<class_id>', methods=['GET'])
30 @requires_login
31 def get_class (user, class_id):
32 app.logger.info('This is info output')
33 if not is_teacher(user):
34 return utils.error_page_403(error=403, ui_message='retrieve_class')
35 Class = DATABASE.get_class (class_id)
36 if not Class or Class ['teacher'] != user ['username']:
37 return utils.error_page(error=404, ui_message='no_such_class')
38 students = []
39 for student_username in Class.get ('students', []):
40 student = DATABASE.user_by_username (student_username)
41 programs = DATABASE.programs_for_user(student_username)
42 highest_level = max(program['level'] for program in programs) if len(programs) else 0
43 sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))
44 if sorted_public_programs:
45 latest_shared = sorted_public_programs[-1]
46 latest_shared['link'] = f"/hedy/{latest_shared['id']}/view"
47 else:
48 latest_shared = None
49 students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})
50
51 if utils.is_testing_request (request):
52 return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
53
54 achievement = None
55 if len(students) > 20:
56 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "full_house")
57 if achievement:
58 achievement = json.dumps(achievement)
59
60 teachers = os.getenv('BETA_TEACHERS', '').split(',')
61 is_beta_teacher = user['username'] in teachers
62
63 return render_template ('class-overview.html', current_page='my-profile',
64 page_title=hedyweb.get_page_title('class overview'),
65 achievement=achievement,
66 is_beta_teacher=is_beta_teacher,
67 class_info={'students': students, 'link': os.getenv('BASE_URL') + '/hedy/l/' + Class ['link'],
68 'name': Class ['name'], 'id': Class ['id']})
69
70 @app.route('/class', methods=['POST'])
71 @requires_login
72 def create_class (user):
73 if not is_teacher(user):
74 return 'Only teachers can create classes', 403
75
76 body = request.json
77 # Validations
78 if not isinstance(body, dict):
79 return 'body must be an object', 400
80 if not isinstance(body.get('name'), str):
81 return 'name must be a string', 400
82
83 # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate
84 Classes = DATABASE.get_teacher_classes(user['username'], True)
85 for Class in Classes:
86 if Class['name'] == body['name']:
87 return "duplicate", 200
88
89 Class = {
90 'id': uuid.uuid4().hex,
91 'date': utils.timems (),
92 'teacher': user ['username'],
93 'link': utils.random_id_generator (7),
94 'name': body ['name']
95 }
96
97 DATABASE.store_class (Class)
98 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "ready_set_education")
99 if achievement:
100 return {'id': Class['id'], 'achievement': achievement}, 200
101 return {'id': Class['id']}, 200
102
103 @app.route('/class/<class_id>', methods=['PUT'])
104 @requires_login
105 def update_class (user, class_id):
106 if not is_teacher(user):
107 return 'Only teachers can update classes', 403
108
109 body = request.json
110 # Validations
111 if not isinstance(body, dict):
112 return 'body must be an object', 400
113 if not isinstance(body.get('name'), str):
114 return 'name must be a string', 400
115
116 Class = DATABASE.get_class (class_id)
117 if not Class or Class ['teacher'] != user ['username']:
118 return 'No such class', 404
119
120 # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate
121 Classes = DATABASE.get_teacher_classes(user ['username'], True)
122 for Class in Classes:
123 if Class['name'] == body['name']:
124 return "duplicate", 200
125
126 Class = DATABASE.update_class (class_id, body ['name'])
127 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "on_second_thoughts")
128 if achievement:
129 return {'achievement': achievement}, 200
130 return {}, 200
131
132 @app.route('/class/<class_id>', methods=['DELETE'])
133 @requires_login
134 def delete_class (user, class_id):
135 Class = DATABASE.get_class (class_id)
136 if not Class or Class ['teacher'] != user ['username']:
137 return 'No such class', 404
138
139 DATABASE.delete_class (Class)
140 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "end_of_semester")
141 if achievement:
142 return {'achievement': achievement}, 200
143 return {}, 200
144
145 @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])
146 def prejoin_class (class_id, link):
147 Class = DATABASE.get_class (class_id)
148 if not Class or Class ['link'] != link:
149 return utils.error_page(error=404, ui_message='invalid_class_link')
150 user = {}
151 if request.cookies.get (cookie_name):
152 token = DATABASE.get_token(request.cookies.get (cookie_name))
153 if token:
154 if token ['username'] in Class.get ('students', []):
155 return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'),
156 current_page='my-profile', class_info={'name': Class ['name']})
157 user = DATABASE.user_by_username(token ['username'])
158
159 return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'),
160 current_page='my-profile',
161 class_info={
162 'id': Class ['id'],
163 'name': Class ['name'],
164 })
165
166 @app.route('/class/join', methods=['POST'])
167 @requires_login
168 def join_class(user):
169 body = request.json
170 if 'id' in body:
171 Class = DATABASE.get_class(body['id'])
172 if not Class or Class ['id'] != body['id']:
173 return utils.error_page(error=404, ui_message='invalid_class_link')
174
175 DATABASE.add_student_to_class(Class['id'], user['username'])
176 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "epic_education")
177 if achievement:
178 return {'achievement': achievement}, 200
179 return {}, 200
180
181 @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])
182 @requires_login
183 def leave_class (user, class_id, student_id):
184 Class = DATABASE.get_class (class_id)
185 if not Class or Class ['teacher'] != user ['username'] or student_id != user ['username']:
186 return 'No such class', 404
187
188 DATABASE.remove_student_from_class (Class ['id'], student_id)
189 if Class['teacher'] == user['username']:
190 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "detention")
191 if achievement:
192 return {'achievement': achievement}, 200
193 return {}, 200
194
195 @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])
196 @requires_login
197 def get_class_info(user, class_id):
198 if not is_teacher(user):
199 return utils.error_page_403(error=403, ui_message='retrieve_class')
200 Class = DATABASE.get_class(class_id)
201 if not Class or Class['teacher'] != user['username']:
202 return utils.error_page(error=404, ui_message='no_such_class')
203
204 if hedy_content.Adventures(g.lang).has_adventures():
205 adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()
206 else:
207 adventures = hedy_content.Adventures("en").get_adventure_keyname_name_levels()
208 levels = hedy_content.LevelDefaults(g.lang).levels
209 preferences = DATABASE.get_customizations_class(class_id)
210
211 return render_template('customize-class.html', page_title=hedyweb.get_page_title('customize class'),
212 class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,
213 adventures=adventures, preferences=preferences, current_page='my-profile')
214
215 @app.route('/customize-class/<class_id>', methods=['PUT'])
216 @requires_login
217 def update_level_preferences(user, class_id):
218 if not is_teacher(user):
219 return 'Only teachers can update class preferences', 403
220
221 body = request.json
222 print(body)
223 # Validations
224 if not isinstance(body, dict):
225 return 'body must be an object', 400
226 if not isinstance(body.get('example_programs'), bool):
227 return 'amount of example programs must be an integer', 400
228 if not isinstance(body.get('hide_level'), bool):
229 return 'level switch must be a boolean', 400
230 if not isinstance(body.get('hide_prev_level'), bool):
231 return 'level switch must be a boolean', 400
232 if not isinstance(body.get('hide_next_level'), bool):
233 return 'level switch must be a boolean', 400
234 if not isinstance(int(body.get('level')), int):
235 return 'level must ben an integer', 400
236
237 Class = DATABASE.get_class(class_id)
238 if not Class or Class['teacher'] != user['username']:
239 return 'No such class', 404
240
241 customizations = {}
242 customizations['id'] = class_id
243 customizations['level'] = int(body.get('level'))
244 customizations['adventures'] = body.get('adventures')
245 customizations['example_programs'] = body.get('example_programs')
246 customizations['hide'] = body.get('hide_level')
247 customizations['hide_prev_level'] = body.get('hide_prev_level')
248 customizations['hide_next_level'] = body.get('hide_next_level')
249
250 DATABASE.update_customizations_class(customizations)
251 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "my_class_my_rules")
252 if achievement:
253 return {'achievement': achievement}, 200
254 return {}, 200
255
256 @app.route('/hedy/l/<link_id>', methods=['GET'])
257 def resolve_class_link (link_id):
258 Class = DATABASE.resolve_class_link (link_id)
259 if not Class:
260 return utils.error_page(error=404, ui_message='invalid_class_link')
261 return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)
262
[end of website/teacher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/teacher.py b/website/teacher.py
--- a/website/teacher.py
+++ b/website/teacher.py
@@ -152,16 +152,18 @@
token = DATABASE.get_token(request.cookies.get (cookie_name))
if token:
if token ['username'] in Class.get ('students', []):
- return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'),
+ return render_template ('class-prejoin.html', joined=True,
+ page_title=hedyweb.get_page_title('join class'),
current_page='my-profile', class_info={'name': Class ['name']})
user = DATABASE.user_by_username(token ['username'])
- return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'),
- current_page='my-profile',
- class_info={
- 'id': Class ['id'],
- 'name': Class ['name'],
- })
+ return render_template ('class-prejoin.html', joined=False,
+ page_title=hedyweb.get_page_title('join class'),
+ current_page='my-profile',
+ class_info={
+ 'id': Class ['id'],
+ 'name': Class ['name'],
+ })
@app.route('/class/join', methods=['POST'])
@requires_login
|
{"golden_diff": "diff --git a/website/teacher.py b/website/teacher.py\n--- a/website/teacher.py\n+++ b/website/teacher.py\n@@ -152,16 +152,18 @@\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n- return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'),\n+ return render_template ('class-prejoin.html', joined=True,\n+ page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n \n- return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'),\n- current_page='my-profile',\n- class_info={\n- 'id': Class ['id'],\n- 'name': Class ['name'],\n- })\n+ return render_template ('class-prejoin.html', joined=False,\n+ page_title=hedyweb.get_page_title('join class'),\n+ current_page='my-profile',\n+ class_info={\n+ 'id': Class ['id'],\n+ 'name': Class ['name'],\n+ })\n \n @app.route('/class/join', methods=['POST'])\n @requires_login\n", "issue": "[UI idea] Improve pre-join class UI\n**Idea incl level**\r\nCurrently the pre-join page of a class is nothing more than plain-text with a link. It would be nice to improve the UI a bit to better align with the rest of the Hedy website. See the screenshot below for the current situation:\r\n\r\n\n", "before_files": [{"content": "import json\n\nfrom website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nimport hedy_content\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\n\ndef routes (app, database, achievements):\n global DATABASE\n global ACHIEVEMENTS\n DATABASE = database\n ACHIEVEMENTS = achievements\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/for-teachers/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n app.logger.info('This is info output')\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = f\"/hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class 
['name'], 'id': Class ['id']})\n\n achievement = None\n if len(students) > 20:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"full_house\")\n if achievement:\n achievement = json.dumps(achievement)\n\n teachers = os.getenv('BETA_TEACHERS', '').split(',')\n is_beta_teacher = user['username'] in teachers\n\n return render_template ('class-overview.html', current_page='my-profile',\n page_title=hedyweb.get_page_title('class overview'),\n achievement=achievement,\n is_beta_teacher=is_beta_teacher,\n class_info={'students': students, 'link': os.getenv('BASE_URL') + '/hedy/l/' + Class ['link'],\n 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"ready_set_education\")\n if achievement:\n return {'id': Class['id'], 'achievement': achievement}, 200\n return {'id': Class['id']}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user ['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = DATABASE.update_class (class_id, body ['name'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"on_second_thoughts\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"end_of_semester\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get 
('students', []):\n return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile',\n class_info={\n 'id': Class ['id'],\n 'name': Class ['name'],\n })\n\n @app.route('/class/join', methods=['POST'])\n @requires_login\n def join_class(user):\n body = request.json\n if 'id' in body:\n Class = DATABASE.get_class(body['id'])\n if not Class or Class ['id'] != body['id']:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n\n DATABASE.add_student_to_class(Class['id'], user['username'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"epic_education\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username'] or student_id != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n if Class['teacher'] == user['username']:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"detention\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])\n @requires_login\n def get_class_info(user, class_id):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n\n if hedy_content.Adventures(g.lang).has_adventures():\n adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()\n else:\n adventures = hedy_content.Adventures(\"en\").get_adventure_keyname_name_levels()\n levels = hedy_content.LevelDefaults(g.lang).levels\n preferences = DATABASE.get_customizations_class(class_id)\n\n return render_template('customize-class.html', page_title=hedyweb.get_page_title('customize class'),\n class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,\n adventures=adventures, preferences=preferences, current_page='my-profile')\n\n @app.route('/customize-class/<class_id>', methods=['PUT'])\n @requires_login\n def update_level_preferences(user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update class preferences', 403\n\n body = request.json\n print(body)\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('example_programs'), bool):\n return 'amount of example programs must be an integer', 400\n if not isinstance(body.get('hide_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_prev_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_next_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(int(body.get('level')), int):\n return 'level must ben an integer', 400\n\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return 'No such class', 404\n\n customizations = {}\n customizations['id'] 
= class_id\n customizations['level'] = int(body.get('level'))\n customizations['adventures'] = body.get('adventures')\n customizations['example_programs'] = body.get('example_programs')\n customizations['hide'] = body.get('hide_level')\n customizations['hide_prev_level'] = body.get('hide_prev_level')\n customizations['hide_next_level'] = body.get('hide_next_level')\n\n DATABASE.update_customizations_class(customizations)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"my_class_my_rules\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}]}
| 4,050 | 299 |
gh_patches_debug_5862
|
rasdani/github-patches
|
git_diff
|
pydantic__pydantic-3177
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"extra" config not respected for TypedDicts in functions
### Checks
* [x] I added a descriptive title to this issue
* [x] I have searched (google, github) for similar issues and couldn't find anything
* [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug
<!-- Sorry to sound so draconian, but every second saved replying to issues is time spend improving pydantic :-) -->
# Bug
Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.8.2
pydantic compiled: False
install path: /home/msharma216/.local/lib/python3.8/site-packages/pydantic
python version: 3.8.0 (default, Feb 25 2021, 22:10:10) [GCC 8.4.0]
platform: Linux-4.4.0-19041-Microsoft-x86_64-with-glibc2.27
optional deps. installed: ['typing-extensions']
```
<!-- or if you're using pydantic prior to v1.3, manually include: OS, python version and pydantic version -->
<!-- Please read the [docs](https://pydantic-docs.helpmanual.io/) and search through issues to
confirm your bug hasn't already been reported. -->
<!-- Where possible please include a self-contained code snippet describing your bug: -->
When trying to use the `validate_arguments` decorator, I face the issue of getting validation errors for an extra attribute for a TypedDict as below:
```py
from typing_extensions import TypedDict
from pydantic import validate_arguments, Extra
class TypedTest(TypedDict):
y: str
@validate_arguments(config={'extra': Extra.allow})
def test(other: TypedTest):
pass
test(other={'y': 'b', 'z': 'a'})
```
Output:
```
pydantic.error_wrappers.ValidationError: 1 validation error for Test
other -> z
extra fields not permitted (type=value_error.extra)
```
Expected: No errors
Anything I have missed that would let this validation pass for extra attributes in the TypedDict?
Thanks very much!
</issue>
<code>
[start of pydantic/decorator.py]
1 from functools import wraps
2 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload
3
4 from . import validator
5 from .config import Extra
6 from .errors import ConfigError
7 from .main import BaseModel, create_model
8 from .typing import get_all_type_hints
9 from .utils import to_camel
10
11 __all__ = ('validate_arguments',)
12
13 if TYPE_CHECKING:
14 from .typing import AnyCallable
15
16 AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)
17 ConfigType = Union[None, Type[Any], Dict[str, Any]]
18
19
20 @overload
21 def validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:
22 ...
23
24
25 @overload
26 def validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':
27 ...
28
29
30 def validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:
31 """
32 Decorator to validate the arguments passed to a function.
33 """
34
35 def validate(_func: 'AnyCallable') -> 'AnyCallable':
36 vd = ValidatedFunction(_func, config)
37
38 @wraps(_func)
39 def wrapper_function(*args: Any, **kwargs: Any) -> Any:
40 return vd.call(*args, **kwargs)
41
42 wrapper_function.vd = vd # type: ignore
43 wrapper_function.validate = vd.init_model_instance # type: ignore
44 wrapper_function.raw_function = vd.raw_function # type: ignore
45 wrapper_function.model = vd.model # type: ignore
46 return wrapper_function
47
48 if func:
49 return validate(func)
50 else:
51 return validate
52
53
54 ALT_V_ARGS = 'v__args'
55 ALT_V_KWARGS = 'v__kwargs'
56 V_POSITIONAL_ONLY_NAME = 'v__positional_only'
57 V_DUPLICATE_KWARGS = 'v__duplicate_kwargs'
58
59
60 class ValidatedFunction:
61 def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901
62 from inspect import Parameter, signature
63
64 parameters: Mapping[str, Parameter] = signature(function).parameters
65
66 if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:
67 raise ConfigError(
68 f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}", "{V_POSITIONAL_ONLY_NAME}" and "{V_DUPLICATE_KWARGS}" '
69 f'are not permitted as argument names when using the "{validate_arguments.__name__}" decorator'
70 )
71
72 self.raw_function = function
73 self.arg_mapping: Dict[int, str] = {}
74 self.positional_only_args = set()
75 self.v_args_name = 'args'
76 self.v_kwargs_name = 'kwargs'
77
78 type_hints = get_all_type_hints(function)
79 takes_args = False
80 takes_kwargs = False
81 fields: Dict[str, Tuple[Any, Any]] = {}
82 for i, (name, p) in enumerate(parameters.items()):
83 if p.annotation is p.empty:
84 annotation = Any
85 else:
86 annotation = type_hints[name]
87
88 default = ... if p.default is p.empty else p.default
89 if p.kind == Parameter.POSITIONAL_ONLY:
90 self.arg_mapping[i] = name
91 fields[name] = annotation, default
92 fields[V_POSITIONAL_ONLY_NAME] = List[str], None
93 self.positional_only_args.add(name)
94 elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:
95 self.arg_mapping[i] = name
96 fields[name] = annotation, default
97 fields[V_DUPLICATE_KWARGS] = List[str], None
98 elif p.kind == Parameter.KEYWORD_ONLY:
99 fields[name] = annotation, default
100 elif p.kind == Parameter.VAR_POSITIONAL:
101 self.v_args_name = name
102 fields[name] = Tuple[annotation, ...], None
103 takes_args = True
104 else:
105 assert p.kind == Parameter.VAR_KEYWORD, p.kind
106 self.v_kwargs_name = name
107 fields[name] = Dict[str, annotation], None # type: ignore
108 takes_kwargs = True
109
110 # these checks avoid a clash between "args" and a field with that name
111 if not takes_args and self.v_args_name in fields:
112 self.v_args_name = ALT_V_ARGS
113
114 # same with "kwargs"
115 if not takes_kwargs and self.v_kwargs_name in fields:
116 self.v_kwargs_name = ALT_V_KWARGS
117
118 if not takes_args:
119 # we add the field so validation below can raise the correct exception
120 fields[self.v_args_name] = List[Any], None
121
122 if not takes_kwargs:
123 # same with kwargs
124 fields[self.v_kwargs_name] = Dict[Any, Any], None
125
126 self.create_model(fields, takes_args, takes_kwargs, config)
127
128 def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:
129 values = self.build_values(args, kwargs)
130 return self.model(**values)
131
132 def call(self, *args: Any, **kwargs: Any) -> Any:
133 m = self.init_model_instance(*args, **kwargs)
134 return self.execute(m)
135
136 def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:
137 values: Dict[str, Any] = {}
138 if args:
139 arg_iter = enumerate(args)
140 while True:
141 try:
142 i, a = next(arg_iter)
143 except StopIteration:
144 break
145 arg_name = self.arg_mapping.get(i)
146 if arg_name is not None:
147 values[arg_name] = a
148 else:
149 values[self.v_args_name] = [a] + [a for _, a in arg_iter]
150 break
151
152 var_kwargs = {}
153 wrong_positional_args = []
154 duplicate_kwargs = []
155 non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}
156 for k, v in kwargs.items():
157 if k in non_var_fields:
158 if k in self.positional_only_args:
159 wrong_positional_args.append(k)
160 if k in values:
161 duplicate_kwargs.append(k)
162 values[k] = v
163 else:
164 var_kwargs[k] = v
165
166 if var_kwargs:
167 values[self.v_kwargs_name] = var_kwargs
168 if wrong_positional_args:
169 values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args
170 if duplicate_kwargs:
171 values[V_DUPLICATE_KWARGS] = duplicate_kwargs
172 return values
173
174 def execute(self, m: BaseModel) -> Any:
175 d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}
176 var_kwargs = d.pop(self.v_kwargs_name, {})
177
178 if self.v_args_name in d:
179 args_: List[Any] = []
180 in_kwargs = False
181 kwargs = {}
182 for name, value in d.items():
183 if in_kwargs:
184 kwargs[name] = value
185 elif name == self.v_args_name:
186 args_ += value
187 in_kwargs = True
188 else:
189 args_.append(value)
190 return self.raw_function(*args_, **kwargs, **var_kwargs)
191 elif self.positional_only_args:
192 args_ = []
193 kwargs = {}
194 for name, value in d.items():
195 if name in self.positional_only_args:
196 args_.append(value)
197 else:
198 kwargs[name] = value
199 return self.raw_function(*args_, **kwargs, **var_kwargs)
200 else:
201 return self.raw_function(**d, **var_kwargs)
202
203 def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None:
204 pos_args = len(self.arg_mapping)
205
206 class CustomConfig:
207 pass
208
209 if not TYPE_CHECKING: # pragma: no branch
210 if isinstance(config, dict):
211 CustomConfig = type('Config', (), config) # noqa: F811
212 elif config is not None:
213 CustomConfig = config # noqa: F811
214
215 if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):
216 raise ConfigError(
217 'Setting the "fields" and "alias_generator" property on custom Config for '
218 '@validate_arguments is not yet supported, please remove.'
219 )
220
221 class DecoratorBaseModel(BaseModel):
222 @validator(self.v_args_name, check_fields=False, allow_reuse=True)
223 def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:
224 if takes_args or v is None:
225 return v
226
227 raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')
228
229 @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)
230 def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
231 if takes_kwargs or v is None:
232 return v
233
234 plural = '' if len(v) == 1 else 's'
235 keys = ', '.join(map(repr, v.keys()))
236 raise TypeError(f'unexpected keyword argument{plural}: {keys}')
237
238 @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)
239 def check_positional_only(cls, v: Optional[List[str]]) -> None:
240 if v is None:
241 return
242
243 plural = '' if len(v) == 1 else 's'
244 keys = ', '.join(map(repr, v))
245 raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')
246
247 @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)
248 def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None:
249 if v is None:
250 return
251
252 plural = '' if len(v) == 1 else 's'
253 keys = ', '.join(map(repr, v))
254 raise TypeError(f'multiple values for argument{plural}: {keys}')
255
256 class Config(CustomConfig):
257 extra = Extra.forbid
258
259 self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)
260
[end of pydantic/decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pydantic/decorator.py b/pydantic/decorator.py
--- a/pydantic/decorator.py
+++ b/pydantic/decorator.py
@@ -254,6 +254,6 @@
raise TypeError(f'multiple values for argument{plural}: {keys}')
class Config(CustomConfig):
- extra = Extra.forbid
+ extra = getattr(CustomConfig, 'extra', Extra.forbid)
self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)
|
{"golden_diff": "diff --git a/pydantic/decorator.py b/pydantic/decorator.py\n--- a/pydantic/decorator.py\n+++ b/pydantic/decorator.py\n@@ -254,6 +254,6 @@\n raise TypeError(f'multiple values for argument{plural}: {keys}')\n \n class Config(CustomConfig):\n- extra = Extra.forbid\n+ extra = getattr(CustomConfig, 'extra', Extra.forbid)\n \n self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)\n", "issue": "\"extra\" config not respected for TypedDicts in functions\n### Checks\r\n\r\n* [x] I added a descriptive title to this issue\r\n* [x] I have searched (google, github) for similar issues and couldn't find anything\r\n* [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug\r\n\r\n<!-- Sorry to sound so draconian, but every second saved replying to issues is time spend improving pydantic :-) -->\r\n\r\n# Bug\r\n\r\nOutput of `python -c \"import pydantic.utils; print(pydantic.utils.version_info())\"`:\r\n```\r\n pydantic version: 1.8.2\r\n pydantic compiled: False\r\n install path: /home/msharma216/.local/lib/python3.8/site-packages/pydantic\r\n python version: 3.8.0 (default, Feb 25 2021, 22:10:10) [GCC 8.4.0]\r\n platform: Linux-4.4.0-19041-Microsoft-x86_64-with-glibc2.27\r\n optional deps. installed: ['typing-extensions']\r\n\r\n```\r\n<!-- or if you're using pydantic prior to v1.3, manually include: OS, python version and pydantic version -->\r\n\r\n<!-- Please read the [docs](https://pydantic-docs.helpmanual.io/) and search through issues to\r\nconfirm your bug hasn't already been reported. -->\r\n\r\n<!-- Where possible please include a self-contained code snippet describing your bug: -->\r\n\r\nWhen trying to use the `validate_arguments` decorator, I face the issue of getting validation errors for an extra attribute for a TypedDict as below:\r\n\r\n```py\r\nfrom typing_extensions import TypedDict\r\n\r\nfrom pydantic import validate_arguments, Extra\r\n\r\n\r\nclass TypedTest(TypedDict):\r\n y: str\r\n\r\n\r\n@validate_arguments(config={'extra': Extra.allow})\r\ndef test(other: TypedTest):\r\n pass\r\n\r\n\r\ntest(other={'y': 'b', 'z': 'a'})\r\n\r\n```\r\n\r\nOutput:\r\n```\r\npydantic.error_wrappers.ValidationError: 1 validation error for Test\r\nother -> z\r\n extra fields not permitted (type=value_error.extra)\r\n```\r\n\r\nExpected: No errors\r\n\r\nAnything I have missed that would let this validation pass for extra attributes in the TypedDict?\r\n\r\nThanks very much!\n", "before_files": [{"content": "from functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload\n\nfrom . 
import validator\nfrom .config import Extra\nfrom .errors import ConfigError\nfrom .main import BaseModel, create_model\nfrom .typing import get_all_type_hints\nfrom .utils import to_camel\n\n__all__ = ('validate_arguments',)\n\nif TYPE_CHECKING:\n from .typing import AnyCallable\n\n AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)\n ConfigType = Union[None, Type[Any], Dict[str, Any]]\n\n\n@overload\ndef validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:\n ...\n\n\n@overload\ndef validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':\n ...\n\n\ndef validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:\n \"\"\"\n Decorator to validate the arguments passed to a function.\n \"\"\"\n\n def validate(_func: 'AnyCallable') -> 'AnyCallable':\n vd = ValidatedFunction(_func, config)\n\n @wraps(_func)\n def wrapper_function(*args: Any, **kwargs: Any) -> Any:\n return vd.call(*args, **kwargs)\n\n wrapper_function.vd = vd # type: ignore\n wrapper_function.validate = vd.init_model_instance # type: ignore\n wrapper_function.raw_function = vd.raw_function # type: ignore\n wrapper_function.model = vd.model # type: ignore\n return wrapper_function\n\n if func:\n return validate(func)\n else:\n return validate\n\n\nALT_V_ARGS = 'v__args'\nALT_V_KWARGS = 'v__kwargs'\nV_POSITIONAL_ONLY_NAME = 'v__positional_only'\nV_DUPLICATE_KWARGS = 'v__duplicate_kwargs'\n\n\nclass ValidatedFunction:\n def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901\n from inspect import Parameter, signature\n\n parameters: Mapping[str, Parameter] = signature(function).parameters\n\n if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:\n raise ConfigError(\n f'\"{ALT_V_ARGS}\", \"{ALT_V_KWARGS}\", \"{V_POSITIONAL_ONLY_NAME}\" and \"{V_DUPLICATE_KWARGS}\" '\n f'are not permitted as argument names when using the \"{validate_arguments.__name__}\" decorator'\n )\n\n self.raw_function = function\n self.arg_mapping: Dict[int, str] = {}\n self.positional_only_args = set()\n self.v_args_name = 'args'\n self.v_kwargs_name = 'kwargs'\n\n type_hints = get_all_type_hints(function)\n takes_args = False\n takes_kwargs = False\n fields: Dict[str, Tuple[Any, Any]] = {}\n for i, (name, p) in enumerate(parameters.items()):\n if p.annotation is p.empty:\n annotation = Any\n else:\n annotation = type_hints[name]\n\n default = ... 
if p.default is p.empty else p.default\n if p.kind == Parameter.POSITIONAL_ONLY:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_POSITIONAL_ONLY_NAME] = List[str], None\n self.positional_only_args.add(name)\n elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_DUPLICATE_KWARGS] = List[str], None\n elif p.kind == Parameter.KEYWORD_ONLY:\n fields[name] = annotation, default\n elif p.kind == Parameter.VAR_POSITIONAL:\n self.v_args_name = name\n fields[name] = Tuple[annotation, ...], None\n takes_args = True\n else:\n assert p.kind == Parameter.VAR_KEYWORD, p.kind\n self.v_kwargs_name = name\n fields[name] = Dict[str, annotation], None # type: ignore\n takes_kwargs = True\n\n # these checks avoid a clash between \"args\" and a field with that name\n if not takes_args and self.v_args_name in fields:\n self.v_args_name = ALT_V_ARGS\n\n # same with \"kwargs\"\n if not takes_kwargs and self.v_kwargs_name in fields:\n self.v_kwargs_name = ALT_V_KWARGS\n\n if not takes_args:\n # we add the field so validation below can raise the correct exception\n fields[self.v_args_name] = List[Any], None\n\n if not takes_kwargs:\n # same with kwargs\n fields[self.v_kwargs_name] = Dict[Any, Any], None\n\n self.create_model(fields, takes_args, takes_kwargs, config)\n\n def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:\n values = self.build_values(args, kwargs)\n return self.model(**values)\n\n def call(self, *args: Any, **kwargs: Any) -> Any:\n m = self.init_model_instance(*args, **kwargs)\n return self.execute(m)\n\n def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:\n values: Dict[str, Any] = {}\n if args:\n arg_iter = enumerate(args)\n while True:\n try:\n i, a = next(arg_iter)\n except StopIteration:\n break\n arg_name = self.arg_mapping.get(i)\n if arg_name is not None:\n values[arg_name] = a\n else:\n values[self.v_args_name] = [a] + [a for _, a in arg_iter]\n break\n\n var_kwargs = {}\n wrong_positional_args = []\n duplicate_kwargs = []\n non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}\n for k, v in kwargs.items():\n if k in non_var_fields:\n if k in self.positional_only_args:\n wrong_positional_args.append(k)\n if k in values:\n duplicate_kwargs.append(k)\n values[k] = v\n else:\n var_kwargs[k] = v\n\n if var_kwargs:\n values[self.v_kwargs_name] = var_kwargs\n if wrong_positional_args:\n values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args\n if duplicate_kwargs:\n values[V_DUPLICATE_KWARGS] = duplicate_kwargs\n return values\n\n def execute(self, m: BaseModel) -> Any:\n d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}\n var_kwargs = d.pop(self.v_kwargs_name, {})\n\n if self.v_args_name in d:\n args_: List[Any] = []\n in_kwargs = False\n kwargs = {}\n for name, value in d.items():\n if in_kwargs:\n kwargs[name] = value\n elif name == self.v_args_name:\n args_ += value\n in_kwargs = True\n else:\n args_.append(value)\n return self.raw_function(*args_, **kwargs, **var_kwargs)\n elif self.positional_only_args:\n args_ = []\n kwargs = {}\n for name, value in d.items():\n if name in self.positional_only_args:\n args_.append(value)\n else:\n kwargs[name] = value\n return self.raw_function(*args_, **kwargs, **var_kwargs)\n else:\n return self.raw_function(**d, **var_kwargs)\n\n def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 
'ConfigType') -> None:\n pos_args = len(self.arg_mapping)\n\n class CustomConfig:\n pass\n\n if not TYPE_CHECKING: # pragma: no branch\n if isinstance(config, dict):\n CustomConfig = type('Config', (), config) # noqa: F811\n elif config is not None:\n CustomConfig = config # noqa: F811\n\n if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):\n raise ConfigError(\n 'Setting the \"fields\" and \"alias_generator\" property on custom Config for '\n '@validate_arguments is not yet supported, please remove.'\n )\n\n class DecoratorBaseModel(BaseModel):\n @validator(self.v_args_name, check_fields=False, allow_reuse=True)\n def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:\n if takes_args or v is None:\n return v\n\n raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')\n\n @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)\n def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:\n if takes_kwargs or v is None:\n return v\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v.keys()))\n raise TypeError(f'unexpected keyword argument{plural}: {keys}')\n\n @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)\n def check_positional_only(cls, v: Optional[List[str]]) -> None:\n if v is None:\n return\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')\n\n @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)\n def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None:\n if v is None:\n return\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'multiple values for argument{plural}: {keys}')\n\n class Config(CustomConfig):\n extra = Extra.forbid\n\n self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)\n", "path": "pydantic/decorator.py"}]}
| 3,971 | 128 |
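The golden diff in this row makes `@validate_arguments` pass a user-supplied `extra` setting through to the generated model instead of always forcing `Extra.forbid`. A minimal sketch of the intended post-patch behaviour, reusing the reproduction code from the issue itself (assumes pydantic v1 with `typing_extensions` installed):

```py
from typing_extensions import TypedDict

from pydantic import Extra, validate_arguments


class TypedTest(TypedDict):
    y: str


# With extra = getattr(CustomConfig, 'extra', Extra.forbid), the allow setting
# reaches the decorator's generated model, so the unknown key 'z' is tolerated.
@validate_arguments(config={'extra': Extra.allow})
def test(other: TypedTest) -> None:
    pass


test(other={'y': 'b', 'z': 'a'})  # expected: no "extra fields not permitted" error
```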
gh_patches_debug_30879
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-2487
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
S3: POST/PUT to bucket URLs don't route correctly on port 4566
<!-- Love localstack? Please consider supporting our collective:
👉 https://opencollective.com/localstack/donate -->
# Type of request: This is a ...
[x] bug report
[ ] feature request
# Detailed description
We noticed this while converting a service to use Localstack for tests. The service generates S3 presigned post URLs. We're able to create and use presigned S3 URLs on port 4572 (deprecated S3 port), but not 4566, the new shared one. The same issue happens with PUT requests, which is the simplest to repro.
While just using 4572 works, this does force us to use the deprecated port and I figured it was worth opening an issue because of the discrepancy.
## Expected behavior
POST http://localhost:4566/hello (with appropriate form params) should return a 204, in the same way that POST http://localhost:4572/hello does.
PUT http://localhost:4566/hello should create a bucket and return a 200, in the same way that PUT http://localhost:4572/hello does.
## Actual behavior
Both PUT and POST http://localhost:4566/hello return a 404.
In the localstack logs:
2020-05-20T13:37:41:INFO:localstack.services.edge: Unable to find forwarding rule for host "localhost:4566", path "/hello", target header "", auth header ""
# Steps to reproduce
```bash
$ curl -i -XPUT http://localhost:4572/hello
HTTP/1.1 200 OK
Server: BaseHTTP/0.6 Python/3.8.2
Date: Wed, 20 May 2020 13:43:17 GMT
Content-Type: application/xml; charset=utf-8
content-length: 159
Access-Control-Allow-Origin: *
Last-Modified: Wed, 20 May 2020 13:43:17 GMT
x-amz-request-id: 0ABD347D7A4E0697
x-amz-id-2: MzRISOwyjmnup0ABD347D7A4E06977/JypPGXLh0OVFGcJaaO3KW/hRAqKOpIEEp
Access-Control-Allow-Methods: HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH
Access-Control-Allow-Headers: authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging
Access-Control-Expose-Headers: x-amz-version-id
<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01"><CreateBucketResponse><Bucket>hello</Bucket></CreateBucketResponse></CreateBucketResponse>%
$ curl -i -XPUT http://localhost:4566/hello
HTTP/1.1 404 Not Found
Server: BaseHTTP/0.6 Python/3.8.2
Date: Wed, 20 May 2020 13:43:22 GMT
Content-Length: 21
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH
Access-Control-Allow-Headers: authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging
Access-Control-Expose-Headers: x-amz-version-id
{"status": "running"}%
```
## Command used to start LocalStack
`localstack start`
</issue>
<code>
[start of localstack/services/edge.py]
1 import re
2 import os
3 import sys
4 import json
5 import logging
6 from requests.models import Response
7 from localstack import config
8 from localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER
9 from localstack.utils.common import run, is_root, TMP_THREADS
10 from localstack.utils.common import safe_requests as requests
11 from localstack.services.generic_proxy import ProxyListener, GenericProxy
12
13 LOG = logging.getLogger(__name__)
14
15 # Header to indicate that the process should kill itself. This is required because if
16 # this process is started as root, then we cannot kill it from a non-root process
17 HEADER_KILL_SIGNAL = 'x-localstack-kill'
18
19
20 class ProxyListenerEdge(ProxyListener):
21
22 def forward_request(self, method, path, data, headers):
23 if method == 'OPTIONS':
24 return 200
25
26 # kill the process if we receive this header
27 headers.get(HEADER_KILL_SIGNAL) and os._exit(0)
28
29 target = headers.get('x-amz-target', '')
30 auth_header = headers.get('authorization', '')
31 host = headers.get('host', '')
32 headers[HEADER_LOCALSTACK_EDGE_URL] = 'https://%s' % host
33
34 # extract API details
35 api, port, path, host = get_api_from_headers(headers, path)
36
37 if port and int(port) < 0:
38 return 404
39
40 if not port:
41 # detect S3 presigned URLs
42 if 'AWSAccessKeyId=' in path or 'Signature=' in path:
43 port = config.PORT_S3
44 # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`
45 # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first
46 if method == 'GET' and '/' in path.strip('/'):
47 port = config.PORT_S3
48
49 if not port:
50 if api in ['', None, '_unknown_']:
51 LOG.info(('Unable to find forwarding rule for host "%s", path "%s", '
52 'target header "%s", auth header "%s"') % (host, path, target, auth_header))
53 else:
54 LOG.info(('Unable to determine forwarding port for API "%s" - please '
55 'make sure this API is enabled via the SERVICES configuration') % api)
56 response = Response()
57 response.status_code = 404
58 response._content = '{"status": "running"}'
59 return response
60
61 use_ssl = config.USE_SSL
62
63 connect_host = '%s:%s' % (config.HOSTNAME, port)
64 url = 'http%s://%s%s' % ('s' if use_ssl else '', connect_host, path)
65 headers['Host'] = host
66 function = getattr(requests, method.lower())
67 if isinstance(data, dict):
68 data = json.dumps(data)
69
70 response = function(url, data=data, headers=headers, verify=False)
71 return response
72
73
74 def get_api_from_headers(headers, path=None):
75 target = headers.get('x-amz-target', '')
76 host = headers.get('host', '')
77 auth_header = headers.get('authorization', '')
78 ls_target = headers.get(HEADER_LOCALSTACK_TARGET, '')
79 path = path or '/'
80
81 # initialize result
82 result = '_unknown_', 0
83
84 # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
85 try:
86 credential_scope = auth_header.split(',')[0].split()[1]
87 _, _, _, service, _ = credential_scope.split('/')
88 result = service, get_service_port_for_account(service, headers)
89 except Exception:
90 pass
91
92 result_before = result
93
94 # Fallback rules and route customizations applied below
95
96 if host.endswith('cloudfront.net'):
97 path = path or '/'
98 result = 'cloudfront', config.PORT_CLOUDFRONT
99 elif target.startswith('AWSCognitoIdentityProviderService') or 'cognito-idp.' in host:
100 result = 'cognito-idp', config.PORT_COGNITO_IDP
101 elif target.startswith('AWSCognitoIdentityService') or 'cognito-identity.' in host:
102 result = 'cognito-identity', config.PORT_COGNITO_IDENTITY
103 elif result[0] == 's3' or re.match(r'.*s3(\-website)?\.([^\.]+\.)?amazonaws.com', host):
104 host = re.sub(r's3-website\..*\.amazonaws', 's3.amazonaws', host)
105 result = 's3', config.PORT_S3
106 elif result[0] == 'states' in auth_header or host.startswith('states.'):
107 result = 'stepfunctions', config.PORT_STEPFUNCTIONS
108 elif '.execute-api.' in host:
109 result = 'apigateway', config.PORT_APIGATEWAY
110 elif target.startswith('DynamoDBStreams') or host.startswith('streams.dynamodb.'):
111 result = 'dynamodbstreams', config.PORT_DYNAMODBSTREAMS
112 elif ls_target == 'web' or path == '/graph':
113 result = 'web', config.PORT_WEB_UI
114
115 return result[0], result_before[1] or result[1], path, host
116
117
118 def get_service_port_for_account(service, headers):
119 # assume we're only using a single account, hence return the static port mapping from config.py
120 return config.service_port(service)
121
122
123 def do_start_edge(port, use_ssl, asynchronous=False):
124 try:
125 # start local DNS server, if present
126 from localstack_ext.services import dns_server
127 dns_server.start_servers()
128 except Exception:
129 pass
130
131 # get port and start Edge
132 print('Starting edge router (http%s port %s)...' % ('s' if use_ssl else '', port))
133 # use use=True here because our proxy allows both, HTTP and HTTPS traffic
134 proxy = GenericProxy(port, ssl=True, update_listener=ProxyListenerEdge())
135 proxy.start()
136 if not asynchronous:
137 proxy.join()
138 return proxy
139
140
141 def can_use_sudo():
142 try:
143 run('echo | sudo -S echo', print_error=False)
144 return True
145 except Exception:
146 return False
147
148
149 def ensure_can_use_sudo():
150 if not is_root() and not can_use_sudo():
151 print('Please enter your sudo password (required to configure local network):')
152 run('sudo echo', stdin=True)
153
154
155 def start_edge(port=None, use_ssl=True, asynchronous=False):
156 if not port:
157 port = config.EDGE_PORT
158 if config.EDGE_PORT_HTTP:
159 do_start_edge(config.EDGE_PORT_HTTP, use_ssl=False, asynchronous=True)
160 if port > 1024 or is_root():
161 return do_start_edge(port, use_ssl, asynchronous=asynchronous)
162
163 # process requires priviledged port but we're not root -> try running as sudo
164
165 class Terminator(object):
166
167 def stop(self, quiet=True):
168 try:
169 url = 'http%s://localhost:%s' % ('s' if use_ssl else '', port)
170 requests.verify_ssl = False
171 requests.post(url, headers={HEADER_KILL_SIGNAL: 'kill'})
172 except Exception:
173 pass
174
175 # make sure we can run sudo commands
176 ensure_can_use_sudo()
177
178 # register a signal handler to terminate the sudo process later on
179 TMP_THREADS.append(Terminator())
180
181 # start the process as sudo
182 sudo_cmd = 'sudo '
183 python_cmd = sys.executable
184 cmd = '%sPYTHONPATH=.:%s %s %s %s' % (sudo_cmd, LOCALSTACK_ROOT_FOLDER, python_cmd, __file__, port)
185 process = run(cmd, asynchronous=asynchronous)
186 return process
187
188
189 if __name__ == '__main__':
190 logging.basicConfig()
191 start_edge(int(sys.argv[1]))
192
[end of localstack/services/edge.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/localstack/services/edge.py b/localstack/services/edge.py
--- a/localstack/services/edge.py
+++ b/localstack/services/edge.py
@@ -6,7 +6,7 @@
from requests.models import Response
from localstack import config
from localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER
-from localstack.utils.common import run, is_root, TMP_THREADS
+from localstack.utils.common import run, is_root, TMP_THREADS, to_bytes
from localstack.utils.common import safe_requests as requests
from localstack.services.generic_proxy import ProxyListener, GenericProxy
@@ -41,10 +41,18 @@
# detect S3 presigned URLs
if 'AWSAccessKeyId=' in path or 'Signature=' in path:
port = config.PORT_S3
- # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`
# TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first
- if method == 'GET' and '/' in path.strip('/'):
+ stripped = path.strip('/')
+ if method == 'GET' and '/' in stripped:
+ # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`
port = config.PORT_S3
+ if stripped and '/' not in stripped:
+ if method == 'PUT':
+ # assume that this is an S3 PUT bucket request with URL path `/<bucket>`
+ port = config.PORT_S3
+ elif method == 'POST' and to_bytes('key=') in to_bytes(data or ''):
+ # assume that this is an S3 POST request with form parameters in the body
+ port = config.PORT_S3
if not port:
if api in ['', None, '_unknown_']:
|
{"golden_diff": "diff --git a/localstack/services/edge.py b/localstack/services/edge.py\n--- a/localstack/services/edge.py\n+++ b/localstack/services/edge.py\n@@ -6,7 +6,7 @@\n from requests.models import Response\n from localstack import config\n from localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER\n-from localstack.utils.common import run, is_root, TMP_THREADS\n+from localstack.utils.common import run, is_root, TMP_THREADS, to_bytes\n from localstack.utils.common import safe_requests as requests\n from localstack.services.generic_proxy import ProxyListener, GenericProxy\n \n@@ -41,10 +41,18 @@\n # detect S3 presigned URLs\n if 'AWSAccessKeyId=' in path or 'Signature=' in path:\n port = config.PORT_S3\n- # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`\n # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first\n- if method == 'GET' and '/' in path.strip('/'):\n+ stripped = path.strip('/')\n+ if method == 'GET' and '/' in stripped:\n+ # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`\n port = config.PORT_S3\n+ if stripped and '/' not in stripped:\n+ if method == 'PUT':\n+ # assume that this is an S3 PUT bucket request with URL path `/<bucket>`\n+ port = config.PORT_S3\n+ elif method == 'POST' and to_bytes('key=') in to_bytes(data or ''):\n+ # assume that this is an S3 POST request with form parameters in the body\n+ port = config.PORT_S3\n \n if not port:\n if api in ['', None, '_unknown_']:\n", "issue": "S3: POST/PUT to bucket URLs don't route correctly on port 4566\n<!-- Love localstack? Please consider supporting our collective:\r\n\ud83d\udc49 https://opencollective.com/localstack/donate -->\r\n\r\n# Type of request: This is a ...\r\n\r\n[x] bug report\r\n[ ] feature request\r\n\r\n# Detailed description\r\n\r\nWe noticed this while converting a service to use Localstack for tests. The service generates S3 presigned post URLs. We're able to create and use presigned S3 URLs on port 4572 (deprecated S3 port), but not 4566, the new shared one. 
The same issue happens with PUT requests, which is the simplest to repro.\r\n\r\nWhile just using 4572 works, this does force us to use the deprecated port and I figured it was worth opening an issue because of the discrepancy.\r\n\r\n## Expected behavior\r\n\r\nPOST http://localhost:4566/hello (with appropriate form params) should return a 204, in the same way that POST http://localhost:4572/hello does.\r\n\r\nPUT http://localhost:4566/hello should create a bucket and return a 200, in the same way that PUT http://localhost:4572/hello does.\r\n\r\n## Actual behavior\r\n\r\nBoth PUT and POST http://localhost:4566/hello return a 404.\r\n\r\nIn the localstack logs:\r\n2020-05-20T13:37:41:INFO:localstack.services.edge: Unable to find forwarding rule for host \"localhost:4566\", path \"/hello\", target header \"\", auth header \"\"\r\n\r\n# Steps to reproduce\r\n\r\n```bash\r\n$ curl -i -XPUT http://localhost:4572/hello\r\nHTTP/1.1 200 OK\r\nServer: BaseHTTP/0.6 Python/3.8.2\r\nDate: Wed, 20 May 2020 13:43:17 GMT\r\nContent-Type: application/xml; charset=utf-8\r\ncontent-length: 159\r\nAccess-Control-Allow-Origin: *\r\nLast-Modified: Wed, 20 May 2020 13:43:17 GMT\r\nx-amz-request-id: 0ABD347D7A4E0697\r\nx-amz-id-2: MzRISOwyjmnup0ABD347D7A4E06977/JypPGXLh0OVFGcJaaO3KW/hRAqKOpIEEp\r\nAccess-Control-Allow-Methods: HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH\r\nAccess-Control-Allow-Headers: authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging\r\nAccess-Control-Expose-Headers: x-amz-version-id\r\n\r\n<CreateBucketResponse xmlns=\"http://s3.amazonaws.com/doc/2006-03-01\"><CreateBucketResponse><Bucket>hello</Bucket></CreateBucketResponse></CreateBucketResponse>% \r\n\r\n$ curl -i -XPUT http://localhost:4566/hello\r\nHTTP/1.1 404 Not Found\r\nServer: BaseHTTP/0.6 Python/3.8.2\r\nDate: Wed, 20 May 2020 13:43:22 GMT\r\nContent-Length: 21\r\nAccess-Control-Allow-Origin: *\r\nAccess-Control-Allow-Methods: HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH\r\nAccess-Control-Allow-Headers: authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging\r\nAccess-Control-Expose-Headers: x-amz-version-id\r\n\r\n{\"status\": \"running\"}%\r\n```\r\n\r\n## Command used to start LocalStack\r\n\r\n`localstack start`\n", "before_files": [{"content": "import re\nimport os\nimport sys\nimport json\nimport logging\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER\nfrom localstack.utils.common import run, is_root, TMP_THREADS\nfrom localstack.utils.common import safe_requests as requests\nfrom localstack.services.generic_proxy import ProxyListener, GenericProxy\n\nLOG = logging.getLogger(__name__)\n\n# Header to indicate that the process should kill itself. 
This is required because if\n# this process is started as root, then we cannot kill it from a non-root process\nHEADER_KILL_SIGNAL = 'x-localstack-kill'\n\n\nclass ProxyListenerEdge(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n if method == 'OPTIONS':\n return 200\n\n # kill the process if we receive this header\n headers.get(HEADER_KILL_SIGNAL) and os._exit(0)\n\n target = headers.get('x-amz-target', '')\n auth_header = headers.get('authorization', '')\n host = headers.get('host', '')\n headers[HEADER_LOCALSTACK_EDGE_URL] = 'https://%s' % host\n\n # extract API details\n api, port, path, host = get_api_from_headers(headers, path)\n\n if port and int(port) < 0:\n return 404\n\n if not port:\n # detect S3 presigned URLs\n if 'AWSAccessKeyId=' in path or 'Signature=' in path:\n port = config.PORT_S3\n # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`\n # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first\n if method == 'GET' and '/' in path.strip('/'):\n port = config.PORT_S3\n\n if not port:\n if api in ['', None, '_unknown_']:\n LOG.info(('Unable to find forwarding rule for host \"%s\", path \"%s\", '\n 'target header \"%s\", auth header \"%s\"') % (host, path, target, auth_header))\n else:\n LOG.info(('Unable to determine forwarding port for API \"%s\" - please '\n 'make sure this API is enabled via the SERVICES configuration') % api)\n response = Response()\n response.status_code = 404\n response._content = '{\"status\": \"running\"}'\n return response\n\n use_ssl = config.USE_SSL\n\n connect_host = '%s:%s' % (config.HOSTNAME, port)\n url = 'http%s://%s%s' % ('s' if use_ssl else '', connect_host, path)\n headers['Host'] = host\n function = getattr(requests, method.lower())\n if isinstance(data, dict):\n data = json.dumps(data)\n\n response = function(url, data=data, headers=headers, verify=False)\n return response\n\n\ndef get_api_from_headers(headers, path=None):\n target = headers.get('x-amz-target', '')\n host = headers.get('host', '')\n auth_header = headers.get('authorization', '')\n ls_target = headers.get(HEADER_LOCALSTACK_TARGET, '')\n path = path or '/'\n\n # initialize result\n result = '_unknown_', 0\n\n # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html\n try:\n credential_scope = auth_header.split(',')[0].split()[1]\n _, _, _, service, _ = credential_scope.split('/')\n result = service, get_service_port_for_account(service, headers)\n except Exception:\n pass\n\n result_before = result\n\n # Fallback rules and route customizations applied below\n\n if host.endswith('cloudfront.net'):\n path = path or '/'\n result = 'cloudfront', config.PORT_CLOUDFRONT\n elif target.startswith('AWSCognitoIdentityProviderService') or 'cognito-idp.' in host:\n result = 'cognito-idp', config.PORT_COGNITO_IDP\n elif target.startswith('AWSCognitoIdentityService') or 'cognito-identity.' in host:\n result = 'cognito-identity', config.PORT_COGNITO_IDENTITY\n elif result[0] == 's3' or re.match(r'.*s3(\\-website)?\\.([^\\.]+\\.)?amazonaws.com', host):\n host = re.sub(r's3-website\\..*\\.amazonaws', 's3.amazonaws', host)\n result = 's3', config.PORT_S3\n elif result[0] == 'states' in auth_header or host.startswith('states.'):\n result = 'stepfunctions', config.PORT_STEPFUNCTIONS\n elif '.execute-api.' 
in host:\n result = 'apigateway', config.PORT_APIGATEWAY\n elif target.startswith('DynamoDBStreams') or host.startswith('streams.dynamodb.'):\n result = 'dynamodbstreams', config.PORT_DYNAMODBSTREAMS\n elif ls_target == 'web' or path == '/graph':\n result = 'web', config.PORT_WEB_UI\n\n return result[0], result_before[1] or result[1], path, host\n\n\ndef get_service_port_for_account(service, headers):\n # assume we're only using a single account, hence return the static port mapping from config.py\n return config.service_port(service)\n\n\ndef do_start_edge(port, use_ssl, asynchronous=False):\n try:\n # start local DNS server, if present\n from localstack_ext.services import dns_server\n dns_server.start_servers()\n except Exception:\n pass\n\n # get port and start Edge\n print('Starting edge router (http%s port %s)...' % ('s' if use_ssl else '', port))\n # use use=True here because our proxy allows both, HTTP and HTTPS traffic\n proxy = GenericProxy(port, ssl=True, update_listener=ProxyListenerEdge())\n proxy.start()\n if not asynchronous:\n proxy.join()\n return proxy\n\n\ndef can_use_sudo():\n try:\n run('echo | sudo -S echo', print_error=False)\n return True\n except Exception:\n return False\n\n\ndef ensure_can_use_sudo():\n if not is_root() and not can_use_sudo():\n print('Please enter your sudo password (required to configure local network):')\n run('sudo echo', stdin=True)\n\n\ndef start_edge(port=None, use_ssl=True, asynchronous=False):\n if not port:\n port = config.EDGE_PORT\n if config.EDGE_PORT_HTTP:\n do_start_edge(config.EDGE_PORT_HTTP, use_ssl=False, asynchronous=True)\n if port > 1024 or is_root():\n return do_start_edge(port, use_ssl, asynchronous=asynchronous)\n\n # process requires priviledged port but we're not root -> try running as sudo\n\n class Terminator(object):\n\n def stop(self, quiet=True):\n try:\n url = 'http%s://localhost:%s' % ('s' if use_ssl else '', port)\n requests.verify_ssl = False\n requests.post(url, headers={HEADER_KILL_SIGNAL: 'kill'})\n except Exception:\n pass\n\n # make sure we can run sudo commands\n ensure_can_use_sudo()\n\n # register a signal handler to terminate the sudo process later on\n TMP_THREADS.append(Terminator())\n\n # start the process as sudo\n sudo_cmd = 'sudo '\n python_cmd = sys.executable\n cmd = '%sPYTHONPATH=.:%s %s %s %s' % (sudo_cmd, LOCALSTACK_ROOT_FOLDER, python_cmd, __file__, port)\n process = run(cmd, asynchronous=asynchronous)\n return process\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n start_edge(int(sys.argv[1]))\n", "path": "localstack/services/edge.py"}]}
| 3,592 | 405 |
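The golden diff in this row adds two fallback rules to the edge router so bucket-level requests on the shared port are forwarded to S3: a bare `PUT /<bucket>`, and a form-style `POST` whose body contains `key=`. A self-contained sketch of that heuristic follows; the constant and function name are illustrative only, while LocalStack itself applies this logic inside `ProxyListenerEdge.forward_request` using `config.PORT_S3`:

```py
from typing import Optional

PORT_S3 = 4572  # stand-in for localstack.config.PORT_S3


def guess_s3_port(method: str, path: str, data: bytes = b'') -> Optional[int]:
    """Mirror the fallback rules added in the golden diff."""
    stripped = path.strip('/')
    if method == 'GET' and '/' in stripped:
        # GET /<bucket>/<key ...> -> public object read
        return PORT_S3
    if stripped and '/' not in stripped:
        if method == 'PUT':
            # PUT /<bucket> -> create-bucket request
            return PORT_S3
        if method == 'POST' and b'key=' in data:
            # presigned POST upload forms carry a key= field in the body
            return PORT_S3
    return None


assert guess_s3_port('PUT', '/hello') == PORT_S3
assert guess_s3_port('POST', '/hello', b'key=file.txt&policy=...') == PORT_S3
assert guess_s3_port('GET', '/') is None
```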
gh_patches_debug_27244
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-1182
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop support for Python 3.3
</issue>
<code>
[start of setup.py]
1 import glob
2 import imp
3 import io
4 import os
5 from os import path
6 import re
7 import sys
8
9 from setuptools import Extension, find_packages, setup
10
11 MYDIR = path.abspath(os.path.dirname(__file__))
12
13 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
14 VERSION = VERSION.__version__
15
16 # NOTE(kgriffs): python-mimeparse is better-maintained fork of mimeparse
17 REQUIRES = ['six>=1.4.0', 'python-mimeparse>=1.5.2']
18
19 try:
20 sys.pypy_version_info
21 PYPY = True
22 except AttributeError:
23 PYPY = False
24
25 if PYPY:
26 CYTHON = False
27 else:
28 try:
29 from Cython.Distutils import build_ext
30 CYTHON = True
31 except ImportError:
32 # TODO(kgriffs): pip now ignores all output, so the user
33 # may not see this message. See also:
34 #
35 # https://github.com/pypa/pip/issues/2732
36 #
37 print('\nNOTE: Cython not installed. '
38 'Falcon will still work fine, but may run '
39 'a bit slower.\n')
40 CYTHON = False
41
42 if CYTHON:
43 def list_modules(dirname):
44 filenames = glob.glob(path.join(dirname, '*.py'))
45
46 module_names = []
47 for name in filenames:
48 module, ext = path.splitext(path.basename(name))
49 if module != '__init__':
50 module_names.append(module)
51
52 return module_names
53
54 package_names = ['falcon', 'falcon.util', 'falcon.routing', 'falcon.media']
55 ext_modules = [
56 Extension(
57 package + '.' + module,
58 [path.join(*(package.split('.') + [module + '.py']))]
59 )
60 for package in package_names
61 for module in list_modules(path.join(MYDIR, *package.split('.')))
62 ]
63
64 cmdclass = {'build_ext': build_ext}
65
66 else:
67 cmdclass = {}
68 ext_modules = []
69
70
71 def load_description():
72 in_raw = False
73
74 description_lines = []
75
76 # NOTE(kgriffs): PyPI does not support the raw directive
77 for readme_line in io.open('README.rst', 'r', encoding='utf-8'):
78 if readme_line.startswith('.. raw::'):
79 in_raw = True
80 elif in_raw:
81 if readme_line and not re.match('\s', readme_line):
82 in_raw = False
83
84 if not in_raw:
85 description_lines.append(readme_line)
86
87 return ''.join(description_lines)
88
89
90 setup(
91 name='falcon',
92 version=VERSION,
93 description='An unladen web framework for building APIs and app backends.',
94 long_description=load_description(),
95 classifiers=[
96 'Development Status :: 5 - Production/Stable',
97 'Environment :: Web Environment',
98 'Natural Language :: English',
99 'Intended Audience :: Developers',
100 'Intended Audience :: System Administrators',
101 'License :: OSI Approved :: Apache Software License',
102 'Operating System :: MacOS :: MacOS X',
103 'Operating System :: Microsoft :: Windows',
104 'Operating System :: POSIX',
105 'Topic :: Internet :: WWW/HTTP :: WSGI',
106 'Topic :: Software Development :: Libraries :: Application Frameworks',
107 'Programming Language :: Python',
108 'Programming Language :: Python :: Implementation :: CPython',
109 'Programming Language :: Python :: Implementation :: PyPy',
110 'Programming Language :: Python :: 2.7',
111 'Programming Language :: Python :: 3.3',
112 'Programming Language :: Python :: 3.4',
113 'Programming Language :: Python :: 3.5',
114 'Programming Language :: Python :: 3.6',
115 ],
116 keywords='wsgi web api framework rest http cloud',
117 author='Kurt Griffiths',
118 author_email='mail@kgriffs.com',
119 url='http://falconframework.org',
120 license='Apache 2.0',
121 packages=find_packages(exclude=['tests']),
122 include_package_data=True,
123 zip_safe=False,
124 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',
125 install_requires=REQUIRES,
126 cmdclass=cmdclass,
127 ext_modules=ext_modules,
128 tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],
129 entry_points={
130 'console_scripts': [
131 'falcon-bench = falcon.cmd.bench:main',
132 'falcon-print-routes = falcon.cmd.print_routes:main'
133 ]
134 }
135 )
136
[end of setup.py]
[start of falcon/media/msgpack.py]
1 from __future__ import absolute_import
2
3 from falcon import errors
4 from falcon.media import BaseHandler
5
6
7 class MessagePackHandler(BaseHandler):
8 """Handler built using the :py:mod:`msgpack` module.
9
10 This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The
11 MessagePack ``bin`` type is used to distinguish between Unicode strings
12 (``str`` on Python 3, ``unicode`` on Python 2) and byte strings
13 (``bytes`` on Python 2/3, or ``str`` on Python 2).
14
15 Note:
16 This handler requires the extra ``msgpack`` package, which must be
17 installed in addition to ``falcon`` from PyPI:
18
19 .. code::
20
21 $ pip install msgpack
22
23 Python 2.6 users will need to use the deprecated ``msgpack-python``
24 package instead, pinned to version 0.4.8.
25 """
26
27 def __init__(self):
28 import msgpack
29
30 self.msgpack = msgpack
31 self.packer = msgpack.Packer(
32 encoding='utf-8',
33 autoreset=True,
34 use_bin_type=True,
35 )
36
37 def deserialize(self, raw):
38 try:
39 # NOTE(jmvrbanac): Using unpackb since we would need to manage
40 # a buffer for Unpacker() which wouldn't gain us much.
41 return self.msgpack.unpackb(raw, encoding='utf-8')
42 except ValueError as err:
43 raise errors.HTTPBadRequest(
44 'Invalid MessagePack',
45 'Could not parse MessagePack body - {0}'.format(err)
46 )
47
48 def serialize(self, media):
49 return self.packer.pack(media)
50
[end of falcon/media/msgpack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/falcon/media/msgpack.py b/falcon/media/msgpack.py
--- a/falcon/media/msgpack.py
+++ b/falcon/media/msgpack.py
@@ -19,9 +19,6 @@
.. code::
$ pip install msgpack
-
- Python 2.6 users will need to use the deprecated ``msgpack-python``
- package instead, pinned to version 0.4.8.
"""
def __init__(self):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -107,8 +107,9 @@
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
+ 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
@@ -121,7 +122,7 @@
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
install_requires=REQUIRES,
cmdclass=cmdclass,
ext_modules=ext_modules,
|
{"golden_diff": "diff --git a/falcon/media/msgpack.py b/falcon/media/msgpack.py\n--- a/falcon/media/msgpack.py\n+++ b/falcon/media/msgpack.py\n@@ -19,9 +19,6 @@\n .. code::\n \n $ pip install msgpack\n-\n- Python 2.6 users will need to use the deprecated ``msgpack-python``\n- package instead, pinned to version 0.4.8.\n \"\"\"\n \n def __init__(self):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -107,8 +107,9 @@\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n+ 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n- 'Programming Language :: Python :: 3.3',\n+ 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n@@ -121,7 +122,7 @@\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',\n+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n", "issue": "Drop support for Python 3.3\n\n", "before_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nimport re\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is better-maintained fork of mimeparse\nREQUIRES = ['six>=1.4.0', 'python-mimeparse>=1.5.2']\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n package_names = ['falcon', 'falcon.util', 'falcon.routing', 'falcon.media']\n ext_modules = [\n Extension(\n package + '.' + module,\n [path.join(*(package.split('.') + [module + '.py']))]\n )\n for package in package_names\n for module in list_modules(path.join(MYDIR, *package.split('.')))\n ]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\n\ndef load_description():\n in_raw = False\n\n description_lines = []\n\n # NOTE(kgriffs): PyPI does not support the raw directive\n for readme_line in io.open('README.rst', 'r', encoding='utf-8'):\n if readme_line.startswith('.. 
raw::'):\n in_raw = True\n elif in_raw:\n if readme_line and not re.match('\\s', readme_line):\n in_raw = False\n\n if not in_raw:\n description_lines.append(readme_line)\n\n return ''.join(description_lines)\n\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=load_description(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='mail@kgriffs.com',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main',\n 'falcon-print-routes = falcon.cmd.print_routes:main'\n ]\n }\n)\n", "path": "setup.py"}, {"content": "from __future__ import absolute_import\n\nfrom falcon import errors\nfrom falcon.media import BaseHandler\n\n\nclass MessagePackHandler(BaseHandler):\n \"\"\"Handler built using the :py:mod:`msgpack` module.\n\n This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The\n MessagePack ``bin`` type is used to distinguish between Unicode strings\n (``str`` on Python 3, ``unicode`` on Python 2) and byte strings\n (``bytes`` on Python 2/3, or ``str`` on Python 2).\n\n Note:\n This handler requires the extra ``msgpack`` package, which must be\n installed in addition to ``falcon`` from PyPI:\n\n .. code::\n\n $ pip install msgpack\n\n Python 2.6 users will need to use the deprecated ``msgpack-python``\n package instead, pinned to version 0.4.8.\n \"\"\"\n\n def __init__(self):\n import msgpack\n\n self.msgpack = msgpack\n self.packer = msgpack.Packer(\n encoding='utf-8',\n autoreset=True,\n use_bin_type=True,\n )\n\n def deserialize(self, raw):\n try:\n # NOTE(jmvrbanac): Using unpackb since we would need to manage\n # a buffer for Unpacker() which wouldn't gain us much.\n return self.msgpack.unpackb(raw, encoding='utf-8')\n except ValueError as err:\n raise errors.HTTPBadRequest(\n 'Invalid MessagePack',\n 'Could not parse MessagePack body - {0}'.format(err)\n )\n\n def serialize(self, media):\n return self.packer.pack(media)\n", "path": "falcon/media/msgpack.py"}]}
| 2,312 | 365 |
gh_patches_debug_6453
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-2762
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`python_requires` prevents install of recent urllib3 when using Poetry
### Subject
re: https://github.com/urllib3/urllib3/pull/1309#issuecomment-356650894
I no longer see the suggestion of including `<4` in the link referenced earlier in that PR (content relocated to https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/#python-requires). Its inclusion is causing `1.22` to be installed.
I use `poetry` to manage dependencies for https://github.com/cj81499/advent-of-code, and today I got a dependabot alert for urllib3 (CVE-2018-20060).
I tried a `poetry update`, but poetry did not update urllib. `poetry add urllib3@latest` revealed the problem.
```command
> poetry add urllib3@latest
Using version ^1.26.12 for urllib3
Updating dependencies
Resolving dependencies... (0.0s)
The current project's Python requirement (>=3.9) is not compatible with some of the required packages Python requirement:
- urllib3 requires Python >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4, so it will not be satisfied for Python >=4
Because urllib3 (1.26.12) requires Python >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4
and no versions of urllib3 match >1.26.12,<2.0.0, urllib3 is forbidden.
So, because aoc-cj depends on urllib3 (^1.26.12), version solving failed.
• Check your dependencies Python requirement: The Python requirement can be specified via the `python` or `markers` properties
For urllib3, a possible solution would be to set the `python` property to ">=3.9,<4"
https://python-poetry.org/docs/dependency-specification/#python-restricted-dependencies,
https://python-poetry.org/docs/dependency-specification/#using-environment-markers
```
As a result of this change, I've been quietly stuck on `urllib3 v1.22` without even realizing it.
As suggested by poetry, I've changed _my_ python requirement from ">=3.9" to ">=3.9,<4" to be able to update, but I'm not sure there's a good reason to restrict the supported python versions to `<4`. Like I mentioned earlier, it seems this is no longer recommended by the Python docs.
In addition, since it didn't set a `python_requires`, a (future) Python 4 user might try to install urllib3 and get `v1.22`(!), which seems like pretty undesirable behavior to me.
_Originally posted by @cj81499 in https://github.com/urllib3/urllib3/issues/1309#issuecomment-1301625738_
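
To make the solver conflict concrete, here is a minimal sketch using the third-party `packaging` library (only an illustration of the version arithmetic, not something Poetry literally executes, and the `"4.0"` probe version is a made-up value): Poetry's solver effectively has to cover every interpreter version the project's own `python` range allows, and `<4` cuts a piece out of that range.

```python
from packaging.specifiers import SpecifierSet

project_python = SpecifierSet(">=3.9")  # the project's own python requirement
urllib3_python = SpecifierSet(
    ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
)  # urllib3 1.26.12's python_requires

# A hypothetical Python 4.0 satisfies the project's range but not urllib3's,
# which is exactly the gap the solver complains about.
print("4.0" in project_python)   # True
print("4.0" in urllib3_python)   # False
```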
### Environment
poetry 1.2.2
OS macOS-12.6-x86_64-i386-64bit
Python 3.9.15
urllib3 1.22
### Steps to Reproduce
- Install poetry
- Clone https://github.com/cj81499/advent-of-code
- Checkout commit `a8fecd3`
- run `poetry install`
- run `poetry show urllib3`
- Observe that version 1.22 is installed
### Expected Behavior
Latest urllib3 (`1.26.12` at time of writing) is installed
### Actual Behavior
urllib3 `1.22` is installed
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # This file is protected via CODEOWNERS
3
4 import codecs
5 import os
6 import re
7
8 from setuptools import setup
9
10 base_path = os.path.dirname(__file__)
11
12 # Get the version (borrowed from SQLAlchemy)
13 with open(os.path.join(base_path, "src", "urllib3", "_version.py")) as fp:
14 VERSION = (
15 re.compile(r""".*__version__ = ["'](.*?)['"]""", re.S).match(fp.read()).group(1)
16 )
17
18
19 with codecs.open("README.rst", encoding="utf-8") as fp:
20 # Remove reST raw directive from README as they're not allowed on PyPI
21 # Those blocks start with a newline and continue until the next newline
22 mode = None
23 lines = []
24 for line in fp:
25 if line.startswith(".. raw::"):
26 mode = "ignore_nl"
27 elif line == "\n":
28 mode = "wait_nl" if mode == "ignore_nl" else None
29
30 if mode is None:
31 lines.append(line)
32 readme = "".join(lines)
33
34 with codecs.open("CHANGES.rst", encoding="utf-8") as fp:
35 changes = fp.read()
36
37 version = VERSION
38
39 setup(
40 name="urllib3",
41 version=version,
42 description="HTTP library with thread-safe connection pooling, file post, and more.",
43 long_description=u"\n\n".join([readme, changes]),
44 long_description_content_type="text/x-rst",
45 classifiers=[
46 "Environment :: Web Environment",
47 "Intended Audience :: Developers",
48 "License :: OSI Approved :: MIT License",
49 "Operating System :: OS Independent",
50 "Programming Language :: Python",
51 "Programming Language :: Python :: 2",
52 "Programming Language :: Python :: 2.7",
53 "Programming Language :: Python :: 3",
54 "Programming Language :: Python :: 3.6",
55 "Programming Language :: Python :: 3.7",
56 "Programming Language :: Python :: 3.8",
57 "Programming Language :: Python :: 3.9",
58 "Programming Language :: Python :: 3.10",
59 "Programming Language :: Python :: 3.11",
60 "Programming Language :: Python :: Implementation :: CPython",
61 "Programming Language :: Python :: Implementation :: PyPy",
62 "Topic :: Internet :: WWW/HTTP",
63 "Topic :: Software Development :: Libraries",
64 ],
65 keywords="urllib httplib threadsafe filepost http https ssl pooling",
66 author="Andrey Petrov",
67 author_email="andrey.petrov@shazow.net",
68 url="https://urllib3.readthedocs.io/",
69 project_urls={
70 "Documentation": "https://urllib3.readthedocs.io/",
71 "Code": "https://github.com/urllib3/urllib3",
72 "Issue tracker": "https://github.com/urllib3/urllib3/issues",
73 },
74 license="MIT",
75 packages=[
76 "urllib3",
77 "urllib3.packages",
78 "urllib3.packages.backports",
79 "urllib3.contrib",
80 "urllib3.contrib._securetransport",
81 "urllib3.util",
82 ],
83 package_dir={"": "src"},
84 requires=[],
85 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4",
86 extras_require={
87 "brotli": [
88 "brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'",
89 "brotlicffi>=0.8.0; (os_name != 'nt' or python_version >= '3') and platform_python_implementation != 'CPython'",
90 "brotlipy>=0.6.0; os_name == 'nt' and python_version < '3'",
91 ],
92 "secure": [
93 "pyOpenSSL>=0.14",
94 "cryptography>=1.3.4",
95 "idna>=2.0.0",
96 "certifi",
97 "ipaddress; python_version=='2.7'",
98 "urllib3-secure-extra",
99 ],
100 "socks": ["PySocks>=1.5.6,<2.0,!=1.5.7"],
101 },
102 )
103
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -82,7 +82,7 @@
],
package_dir={"": "src"},
requires=[],
- python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4",
+ python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
extras_require={
"brotli": [
"brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -82,7 +82,7 @@\n ],\n package_dir={\"\": \"src\"},\n requires=[],\n- python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\",\n+ python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n extras_require={\n \"brotli\": [\n \"brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'\",\n", "issue": "`python_requires` prevents install of recent urllib3 when using Poetry\n### Subject\r\n\r\nre: https://github.com/urllib3/urllib3/pull/1309#issuecomment-356650894\r\n\r\nI no longer see the suggestion of including `<4` in the link referenced earlier in that PR (content relocated to https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/#python-requires). Its inclusion is causing `1.22` to be installed.\r\n\r\nI use `poetry` to manage dependencies for https://github.com/cj81499/advent-of-code, and today I got a dependabot alert for urllib3 (CVE-2018-20060).\r\n\r\nI tried a `poetry update`, but poetry did not update urllib. `poetry add urllib3@latest` revealed the problem.\r\n\r\n```command\r\n> poetry add urllib3@latest\r\nUsing version ^1.26.12 for urllib3\r\n\r\nUpdating dependencies\r\nResolving dependencies... (0.0s)\r\n\r\nThe current project's Python requirement (>=3.9) is not compatible with some of the required packages Python requirement:\r\n - urllib3 requires Python >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4, so it will not be satisfied for Python >=4\r\n\r\nBecause urllib3 (1.26.12) requires Python >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\r\n and no versions of urllib3 match >1.26.12,<2.0.0, urllib3 is forbidden.\r\nSo, because aoc-cj depends on urllib3 (^1.26.12), version solving failed.\r\n\r\n \u2022 Check your dependencies Python requirement: The Python requirement can be specified via the `python` or `markers` properties\r\n \r\n For urllib3, a possible solution would be to set the `python` property to \">=3.9,<4\"\r\n\r\n https://python-poetry.org/docs/dependency-specification/#python-restricted-dependencies,\r\n https://python-poetry.org/docs/dependency-specification/#using-environment-markers\r\n```\r\n\r\nAs a result of this change, I've been quietly stuck on `urllib3 v1.22` without even realizing it.\r\n\r\nAs suggested by poetry, I've changed _my_ python requirement from \">=3.9\" to \">=3.9,<4\" to be able to update, but I'm not sure there's a good reason to restrict the supported python versions to `<4`. 
Like I mentioned earlier, it seems this is no longer recommended by the Python docs.\r\n\r\nIn addition, since it didn't set a `python_requires`, a (future) Python 4 user might try to install urllib3 and get `v1.22`(!), which seems like pretty undesirable behavior to me.\r\n\r\n_Originally posted by @cj81499 in https://github.com/urllib3/urllib3/issues/1309#issuecomment-1301625738_\r\n\r\n### Environment\r\n\r\npoetry 1.2.2\r\n\r\nOS macOS-12.6-x86_64-i386-64bit\r\nPython 3.9.15\r\nurllib3 1.22\r\n\r\n### Steps to Reproduce\r\n\r\n- Install poetry\r\n- Clone https://github.com/cj81499/advent-of-code\r\n- Checkout commit `a8fecd3`\r\n- run `poetry install`\r\n- run `poetry show urllib3`\r\n- Observe that version 1.22 is installed\r\n\r\n### Expected Behavior\r\n\r\nLatest urllib3 (`1.26.12` at time of writing) is installed\r\n\r\n### Actual Behavior\r\n\r\nurllib3 `1.22` is installed\n", "before_files": [{"content": "#!/usr/bin/env python\n# This file is protected via CODEOWNERS\n\nimport codecs\nimport os\nimport re\n\nfrom setuptools import setup\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, \"src\", \"urllib3\", \"_version.py\")) as fp:\n VERSION = (\n re.compile(r\"\"\".*__version__ = [\"'](.*?)['\"]\"\"\", re.S).match(fp.read()).group(1)\n )\n\n\nwith codecs.open(\"README.rst\", encoding=\"utf-8\") as fp:\n # Remove reST raw directive from README as they're not allowed on PyPI\n # Those blocks start with a newline and continue until the next newline\n mode = None\n lines = []\n for line in fp:\n if line.startswith(\".. raw::\"):\n mode = \"ignore_nl\"\n elif line == \"\\n\":\n mode = \"wait_nl\" if mode == \"ignore_nl\" else None\n\n if mode is None:\n lines.append(line)\n readme = \"\".join(lines)\n\nwith codecs.open(\"CHANGES.rst\", encoding=\"utf-8\") as fp:\n changes = fp.read()\n\nversion = VERSION\n\nsetup(\n name=\"urllib3\",\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u\"\\n\\n\".join([readme, changes]),\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries\",\n ],\n keywords=\"urllib httplib threadsafe filepost http https ssl pooling\",\n author=\"Andrey Petrov\",\n author_email=\"andrey.petrov@shazow.net\",\n url=\"https://urllib3.readthedocs.io/\",\n project_urls={\n \"Documentation\": \"https://urllib3.readthedocs.io/\",\n \"Code\": \"https://github.com/urllib3/urllib3\",\n \"Issue tracker\": \"https://github.com/urllib3/urllib3/issues\",\n },\n license=\"MIT\",\n packages=[\n \"urllib3\",\n \"urllib3.packages\",\n \"urllib3.packages.backports\",\n \"urllib3.contrib\",\n 
\"urllib3.contrib._securetransport\",\n \"urllib3.util\",\n ],\n package_dir={\"\": \"src\"},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\",\n extras_require={\n \"brotli\": [\n \"brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'\",\n \"brotlicffi>=0.8.0; (os_name != 'nt' or python_version >= '3') and platform_python_implementation != 'CPython'\",\n \"brotlipy>=0.6.0; os_name == 'nt' and python_version < '3'\",\n ],\n \"secure\": [\n \"pyOpenSSL>=0.14\",\n \"cryptography>=1.3.4\",\n \"idna>=2.0.0\",\n \"certifi\",\n \"ipaddress; python_version=='2.7'\",\n \"urllib3-secure-extra\",\n ],\n \"socks\": [\"PySocks>=1.5.6,<2.0,!=1.5.7\"],\n },\n)\n", "path": "setup.py"}]}
| 2,534 | 177 |
gh_patches_debug_29225
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-196
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ZAS Abfallwirtschaft
I'm trying to integrate the ZAS Abfallwirtschaft ICS, but I can't seem to get it to work.
Is there anything I need to configure specifically?
The Website is
https://www.za-sws.de/abfallkalender.cfm
And the generated ICS is
https://online-portal.za-sws.de/WasteManagementSuedwestsachsen/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S
_(The address used is a random one, not mine of course)_
My configuration looks like this:
```
waste_collection_schedule:
sources:
- name: ics
args:
url: https://online-portal.za-sws.de/WasteManagementSuedwestsachsen/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S
customize:
- type: Restmuelltonne 02-woechentl.
alias: Restmüll
icon: mdi:trash-can
- type: Biotonnenwaesche
alias: Biotonnenwäsche
show: false
icon: mdi:flower-outline
- type: Papiertonne 04-woechentl.
alias: Papiertonne
icon: mdi:trash-can-outline
- type: LVP
alias: Gelbe Tonne
icon: mdi:recycle
```
Thanks in advance.
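
In case it helps with reproducing: a minimal fetch of that URL with `requests` (the same library the ICS source uses) should surface whatever the underlying failure is, for example a TLS/certificate error. This is only a diagnostic sketch; the URL is the placeholder address from above and the user-agent header mirrors the one in the ICS source.

```python
import requests

HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
URL = (
    "https://online-portal.za-sws.de/WasteManagementSuedwestsachsen/"
    "WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync"
    "&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S"
)

try:
    r = requests.get(URL, headers=HEADERS, timeout=30)
    r.raise_for_status()
    print(r.text[:200])  # start of the ICS payload if the request works
except requests.exceptions.SSLError as err:
    print("TLS/certificate problem:", err)
except requests.exceptions.RequestException as err:
    print("Request failed:", err)
```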
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py]
1 import datetime
2 import logging
3 from pathlib import Path
4
5 import requests
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7 from waste_collection_schedule.service.ICS import ICS
8 from waste_collection_schedule.service.ICS_v1 import ICS_v1
9
10 TITLE = "ICS"
11 DESCRIPTION = "Source for ICS based schedules."
12 URL = None
13 TEST_CASES = {
14 "Dortmund, Dudenstr. 5": {
15 "url": "https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4"
16 },
17 "Leipzig, Sandgrubenweg 27": {
18 "url": "https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027"
19 },
20 "Ludwigsburg": {
21 "url": "https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics"
22 },
23 "Esslingen, Bahnhof": {
24 "url": "https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe"
25 },
26 "Test File": {
27 # Path is used here to allow to call the Source from any location.
28 # This is not required in a yaml configuration!
29 "file": str(Path(__file__).resolve().parents[1].joinpath("test/test.ics"))
30 },
31 "Test File (recurring)": {
32 # Path is used here to allow to call the Source from any location.
33 # This is not required in a yaml configuration!
34 "file": str(Path(__file__).resolve().parents[1].joinpath("test/recurring.ics"))
35 },
36 "München, Bahnstr. 11": {
37 "url": "https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}",
38 "version": 1,
39 },
40 "Buxtehude, Am Berg": {
41 "url": "https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics"
42 },
43 # "Hausmüllinfo: ASR Chemnitz": {
44 # "url": "https://asc.hausmuell.info/ics/ics.php",
45 # "method": "POST",
46 # "params": {
47 # "hidden_id_egebiet": 439087,
48 # "input_ort": "Chemnitz",
49 # "input_str": "Straße der Nationen",
50 # "input_hnr": 2,
51 # "hidden_send_btn": "ics",
52 # # "hiddenYear": 2021,
53 # "hidden_id_ort": 10,
54 # "hidden_id_ortsteil": 0,
55 # "hidden_id_str": 17814,
56 # "hidden_id_hnr": 5538100,
57 # "hidden_kalenderart": "privat",
58 # "showBinsBio": "on",
59 # "showBinsRest": "on",
60 # "showBinsRest_rc": "on",
61 # "showBinsPapier": "on",
62 # "showBinsOrganic": "on",
63 # "showBinsXmas": "on",
64 # "showBinsDsd": "on",
65 # "showBinsProb": "on",
66 # },
67 # "year_field": "hiddenYear",
68 # },
69 "Abfall Zollernalbkreis, Ebingen": {
70 "url": "https://www.abfallkalender-zak.de",
71 "params": {
72 "city": "2,3,4",
73 "street": "3",
74 "types[]": [
75 "restmuell",
76 "gelbersack",
77 "papiertonne",
78 "biomuell",
79 "gruenabfall",
80 "schadstoffsammlung",
81 "altpapiersammlung",
82 "schrottsammlung",
83 "weihnachtsbaeume",
84 "elektrosammlung",
85 ],
86 "go_ics": "Download",
87 },
88 "year_field": "year",
89 },
90 "Detmold": {
91 "url": "https://abfuhrkalender.detmold.de/icsmaker.php",
92 "method": "GET",
93 "params": {"strid": 338},
94 "year_field": "year",
95 },
96 "EAW Rheingau Taunus": {
97 "url": "https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429",
98 "split_at": ",",
99 },
100 "Recollect, Ottawa": {
101 "url": "https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics",
102 "split_at": "\\, [and ]*",
103 },
104 "Frankfurt am Main, Achenbachstrasse 3": {
105 "url": "https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics"
106 },
107 "Erlensee, Am Haspel": {
108 "url": "https://sperrmuell.erlensee.de/?type=reminder",
109 "method": "POST",
110 "params": {
111 "street": 8,
112 "eventType[]": [27, 23, 19, 20, 21, 24, 22, 25, 26],
113 "timeframe": 23,
114 "download": "ical",
115 },
116 },
117 }
118
119
120 HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
121 _LOGGER = logging.getLogger(__name__)
122
123
124 class Source:
125 def __init__(
126 self,
127 url=None,
128 file=None,
129 offset=None,
130 params=None,
131 year_field=None,
132 method="GET",
133 split_at=None,
134 version=2,
135 ):
136 self._url = url
137 self._file = file
138 if bool(self._url is not None) == bool(self._file is not None):
139 raise RuntimeError("Specify either url or file")
140 if version == 1:
141 self._ics = ICS_v1(offset=offset, split_at=split_at)
142 else:
143 self._ics = ICS(offset=offset, split_at=split_at)
144 self._params = params
145 self._year_field = year_field # replace this field in params with current year
146 self._method = method # The method to send the params
147
148 def fetch(self):
149 if self._url is not None:
150 if "{%Y}" in self._url or self._year_field is not None:
151 # url contains wildcard or params contains year field
152 now = datetime.datetime.now()
153
154 # replace year in url
155 url = self._url.replace("{%Y}", str(now.year))
156
157 # replace year in params
158 if self._year_field is not None:
159 if self._params is None:
160 raise RuntimeError("year_field specified without params")
161 self._params[self._year_field] = str(now.year)
162
163 entries = self.fetch_url(url, self._params)
164
165 if now.month == 12:
166 # also get data for next year if we are already in december
167 url = self._url.replace("{%Y}", str(now.year + 1))
168 if self._year_field is not None:
169 self._params[self._year_field] = str(now.year + 1)
170
171 try:
172 entries.extend(self.fetch_url(url, self._params))
173 except Exception:
174 # ignore if fetch for next year fails
175 pass
176 return entries
177 else:
178 return self.fetch_url(self._url, self._params)
179 elif self._file is not None:
180 return self.fetch_file(self._file)
181
182 def fetch_url(self, url, params=None):
183 # get ics file
184 if self._method == "GET":
185 r = requests.get(url, params=params, headers=HEADERS)
186 elif self._method == "POST":
187 r = requests.post(url, data=params, headers=HEADERS)
188 else:
189 raise RuntimeError(
190 "Error: unknown method to fetch URL, use GET or POST; got {self._method}"
191 )
192 r.encoding = "utf-8" # requests doesn't guess the encoding correctly
193
194 # check the return code
195 if not r.ok:
196 _LOGGER.error(
197 "Error: the response is not ok; need code 200, but got code %s"
198 % r.status_code
199 )
200 return []
201
202 return self._convert(r.text)
203
204 def fetch_file(self, file):
205 f = open(file)
206 return self._convert(f.read())
207
208 def _convert(self, data):
209 dates = self._ics.convert(data)
210
211 entries = []
212 for d in dates:
213 entries.append(Collection(d[0], d[1]))
214 return entries
215
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py
@@ -132,6 +132,7 @@
method="GET",
split_at=None,
version=2,
+ verify_ssl=True,
):
self._url = url
self._file = file
@@ -144,6 +145,7 @@
self._params = params
self._year_field = year_field # replace this field in params with current year
self._method = method # The method to send the params
+ self._verify_ssl = verify_ssl
def fetch(self):
if self._url is not None:
@@ -182,9 +184,13 @@
def fetch_url(self, url, params=None):
# get ics file
if self._method == "GET":
- r = requests.get(url, params=params, headers=HEADERS)
+ r = requests.get(
+ url, params=params, headers=HEADERS, verify=self._verify_ssl
+ )
elif self._method == "POST":
- r = requests.post(url, data=params, headers=HEADERS)
+ r = requests.post(
+ url, data=params, headers=HEADERS, verify=self._verify_ssl
+ )
else:
raise RuntimeError(
"Error: unknown method to fetch URL, use GET or POST; got {self._method}"
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n@@ -132,6 +132,7 @@\n method=\"GET\",\n split_at=None,\n version=2,\n+ verify_ssl=True,\n ):\n self._url = url\n self._file = file\n@@ -144,6 +145,7 @@\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n+ self._verify_ssl = verify_ssl\n \n def fetch(self):\n if self._url is not None:\n@@ -182,9 +184,13 @@\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n- r = requests.get(url, params=params, headers=HEADERS)\n+ r = requests.get(\n+ url, params=params, headers=HEADERS, verify=self._verify_ssl\n+ )\n elif self._method == \"POST\":\n- r = requests.post(url, data=params, headers=HEADERS)\n+ r = requests.post(\n+ url, data=params, headers=HEADERS, verify=self._verify_ssl\n+ )\n else:\n raise RuntimeError(\n \"Error: unknown method to fetch URL, use GET or POST; got {self._method}\"\n", "issue": "ZAS Abfallwirtschaft\nI'm trying to integrate the ZAS Abfallwirtschaft ICS. But i can't seem to get it to work.\r\nIs there anything i need to configure specifically?\r\n\r\nThe Website is\r\nhttps://www.za-sws.de/abfallkalender.cfm\r\n\r\nAnd the generated ICS is\r\nhttps://online-portal.za-sws.de/WasteManagementSuedwestsachsen/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S\r\n_(The adress used is a random one, not mine of course)_\r\n\r\nMy configuration looks like this:\r\n\r\n```\r\nwaste_collection_schedule:\r\n sources:\r\n - name: ics\r\n args:\r\n url: https://online-portal.za-sws.de/WasteManagementSuedwestsachsen/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S\r\n customize:\r\n - type: Restmuelltonne 02-woechentl.\r\n alias: Restm\u00fcll\r\n icon: mdi:trash-can\r\n - type: Biotonnenwaesche\r\n alias: Biotonnenw\u00e4sche\r\n show: false\r\n icon: mdi:flower-outline\r\n - type: Papiertonne 04-woechentl.\r\n alias: Papiertonne\r\n icon: mdi:trash-can-outline\r\n - type: LVP\r\n alias: Gelbe Tonne\r\n icon: mdi:recycle\r\n```\r\n\r\nThanks in advance.\n", "before_files": [{"content": "import datetime\nimport logging\nfrom pathlib import Path\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\nfrom waste_collection_schedule.service.ICS_v1 import ICS_v1\n\nTITLE = \"ICS\"\nDESCRIPTION = \"Source for ICS based schedules.\"\nURL = None\nTEST_CASES = {\n \"Dortmund, Dudenstr. 
5\": {\n \"url\": \"https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4\"\n },\n \"Leipzig, Sandgrubenweg 27\": {\n \"url\": \"https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027\"\n },\n \"Ludwigsburg\": {\n \"url\": \"https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics\"\n },\n \"Esslingen, Bahnhof\": {\n \"url\": \"https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe\"\n },\n \"Test File\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/test.ics\"))\n },\n \"Test File (recurring)\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/recurring.ics\"))\n },\n \"M\u00fcnchen, Bahnstr. 11\": {\n \"url\": \"https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}\",\n \"version\": 1,\n },\n \"Buxtehude, Am Berg\": {\n \"url\": \"https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics\"\n },\n # \"Hausm\u00fcllinfo: ASR Chemnitz\": {\n # \"url\": \"https://asc.hausmuell.info/ics/ics.php\",\n # \"method\": \"POST\",\n # \"params\": {\n # \"hidden_id_egebiet\": 439087,\n # \"input_ort\": \"Chemnitz\",\n # \"input_str\": \"Stra\u00dfe der Nationen\",\n # \"input_hnr\": 2,\n # \"hidden_send_btn\": \"ics\",\n # # \"hiddenYear\": 2021,\n # \"hidden_id_ort\": 10,\n # \"hidden_id_ortsteil\": 0,\n # \"hidden_id_str\": 17814,\n # \"hidden_id_hnr\": 5538100,\n # \"hidden_kalenderart\": \"privat\",\n # \"showBinsBio\": \"on\",\n # \"showBinsRest\": \"on\",\n # \"showBinsRest_rc\": \"on\",\n # \"showBinsPapier\": \"on\",\n # \"showBinsOrganic\": \"on\",\n # \"showBinsXmas\": \"on\",\n # \"showBinsDsd\": \"on\",\n # \"showBinsProb\": \"on\",\n # },\n # \"year_field\": \"hiddenYear\",\n # },\n \"Abfall Zollernalbkreis, Ebingen\": {\n \"url\": \"https://www.abfallkalender-zak.de\",\n \"params\": {\n \"city\": \"2,3,4\",\n \"street\": \"3\",\n \"types[]\": [\n \"restmuell\",\n \"gelbersack\",\n \"papiertonne\",\n \"biomuell\",\n \"gruenabfall\",\n \"schadstoffsammlung\",\n \"altpapiersammlung\",\n \"schrottsammlung\",\n \"weihnachtsbaeume\",\n \"elektrosammlung\",\n ],\n \"go_ics\": \"Download\",\n },\n \"year_field\": \"year\",\n },\n \"Detmold\": {\n \"url\": \"https://abfuhrkalender.detmold.de/icsmaker.php\",\n \"method\": \"GET\",\n \"params\": {\"strid\": 
338},\n \"year_field\": \"year\",\n },\n \"EAW Rheingau Taunus\": {\n \"url\": \"https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429\",\n \"split_at\": \",\",\n },\n \"Recollect, Ottawa\": {\n \"url\": \"https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics\",\n \"split_at\": \"\\\\, [and ]*\",\n },\n \"Frankfurt am Main, Achenbachstrasse 3\": {\n \"url\": \"https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics\"\n },\n \"Erlensee, Am Haspel\": {\n \"url\": \"https://sperrmuell.erlensee.de/?type=reminder\",\n \"method\": \"POST\",\n \"params\": {\n \"street\": 8,\n \"eventType[]\": [27, 23, 19, 20, 21, 24, 22, 25, 26],\n \"timeframe\": 23,\n \"download\": \"ical\",\n },\n },\n}\n\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"}\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self,\n url=None,\n file=None,\n offset=None,\n params=None,\n year_field=None,\n method=\"GET\",\n split_at=None,\n version=2,\n ):\n self._url = url\n self._file = file\n if bool(self._url is not None) == bool(self._file is not None):\n raise RuntimeError(\"Specify either url or file\")\n if version == 1:\n self._ics = ICS_v1(offset=offset, split_at=split_at)\n else:\n self._ics = ICS(offset=offset, split_at=split_at)\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n\n def fetch(self):\n if self._url is not None:\n if \"{%Y}\" in self._url or self._year_field is not None:\n # url contains wildcard or params contains year field\n now = datetime.datetime.now()\n\n # replace year in url\n url = self._url.replace(\"{%Y}\", str(now.year))\n\n # replace year in params\n if self._year_field is not None:\n if self._params is None:\n raise RuntimeError(\"year_field specified without params\")\n self._params[self._year_field] = str(now.year)\n\n entries = self.fetch_url(url, self._params)\n\n if now.month == 12:\n # also get data for next year if we are already in december\n url = self._url.replace(\"{%Y}\", str(now.year + 1))\n if self._year_field is not None:\n self._params[self._year_field] = str(now.year + 1)\n\n try:\n entries.extend(self.fetch_url(url, self._params))\n except Exception:\n # ignore if fetch for next year fails\n pass\n return entries\n else:\n return self.fetch_url(self._url, self._params)\n elif self._file is not None:\n return self.fetch_file(self._file)\n\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n r = requests.get(url, params=params, headers=HEADERS)\n elif self._method == \"POST\":\n r = requests.post(url, data=params, headers=HEADERS)\n else:\n raise RuntimeError(\n \"Error: unknown method to fetch URL, use GET or POST; got {self._method}\"\n )\n r.encoding = \"utf-8\" # requests doesn't guess the encoding correctly\n\n # check the return code\n if not r.ok:\n _LOGGER.error(\n \"Error: the response is not ok; need code 200, but got code %s\"\n % r.status_code\n )\n return []\n\n return self._convert(r.text)\n\n def fetch_file(self, file):\n f = open(file)\n return self._convert(f.read())\n\n def _convert(self, data):\n dates = self._ics.convert(data)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py"}]}
| 4,090 | 365 |
gh_patches_debug_38419
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__models-1540
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fluid/PaddleNLP/sequence_tagging_for_ner prediction issue
When running prediction with the NER model, we found that the labels assigned arbitrarily to the prediction data affect the final prediction results; after reducing the data somewhat (to around 100k, for example) the problem goes away, but when the dataset is large the predicted results for terms skew heavily toward that arbitrarily assigned class...
The defined term classes are only 1 and 0. At prediction time all labels were arbitrarily set to label=2; with a larger prediction dataset most term tags end up predicted as 2, while with a smaller dataset the predictions are normal.
Normal prediction results:

Problematic prediction results:

</issue>
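One way to narrow this down (a diagnostic sketch, assuming a model exported by the `train.py` below to its default `models/params_pass_0` directory) is to inspect which feed variables the saved inference model expects. If the gold `target` labels are among them, the exported graph depends on them at prediction time, which matches the behaviour described above.

```python
import paddle.fluid as fluid

place = fluid.CPUPlace()
exe = fluid.Executor(place)

# Load the exported model and print the inputs it requires.
[program, feed_names, fetch_targets] = fluid.io.load_inference_model(
    "models/params_pass_0", exe
)
print(feed_names)  # currently ['word', 'mark', 'target'] given how train.py exports it
```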
<code>
[start of fluid/PaddleNLP/sequence_tagging_for_ner/train.py]
1 from __future__ import print_function
2
3 import os
4 import math
5 import time
6 import numpy as np
7 import six
8
9 import paddle
10 import paddle.fluid as fluid
11
12 import reader
13 from network_conf import ner_net
14 from utils import logger, load_dict
15 from utils_extend import to_lodtensor, get_embedding
16
17
18 def test(exe, chunk_evaluator, inference_program, test_data, test_fetch_list,
19 place):
20 chunk_evaluator.reset()
21 for data in test_data():
22 word = to_lodtensor([x[0] for x in data], place)
23 mark = to_lodtensor([x[1] for x in data], place)
24 target = to_lodtensor([x[2] for x in data], place)
25 rets = exe.run(inference_program,
26 feed={"word": word,
27 "mark": mark,
28 "target": target},
29 fetch_list=test_fetch_list)
30 num_infer = np.array(rets[0])
31 num_label = np.array(rets[1])
32 num_correct = np.array(rets[2])
33 chunk_evaluator.update(num_infer[0], num_label[0], num_correct[0])
34 return chunk_evaluator.eval()
35
36
37 def main(train_data_file,
38 test_data_file,
39 vocab_file,
40 target_file,
41 emb_file,
42 model_save_dir,
43 num_passes,
44 use_gpu,
45 parallel,
46 batch_size=200):
47 if not os.path.exists(model_save_dir):
48 os.mkdir(model_save_dir)
49
50 word_dict = load_dict(vocab_file)
51 label_dict = load_dict(target_file)
52
53 word_vector_values = get_embedding(emb_file)
54
55 word_dict_len = len(word_dict)
56 label_dict_len = len(label_dict)
57
58 if "CE_MODE_X" in os.environ:
59 fluid.default_startup_program().random_seed = 110
60
61 avg_cost, feature_out, word, mark, target = ner_net(
62 word_dict_len, label_dict_len, parallel)
63
64 sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)
65 sgd_optimizer.minimize(avg_cost)
66
67 crf_decode = fluid.layers.crf_decoding(
68 input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))
69
70 (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
71 num_correct_chunks) = fluid.layers.chunk_eval(
72 input=crf_decode,
73 label=target,
74 chunk_scheme="IOB",
75 num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))
76 chunk_evaluator = fluid.metrics.ChunkEvaluator()
77
78 inference_program = fluid.default_main_program().clone(for_test=True)
79 test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks]
80
81 if "CE_MODE_X" not in os.environ:
82 train_reader = paddle.batch(
83 paddle.reader.shuffle(
84 reader.data_reader(train_data_file, word_dict, label_dict),
85 buf_size=20000),
86 batch_size=batch_size)
87 test_reader = paddle.batch(
88 paddle.reader.shuffle(
89 reader.data_reader(test_data_file, word_dict, label_dict),
90 buf_size=20000),
91 batch_size=batch_size)
92 else:
93 train_reader = paddle.batch(
94 reader.data_reader(train_data_file, word_dict, label_dict),
95 batch_size=batch_size)
96 test_reader = paddle.batch(
97 reader.data_reader(test_data_file, word_dict, label_dict),
98 batch_size=batch_size)
99
100 place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
101 feeder = fluid.DataFeeder(feed_list=[word, mark, target], place=place)
102 exe = fluid.Executor(place)
103
104 exe.run(fluid.default_startup_program())
105
106 embedding_name = 'emb'
107 embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor()
108 embedding_param.set(word_vector_values, place)
109
110 time_begin = time.time()
111 for pass_id in six.moves.xrange(num_passes):
112 chunk_evaluator.reset()
113 for batch_id, data in enumerate(train_reader()):
114 cost_var, nums_infer, nums_label, nums_correct = exe.run(
115 fluid.default_main_program(),
116 feed=feeder.feed(data),
117 fetch_list=[
118 avg_cost, num_infer_chunks, num_label_chunks,
119 num_correct_chunks
120 ])
121 if batch_id % 5 == 0:
122 print("Pass " + str(pass_id) + ", Batch " + str(batch_id) +
123 ", Cost " + str(cost_var[0]))
124 chunk_evaluator.update(nums_infer, nums_label, nums_correct)
125 pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval()
126 print("[TrainSet] pass_id:" + str(pass_id) + " pass_precision:" + str(
127 pass_precision) + " pass_recall:" + str(pass_recall) +
128 " pass_f1_score:" + str(pass_f1_score))
129
130 test_pass_precision, test_pass_recall, test_pass_f1_score = test(
131 exe, chunk_evaluator, inference_program, test_reader,
132 test_fetch_list, place)
133 print("[TestSet] pass_id:" + str(pass_id) + " pass_precision:" + str(
134 test_pass_precision) + " pass_recall:" + str(test_pass_recall) +
135 " pass_f1_score:" + str(test_pass_f1_score))
136
137 save_dirname = os.path.join(model_save_dir, "params_pass_%d" % pass_id)
138 fluid.io.save_inference_model(save_dirname, ['word', 'mark', 'target'],
139 crf_decode, exe)
140
141 if "CE_MODE_X" in os.environ:
142 print("kpis train_precision %f" % pass_precision)
143 print("kpis test_precision %f" % test_pass_precision)
144 print("kpis train_duration %f" % (time.time() - time_begin))
145
146
147 if __name__ == "__main__":
148 main(
149 train_data_file="data/train",
150 test_data_file="data/test",
151 vocab_file="data/vocab.txt",
152 target_file="data/target.txt",
153 emb_file="data/wordVectors.txt",
154 model_save_dir="models",
155 num_passes=2000,
156 use_gpu=False,
157 parallel=False)
158
[end of fluid/PaddleNLP/sequence_tagging_for_ner/train.py]
[start of fluid/PaddleNLP/sequence_tagging_for_ner/infer.py]
1 from __future__ import print_function
2
3 import numpy as np
4 import six
5
6 import paddle
7 import paddle.fluid as fluid
8
9 from network_conf import ner_net
10 import reader
11 from utils import load_dict, load_reverse_dict
12 from utils_extend import to_lodtensor
13
14
15 def infer(model_path, batch_size, test_data_file, vocab_file, target_file,
16 use_gpu):
17 """
18 use the model under model_path to predict the test data, the result will be printed on the screen
19
20 return nothing
21 """
22 word_dict = load_dict(vocab_file)
23 word_reverse_dict = load_reverse_dict(vocab_file)
24
25 label_dict = load_dict(target_file)
26 label_reverse_dict = load_reverse_dict(target_file)
27
28 test_data = paddle.batch(
29 reader.data_reader(test_data_file, word_dict, label_dict),
30 batch_size=batch_size)
31 place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
32 exe = fluid.Executor(place)
33
34 inference_scope = fluid.core.Scope()
35 with fluid.scope_guard(inference_scope):
36 [inference_program, feed_target_names,
37 fetch_targets] = fluid.io.load_inference_model(model_path, exe)
38 for data in test_data():
39 word = to_lodtensor([x[0] for x in data], place)
40 mark = to_lodtensor([x[1] for x in data], place)
41 target = to_lodtensor([x[2] for x in data], place)
42 crf_decode = exe.run(
43 inference_program,
44 feed={"word": word,
45 "mark": mark,
46 "target": target},
47 fetch_list=fetch_targets,
48 return_numpy=False)
49 lod_info = (crf_decode[0].lod())[0]
50 np_data = np.array(crf_decode[0])
51 assert len(data) == len(lod_info) - 1
52 for sen_index in six.moves.xrange(len(data)):
53 assert len(data[sen_index][0]) == lod_info[
54 sen_index + 1] - lod_info[sen_index]
55 word_index = 0
56 for tag_index in six.moves.xrange(lod_info[sen_index],
57 lod_info[sen_index + 1]):
58 word = word_reverse_dict[data[sen_index][0][word_index]]
59 gold_tag = label_reverse_dict[data[sen_index][2][
60 word_index]]
61 tag = label_reverse_dict[np_data[tag_index][0]]
62 print(word + "\t" + gold_tag + "\t" + tag)
63 word_index += 1
64 print("")
65
66
67 if __name__ == "__main__":
68 infer(
69 model_path="models/params_pass_0",
70 batch_size=6,
71 test_data_file="data/test",
72 vocab_file="data/vocab.txt",
73 target_file="data/target.txt",
74 use_gpu=False)
75
[end of fluid/PaddleNLP/sequence_tagging_for_ner/infer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py b/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py
--- a/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py
+++ b/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py
@@ -38,12 +38,10 @@
for data in test_data():
word = to_lodtensor([x[0] for x in data], place)
mark = to_lodtensor([x[1] for x in data], place)
- target = to_lodtensor([x[2] for x in data], place)
crf_decode = exe.run(
inference_program,
feed={"word": word,
- "mark": mark,
- "target": target},
+ "mark": mark},
fetch_list=fetch_targets,
return_numpy=False)
lod_info = (crf_decode[0].lod())[0]
diff --git a/fluid/PaddleNLP/sequence_tagging_for_ner/train.py b/fluid/PaddleNLP/sequence_tagging_for_ner/train.py
--- a/fluid/PaddleNLP/sequence_tagging_for_ner/train.py
+++ b/fluid/PaddleNLP/sequence_tagging_for_ner/train.py
@@ -61,22 +61,21 @@
avg_cost, feature_out, word, mark, target = ner_net(
word_dict_len, label_dict_len, parallel)
- sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)
- sgd_optimizer.minimize(avg_cost)
-
crf_decode = fluid.layers.crf_decoding(
input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))
(precision, recall, f1_score, num_infer_chunks, num_label_chunks,
- num_correct_chunks) = fluid.layers.chunk_eval(
- input=crf_decode,
- label=target,
- chunk_scheme="IOB",
- num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))
+ num_correct_chunks) = fluid.layers.chunk_eval(
+ input=crf_decode,
+ label=target,
+ chunk_scheme="IOB",
+ num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))
chunk_evaluator = fluid.metrics.ChunkEvaluator()
inference_program = fluid.default_main_program().clone(for_test=True)
test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks]
+ sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)
+ sgd_optimizer.minimize(avg_cost)
if "CE_MODE_X" not in os.environ:
train_reader = paddle.batch(
@@ -135,7 +134,7 @@
" pass_f1_score:" + str(test_pass_f1_score))
save_dirname = os.path.join(model_save_dir, "params_pass_%d" % pass_id)
- fluid.io.save_inference_model(save_dirname, ['word', 'mark', 'target'],
+ fluid.io.save_inference_model(save_dirname, ['word', 'mark'],
crf_decode, exe)
if "CE_MODE_X" in os.environ:
|
{"golden_diff": "diff --git a/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py b/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py\n--- a/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py\n+++ b/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py\n@@ -38,12 +38,10 @@\n for data in test_data():\n word = to_lodtensor([x[0] for x in data], place)\n mark = to_lodtensor([x[1] for x in data], place)\n- target = to_lodtensor([x[2] for x in data], place)\n crf_decode = exe.run(\n inference_program,\n feed={\"word\": word,\n- \"mark\": mark,\n- \"target\": target},\n+ \"mark\": mark},\n fetch_list=fetch_targets,\n return_numpy=False)\n lod_info = (crf_decode[0].lod())[0]\ndiff --git a/fluid/PaddleNLP/sequence_tagging_for_ner/train.py b/fluid/PaddleNLP/sequence_tagging_for_ner/train.py\n--- a/fluid/PaddleNLP/sequence_tagging_for_ner/train.py\n+++ b/fluid/PaddleNLP/sequence_tagging_for_ner/train.py\n@@ -61,22 +61,21 @@\n avg_cost, feature_out, word, mark, target = ner_net(\n word_dict_len, label_dict_len, parallel)\n \n- sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)\n- sgd_optimizer.minimize(avg_cost)\n-\n crf_decode = fluid.layers.crf_decoding(\n input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))\n \n (precision, recall, f1_score, num_infer_chunks, num_label_chunks,\n- num_correct_chunks) = fluid.layers.chunk_eval(\n- input=crf_decode,\n- label=target,\n- chunk_scheme=\"IOB\",\n- num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))\n+ num_correct_chunks) = fluid.layers.chunk_eval(\n+ input=crf_decode,\n+ label=target,\n+ chunk_scheme=\"IOB\",\n+ num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))\n chunk_evaluator = fluid.metrics.ChunkEvaluator()\n \n inference_program = fluid.default_main_program().clone(for_test=True)\n test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks]\n+ sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)\n+ sgd_optimizer.minimize(avg_cost)\n \n if \"CE_MODE_X\" not in os.environ:\n train_reader = paddle.batch(\n@@ -135,7 +134,7 @@\n \" pass_f1_score:\" + str(test_pass_f1_score))\n \n save_dirname = os.path.join(model_save_dir, \"params_pass_%d\" % pass_id)\n- fluid.io.save_inference_model(save_dirname, ['word', 'mark', 'target'],\n+ fluid.io.save_inference_model(save_dirname, ['word', 'mark'],\n crf_decode, exe)\n \n if \"CE_MODE_X\" in os.environ:\n", "issue": "fluid/PaddleNLP/sequence_tagging_for_ner \u9884\u6d4b\u95ee\u9898\n\u7528ner\u6a21\u578b\u5728\u9884\u6d4b\u7684\u65f6\u5019\u53d1\u73b0\uff0c\u7ed9\u9884\u6d4b\u6570\u636e\u968f\u610f\u6807\u8bb0\u7684label\u4f1a\u5f71\u54cd\u5230\u6700\u7ec8\u7684\u9884\u6d4b\u7ed3\u679c\uff1b\u5f53\u628a\u6570\u636e\u51cf\u5c11\u4e00\u4e9b(\u6bd4\u598210w)\u540e\uff0c\u8fd9\u4e2a\u95ee\u9898\u5c31\u6ca1\u6709\u4e86\uff0c\u6570\u636e\u6bd4\u8f83\u5927\u7684\u65f6\u5019term\u7684\u9884\u6d4b\u7ed3\u679c\u5c31\u4f1a\u5f80\u8fd9\u4e2a\u968f\u610f\u6807\u8bb0\u7684\u7c7b\u522b\u5927\u91cf\u503e\u659c\u3002\u3002\u3002\r\n\r\n\u5b9a\u4e49term\u7684\u7c7b\u522b\u53ea\u67091\u548c0\uff0c \u9884\u6d4b\u662f\u65f6\u5019\u4efb\u610f\u5b9a\u4e49\u6240\u6709label=2\uff0c\u53d1\u73b0\u7528\u8f83\u5927\u9884\u6d4b\u6570\u636e\u96c6\u65f6\u5019\uff0c\u5927\u90e8\u5206term\u7684\u6807\u7b7e\u88ab\u9884\u6d4b\u4e3a2\uff0c\u6570\u636e\u91cf\u8f83\u5c11\u65f6\u5019\uff0c\u5219\u662f\u6b63\u5e38\u7684\r\n\r\n\u9884\u6d4b\u7ed3\u679c\u6b63\u5e38\uff1a\r\n\r\n\r\n\u9884\u6d4b\u7ed3\u679c\u51fa\u73b0\u95ee\u9898\uff1a\r\n\r\n\n", "before_files": [{"content": "from __future__ import 
print_function\n\nimport os\nimport math\nimport time\nimport numpy as np\nimport six\n\nimport paddle\nimport paddle.fluid as fluid\n\nimport reader\nfrom network_conf import ner_net\nfrom utils import logger, load_dict\nfrom utils_extend import to_lodtensor, get_embedding\n\n\ndef test(exe, chunk_evaluator, inference_program, test_data, test_fetch_list,\n place):\n chunk_evaluator.reset()\n for data in test_data():\n word = to_lodtensor([x[0] for x in data], place)\n mark = to_lodtensor([x[1] for x in data], place)\n target = to_lodtensor([x[2] for x in data], place)\n rets = exe.run(inference_program,\n feed={\"word\": word,\n \"mark\": mark,\n \"target\": target},\n fetch_list=test_fetch_list)\n num_infer = np.array(rets[0])\n num_label = np.array(rets[1])\n num_correct = np.array(rets[2])\n chunk_evaluator.update(num_infer[0], num_label[0], num_correct[0])\n return chunk_evaluator.eval()\n\n\ndef main(train_data_file,\n test_data_file,\n vocab_file,\n target_file,\n emb_file,\n model_save_dir,\n num_passes,\n use_gpu,\n parallel,\n batch_size=200):\n if not os.path.exists(model_save_dir):\n os.mkdir(model_save_dir)\n\n word_dict = load_dict(vocab_file)\n label_dict = load_dict(target_file)\n\n word_vector_values = get_embedding(emb_file)\n\n word_dict_len = len(word_dict)\n label_dict_len = len(label_dict)\n\n if \"CE_MODE_X\" in os.environ:\n fluid.default_startup_program().random_seed = 110\n\n avg_cost, feature_out, word, mark, target = ner_net(\n word_dict_len, label_dict_len, parallel)\n\n sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)\n sgd_optimizer.minimize(avg_cost)\n\n crf_decode = fluid.layers.crf_decoding(\n input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))\n\n (precision, recall, f1_score, num_infer_chunks, num_label_chunks,\n num_correct_chunks) = fluid.layers.chunk_eval(\n input=crf_decode,\n label=target,\n chunk_scheme=\"IOB\",\n num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))\n chunk_evaluator = fluid.metrics.ChunkEvaluator()\n\n inference_program = fluid.default_main_program().clone(for_test=True)\n test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks]\n\n if \"CE_MODE_X\" not in os.environ:\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.data_reader(train_data_file, word_dict, label_dict),\n buf_size=20000),\n batch_size=batch_size)\n test_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.data_reader(test_data_file, word_dict, label_dict),\n buf_size=20000),\n batch_size=batch_size)\n else:\n train_reader = paddle.batch(\n reader.data_reader(train_data_file, word_dict, label_dict),\n batch_size=batch_size)\n test_reader = paddle.batch(\n reader.data_reader(test_data_file, word_dict, label_dict),\n batch_size=batch_size)\n\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n feeder = fluid.DataFeeder(feed_list=[word, mark, target], place=place)\n exe = fluid.Executor(place)\n\n exe.run(fluid.default_startup_program())\n\n embedding_name = 'emb'\n embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor()\n embedding_param.set(word_vector_values, place)\n\n time_begin = time.time()\n for pass_id in six.moves.xrange(num_passes):\n chunk_evaluator.reset()\n for batch_id, data in enumerate(train_reader()):\n cost_var, nums_infer, nums_label, nums_correct = exe.run(\n fluid.default_main_program(),\n feed=feeder.feed(data),\n fetch_list=[\n avg_cost, num_infer_chunks, num_label_chunks,\n num_correct_chunks\n ])\n if batch_id % 5 == 0:\n print(\"Pass \" + str(pass_id) + 
\", Batch \" + str(batch_id) +\n \", Cost \" + str(cost_var[0]))\n chunk_evaluator.update(nums_infer, nums_label, nums_correct)\n pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval()\n print(\"[TrainSet] pass_id:\" + str(pass_id) + \" pass_precision:\" + str(\n pass_precision) + \" pass_recall:\" + str(pass_recall) +\n \" pass_f1_score:\" + str(pass_f1_score))\n\n test_pass_precision, test_pass_recall, test_pass_f1_score = test(\n exe, chunk_evaluator, inference_program, test_reader,\n test_fetch_list, place)\n print(\"[TestSet] pass_id:\" + str(pass_id) + \" pass_precision:\" + str(\n test_pass_precision) + \" pass_recall:\" + str(test_pass_recall) +\n \" pass_f1_score:\" + str(test_pass_f1_score))\n\n save_dirname = os.path.join(model_save_dir, \"params_pass_%d\" % pass_id)\n fluid.io.save_inference_model(save_dirname, ['word', 'mark', 'target'],\n crf_decode, exe)\n\n if \"CE_MODE_X\" in os.environ:\n print(\"kpis\ttrain_precision\t%f\" % pass_precision)\n print(\"kpis\ttest_precision\t%f\" % test_pass_precision)\n print(\"kpis\ttrain_duration\t%f\" % (time.time() - time_begin))\n\n\nif __name__ == \"__main__\":\n main(\n train_data_file=\"data/train\",\n test_data_file=\"data/test\",\n vocab_file=\"data/vocab.txt\",\n target_file=\"data/target.txt\",\n emb_file=\"data/wordVectors.txt\",\n model_save_dir=\"models\",\n num_passes=2000,\n use_gpu=False,\n parallel=False)\n", "path": "fluid/PaddleNLP/sequence_tagging_for_ner/train.py"}, {"content": "from __future__ import print_function\n\nimport numpy as np\nimport six\n\nimport paddle\nimport paddle.fluid as fluid\n\nfrom network_conf import ner_net\nimport reader\nfrom utils import load_dict, load_reverse_dict\nfrom utils_extend import to_lodtensor\n\n\ndef infer(model_path, batch_size, test_data_file, vocab_file, target_file,\n use_gpu):\n \"\"\"\n use the model under model_path to predict the test data, the result will be printed on the screen\n\n return nothing\n \"\"\"\n word_dict = load_dict(vocab_file)\n word_reverse_dict = load_reverse_dict(vocab_file)\n\n label_dict = load_dict(target_file)\n label_reverse_dict = load_reverse_dict(target_file)\n\n test_data = paddle.batch(\n reader.data_reader(test_data_file, word_dict, label_dict),\n batch_size=batch_size)\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n inference_scope = fluid.core.Scope()\n with fluid.scope_guard(inference_scope):\n [inference_program, feed_target_names,\n fetch_targets] = fluid.io.load_inference_model(model_path, exe)\n for data in test_data():\n word = to_lodtensor([x[0] for x in data], place)\n mark = to_lodtensor([x[1] for x in data], place)\n target = to_lodtensor([x[2] for x in data], place)\n crf_decode = exe.run(\n inference_program,\n feed={\"word\": word,\n \"mark\": mark,\n \"target\": target},\n fetch_list=fetch_targets,\n return_numpy=False)\n lod_info = (crf_decode[0].lod())[0]\n np_data = np.array(crf_decode[0])\n assert len(data) == len(lod_info) - 1\n for sen_index in six.moves.xrange(len(data)):\n assert len(data[sen_index][0]) == lod_info[\n sen_index + 1] - lod_info[sen_index]\n word_index = 0\n for tag_index in six.moves.xrange(lod_info[sen_index],\n lod_info[sen_index + 1]):\n word = word_reverse_dict[data[sen_index][0][word_index]]\n gold_tag = label_reverse_dict[data[sen_index][2][\n word_index]]\n tag = label_reverse_dict[np_data[tag_index][0]]\n print(word + \"\\t\" + gold_tag + \"\\t\" + tag)\n word_index += 1\n print(\"\")\n\n\nif __name__ == \"__main__\":\n infer(\n 
model_path=\"models/params_pass_0\",\n batch_size=6,\n test_data_file=\"data/test\",\n vocab_file=\"data/vocab.txt\",\n target_file=\"data/target.txt\",\n use_gpu=False)\n", "path": "fluid/PaddleNLP/sequence_tagging_for_ner/infer.py"}]}
| 3,323 | 729 |
gh_patches_debug_21223
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-2060
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add dogpile.cache to support libraries
We need to update https://github.com/DataDog/dd-trace-py/blob/master/docs/index.rst after #1123
</issue>
<code>
[start of ddtrace/contrib/dogpile_cache/__init__.py]
1 """
2 Instrument dogpile.cache__ to report all cached lookups.
3
4 This will add spans around the calls to your cache backend (eg. redis, memory,
5 etc). The spans will also include the following tags:
6
7 - key/keys: The key(s) dogpile passed to your backend. Note that this will be
8 the output of the region's ``function_key_generator``, but before any key
9 mangling is applied (ie. the region's ``key_mangler``).
10 - region: Name of the region.
11 - backend: Name of the backend class.
12 - hit: If the key was found in the cache.
13 - expired: If the key is expired. This is only relevant if the key was found.
14
15 While cache tracing will generally already have keys in tags, some caching
16 setups will not have useful tag values - such as when you're using consistent
17 hashing with memcached - the key(s) will appear as a mangled hash.
18 ::
19
20 # Patch before importing dogpile.cache
21 from ddtrace import patch
22 patch(dogpile_cache=True)
23
24 from dogpile.cache import make_region
25
26 region = make_region().configure(
27 "dogpile.cache.pylibmc",
28 expiration_time=3600,
29 arguments={"url": ["127.0.0.1"]},
30 )
31
32 @region.cache_on_arguments()
33 def hello(name):
34 # Some complicated, slow calculation
35 return "Hello, {}".format(name)
36
37 .. __: https://dogpilecache.sqlalchemy.org/
38 """
39 from ...utils.importlib import require_modules
40
41
42 required_modules = ["dogpile.cache"]
43
44 with require_modules(required_modules) as missing_modules:
45 if not missing_modules:
46 from .patch import patch
47 from .patch import unpatch
48
49 __all__ = ["patch", "unpatch"]
50
[end of ddtrace/contrib/dogpile_cache/__init__.py]
[start of ddtrace/monkey.py]
1 """Patch libraries to be automatically instrumented.
2
3 It can monkey patch supported standard libraries and third party modules.
4 A patched module will automatically report spans with its default configuration.
5
6 A library instrumentation can be configured (for instance, to report as another service)
7 using Pin. For that, check its documentation.
8 """
9 import importlib
10 import os
11 import sys
12 import threading
13
14 from ddtrace.vendor.wrapt.importer import when_imported
15
16 from .internal.logger import get_logger
17 from .settings import config
18 from .utils import formats
19
20
21 log = get_logger(__name__)
22
23 # Default set of modules to automatically patch or not
24 PATCH_MODULES = {
25 "asyncio": True,
26 "boto": True,
27 "botocore": True,
28 "bottle": False,
29 "cassandra": True,
30 "celery": True,
31 "consul": True,
32 "django": True,
33 "elasticsearch": True,
34 "algoliasearch": True,
35 "futures": False, # experimental propagation
36 "grpc": True,
37 "mongoengine": True,
38 "mysql": True,
39 "mysqldb": True,
40 "pymysql": True,
41 "psycopg": True,
42 "pylibmc": True,
43 "pymemcache": True,
44 "pymongo": True,
45 "redis": True,
46 "rediscluster": True,
47 "requests": True,
48 "sanic": True,
49 "sqlalchemy": False, # Prefer DB client instrumentation
50 "sqlite3": True,
51 "aiohttp": True, # requires asyncio (Python 3.4+)
52 "aiopg": True,
53 "aiobotocore": False,
54 "httplib": False,
55 "vertica": True,
56 "molten": True,
57 "jinja2": True,
58 "mako": True,
59 "flask": True,
60 "kombu": False,
61 "starlette": True,
62 # Ignore some web framework integrations that might be configured explicitly in code
63 "falcon": False,
64 "pylons": False,
65 "pyramid": False,
66 # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true
67 "logging": config.logs_injection,
68 "pynamodb": True,
69 "pyodbc": True,
70 "fastapi": True,
71 }
72
73 _LOCK = threading.Lock()
74 _PATCHED_MODULES = set()
75
76 # Modules which are patched on first use
77 # DEV: These modules are patched when the user first imports them, rather than
78 # explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`
79 # DEV: This ensures we do not patch a module until it is needed
80 # DEV: <contrib name> => <list of module names that trigger a patch>
81 _PATCH_ON_IMPORT = {
82 "aiohttp": ("aiohttp",),
83 "aiobotocore": ("aiobotocore",),
84 "celery": ("celery",),
85 "flask": ("flask, "),
86 "gevent": ("gevent",),
87 "requests": ("requests",),
88 "botocore": ("botocore",),
89 "elasticsearch": ("elasticsearch",),
90 "pynamodb": ("pynamodb",),
91 }
92
93
94 class PatchException(Exception):
95 """Wraps regular `Exception` class when patching modules"""
96
97 pass
98
99
100 class ModuleNotFoundException(PatchException):
101 pass
102
103
104 def _on_import_factory(module, raise_errors=True):
105 """Factory to create an import hook for the provided module name"""
106
107 def on_import(hook):
108 # Import and patch module
109 path = "ddtrace.contrib.%s" % module
110 imported_module = importlib.import_module(path)
111 imported_module.patch()
112
113 return on_import
114
115
116 def patch_all(**patch_modules):
117 """Automatically patches all available modules.
118
119 In addition to ``patch_modules``, an override can be specified via an
120 environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.
121
122 ``patch_modules`` have the highest precedence for overriding.
123
124 :param dict patch_modules: Override whether particular modules are patched or not.
125
126 >>> patch_all(redis=False, cassandra=False)
127 """
128 modules = PATCH_MODULES.copy()
129
130 # The enabled setting can be overridden by environment variables
131 for module, enabled in modules.items():
132 env_var = "DD_TRACE_%s_ENABLED" % module.upper()
133 if env_var not in os.environ:
134 continue
135
136 override_enabled = formats.asbool(os.environ[env_var])
137 modules[module] = override_enabled
138
139 # Arguments take precedence over the environment and the defaults.
140 modules.update(patch_modules)
141
142 patch(raise_errors=False, **modules)
143
144
145 def patch(raise_errors=True, **patch_modules):
146 """Patch only a set of given modules.
147
148 :param bool raise_errors: Raise error if one patch fail.
149 :param dict patch_modules: List of modules to patch.
150
151 >>> patch(psycopg=True, elasticsearch=True)
152 """
153 modules = [m for (m, should_patch) in patch_modules.items() if should_patch]
154 for module in modules:
155 if module in _PATCH_ON_IMPORT:
156 # If the module has already been imported then patch immediately
157 if module in sys.modules:
158 patch_module(module, raise_errors=raise_errors)
159
160 # Otherwise, add a hook to patch when it is imported for the first time
161 else:
162 # Use factory to create handler to close over `module` and `raise_errors` values from this loop
163 when_imported(module)(_on_import_factory(module, raise_errors))
164
165 # manually add module to patched modules
166 with _LOCK:
167 _PATCHED_MODULES.add(module)
168 else:
169 patch_module(module, raise_errors=raise_errors)
170
171 patched_modules = get_patched_modules()
172 log.info(
173 "patched %s/%s modules (%s)",
174 len(patched_modules),
175 len(modules),
176 ",".join(patched_modules),
177 )
178
179
180 def patch_module(module, raise_errors=True):
181 """Patch a single module
182
183 Returns if the module got properly patched.
184 """
185 try:
186 return _patch_module(module)
187 except ModuleNotFoundException:
188 if raise_errors:
189 raise
190 return False
191 except Exception:
192 if raise_errors:
193 raise
194 log.debug("failed to patch %s", module, exc_info=True)
195 return False
196
197
198 def get_patched_modules():
199 """Get the list of patched modules"""
200 with _LOCK:
201 return sorted(_PATCHED_MODULES)
202
203
204 def _patch_module(module):
205 """_patch_module will attempt to monkey patch the module.
206
207 Returns if the module got patched.
208 Can also raise errors if it fails.
209 """
210 path = "ddtrace.contrib.%s" % module
211 with _LOCK:
212 if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:
213 log.debug("already patched: %s", path)
214 return False
215
216 try:
217 imported_module = importlib.import_module(path)
218 except ImportError:
219 # if the import fails, the integration is not available
220 raise PatchException("integration '%s' not available" % path)
221 else:
222 # if patch() is not available in the module, it means
223 # that the library is not installed in the environment
224 if not hasattr(imported_module, "patch"):
225 raise ModuleNotFoundException("module '%s' not installed" % module)
226
227 imported_module.patch()
228 _PATCHED_MODULES.add(module)
229 return True
230
[end of ddtrace/monkey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/contrib/dogpile_cache/__init__.py b/ddtrace/contrib/dogpile_cache/__init__.py
--- a/ddtrace/contrib/dogpile_cache/__init__.py
+++ b/ddtrace/contrib/dogpile_cache/__init__.py
@@ -1,12 +1,12 @@
"""
Instrument dogpile.cache__ to report all cached lookups.
-This will add spans around the calls to your cache backend (eg. redis, memory,
+This will add spans around the calls to your cache backend (e.g. redis, memory,
etc). The spans will also include the following tags:
- key/keys: The key(s) dogpile passed to your backend. Note that this will be
the output of the region's ``function_key_generator``, but before any key
- mangling is applied (ie. the region's ``key_mangler``).
+ mangling is applied (i.e. the region's ``key_mangler``).
- region: Name of the region.
- backend: Name of the backend class.
- hit: If the key was found in the cache.
diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py
--- a/ddtrace/monkey.py
+++ b/ddtrace/monkey.py
@@ -68,6 +68,7 @@
"pynamodb": True,
"pyodbc": True,
"fastapi": True,
+ "dogpile_cache": True,
}
_LOCK = threading.Lock()
|
{"golden_diff": "diff --git a/ddtrace/contrib/dogpile_cache/__init__.py b/ddtrace/contrib/dogpile_cache/__init__.py\n--- a/ddtrace/contrib/dogpile_cache/__init__.py\n+++ b/ddtrace/contrib/dogpile_cache/__init__.py\n@@ -1,12 +1,12 @@\n \"\"\"\n Instrument dogpile.cache__ to report all cached lookups.\n \n-This will add spans around the calls to your cache backend (eg. redis, memory,\n+This will add spans around the calls to your cache backend (e.g. redis, memory,\n etc). The spans will also include the following tags:\n \n - key/keys: The key(s) dogpile passed to your backend. Note that this will be\n the output of the region's ``function_key_generator``, but before any key\n- mangling is applied (ie. the region's ``key_mangler``).\n+ mangling is applied (i.e. the region's ``key_mangler``).\n - region: Name of the region.\n - backend: Name of the backend class.\n - hit: If the key was found in the cache.\ndiff --git a/ddtrace/monkey.py b/ddtrace/monkey.py\n--- a/ddtrace/monkey.py\n+++ b/ddtrace/monkey.py\n@@ -68,6 +68,7 @@\n \"pynamodb\": True,\n \"pyodbc\": True,\n \"fastapi\": True,\n+ \"dogpile_cache\": True,\n }\n \n _LOCK = threading.Lock()\n", "issue": "Add dogpile.cache to support libraries\nWe need to update https://github.com/DataDog/dd-trace-py/blob/master/docs/index.rst after #1123 \n", "before_files": [{"content": "\"\"\"\nInstrument dogpile.cache__ to report all cached lookups.\n\nThis will add spans around the calls to your cache backend (eg. redis, memory,\netc). The spans will also include the following tags:\n\n- key/keys: The key(s) dogpile passed to your backend. Note that this will be\n the output of the region's ``function_key_generator``, but before any key\n mangling is applied (ie. the region's ``key_mangler``).\n- region: Name of the region.\n- backend: Name of the backend class.\n- hit: If the key was found in the cache.\n- expired: If the key is expired. This is only relevant if the key was found.\n\nWhile cache tracing will generally already have keys in tags, some caching\nsetups will not have useful tag values - such as when you're using consistent\nhashing with memcached - the key(s) will appear as a mangled hash.\n::\n\n # Patch before importing dogpile.cache\n from ddtrace import patch\n patch(dogpile_cache=True)\n\n from dogpile.cache import make_region\n\n region = make_region().configure(\n \"dogpile.cache.pylibmc\",\n expiration_time=3600,\n arguments={\"url\": [\"127.0.0.1\"]},\n )\n\n @region.cache_on_arguments()\n def hello(name):\n # Some complicated, slow calculation\n return \"Hello, {}\".format(name)\n\n.. __: https://dogpilecache.sqlalchemy.org/\n\"\"\"\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = [\"dogpile.cache\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .patch import patch\n from .patch import unpatch\n\n __all__ = [\"patch\", \"unpatch\"]\n", "path": "ddtrace/contrib/dogpile_cache/__init__.py"}, {"content": "\"\"\"Patch libraries to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. 
For that, check its documentation.\n\"\"\"\nimport importlib\nimport os\nimport sys\nimport threading\n\nfrom ddtrace.vendor.wrapt.importer import when_imported\n\nfrom .internal.logger import get_logger\nfrom .settings import config\nfrom .utils import formats\n\n\nlog = get_logger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n \"asyncio\": True,\n \"boto\": True,\n \"botocore\": True,\n \"bottle\": False,\n \"cassandra\": True,\n \"celery\": True,\n \"consul\": True,\n \"django\": True,\n \"elasticsearch\": True,\n \"algoliasearch\": True,\n \"futures\": False, # experimental propagation\n \"grpc\": True,\n \"mongoengine\": True,\n \"mysql\": True,\n \"mysqldb\": True,\n \"pymysql\": True,\n \"psycopg\": True,\n \"pylibmc\": True,\n \"pymemcache\": True,\n \"pymongo\": True,\n \"redis\": True,\n \"rediscluster\": True,\n \"requests\": True,\n \"sanic\": True,\n \"sqlalchemy\": False, # Prefer DB client instrumentation\n \"sqlite3\": True,\n \"aiohttp\": True, # requires asyncio (Python 3.4+)\n \"aiopg\": True,\n \"aiobotocore\": False,\n \"httplib\": False,\n \"vertica\": True,\n \"molten\": True,\n \"jinja2\": True,\n \"mako\": True,\n \"flask\": True,\n \"kombu\": False,\n \"starlette\": True,\n # Ignore some web framework integrations that might be configured explicitly in code\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true\n \"logging\": config.logs_injection,\n \"pynamodb\": True,\n \"pyodbc\": True,\n \"fastapi\": True,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n# Modules which are patched on first use\n# DEV: These modules are patched when the user first imports them, rather than\n# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`\n# DEV: This ensures we do not patch a module until it is needed\n# DEV: <contrib name> => <list of module names that trigger a patch>\n_PATCH_ON_IMPORT = {\n \"aiohttp\": (\"aiohttp\",),\n \"aiobotocore\": (\"aiobotocore\",),\n \"celery\": (\"celery\",),\n \"flask\": (\"flask, \"),\n \"gevent\": (\"gevent\",),\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\"elasticsearch\",),\n \"pynamodb\": (\"pynamodb\",),\n}\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n\n pass\n\n\nclass ModuleNotFoundException(PatchException):\n pass\n\n\ndef _on_import_factory(module, raise_errors=True):\n \"\"\"Factory to create an import hook for the provided module name\"\"\"\n\n def on_import(hook):\n # Import and patch module\n path = \"ddtrace.contrib.%s\" % module\n imported_module = importlib.import_module(path)\n imported_module.patch()\n\n return on_import\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available modules.\n\n In addition to ``patch_modules``, an override can be specified via an\n environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.\n\n ``patch_modules`` have the highest precedence for overriding.\n\n :param dict patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all(redis=False, cassandra=False)\n \"\"\"\n modules = PATCH_MODULES.copy()\n\n # The enabled setting can be overridden by environment variables\n for module, enabled in modules.items():\n env_var = \"DD_TRACE_%s_ENABLED\" % module.upper()\n if env_var not in os.environ:\n continue\n\n override_enabled = 
formats.asbool(os.environ[env_var])\n modules[module] = override_enabled\n\n # Arguments take precedence over the environment and the defaults.\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict patch_modules: List of modules to patch.\n\n >>> patch(psycopg=True, elasticsearch=True)\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n for module in modules:\n if module in _PATCH_ON_IMPORT:\n # If the module has already been imported then patch immediately\n if module in sys.modules:\n patch_module(module, raise_errors=raise_errors)\n\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n when_imported(module)(_on_import_factory(module, raise_errors))\n\n # manually add module to patched modules\n with _LOCK:\n _PATCHED_MODULES.add(module)\n else:\n patch_module(module, raise_errors=raise_errors)\n\n patched_modules = get_patched_modules()\n log.info(\n \"patched %s/%s modules (%s)\",\n len(patched_modules),\n len(modules),\n \",\".join(patched_modules),\n )\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except ModuleNotFoundException:\n if raise_errors:\n raise\n return False\n except Exception:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s\", module, exc_info=True)\n return False\n\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\n\ndef _patch_module(module):\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = \"ddtrace.contrib.%s\" % module\n with _LOCK:\n if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:\n log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException(\"integration '%s' not available\" % path)\n else:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n if not hasattr(imported_module, \"patch\"):\n raise ModuleNotFoundException(\"module '%s' not installed\" % module)\n\n imported_module.patch()\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}]}
| 3,297 | 325 |
gh_patches_debug_19767
|
rasdani/github-patches
|
git_diff
|
SeldonIO__MLServer-624
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support older versions of grpcio
Some attributes, like `trailing_metadata()`, don't seem to exist in older versions of grpcio (e.g. `1.34.0`). Ideally we should also support these to avoid clashes with older versions of TF (e.g. `2.5.x`). 
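
A minimal compatibility sketch (an assumption about one possible approach, not a confirmed design) would be to guard access to the newer attribute before using it:

```
# Sketch only: fall back to invocation metadata when trailing_metadata()
# is unavailable on the servicer context (e.g. grpcio 1.34.0).
def to_headers(context):
    metadata = context.invocation_metadata()
    if hasattr(context, "trailing_metadata"):
        metadata += context.trailing_metadata()
    return {m.key: m.value for m in metadata}
```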
</issue>
<code>
[start of mlserver/grpc/utils.py]
1 import grpc
2
3 from typing import Callable, Dict, Tuple
4 from fastapi import status
5
6 from grpc import ServicerContext
7
8 from .logging import logger
9 from ..errors import MLServerError
10
11
12 STATUS_CODE_MAPPING = {
13 status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,
14 status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,
15 status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,
16 status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,
17 }
18
19
20 def to_headers(context: ServicerContext) -> Dict[str, str]:
21 metadata = context.invocation_metadata() + context.trailing_metadata()
22 headers = {}
23 for metadatum in metadata:
24 headers[metadatum.key] = metadatum.value
25
26 return headers
27
28
29 def to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:
30 return tuple(headers.items())
31
32
33 def _grpc_status_code(err: MLServerError):
34 return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)
35
36
37 def handle_mlserver_error(f: Callable):
38 async def _inner(self, request, context):
39 try:
40 return await f(self, request, context)
41 except MLServerError as err:
42 logger.error(err)
43 await context.abort(code=_grpc_status_code(err), details=str(err))
44
45 return _inner
46
[end of mlserver/grpc/utils.py]
[start of mlserver/grpc/interceptors.py]
1 from typing import Awaitable, Callable, Tuple
2 from functools import partial
3 from timeit import default_timer
4
5 from grpc.aio import ServerInterceptor, ServicerContext
6 from grpc import HandlerCallDetails, RpcMethodHandler, RpcError, StatusCode
7 from py_grpc_prometheus.prometheus_server_interceptor import (
8 grpc_utils,
9 PromServerInterceptor as _PromServerInterceptor,
10 )
11
12 from .logging import logger
13
14
15 class LoggingInterceptor(ServerInterceptor):
16 def _get_log_message(self, handler_call_details: HandlerCallDetails) -> str:
17 return handler_call_details.method
18
19 async def intercept_service(
20 self,
21 continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]],
22 handler_call_details: HandlerCallDetails,
23 ) -> RpcMethodHandler:
24 logger.info(self._get_log_message(handler_call_details))
25 handler = await continuation(handler_call_details)
26 return handler
27
28
29 class PromServerInterceptor(ServerInterceptor):
30 """
31 Simple wrapper around `py_grpc_prometheus` to support `grpc.aio`.
32
33 TODO: Open PR to add support upstream for AsyncIO.
34 """
35
36 def __init__(self, *args, **kwargs):
37 self._interceptor = _PromServerInterceptor(*args, **kwargs)
38 # We need a status code mapping to ensure we can convert from an int:
39 # https://groups.google.com/g/grpc-io/c/EdIXjMEaOyw/m/d3DeqmrJAAAJ
40 self._status_codes = {code.value[0]: code for code in StatusCode}
41
42 async def intercept_service(
43 self,
44 continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]],
45 handler_call_details: HandlerCallDetails,
46 ) -> RpcMethodHandler:
47 method_call = grpc_utils.split_method_call(handler_call_details)
48 handler = await continuation(handler_call_details)
49
50 metrics_wrapper = partial(self._metrics_wrapper, method_call)
51 return self._interceptor._wrap_rpc_behavior(handler, metrics_wrapper)
52
53 def _compute_status_code(self, servicer_context: ServicerContext) -> StatusCode:
54 """
55 This method is mostly copied from `py-grpc-prometheus`, with a couple
56 minor changes to avoid using private APIs from ServicerContext which
57 don't exist anymore in `grpc.aio`.
58 To see the original implementation, please check:
59
60 https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L127-L134
61 """
62 # Backwards compatibility for non-aio.
63 # TODO: It's not clear yet how to check whether the context has been
64 # cancelled with aio.
65 if hasattr(servicer_context, "_state"):
66 if servicer_context._state.client == "cancelled":
67 return StatusCode.CANCELLED
68
69 code = servicer_context.code()
70 if code is None:
71 return StatusCode.OK
72
73 # NOTE: With gRPC AIO, the `code` can be a plain integer that needs to
74 # be converted to an actual `StatusCode` entry
75 if isinstance(code, int):
76 if code not in self._status_codes:
77 return StatusCode.UNKNOWN
78
79 return self._status_codes[code]
80
81 return code
82
83 def _metrics_wrapper(
84 self,
85 method_call: Tuple[str, str, str],
86 old_handler: RpcMethodHandler,
87 request_streaming: bool,
88 response_streaming: bool,
89 ):
90 """
91 Port of `py-grpc-prometheus` metrics_wrapper method to work with gRPC's
92 AsyncIO support.
93 To see the original implementation, please check:
94
95 https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L46-L120
96 """
97 grpc_service_name, grpc_method_name, _ = method_call
98
99 async def _new_handler(request_or_iterator, servicer_context: ServicerContext):
100 response_or_iterator = None
101 try:
102 start = default_timer()
103 grpc_type = grpc_utils.get_method_type(
104 request_streaming, response_streaming
105 )
106 try:
107 if request_streaming:
108 request_or_iterator = grpc_utils.wrap_iterator_inc_counter(
109 request_or_iterator,
110 self._interceptor._metrics[
111 "grpc_server_stream_msg_received"
112 ],
113 grpc_type,
114 grpc_service_name,
115 grpc_method_name,
116 )
117 else:
118 self._interceptor._metrics[
119 "grpc_server_started_counter"
120 ].labels(
121 grpc_type=grpc_type,
122 grpc_service=grpc_service_name,
123 grpc_method=grpc_method_name,
124 ).inc()
125
126 # Invoke the original rpc behavior.
127 # NOTE: This is the main change required with respect to
128 # the original implementation in `py-grpc-prometheus`.
129 response_or_iterator = await old_handler(
130 request_or_iterator, servicer_context
131 )
132
133 if response_streaming:
134 sent_metric = self._interceptor._metrics[
135 "grpc_server_stream_msg_sent"
136 ]
137 response_or_iterator = grpc_utils.wrap_iterator_inc_counter(
138 response_or_iterator,
139 sent_metric,
140 grpc_type,
141 grpc_service_name,
142 grpc_method_name,
143 )
144
145 else:
146 self._interceptor.increase_grpc_server_handled_total_counter(
147 grpc_type,
148 grpc_service_name,
149 grpc_method_name,
150 self._compute_status_code(servicer_context).name,
151 )
152 return response_or_iterator
153 except RpcError as e:
154 self._interceptor.increase_grpc_server_handled_total_counter(
155 grpc_type,
156 grpc_service_name,
157 grpc_method_name,
158 self._interceptor._compute_error_code(e).name,
159 )
160 raise e
161
162 finally:
163
164 if not response_streaming:
165 if self._interceptor._legacy:
166 self._interceptor._metrics[
167 "legacy_grpc_server_handled_latency_seconds"
168 ].labels(
169 grpc_type=grpc_type,
170 grpc_service=grpc_service_name,
171 grpc_method=grpc_method_name,
172 ).observe(
173 max(default_timer() - start, 0)
174 )
175 elif self._interceptor._enable_handling_time_histogram:
176 self._interceptor._metrics[
177 "grpc_server_handled_histogram"
178 ].labels(
179 grpc_type=grpc_type,
180 grpc_service=grpc_service_name,
181 grpc_method=grpc_method_name,
182 ).observe(
183 max(default_timer() - start, 0)
184 )
185 except Exception as e: # pylint: disable=broad-except
186 # Allow user to skip the exceptions in order to maintain
187 # the basic functionality in the server
188 # The logging function in exception can be toggled with log_exceptions
189 # in order to suppress the noise in logging
190 if self._interceptor._skip_exceptions:
191 if self._interceptor._log_exceptions:
192 logger.error(e)
193 if response_or_iterator is None:
194 return response_or_iterator
195 return old_handler(request_or_iterator, servicer_context)
196 raise e
197
198 return _new_handler
199
[end of mlserver/grpc/interceptors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mlserver/grpc/interceptors.py b/mlserver/grpc/interceptors.py
--- a/mlserver/grpc/interceptors.py
+++ b/mlserver/grpc/interceptors.py
@@ -66,6 +66,9 @@
if servicer_context._state.client == "cancelled":
return StatusCode.CANCELLED
+ if not hasattr(servicer_context, "code"):
+ return StatusCode.OK
+
code = servicer_context.code()
if code is None:
return StatusCode.OK
diff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py
--- a/mlserver/grpc/utils.py
+++ b/mlserver/grpc/utils.py
@@ -18,7 +18,11 @@
def to_headers(context: ServicerContext) -> Dict[str, str]:
- metadata = context.invocation_metadata() + context.trailing_metadata()
+ metadata = context.invocation_metadata()
+ if hasattr(context, "trailing_metadata"):
+ # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose
+ # access to the trailing metadata on the service side
+ metadata += context.trailing_metadata()
headers = {}
for metadatum in metadata:
headers[metadatum.key] = metadatum.value
|
{"golden_diff": "diff --git a/mlserver/grpc/interceptors.py b/mlserver/grpc/interceptors.py\n--- a/mlserver/grpc/interceptors.py\n+++ b/mlserver/grpc/interceptors.py\n@@ -66,6 +66,9 @@\n if servicer_context._state.client == \"cancelled\":\n return StatusCode.CANCELLED\n \n+ if not hasattr(servicer_context, \"code\"):\n+ return StatusCode.OK\n+\n code = servicer_context.code()\n if code is None:\n return StatusCode.OK\ndiff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py\n--- a/mlserver/grpc/utils.py\n+++ b/mlserver/grpc/utils.py\n@@ -18,7 +18,11 @@\n \n \n def to_headers(context: ServicerContext) -> Dict[str, str]:\n- metadata = context.invocation_metadata() + context.trailing_metadata()\n+ metadata = context.invocation_metadata()\n+ if hasattr(context, \"trailing_metadata\"):\n+ # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose\n+ # access to the trailing metadata on the service side\n+ metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n", "issue": "Support older versions of grpcio\nSome attributes, like `trailing_metadata()` don't seem to exist in older versions of grpcio (e.g. `1.34.0`). Ideally we should also provide support for these ones to avoid clashes with older versions of TF (e.g. `2.5.x`). \n", "before_files": [{"content": "import grpc\n\nfrom typing import Callable, Dict, Tuple\nfrom fastapi import status\n\nfrom grpc import ServicerContext\n\nfrom .logging import logger\nfrom ..errors import MLServerError\n\n\nSTATUS_CODE_MAPPING = {\n status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,\n status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,\n status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,\n status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,\n}\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata() + context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:\n return tuple(headers.items())\n\n\ndef _grpc_status_code(err: MLServerError):\n return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)\n\n\ndef handle_mlserver_error(f: Callable):\n async def _inner(self, request, context):\n try:\n return await f(self, request, context)\n except MLServerError as err:\n logger.error(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n\n return _inner\n", "path": "mlserver/grpc/utils.py"}, {"content": "from typing import Awaitable, Callable, Tuple\nfrom functools import partial\nfrom timeit import default_timer\n\nfrom grpc.aio import ServerInterceptor, ServicerContext\nfrom grpc import HandlerCallDetails, RpcMethodHandler, RpcError, StatusCode\nfrom py_grpc_prometheus.prometheus_server_interceptor import (\n grpc_utils,\n PromServerInterceptor as _PromServerInterceptor,\n)\n\nfrom .logging import logger\n\n\nclass LoggingInterceptor(ServerInterceptor):\n def _get_log_message(self, handler_call_details: HandlerCallDetails) -> str:\n return handler_call_details.method\n\n async def intercept_service(\n self,\n continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]],\n handler_call_details: HandlerCallDetails,\n ) -> RpcMethodHandler:\n logger.info(self._get_log_message(handler_call_details))\n handler = await continuation(handler_call_details)\n return handler\n\n\nclass 
PromServerInterceptor(ServerInterceptor):\n \"\"\"\n Simple wrapper around `py_grpc_prometheus` to support `grpc.aio`.\n\n TODO: Open PR to add support upstream for AsyncIO.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._interceptor = _PromServerInterceptor(*args, **kwargs)\n # We need a status code mapping to ensure we can convert from an int:\n # https://groups.google.com/g/grpc-io/c/EdIXjMEaOyw/m/d3DeqmrJAAAJ\n self._status_codes = {code.value[0]: code for code in StatusCode}\n\n async def intercept_service(\n self,\n continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]],\n handler_call_details: HandlerCallDetails,\n ) -> RpcMethodHandler:\n method_call = grpc_utils.split_method_call(handler_call_details)\n handler = await continuation(handler_call_details)\n\n metrics_wrapper = partial(self._metrics_wrapper, method_call)\n return self._interceptor._wrap_rpc_behavior(handler, metrics_wrapper)\n\n def _compute_status_code(self, servicer_context: ServicerContext) -> StatusCode:\n \"\"\"\n This method is mostly copied from `py-grpc-prometheus`, with a couple\n minor changes to avoid using private APIs from ServicerContext which\n don't exist anymore in `grpc.aio`.\n To see the original implementation, please check:\n\n https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L127-L134\n \"\"\"\n # Backwards compatibility for non-aio.\n # TODO: It's not clear yet how to check whether the context has been\n # cancelled with aio.\n if hasattr(servicer_context, \"_state\"):\n if servicer_context._state.client == \"cancelled\":\n return StatusCode.CANCELLED\n\n code = servicer_context.code()\n if code is None:\n return StatusCode.OK\n\n # NOTE: With gRPC AIO, the `code` can be a plain integer that needs to\n # be converted to an actual `StatusCode` entry\n if isinstance(code, int):\n if code not in self._status_codes:\n return StatusCode.UNKNOWN\n\n return self._status_codes[code]\n\n return code\n\n def _metrics_wrapper(\n self,\n method_call: Tuple[str, str, str],\n old_handler: RpcMethodHandler,\n request_streaming: bool,\n response_streaming: bool,\n ):\n \"\"\"\n Port of `py-grpc-prometheus` metrics_wrapper method to work with gRPC's\n AsyncIO support.\n To see the original implementation, please check:\n\n https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L46-L120\n \"\"\"\n grpc_service_name, grpc_method_name, _ = method_call\n\n async def _new_handler(request_or_iterator, servicer_context: ServicerContext):\n response_or_iterator = None\n try:\n start = default_timer()\n grpc_type = grpc_utils.get_method_type(\n request_streaming, response_streaming\n )\n try:\n if request_streaming:\n request_or_iterator = grpc_utils.wrap_iterator_inc_counter(\n request_or_iterator,\n self._interceptor._metrics[\n \"grpc_server_stream_msg_received\"\n ],\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n )\n else:\n self._interceptor._metrics[\n \"grpc_server_started_counter\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).inc()\n\n # Invoke the original rpc behavior.\n # NOTE: This is the main change required with respect to\n # the original implementation in `py-grpc-prometheus`.\n response_or_iterator = await old_handler(\n request_or_iterator, servicer_context\n )\n\n if response_streaming:\n sent_metric = 
self._interceptor._metrics[\n \"grpc_server_stream_msg_sent\"\n ]\n response_or_iterator = grpc_utils.wrap_iterator_inc_counter(\n response_or_iterator,\n sent_metric,\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n )\n\n else:\n self._interceptor.increase_grpc_server_handled_total_counter(\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n self._compute_status_code(servicer_context).name,\n )\n return response_or_iterator\n except RpcError as e:\n self._interceptor.increase_grpc_server_handled_total_counter(\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n self._interceptor._compute_error_code(e).name,\n )\n raise e\n\n finally:\n\n if not response_streaming:\n if self._interceptor._legacy:\n self._interceptor._metrics[\n \"legacy_grpc_server_handled_latency_seconds\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).observe(\n max(default_timer() - start, 0)\n )\n elif self._interceptor._enable_handling_time_histogram:\n self._interceptor._metrics[\n \"grpc_server_handled_histogram\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).observe(\n max(default_timer() - start, 0)\n )\n except Exception as e: # pylint: disable=broad-except\n # Allow user to skip the exceptions in order to maintain\n # the basic functionality in the server\n # The logging function in exception can be toggled with log_exceptions\n # in order to suppress the noise in logging\n if self._interceptor._skip_exceptions:\n if self._interceptor._log_exceptions:\n logger.error(e)\n if response_or_iterator is None:\n return response_or_iterator\n return old_handler(request_or_iterator, servicer_context)\n raise e\n\n return _new_handler\n", "path": "mlserver/grpc/interceptors.py"}]}
| 3,056 | 282 |
gh_patches_debug_988
|
rasdani/github-patches
|
git_diff
|
cowrie__cowrie-1030
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials
Fresh install.
I tried to exclude 'password' or 'abc123' from the valid passwords for the user root.
Now the file looks like this:
```
root:x:!root
root:x:!123456
root:x:!password
root:x:*
```
Restarted cowrie, but there is still no way to deny login with the root/password credentials.
Maybe it is some sort of problem with the new regexp checking?
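
A likely culprit (my assumption from reading the `adduser()` loader, not something confirmed in the report): in Python 3, indexing a `bytes` object yields an `int`, so a deny-rule check written as `passwd[0] == b'!'` never matches and the `!` rules are treated like ordinary allow entries:

```
>>> passwd = b"!password"
>>> passwd[0] == b"!"       # always False: passwd[0] is the int 33, not b"!"
False
>>> passwd[0] == ord("!")   # comparing against the code point does match
True
```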
</issue>
<code>
[start of src/cowrie/core/auth.py]
1 # Copyright (c) 2009-2014 Upi Tamminen <desaster@gmail.com>
2 # See the COPYRIGHT file for more information
3
4 """
5 This module contains authentication code
6 """
7
8 from __future__ import absolute_import, division
9
10 import json
11 import re
12 from collections import OrderedDict
13 from os import path
14 from random import randint
15
16 from twisted.python import log
17
18 from cowrie.core.config import CONFIG
19
20 _USERDB_DEFAULTS = [
21 'root:x:!root',
22 'root:x:!123456',
23 'root:x:!/honeypot/i',
24 'root:x:*',
25 'richard:x:*',
26 'richard:x:fout',
27 ]
28
29
30 class UserDB(object):
31 """
32 By Walter de Jong <walter@sara.nl>
33 """
34
35 def __init__(self):
36 self.userdb = OrderedDict()
37 self.load()
38
39 def load(self):
40 """
41 load the user db
42 """
43
44 try:
45 with open('{}/userdb.txt'.format(CONFIG.get('honeypot', 'etc_path')), 'r') as db:
46 userdb = db.readlines()
47 except IOError:
48 log.msg("Could not read etc/userdb.txt, default database activated")
49 userdb = _USERDB_DEFAULTS
50
51 for user in userdb:
52 if not user.startswith('#'):
53 try:
54 login = user.split(':')[0].encode('utf8')
55 password = user.split(':')[2].strip().encode('utf8')
56 except IndexError:
57 continue
58 else:
59 self.adduser(login, password)
60
61 def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):
62 for credentials, policy in self.userdb.items():
63 login, passwd = credentials
64
65 if self.match_rule(login, thelogin):
66 if self.match_rule(passwd, thepasswd):
67 return policy
68
69 return False
70
71 def match_rule(self, rule, input):
72 if type(rule) is bytes:
73 return rule in [b'*', input]
74 else:
75 return bool(rule.search(input))
76
77 def re_or_str(self, rule):
78 """
79 Convert a /.../ type rule to a regex, otherwise return the string as-is
80
81 @param login: rule
82 @type login: bytes
83 """
84 res = re.match(br'/(.+)/(i)?$', rule)
85 if res:
86 return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)
87
88 return rule
89
90 def adduser(self, login, passwd):
91 """
92 All arguments are bytes
93
94 @param login: user id
95 @type login: bytes
96 @param passwd: password
97 @type passwd: bytes
98 """
99 login = self.re_or_str(login)
100
101 if passwd[0] == b'!':
102 policy = False
103 passwd = passwd[1:]
104 else:
105 policy = True
106
107 passwd = self.re_or_str(passwd)
108 self.userdb[(login, passwd)] = policy
109
110
111 class AuthRandom(object):
112 """
113 Alternative class that defines the checklogin() method.
114 Users will be authenticated after a random number of attempts.
115 """
116
117 def __init__(self):
118 # Default values
119 self.mintry, self.maxtry, self.maxcache = 2, 5, 10
120
121 # Are there auth_class parameters?
122 if CONFIG.has_option('honeypot', 'auth_class_parameters'):
123 parameters = CONFIG.get('honeypot', 'auth_class_parameters')
124 parlist = parameters.split(',')
125 if len(parlist) == 3:
126 self.mintry = int(parlist[0])
127 self.maxtry = int(parlist[1])
128 self.maxcache = int(parlist[2])
129
130 if self.maxtry < self.mintry:
131 self.maxtry = self.mintry + 1
132 log.msg("maxtry < mintry, adjusting maxtry to: {}".format(self.maxtry))
133 self.uservar = {}
134 self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))
135 self.loadvars()
136
137 def loadvars(self):
138 """
139 Load user vars from json file
140 """
141 if path.isfile(self.uservar_file):
142 with open(self.uservar_file, 'r') as fp:
143 try:
144 self.uservar = json.load(fp)
145 except Exception:
146 self.uservar = {}
147
148 def savevars(self):
149 """
150 Save the user vars to json file
151 """
152 data = self.uservar
153 # Note: this is subject to races between cowrie logins
154 with open(self.uservar_file, 'w') as fp:
155 json.dump(data, fp)
156
157 def checklogin(self, thelogin, thepasswd, src_ip):
158 """
159 Every new source IP will have to try a random number of times between
160 'mintry' and 'maxtry' before succeeding to login.
161 All username/password combinations must be different.
162 The successful login combination is stored with the IP address.
163 Successful username/passwords pairs are also cached for 'maxcache' times.
164 This is to allow access for returns from different IP addresses.
165 Variables are saved in 'uservar.json' in the data directory.
166 """
167
168 auth = False
169 userpass = str(thelogin) + ':' + str(thepasswd)
170
171 if 'cache' not in self.uservar:
172 self.uservar['cache'] = []
173 cache = self.uservar['cache']
174
175 # Check if it is the first visit from src_ip
176 if src_ip not in self.uservar:
177 self.uservar[src_ip] = {}
178 ipinfo = self.uservar[src_ip]
179 ipinfo['try'] = 0
180 if userpass in cache:
181 log.msg("first time for {}, found cached: {}".format(src_ip, userpass))
182 ipinfo['max'] = 1
183 ipinfo['user'] = str(thelogin)
184 ipinfo['pw'] = str(thepasswd)
185 auth = True
186 self.savevars()
187 return auth
188 else:
189 ipinfo['max'] = randint(self.mintry, self.maxtry)
190 log.msg("first time for {}, need: {}".format(src_ip, ipinfo['max']))
191 else:
192 if userpass in cache:
193 ipinfo = self.uservar[src_ip]
194 log.msg("Found cached: {}".format(userpass))
195 ipinfo['max'] = 1
196 ipinfo['user'] = str(thelogin)
197 ipinfo['pw'] = str(thepasswd)
198 auth = True
199 self.savevars()
200 return auth
201
202 ipinfo = self.uservar[src_ip]
203
204 # Fill in missing variables
205 if 'max' not in ipinfo:
206 ipinfo['max'] = randint(self.mintry, self.maxtry)
207 if 'try' not in ipinfo:
208 ipinfo['try'] = 0
209 if 'tried' not in ipinfo:
210 ipinfo['tried'] = []
211
212 # Don't count repeated username/password combinations
213 if userpass in ipinfo['tried']:
214 log.msg('already tried this combination')
215 self.savevars()
216 return auth
217
218 ipinfo['try'] += 1
219 attempts = ipinfo['try']
220 need = ipinfo['max']
221 log.msg("login attempt: {}".format(attempts))
222
223 # Check if enough login attempts are tried
224 if attempts < need:
225 self.uservar[src_ip]['tried'].append(userpass)
226 elif attempts == need:
227 ipinfo['user'] = str(thelogin)
228 ipinfo['pw'] = str(thepasswd)
229 cache.append(userpass)
230 if len(cache) > self.maxcache:
231 cache.pop(0)
232 auth = True
233 # Returning after successful login
234 elif attempts > need:
235 if 'user' not in ipinfo or 'pw' not in ipinfo:
236 log.msg('return, but username or password not set!!!')
237 ipinfo['tried'].append(userpass)
238 ipinfo['try'] = 1
239 else:
240 log.msg("login return, expect: [{}/{}]".format(ipinfo['user'], ipinfo['pw']))
241 if thelogin == ipinfo['user'] and str(thepasswd) == ipinfo['pw']:
242 auth = True
243 self.savevars()
244 return auth
245
[end of src/cowrie/core/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cowrie/core/auth.py b/src/cowrie/core/auth.py
--- a/src/cowrie/core/auth.py
+++ b/src/cowrie/core/auth.py
@@ -98,7 +98,7 @@
"""
login = self.re_or_str(login)
- if passwd[0] == b'!':
+ if passwd[0] == ord("!"):
policy = False
passwd = passwd[1:]
else:
|
{"golden_diff": "diff --git a/src/cowrie/core/auth.py b/src/cowrie/core/auth.py\n--- a/src/cowrie/core/auth.py\n+++ b/src/cowrie/core/auth.py\n@@ -98,7 +98,7 @@\n \"\"\"\n login = self.re_or_str(login)\n \n- if passwd[0] == b'!':\n+ if passwd[0] == ord(\"!\"):\n policy = False\n passwd = passwd[1:]\n else:\n", "issue": "adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials\nFresh install.\r\n\r\nI tried to exclude 'password' or 'abc123' from valid passwords for the user root\r\n\r\nNow file looks like \r\n```\r\nroot:x:!root\r\nroot:x:!123456\r\nroot:x:!password\r\nroot:x:*\r\n```\r\n\r\nRetarted cowrie, but no way to deny login with root/password credentials\r\n\r\nMaybe, some sort of problem with new regexp checking?\n", "before_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <desaster@gmail.com>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains authentication code\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport json\nimport re\nfrom collections import OrderedDict\nfrom os import path\nfrom random import randint\n\nfrom twisted.python import log\n\nfrom cowrie.core.config import CONFIG\n\n_USERDB_DEFAULTS = [\n 'root:x:!root',\n 'root:x:!123456',\n 'root:x:!/honeypot/i',\n 'root:x:*',\n 'richard:x:*',\n 'richard:x:fout',\n]\n\n\nclass UserDB(object):\n \"\"\"\n By Walter de Jong <walter@sara.nl>\n \"\"\"\n\n def __init__(self):\n self.userdb = OrderedDict()\n self.load()\n\n def load(self):\n \"\"\"\n load the user db\n \"\"\"\n\n try:\n with open('{}/userdb.txt'.format(CONFIG.get('honeypot', 'etc_path')), 'r') as db:\n userdb = db.readlines()\n except IOError:\n log.msg(\"Could not read etc/userdb.txt, default database activated\")\n userdb = _USERDB_DEFAULTS\n\n for user in userdb:\n if not user.startswith('#'):\n try:\n login = user.split(':')[0].encode('utf8')\n password = user.split(':')[2].strip().encode('utf8')\n except IndexError:\n continue\n else:\n self.adduser(login, password)\n\n def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):\n for credentials, policy in self.userdb.items():\n login, passwd = credentials\n\n if self.match_rule(login, thelogin):\n if self.match_rule(passwd, thepasswd):\n return policy\n\n return False\n\n def match_rule(self, rule, input):\n if type(rule) is bytes:\n return rule in [b'*', input]\n else:\n return bool(rule.search(input))\n\n def re_or_str(self, rule):\n \"\"\"\n Convert a /.../ type rule to a regex, otherwise return the string as-is\n\n @param login: rule\n @type login: bytes\n \"\"\"\n res = re.match(br'/(.+)/(i)?$', rule)\n if res:\n return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)\n\n return rule\n\n def adduser(self, login, passwd):\n \"\"\"\n All arguments are bytes\n\n @param login: user id\n @type login: bytes\n @param passwd: password\n @type passwd: bytes\n \"\"\"\n login = self.re_or_str(login)\n\n if passwd[0] == b'!':\n policy = False\n passwd = passwd[1:]\n else:\n policy = True\n\n passwd = self.re_or_str(passwd)\n self.userdb[(login, passwd)] = policy\n\n\nclass AuthRandom(object):\n \"\"\"\n Alternative class that defines the checklogin() method.\n Users will be authenticated after a random number of attempts.\n \"\"\"\n\n def __init__(self):\n # Default values\n self.mintry, self.maxtry, self.maxcache = 2, 5, 10\n\n # Are there auth_class parameters?\n if CONFIG.has_option('honeypot', 'auth_class_parameters'):\n parameters = CONFIG.get('honeypot', 'auth_class_parameters')\n parlist = 
parameters.split(',')\n if len(parlist) == 3:\n self.mintry = int(parlist[0])\n self.maxtry = int(parlist[1])\n self.maxcache = int(parlist[2])\n\n if self.maxtry < self.mintry:\n self.maxtry = self.mintry + 1\n log.msg(\"maxtry < mintry, adjusting maxtry to: {}\".format(self.maxtry))\n self.uservar = {}\n self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))\n self.loadvars()\n\n def loadvars(self):\n \"\"\"\n Load user vars from json file\n \"\"\"\n if path.isfile(self.uservar_file):\n with open(self.uservar_file, 'r') as fp:\n try:\n self.uservar = json.load(fp)\n except Exception:\n self.uservar = {}\n\n def savevars(self):\n \"\"\"\n Save the user vars to json file\n \"\"\"\n data = self.uservar\n # Note: this is subject to races between cowrie logins\n with open(self.uservar_file, 'w') as fp:\n json.dump(data, fp)\n\n def checklogin(self, thelogin, thepasswd, src_ip):\n \"\"\"\n Every new source IP will have to try a random number of times between\n 'mintry' and 'maxtry' before succeeding to login.\n All username/password combinations must be different.\n The successful login combination is stored with the IP address.\n Successful username/passwords pairs are also cached for 'maxcache' times.\n This is to allow access for returns from different IP addresses.\n Variables are saved in 'uservar.json' in the data directory.\n \"\"\"\n\n auth = False\n userpass = str(thelogin) + ':' + str(thepasswd)\n\n if 'cache' not in self.uservar:\n self.uservar['cache'] = []\n cache = self.uservar['cache']\n\n # Check if it is the first visit from src_ip\n if src_ip not in self.uservar:\n self.uservar[src_ip] = {}\n ipinfo = self.uservar[src_ip]\n ipinfo['try'] = 0\n if userpass in cache:\n log.msg(\"first time for {}, found cached: {}\".format(src_ip, userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n else:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n log.msg(\"first time for {}, need: {}\".format(src_ip, ipinfo['max']))\n else:\n if userpass in cache:\n ipinfo = self.uservar[src_ip]\n log.msg(\"Found cached: {}\".format(userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n\n ipinfo = self.uservar[src_ip]\n\n # Fill in missing variables\n if 'max' not in ipinfo:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n if 'try' not in ipinfo:\n ipinfo['try'] = 0\n if 'tried' not in ipinfo:\n ipinfo['tried'] = []\n\n # Don't count repeated username/password combinations\n if userpass in ipinfo['tried']:\n log.msg('already tried this combination')\n self.savevars()\n return auth\n\n ipinfo['try'] += 1\n attempts = ipinfo['try']\n need = ipinfo['max']\n log.msg(\"login attempt: {}\".format(attempts))\n\n # Check if enough login attempts are tried\n if attempts < need:\n self.uservar[src_ip]['tried'].append(userpass)\n elif attempts == need:\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n cache.append(userpass)\n if len(cache) > self.maxcache:\n cache.pop(0)\n auth = True\n # Returning after successful login\n elif attempts > need:\n if 'user' not in ipinfo or 'pw' not in ipinfo:\n log.msg('return, but username or password not set!!!')\n ipinfo['tried'].append(userpass)\n ipinfo['try'] = 1\n else:\n log.msg(\"login return, expect: [{}/{}]\".format(ipinfo['user'], ipinfo['pw']))\n if thelogin == ipinfo['user'] and str(thepasswd) == ipinfo['pw']:\n auth = True\n 
self.savevars()\n return auth\n", "path": "src/cowrie/core/auth.py"}]}
| 3,119 | 102 |
gh_patches_debug_11025
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-3555
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't invert gate created from QuantumCircuit.to_gate
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
- **Qiskit Terra version**:
- **Python version**:
- **Operating system**:
### What is the current behavior?
When inverting a gate created from QuantumCircuit.to_gate the following exception is raised:
`ValueError: not enough values to unpack (expected 3, got 2)`
### Steps to reproduce the problem
```
qc = QuantumCircuit(1)
qc.x(0)
gate = qc.to_gate()
gate.inverse()
```
### What is the expected behavior?
### Suggested solutions
</issue>
<code>
[start of qiskit/converters/circuit_to_gate.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Helper function for converting a circuit to a gate"""
16
17 from qiskit.circuit.gate import Gate
18 from qiskit.circuit.quantumregister import QuantumRegister, Qubit
19 from qiskit.exceptions import QiskitError
20
21
22 def circuit_to_gate(circuit, parameter_map=None):
23 """Build a ``Gate`` object from a ``QuantumCircuit``.
24
25 The gate is anonymous (not tied to a named quantum register),
26 and so can be inserted into another circuit. The gate will
27 have the same string name as the circuit.
28
29 Args:
30 circuit (QuantumCircuit): the input circuit.
31 parameter_map (dict): For parameterized circuits, a mapping from
32 parameters in the circuit to parameters to be used in the gate.
33 If None, existing circuit parameters will also parameterize the
34 Gate.
35
36 Raises:
37 QiskitError: if circuit is non-unitary or if
38 parameter_map is not compatible with circuit
39
40 Return:
41 Gate: a Gate equivalent to the action of the
42 input circuit. Upon decomposition, this gate will
43 yield the components comprising the original circuit.
44 """
45 for inst, _, _ in circuit.data:
46 if not isinstance(inst, Gate):
47 raise QiskitError('One or more instructions in this instruction '
48 'cannot be converted to a gate')
49
50 if parameter_map is None:
51 parameter_dict = {p: p for p in circuit.parameters}
52 else:
53 parameter_dict = circuit._unroll_param_dict(parameter_map)
54
55 if parameter_dict.keys() != circuit.parameters:
56 raise QiskitError(('parameter_map should map all circuit parameters. '
57 'Circuit parameters: {}, parameter_map: {}').format(
58 circuit.parameters, parameter_dict))
59
60 gate = Gate(name=circuit.name,
61 num_qubits=sum([qreg.size for qreg in circuit.qregs]),
62 params=sorted(parameter_dict.values(), key=lambda p: p.name))
63 gate.condition = None
64
65 def find_bit_position(bit):
66 """find the index of a given bit (Register, int) within
67 a flat ordered list of bits of the circuit
68 """
69 if isinstance(bit, Qubit):
70 ordered_regs = circuit.qregs
71 else:
72 ordered_regs = circuit.cregs
73 reg_index = ordered_regs.index(bit.register)
74 return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index
75
76 target = circuit.copy()
77 target._substitute_parameters(parameter_dict)
78
79 definition = target.data
80
81 if gate.num_qubits > 0:
82 q = QuantumRegister(gate.num_qubits, 'q')
83
84 definition = list(map(lambda x:
85 (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),
86 definition))
87 gate.definition = definition
88
89 return gate
90
[end of qiskit/converters/circuit_to_gate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qiskit/converters/circuit_to_gate.py b/qiskit/converters/circuit_to_gate.py
--- a/qiskit/converters/circuit_to_gate.py
+++ b/qiskit/converters/circuit_to_gate.py
@@ -81,9 +81,14 @@
if gate.num_qubits > 0:
q = QuantumRegister(gate.num_qubits, 'q')
- definition = list(map(lambda x:
- (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),
- definition))
+ # The 3rd parameter in the output tuple) is hard coded to [] because
+ # Gate objects do not have cregs set and we've verified that all
+ # instructions are gates
+ definition = list(map(
+ lambda x: (x[0],
+ list(map(lambda y: q[find_bit_position(y)], x[1])),
+ []),
+ definition))
gate.definition = definition
return gate
|
{"golden_diff": "diff --git a/qiskit/converters/circuit_to_gate.py b/qiskit/converters/circuit_to_gate.py\n--- a/qiskit/converters/circuit_to_gate.py\n+++ b/qiskit/converters/circuit_to_gate.py\n@@ -81,9 +81,14 @@\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n \n- definition = list(map(lambda x:\n- (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),\n- definition))\n+ # The 3rd parameter in the output tuple) is hard coded to [] because\n+ # Gate objects do not have cregs set and we've verified that all\n+ # instructions are gates\n+ definition = list(map(\n+ lambda x: (x[0],\n+ list(map(lambda y: q[find_bit_position(y)], x[1])),\n+ []),\n+ definition))\n gate.definition = definition\n \n return gate\n", "issue": "Can't invert gate created from QuantumCircuit.to_gate\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**:\r\n- **Python version**:\r\n- **Operating system**:\r\n\r\n### What is the current behavior?\r\nWhen inverting a gate created from QuantumCircuit.to_gate the following exception is raised:\r\n\r\n`ValueError: not enough values to unpack (expected 3, got 2)`\r\n\r\n\r\n### Steps to reproduce the problem\r\n```\r\nqc = QuantumCircuit(1)\r\nqc.x(0)\r\ngate = qc.to_gate()\r\ngate.inverse()\r\n```\r\n\r\n### What is the expected behavior?\r\n\r\n\r\n\r\n### Suggested solutions\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Helper function for converting a circuit to a gate\"\"\"\n\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister, Qubit\nfrom qiskit.exceptions import QiskitError\n\n\ndef circuit_to_gate(circuit, parameter_map=None):\n \"\"\"Build a ``Gate`` object from a ``QuantumCircuit``.\n\n The gate is anonymous (not tied to a named quantum register),\n and so can be inserted into another circuit. The gate will\n have the same string name as the circuit.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n parameter_map (dict): For parameterized circuits, a mapping from\n parameters in the circuit to parameters to be used in the gate.\n If None, existing circuit parameters will also parameterize the\n Gate.\n\n Raises:\n QiskitError: if circuit is non-unitary or if\n parameter_map is not compatible with circuit\n\n Return:\n Gate: a Gate equivalent to the action of the\n input circuit. 
Upon decomposition, this gate will\n yield the components comprising the original circuit.\n \"\"\"\n for inst, _, _ in circuit.data:\n if not isinstance(inst, Gate):\n raise QiskitError('One or more instructions in this instruction '\n 'cannot be converted to a gate')\n\n if parameter_map is None:\n parameter_dict = {p: p for p in circuit.parameters}\n else:\n parameter_dict = circuit._unroll_param_dict(parameter_map)\n\n if parameter_dict.keys() != circuit.parameters:\n raise QiskitError(('parameter_map should map all circuit parameters. '\n 'Circuit parameters: {}, parameter_map: {}').format(\n circuit.parameters, parameter_dict))\n\n gate = Gate(name=circuit.name,\n num_qubits=sum([qreg.size for qreg in circuit.qregs]),\n params=sorted(parameter_dict.values(), key=lambda p: p.name))\n gate.condition = None\n\n def find_bit_position(bit):\n \"\"\"find the index of a given bit (Register, int) within\n a flat ordered list of bits of the circuit\n \"\"\"\n if isinstance(bit, Qubit):\n ordered_regs = circuit.qregs\n else:\n ordered_regs = circuit.cregs\n reg_index = ordered_regs.index(bit.register)\n return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index\n\n target = circuit.copy()\n target._substitute_parameters(parameter_dict)\n\n definition = target.data\n\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n\n definition = list(map(lambda x:\n (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),\n definition))\n gate.definition = definition\n\n return gate\n", "path": "qiskit/converters/circuit_to_gate.py"}]}
| 1,622 | 228 |
gh_patches_debug_4796
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-2791
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
menuCreateItem input issue
Why `menuCreateItem` does not require `menu` argument on the schema level, but returns an error if not given? It doesn't look right.
Besides, do we really need to specify `menu` argument, if `parent` is given? It's not like child could be pinned to different menu than its parent.
</issue>
<code>
[start of saleor/graphql/menu/mutations.py]
1 import graphene
2 from graphql_jwt.decorators import permission_required
3
4 from ...menu import models
5 from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation
6 from ..product.types import Category, Collection
7 from ..page.types import Page
8 from .types import Menu
9
10
11 class MenuItemInput(graphene.InputObjectType):
12 name = graphene.String(description='Name of the menu item.')
13 url = graphene.String(description='URL of the pointed item.')
14 category = graphene.ID(
15 description='Category to which item points.', name='category')
16 collection = graphene.ID(
17 description='Collection to which item points.', name='collection')
18 page = graphene.ID(
19 description='Page to which item points.', name='page')
20
21
22 class MenuItemCreateInput(MenuItemInput):
23 menu = graphene.ID(
24 description='Menu to which item belongs to.', name='menu')
25 parent = graphene.ID(
26 description='''
27 ID of the parent menu. If empty, menu will be top level
28 menu.''',
29 name='parent')
30
31
32 class MenuInput(graphene.InputObjectType):
33 name = graphene.String(description='Name of the menu.')
34
35
36 class MenuCreateInput(MenuInput):
37 items = graphene.List(
38 MenuItemInput, description='List of menu items.')
39
40
41 class MenuCreate(ModelMutation):
42 class Arguments:
43 input = MenuCreateInput(
44 required=True,
45 description='Fields required to create a menu.')
46
47 class Meta:
48 description = 'Creates a new Menu'
49 model = models.Menu
50
51 @classmethod
52 def user_is_allowed(cls, user, input):
53 return user.has_perm('menu.manage_menus')
54
55 @classmethod
56 def clean_input(cls, info, instance, input, errors):
57 cleaned_input = super().clean_input(info, instance, input, errors)
58 items = []
59 for item in cleaned_input.get('items', []):
60 category = item.get('category')
61 collection = item.get('collection')
62 page = item.get('page')
63 url = item.get('url')
64 if len([i for i in [category, collection, page, url] if i]) > 1:
65 cls.add_error(
66 errors, 'items', 'More than one item provided.')
67 else:
68 if category:
69 category = cls.get_node_or_error(
70 info, category, errors, 'items', only_type=Category)
71 item['category'] = category
72 elif collection:
73 collection = cls.get_node_or_error(
74 info, collection, errors, 'items',
75 only_type=Collection)
76 item['collection'] = collection
77 elif page:
78 page = cls.get_node_or_error(
79 info, page, errors, 'items', only_type=Page)
80 item['page'] = page
81 elif not url:
82 cls.add_error(errors, 'items', 'No menu item provided.')
83 items.append(item)
84 cleaned_input['items'] = items
85 return cleaned_input
86
87 @classmethod
88 def _save_m2m(cls, info, instance, cleaned_data):
89 super()._save_m2m(info, instance, cleaned_data)
90 items = cleaned_data.get('items', [])
91 for item in items:
92 instance.items.create(**item)
93
94
95 class MenuUpdate(ModelMutation):
96 class Arguments:
97 id = graphene.ID(
98 required=True, description='ID of a menu to update.')
99 input = MenuInput(
100 required=True,
101 description='Fields required to update a menu.')
102
103 class Meta:
104 description = 'Updates a menu.'
105 model = models.Menu
106
107 @classmethod
108 def user_is_allowed(cls, user, input):
109 return user.has_perm('menu.manage_menus')
110
111
112 class MenuDelete(ModelDeleteMutation):
113 class Arguments:
114 id = graphene.ID(
115 required=True, description='ID of a menu to delete.')
116
117 class Meta:
118 description = 'Deletes a menu.'
119 model = models.Menu
120
121 @classmethod
122 def user_is_allowed(cls, user, input):
123 return user.has_perm('menu.manage_menus')
124
125
126 class MenuItemCreate(ModelMutation):
127 class Arguments:
128 input = MenuItemCreateInput(
129 required=True,
130 description="""Fields required to update a menu item.
131 Only one of 'url', 'category', 'page', 'collection' is allowed
132 per item""")
133
134 class Meta:
135 description = 'Creates a new Menu'
136 model = models.MenuItem
137
138 @classmethod
139 def user_is_allowed(cls, user, input):
140 return user.has_perm('menu.manage_menus')
141
142 @classmethod
143 def clean_input(cls, info, instance, input, errors):
144 cleaned_input = super().clean_input(info, instance, input, errors)
145 items = [
146 cleaned_input.get('page'), cleaned_input.get('collection'),
147 cleaned_input.get('url'), cleaned_input.get('category')]
148 items = [item for item in items if item is not None]
149 if len(items) > 1:
150 cls.add_error(
151 errors=errors,
152 field='items', message='More than one item provided.')
153 return cleaned_input
154
155
156 class MenuItemUpdate(MenuItemCreate):
157 class Arguments:
158 id = graphene.ID(
159 required=True, description='ID of a menu item to update.')
160 input = MenuItemInput(
161 required=True,
162 description="""Fields required to update a menu item.
163 Only one of 'url', 'category', 'page', 'collection' is allowed
164 per item""")
165
166 class Meta:
167 description = 'Updates a menu item.'
168 model = models.MenuItem
169
170 @classmethod
171 def user_is_allowed(cls, user, input):
172 return user.has_perm('menu.manage_menus')
173
174 @classmethod
175 def construct_instance(cls, instance, cleaned_data):
176 # Only one item can be assigned per menu item
177 instance.page = None
178 instance.collection = None
179 instance.category = None
180 instance.url = None
181 return super().construct_instance(instance, cleaned_data)
182
183
184 class MenuItemDelete(ModelDeleteMutation):
185 class Arguments:
186 id = graphene.ID(
187 required=True, description='ID of a menu item to delete.')
188
189 class Meta:
190 description = 'Deletes a menu item.'
191 model = models.MenuItem
192
193 @classmethod
194 def user_is_allowed(cls, user, input):
195 return user.has_perm('menu.manage_menus')
196
197
198 class NavigationType(graphene.Enum):
199 MAIN = 'main'
200 SECONDARY = 'secondary'
201
202 @property
203 def description(self):
204 if self == NavigationType.MAIN:
205 return 'Main storefront\'s navigation.'
206 return 'Secondary storefront\'s navigation.'
207
208
209 class AssignNavigation(BaseMutation):
210 menu = graphene.Field(Menu, description='Assigned navigation menu.')
211
212 class Arguments:
213 menu = graphene.ID(
214 description='ID of the menu.')
215 navigation_type = NavigationType(
216 description='Type of the navigation bar to assign the menu to.',
217 required=True)
218
219 class Meta:
220 description = 'Assigns storefront\'s navigation menus.'
221
222 @classmethod
223 @permission_required(['menu.manage_menus', 'site.manage_settings'])
224 def mutate(cls, root, info, navigation_type, menu=None):
225 errors = []
226 site_settings = info.context.site.settings
227 if menu is not None:
228 menu = cls.get_node_or_error(
229 info, menu, errors=errors, field='menu')
230 if not errors:
231 if navigation_type == NavigationType.MAIN:
232 site_settings.top_menu = menu
233 site_settings.save(update_fields=['top_menu'])
234 elif navigation_type == NavigationType.SECONDARY:
235 site_settings.bottom_menu = menu
236 site_settings.save(update_fields=['bottom_menu'])
237 else:
238 raise AssertionError(
239 'Unknown navigation type: %s' % navigation_type)
240 return AssignNavigation(menu=menu, errors=errors)
241
[end of saleor/graphql/menu/mutations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/graphql/menu/mutations.py b/saleor/graphql/menu/mutations.py
--- a/saleor/graphql/menu/mutations.py
+++ b/saleor/graphql/menu/mutations.py
@@ -21,7 +21,8 @@
class MenuItemCreateInput(MenuItemInput):
menu = graphene.ID(
- description='Menu to which item belongs to.', name='menu')
+ description='Menu to which item belongs to.', name='menu',
+ required=True)
parent = graphene.ID(
description='''
ID of the parent menu. If empty, menu will be top level
|
{"golden_diff": "diff --git a/saleor/graphql/menu/mutations.py b/saleor/graphql/menu/mutations.py\n--- a/saleor/graphql/menu/mutations.py\n+++ b/saleor/graphql/menu/mutations.py\n@@ -21,7 +21,8 @@\n \n class MenuItemCreateInput(MenuItemInput):\n menu = graphene.ID(\n- description='Menu to which item belongs to.', name='menu')\n+ description='Menu to which item belongs to.', name='menu',\n+ required=True)\n parent = graphene.ID(\n description='''\n ID of the parent menu. If empty, menu will be top level\n", "issue": "menuCreateItem input issue\nWhy `menuCreateItem` does not require `menu` argument on the schema level, but returns an error if not given? It doesn't look right. \r\nBesides, do we really need to specify `menu` argument, if `parent` is given? It's not like child could be pinned to different menu than its parent.\n", "before_files": [{"content": "import graphene\nfrom graphql_jwt.decorators import permission_required\n\nfrom ...menu import models\nfrom ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation\nfrom ..product.types import Category, Collection\nfrom ..page.types import Page\nfrom .types import Menu\n\n\nclass MenuItemInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu item.')\n url = graphene.String(description='URL of the pointed item.')\n category = graphene.ID(\n description='Category to which item points.', name='category')\n collection = graphene.ID(\n description='Collection to which item points.', name='collection')\n page = graphene.ID(\n description='Page to which item points.', name='page')\n\n\nclass MenuItemCreateInput(MenuItemInput):\n menu = graphene.ID(\n description='Menu to which item belongs to.', name='menu')\n parent = graphene.ID(\n description='''\n ID of the parent menu. 
If empty, menu will be top level\n menu.''',\n name='parent')\n\n\nclass MenuInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu.')\n\n\nclass MenuCreateInput(MenuInput):\n items = graphene.List(\n MenuItemInput, description='List of menu items.')\n\n\nclass MenuCreate(ModelMutation):\n class Arguments:\n input = MenuCreateInput(\n required=True,\n description='Fields required to create a menu.')\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = []\n for item in cleaned_input.get('items', []):\n category = item.get('category')\n collection = item.get('collection')\n page = item.get('page')\n url = item.get('url')\n if len([i for i in [category, collection, page, url] if i]) > 1:\n cls.add_error(\n errors, 'items', 'More than one item provided.')\n else:\n if category:\n category = cls.get_node_or_error(\n info, category, errors, 'items', only_type=Category)\n item['category'] = category\n elif collection:\n collection = cls.get_node_or_error(\n info, collection, errors, 'items',\n only_type=Collection)\n item['collection'] = collection\n elif page:\n page = cls.get_node_or_error(\n info, page, errors, 'items', only_type=Page)\n item['page'] = page\n elif not url:\n cls.add_error(errors, 'items', 'No menu item provided.')\n items.append(item)\n cleaned_input['items'] = items\n return cleaned_input\n\n @classmethod\n def _save_m2m(cls, info, instance, cleaned_data):\n super()._save_m2m(info, instance, cleaned_data)\n items = cleaned_data.get('items', [])\n for item in items:\n instance.items.create(**item)\n\n\nclass MenuUpdate(ModelMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to update.')\n input = MenuInput(\n required=True,\n description='Fields required to update a menu.')\n\n class Meta:\n description = 'Updates a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to delete.')\n\n class Meta:\n description = 'Deletes a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuItemCreate(ModelMutation):\n class Arguments:\n input = MenuItemCreateInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = [\n cleaned_input.get('page'), cleaned_input.get('collection'),\n cleaned_input.get('url'), cleaned_input.get('category')]\n items = [item for item in items if item is not None]\n if len(items) > 1:\n cls.add_error(\n errors=errors,\n field='items', message='More than one item provided.')\n return cleaned_input\n\n\nclass MenuItemUpdate(MenuItemCreate):\n class Arguments:\n id = graphene.ID(\n required=True, 
description='ID of a menu item to update.')\n input = MenuItemInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Updates a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def construct_instance(cls, instance, cleaned_data):\n # Only one item can be assigned per menu item\n instance.page = None\n instance.collection = None\n instance.category = None\n instance.url = None\n return super().construct_instance(instance, cleaned_data)\n\n\nclass MenuItemDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu item to delete.')\n\n class Meta:\n description = 'Deletes a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass NavigationType(graphene.Enum):\n MAIN = 'main'\n SECONDARY = 'secondary'\n\n @property\n def description(self):\n if self == NavigationType.MAIN:\n return 'Main storefront\\'s navigation.'\n return 'Secondary storefront\\'s navigation.'\n\n\nclass AssignNavigation(BaseMutation):\n menu = graphene.Field(Menu, description='Assigned navigation menu.')\n\n class Arguments:\n menu = graphene.ID(\n description='ID of the menu.')\n navigation_type = NavigationType(\n description='Type of the navigation bar to assign the menu to.',\n required=True)\n\n class Meta:\n description = 'Assigns storefront\\'s navigation menus.'\n\n @classmethod\n @permission_required(['menu.manage_menus', 'site.manage_settings'])\n def mutate(cls, root, info, navigation_type, menu=None):\n errors = []\n site_settings = info.context.site.settings\n if menu is not None:\n menu = cls.get_node_or_error(\n info, menu, errors=errors, field='menu')\n if not errors:\n if navigation_type == NavigationType.MAIN:\n site_settings.top_menu = menu\n site_settings.save(update_fields=['top_menu'])\n elif navigation_type == NavigationType.SECONDARY:\n site_settings.bottom_menu = menu\n site_settings.save(update_fields=['bottom_menu'])\n else:\n raise AssertionError(\n 'Unknown navigation type: %s' % navigation_type)\n return AssignNavigation(menu=menu, errors=errors)\n", "path": "saleor/graphql/menu/mutations.py"}]}
| 2,868 | 131 |
gh_patches_debug_62393
|
rasdani/github-patches
|
git_diff
|
AUTOMATIC1111__stable-diffusion-webui-6772
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: New SHA256 hash takes extremely long time up to a point of model load being unusable
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Newly added sha-256 hash takes extremely long time to calculate on model load up to a point where loading appears to hang (i've restarted server twice before i even let it run until completion)
Previously switching to a new model was sub 10 sec, now switching to a new model (that does not have hash stored already) takes 100-150 sec (and this is a high end system)!
And to make it worse, messages about hash calculation are only printed **after** it has been calculated, there is no progress info or anything to indicate system is actually doing anything for 2 min!
### Steps to reproduce the problem
1. Switch to a new model and wait for completion - it takes forever
### What should have happened?
Model load should **never** take over 2 minutes to complete.
### Commit where the problem happens
f8c512478568293155539f616dce26c5e4495055
### What platforms do you use to access UI ?
Windows, Linux
### What browsers do you use to access the UI ?
Google Chrome, Microsoft Edge
### Command Line Arguments
```Shell
--api --xformers
```
### Additional information, context and logs
Console log showing model load taking 142 seconds!
```text
Calculating sha256 for /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt: bcc0afd3b264ea028928187f56f70840f8d87ccf283b020982beba35d9c7e4ef
Loading weights [bcc0afd3b2] from /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt
Couldn't find VAE named vae-ft-mse-840000-ema-pruned; using None instead
Applying xformers cross attention optimization.
Weights loaded in 142.6s.
```
</issue>
<code>
[start of modules/hashes.py]
1 import hashlib
2 import json
3 import os.path
4
5 import filelock
6
7
8 cache_filename = "cache.json"
9 cache_data = None
10
11
12 def dump_cache():
13 with filelock.FileLock(cache_filename+".lock"):
14 with open(cache_filename, "w", encoding="utf8") as file:
15 json.dump(cache_data, file, indent=4)
16
17
18 def cache(subsection):
19 global cache_data
20
21 if cache_data is None:
22 with filelock.FileLock(cache_filename+".lock"):
23 if not os.path.isfile(cache_filename):
24 cache_data = {}
25 else:
26 with open(cache_filename, "r", encoding="utf8") as file:
27 cache_data = json.load(file)
28
29 s = cache_data.get(subsection, {})
30 cache_data[subsection] = s
31
32 return s
33
34
35 def calculate_sha256(filename):
36 hash_sha256 = hashlib.sha256()
37
38 with open(filename, "rb") as f:
39 for chunk in iter(lambda: f.read(4096), b""):
40 hash_sha256.update(chunk)
41
42 return hash_sha256.hexdigest()
43
44
45 def sha256_from_cache(filename, title):
46 hashes = cache("hashes")
47 ondisk_mtime = os.path.getmtime(filename)
48
49 if title not in hashes:
50 return None
51
52 cached_sha256 = hashes[title].get("sha256", None)
53 cached_mtime = hashes[title].get("mtime", 0)
54
55 if ondisk_mtime > cached_mtime or cached_sha256 is None:
56 return None
57
58 return cached_sha256
59
60
61 def sha256(filename, title):
62 hashes = cache("hashes")
63
64 sha256_value = sha256_from_cache(filename, title)
65 if sha256_value is not None:
66 return sha256_value
67
68 print(f"Calculating sha256 for {filename}: ", end='')
69 sha256_value = calculate_sha256(filename)
70 print(f"{sha256_value}")
71
72 hashes[title] = {
73 "mtime": os.path.getmtime(filename),
74 "sha256": sha256_value,
75 }
76
77 dump_cache()
78
79 return sha256_value
80
81
82
83
84
85
[end of modules/hashes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/modules/hashes.py b/modules/hashes.py
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -34,9 +34,10 @@
def calculate_sha256(filename):
hash_sha256 = hashlib.sha256()
+ blksize = 1024 * 1024
with open(filename, "rb") as f:
- for chunk in iter(lambda: f.read(4096), b""):
+ for chunk in iter(lambda: f.read(blksize), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
|
{"golden_diff": "diff --git a/modules/hashes.py b/modules/hashes.py\n--- a/modules/hashes.py\n+++ b/modules/hashes.py\n@@ -34,9 +34,10 @@\n \r\n def calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n+ blksize = 1024 * 1024\r\n \r\n with open(filename, \"rb\") as f:\r\n- for chunk in iter(lambda: f.read(4096), b\"\"):\r\n+ for chunk in iter(lambda: f.read(blksize), b\"\"):\r\n hash_sha256.update(chunk)\r\n \r\n return hash_sha256.hexdigest()\n", "issue": "[Bug]: New SHA256 hash takes extremely long time up to a point of of model load being unusable\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues and checked the recent builds/commits\r\n\r\n### What happened?\r\n\r\nNewly added sha-256 hash takes extremely long time to calculate on model load up to a point where loading appears to hang (i've restarted server twice before i even let it run until completion) \r\n\r\nPreviously switching to a new model was sub 10 sec, now switching to a new model (that does not have hash stored already) takes 100-150 sec (and this is a high end system)!\r\n\r\nAnd to make it worse, messages about hash calculation are only printed **after** it has been calculated, there is no progress info or anything to indicate system is actually doing anything for 2 min!\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Switch to a new model and wait for completion - it takes forever\r\n\r\n\r\n### What should have happened?\r\n\r\nModel load should **never** take over 2 minutes to complete.\r\n\r\n### Commit where the problem happens\r\n\r\nf8c512478568293155539f616dce26c5e4495055\r\n\r\n### What platforms do you use to access UI ?\r\n\r\nWindows, Linux\r\n\r\n### What browsers do you use to access the UI ?\r\n\r\nGoogle Chrome, Microsoft Edge\r\n\r\n### Command Line Arguments\r\n\r\n```Shell\r\n--api --xformers\r\n```\r\n\r\n\r\n### Additional information, context and logs\r\n\r\nConsole log showing model load taking 142 seconds!\r\n\r\n```text\r\nCalculating sha256 for /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt: bcc0afd3b264ea028928187f56f70840f8d87ccf283b020982beba35d9c7e4ef\r\nLoading weights [bcc0afd3b2] from /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt\r\nCouldn't find VAE named vae-ft-mse-840000-ema-pruned; using None instead\r\nApplying xformers cross attention optimization.\r\nWeights loaded in 142.6s.\r\n```\r\n\n", "before_files": [{"content": "import hashlib\r\nimport json\r\nimport os.path\r\n\r\nimport filelock\r\n\r\n\r\ncache_filename = \"cache.json\"\r\ncache_data = None\r\n\r\n\r\ndef dump_cache():\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n with open(cache_filename, \"w\", encoding=\"utf8\") as file:\r\n json.dump(cache_data, file, indent=4)\r\n\r\n\r\ndef cache(subsection):\r\n global cache_data\r\n\r\n if cache_data is None:\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n if not os.path.isfile(cache_filename):\r\n cache_data = {}\r\n else:\r\n with open(cache_filename, \"r\", encoding=\"utf8\") as file:\r\n cache_data = json.load(file)\r\n\r\n s = cache_data.get(subsection, {})\r\n cache_data[subsection] = s\r\n\r\n return s\r\n\r\n\r\ndef calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n\r\n with open(filename, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(4096), b\"\"):\r\n hash_sha256.update(chunk)\r\n\r\n return hash_sha256.hexdigest()\r\n\r\n\r\ndef sha256_from_cache(filename, title):\r\n hashes = cache(\"hashes\")\r\n 
ondisk_mtime = os.path.getmtime(filename)\r\n\r\n if title not in hashes:\r\n return None\r\n\r\n cached_sha256 = hashes[title].get(\"sha256\", None)\r\n cached_mtime = hashes[title].get(\"mtime\", 0)\r\n\r\n if ondisk_mtime > cached_mtime or cached_sha256 is None:\r\n return None\r\n\r\n return cached_sha256\r\n\r\n\r\ndef sha256(filename, title):\r\n hashes = cache(\"hashes\")\r\n\r\n sha256_value = sha256_from_cache(filename, title)\r\n if sha256_value is not None:\r\n return sha256_value\r\n\r\n print(f\"Calculating sha256 for {filename}: \", end='')\r\n sha256_value = calculate_sha256(filename)\r\n print(f\"{sha256_value}\")\r\n\r\n hashes[title] = {\r\n \"mtime\": os.path.getmtime(filename),\r\n \"sha256\": sha256_value,\r\n }\r\n\r\n dump_cache()\r\n\r\n return sha256_value\r\n\r\n\r\n\r\n\r\n\r\n", "path": "modules/hashes.py"}]}
| 1,734 | 148 |
gh_patches_debug_44267
|
rasdani/github-patches
|
git_diff
|
dask__dask-1246
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dask.array regression involving arithmetic with xarray objects
This code worked with dask v0.7.6, but failed when I updated to v0.9.0:
```
In [1]: import numpy as np
In [2]: import xarray as xr
In [3]: import dask.array as da
In [4]: x = xr.core.indexing.LazilyIndexedArray(np.zeros((3, 3)))
In [5]: y = da.from_array(x, (2, 2))
In [6]: (y[:2, :2] * y[:2, :2]).compute()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-6-82666a5a7d29> in <module>()
----> 1 (y[:2, :2] * y[:2, :2]).compute()
/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/base.py in compute(self, **kwargs)
35
36 def compute(self, **kwargs):
---> 37 return compute(self, **kwargs)[0]
38
39 @classmethod
/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/base.py in compute(*args, **kwargs)
108 for opt, val in groups.items()])
109 keys = [var._keys() for var in variables]
--> 110 results = get(dsk, keys, **kwargs)
111
112 results_iter = iter(results)
/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/threaded.py in get(dsk, result, cache, num_workers, **kwargs)
55 results = get_async(pool.apply_async, len(pool._pool), dsk, result,
56 cache=cache, queue=queue, get_id=_thread_get_id,
---> 57 **kwargs)
58
59 return results
/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py in get_async(apply_async, num_workers, dsk, result, cache, queue, get_id, raise_on_exception, rerun_exceptions_locally, callbacks, **kwargs)
486 _execute_task(task, data) # Re-execute locally
487 else:
--> 488 raise(remote_exception(res, tb))
489 state['cache'][key] = res
490 finish_task(dsk, key, state, results, keyorder.get)
TypeError: unsupported operand type(s) for *: 'LazilyIndexedArray' and 'LazilyIndexedArray'
Traceback
---------
File "/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py", line 267, in execute_task
result = _execute_task(task, data)
File "/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py", line 249, in _execute_task
return func(*args2)
```
LazilyIndexedArray is an object that satisfies some of the usual duck-array API (e.g. shape, dtype, `__getitem__`) and that can be coerced into a NumPy array:
https://github.com/pydata/xarray/blob/v0.7.2/xarray/core/indexing.py#L272
I _think_ it should be valid input to `da.from_array` -- this certainly worked in old versions of dask.
</issue>
<code>
[start of dask/array/optimization.py]
1 from __future__ import absolute_import, division, print_function
2
3 from operator import getitem
4
5 import numpy as np
6 from toolz import valmap, partial
7
8 from .core import getarray
9 from ..core import flatten
10 from ..optimize import cull, fuse, dealias, inline_functions
11 from ..rewrite import RuleSet, RewriteRule
12
13
14 def optimize(dsk, keys, **kwargs):
15 """ Optimize dask for array computation
16
17 1. Cull tasks not necessary to evaluate keys
18 2. Remove full slicing, e.g. x[:]
19 3. Inline fast functions like getitem and np.transpose
20 """
21 keys = list(flatten(keys))
22 fast_functions = kwargs.get('fast_functions',
23 set([getarray, np.transpose]))
24 dsk2, dependencies = cull(dsk, keys)
25 dsk4, dependencies = fuse(dsk2, keys, dependencies)
26 dsk5 = optimize_slices(dsk4)
27 dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions,
28 dependencies=dependencies)
29 return dsk6
30
31
32 def optimize_slices(dsk):
33 """ Optimize slices
34
35 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]
36 2. Remove full slices, like x[:] -> x
37
38 See also:
39 fuse_slice_dict
40 """
41 dsk = dsk.copy()
42 for k, v in dsk.items():
43 if type(v) is tuple:
44 if v[0] is getitem or v[0] is getarray:
45 try:
46 func, a, a_index = v
47 except ValueError: # has four elements, includes a lock
48 continue
49 while type(a) is tuple and (a[0] is getitem or a[0] is getarray):
50 try:
51 _, b, b_index = a
52 except ValueError: # has four elements, includes a lock
53 break
54 if (type(a_index) is tuple) != (type(b_index) is tuple):
55 break
56 if ((type(a_index) is tuple) and
57 (len(a_index) != len(b_index)) and
58 any(i is None for i in b_index + a_index)):
59 break
60 try:
61 c_index = fuse_slice(b_index, a_index)
62 except NotImplementedError:
63 break
64 (a, a_index) = (b, c_index)
65 if (type(a_index) is slice and
66 not a_index.start and
67 a_index.stop is None and
68 a_index.step is None):
69 dsk[k] = a
70 elif type(a_index) is tuple and all(type(s) is slice and
71 not s.start and
72 s.stop is None and
73 s.step is None
74 for s in a_index):
75 dsk[k] = a
76 else:
77 dsk[k] = (func, a, a_index)
78 return dsk
79
80
81 def normalize_slice(s):
82 """ Replace Nones in slices with integers
83
84 >>> normalize_slice(slice(None, None, None))
85 slice(0, None, 1)
86 """
87 start, stop, step = s.start, s.stop, s.step
88 if start is None:
89 start = 0
90 if step is None:
91 step = 1
92 if start < 0 or step < 0 or stop is not None and stop < 0:
93 raise NotImplementedError()
94 return slice(start, stop, step)
95
96
97 def fuse_slice(a, b):
98 """ Fuse stacked slices together
99
100 Fuse a pair of repeated slices into a single slice:
101
102 >>> fuse_slice(slice(1000, 2000), slice(10, 15))
103 slice(1010, 1015, None)
104
105 This also works for tuples of slices
106
107 >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),
108 ... (slice(10, 15), [5, 2]))
109 (slice(110, 115, None), [150, 120])
110
111 And a variety of other interesting cases
112
113 >>> fuse_slice(slice(1000, 2000), 10) # integers
114 1010
115
116 >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))
117 slice(1050, 1100, 10)
118
119 >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists
120 [1005, 1010, 1015]
121
122 >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP
123 None
124 """
125 # None only works if the second side is a full slice
126 if a is None and b == slice(None, None):
127 return None
128
129 # Replace None with 0 and one in start and step
130 if isinstance(a, slice):
131 a = normalize_slice(a)
132 if isinstance(b, slice):
133 b = normalize_slice(b)
134
135 if isinstance(a, slice) and isinstance(b, int):
136 if b < 0:
137 raise NotImplementedError()
138 return a.start + b*a.step
139
140 if isinstance(a, slice) and isinstance(b, slice):
141 start = a.start + a.step * b.start
142 if b.stop is not None:
143 stop = a.start + a.step * b.stop
144 else:
145 stop = None
146 if a.stop is not None:
147 if stop is not None:
148 stop = min(a.stop, stop)
149 else:
150 stop = a.stop
151 stop = stop
152 step = a.step * b.step
153 if step == 1:
154 step = None
155 return slice(start, stop, step)
156
157 if isinstance(b, list):
158 return [fuse_slice(a, bb) for bb in b]
159 if isinstance(a, list) and isinstance(b, (int, slice)):
160 return a[b]
161
162 if isinstance(a, tuple) and not isinstance(b, tuple):
163 b = (b,)
164
165 # If given two tuples walk through both, being mindful of uneven sizes
166 # and newaxes
167 if isinstance(a, tuple) and isinstance(b, tuple):
168
169 if (any(isinstance(item, list) for item in a) and
170 any(isinstance(item, list) for item in b)):
171 raise NotImplementedError("Can't handle multiple list indexing")
172
173 j = 0
174 result = list()
175 for i in range(len(a)):
176 # axis ceased to exist or we're out of b
177 if isinstance(a[i], int) or j == len(b):
178 result.append(a[i])
179 continue
180 while b[j] is None: # insert any Nones on the rhs
181 result.append(None)
182 j += 1
183 result.append(fuse_slice(a[i], b[j])) # Common case
184 j += 1
185 while j < len(b): # anything leftover on the right?
186 result.append(b[j])
187 j += 1
188 return tuple(result)
189 raise NotImplementedError()
190
[end of dask/array/optimization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dask/array/optimization.py b/dask/array/optimization.py
--- a/dask/array/optimization.py
+++ b/dask/array/optimization.py
@@ -3,12 +3,10 @@
from operator import getitem
import numpy as np
-from toolz import valmap, partial
from .core import getarray
from ..core import flatten
-from ..optimize import cull, fuse, dealias, inline_functions
-from ..rewrite import RuleSet, RewriteRule
+from ..optimize import cull, fuse, inline_functions
def optimize(dsk, keys, **kwargs):
@@ -20,12 +18,12 @@
"""
keys = list(flatten(keys))
fast_functions = kwargs.get('fast_functions',
- set([getarray, np.transpose]))
+ set([getarray, np.transpose]))
dsk2, dependencies = cull(dsk, keys)
dsk4, dependencies = fuse(dsk2, keys, dependencies)
dsk5 = optimize_slices(dsk4)
dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions,
- dependencies=dependencies)
+ dependencies=dependencies)
return dsk6
@@ -38,34 +36,39 @@
See also:
fuse_slice_dict
"""
+ getters = (getarray, getitem)
dsk = dsk.copy()
for k, v in dsk.items():
if type(v) is tuple:
- if v[0] is getitem or v[0] is getarray:
+ if v[0] in getters:
try:
func, a, a_index = v
+ use_getarray = func is getarray
except ValueError: # has four elements, includes a lock
continue
- while type(a) is tuple and (a[0] is getitem or a[0] is getarray):
+ while type(a) is tuple and a[0] in getters:
try:
- _, b, b_index = a
+ f2, b, b_index = a
+ use_getarray |= f2 is getarray
except ValueError: # has four elements, includes a lock
break
if (type(a_index) is tuple) != (type(b_index) is tuple):
break
if ((type(a_index) is tuple) and
- (len(a_index) != len(b_index)) and
- any(i is None for i in b_index + a_index)):
+ (len(a_index) != len(b_index)) and
+ any(i is None for i in b_index + a_index)):
break
try:
c_index = fuse_slice(b_index, a_index)
except NotImplementedError:
break
(a, a_index) = (b, c_index)
- if (type(a_index) is slice and
- not a_index.start and
- a_index.stop is None and
- a_index.step is None):
+ if use_getarray:
+ dsk[k] = (getarray, a, a_index)
+ elif (type(a_index) is slice and
+ not a_index.start and
+ a_index.stop is None and
+ a_index.step is None):
dsk[k] = a
elif type(a_index) is tuple and all(type(s) is slice and
not s.start and
@@ -74,7 +77,7 @@
for s in a_index):
dsk[k] = a
else:
- dsk[k] = (func, a, a_index)
+ dsk[k] = (getitem, a, a_index)
return dsk
@@ -167,7 +170,7 @@
if isinstance(a, tuple) and isinstance(b, tuple):
if (any(isinstance(item, list) for item in a) and
- any(isinstance(item, list) for item in b)):
+ any(isinstance(item, list) for item in b)):
raise NotImplementedError("Can't handle multiple list indexing")
j = 0
|
{"golden_diff": "diff --git a/dask/array/optimization.py b/dask/array/optimization.py\n--- a/dask/array/optimization.py\n+++ b/dask/array/optimization.py\n@@ -3,12 +3,10 @@\n from operator import getitem\n \n import numpy as np\n-from toolz import valmap, partial\n \n from .core import getarray\n from ..core import flatten\n-from ..optimize import cull, fuse, dealias, inline_functions\n-from ..rewrite import RuleSet, RewriteRule\n+from ..optimize import cull, fuse, inline_functions\n \n \n def optimize(dsk, keys, **kwargs):\n@@ -20,12 +18,12 @@\n \"\"\"\n keys = list(flatten(keys))\n fast_functions = kwargs.get('fast_functions',\n- set([getarray, np.transpose]))\n+ set([getarray, np.transpose]))\n dsk2, dependencies = cull(dsk, keys)\n dsk4, dependencies = fuse(dsk2, keys, dependencies)\n dsk5 = optimize_slices(dsk4)\n dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions,\n- dependencies=dependencies)\n+ dependencies=dependencies)\n return dsk6\n \n \n@@ -38,34 +36,39 @@\n See also:\n fuse_slice_dict\n \"\"\"\n+ getters = (getarray, getitem)\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple:\n- if v[0] is getitem or v[0] is getarray:\n+ if v[0] in getters:\n try:\n func, a, a_index = v\n+ use_getarray = func is getarray\n except ValueError: # has four elements, includes a lock\n continue\n- while type(a) is tuple and (a[0] is getitem or a[0] is getarray):\n+ while type(a) is tuple and a[0] in getters:\n try:\n- _, b, b_index = a\n+ f2, b, b_index = a\n+ use_getarray |= f2 is getarray\n except ValueError: # has four elements, includes a lock\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if ((type(a_index) is tuple) and\n- (len(a_index) != len(b_index)) and\n- any(i is None for i in b_index + a_index)):\n+ (len(a_index) != len(b_index)) and\n+ any(i is None for i in b_index + a_index)):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n except NotImplementedError:\n break\n (a, a_index) = (b, c_index)\n- if (type(a_index) is slice and\n- not a_index.start and\n- a_index.stop is None and\n- a_index.step is None):\n+ if use_getarray:\n+ dsk[k] = (getarray, a, a_index)\n+ elif (type(a_index) is slice and\n+ not a_index.start and\n+ a_index.stop is None and\n+ a_index.step is None):\n dsk[k] = a\n elif type(a_index) is tuple and all(type(s) is slice and\n not s.start and\n@@ -74,7 +77,7 @@\n for s in a_index):\n dsk[k] = a\n else:\n- dsk[k] = (func, a, a_index)\n+ dsk[k] = (getitem, a, a_index)\n return dsk\n \n \n@@ -167,7 +170,7 @@\n if isinstance(a, tuple) and isinstance(b, tuple):\n \n if (any(isinstance(item, list) for item in a) and\n- any(isinstance(item, list) for item in b)):\n+ any(isinstance(item, list) for item in b)):\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n \n j = 0\n", "issue": "dask.array regression involving arithmetic with xarray objects\nThis code worked with dask v0.7.6, but failed when I updated to v0.9.0:\n\n```\nIn [1]: import numpy as np\n\nIn [2]: import xarray as xr\n\nIn [3]: import dask.array as da\n\nIn [4]: x = xr.core.indexing.LazilyIndexedArray(np.zeros((3, 3)))\n\nIn [5]: y = da.from_array(x, (2, 2))\n\nIn [6]: (y[:2, :2] * y[:2, :2]).compute()\n---------------------------------------------------------------------------\nTypeError Traceback (most recent call last)\n<ipython-input-6-82666a5a7d29> in <module>()\n----> 1 (y[:2, :2] * y[:2, :2]).compute()\n\n/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/base.py in compute(self, **kwargs)\n 35\n 36 
def compute(self, **kwargs):\n---> 37 return compute(self, **kwargs)[0]\n 38\n 39 @classmethod\n\n/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/base.py in compute(*args, **kwargs)\n 108 for opt, val in groups.items()])\n 109 keys = [var._keys() for var in variables]\n--> 110 results = get(dsk, keys, **kwargs)\n 111\n 112 results_iter = iter(results)\n\n/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/threaded.py in get(dsk, result, cache, num_workers, **kwargs)\n 55 results = get_async(pool.apply_async, len(pool._pool), dsk, result,\n 56 cache=cache, queue=queue, get_id=_thread_get_id,\n---> 57 **kwargs)\n 58\n 59 return results\n\n/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py in get_async(apply_async, num_workers, dsk, result, cache, queue, get_id, raise_on_exception, rerun_exceptions_locally, callbacks, **kwargs)\n 486 _execute_task(task, data) # Re-execute locally\n 487 else:\n--> 488 raise(remote_exception(res, tb))\n 489 state['cache'][key] = res\n 490 finish_task(dsk, key, state, results, keyorder.get)\n\nTypeError: unsupported operand type(s) for *: 'LazilyIndexedArray' and 'LazilyIndexedArray'\n\nTraceback\n---------\n File \"/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py\", line 267, in execute_task\n result = _execute_task(task, data)\n File \"/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py\", line 249, in _execute_task\n return func(*args2)\n```\n\nLazilyIndexedArray is an object that satisfies some of the usual duck-array API (e.g. shape, dtype, `__getitem__`) and that can be coerced into a NumPy array:\nhttps://github.com/pydata/xarray/blob/v0.7.2/xarray/core/indexing.py#L272\n\nI _think_ it should be valid into to `da.from_array` -- this certainly worked in old versions of dask.\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom operator import getitem\n\nimport numpy as np\nfrom toolz import valmap, partial\n\nfrom .core import getarray\nfrom ..core import flatten\nfrom ..optimize import cull, fuse, dealias, inline_functions\nfrom ..rewrite import RuleSet, RewriteRule\n\n\ndef optimize(dsk, keys, **kwargs):\n \"\"\" Optimize dask for array computation\n\n 1. Cull tasks not necessary to evaluate keys\n 2. Remove full slicing, e.g. x[:]\n 3. Inline fast functions like getitem and np.transpose\n \"\"\"\n keys = list(flatten(keys))\n fast_functions = kwargs.get('fast_functions',\n set([getarray, np.transpose]))\n dsk2, dependencies = cull(dsk, keys)\n dsk4, dependencies = fuse(dsk2, keys, dependencies)\n dsk5 = optimize_slices(dsk4)\n dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions,\n dependencies=dependencies)\n return dsk6\n\n\ndef optimize_slices(dsk):\n \"\"\" Optimize slices\n\n 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]\n 2. 
Remove full slices, like x[:] -> x\n\n See also:\n fuse_slice_dict\n \"\"\"\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple:\n if v[0] is getitem or v[0] is getarray:\n try:\n func, a, a_index = v\n except ValueError: # has four elements, includes a lock\n continue\n while type(a) is tuple and (a[0] is getitem or a[0] is getarray):\n try:\n _, b, b_index = a\n except ValueError: # has four elements, includes a lock\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if ((type(a_index) is tuple) and\n (len(a_index) != len(b_index)) and\n any(i is None for i in b_index + a_index)):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n except NotImplementedError:\n break\n (a, a_index) = (b, c_index)\n if (type(a_index) is slice and\n not a_index.start and\n a_index.stop is None and\n a_index.step is None):\n dsk[k] = a\n elif type(a_index) is tuple and all(type(s) is slice and\n not s.start and\n s.stop is None and\n s.step is None\n for s in a_index):\n dsk[k] = a\n else:\n dsk[k] = (func, a, a_index)\n return dsk\n\n\ndef normalize_slice(s):\n \"\"\" Replace Nones in slices with integers\n\n >>> normalize_slice(slice(None, None, None))\n slice(0, None, 1)\n \"\"\"\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return slice(start, stop, step)\n\n\ndef fuse_slice(a, b):\n \"\"\" Fuse stacked slices together\n\n Fuse a pair of repeated slices into a single slice:\n\n >>> fuse_slice(slice(1000, 2000), slice(10, 15))\n slice(1010, 1015, None)\n\n This also works for tuples of slices\n\n >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),\n ... (slice(10, 15), [5, 2]))\n (slice(110, 115, None), [150, 120])\n\n And a variety of other interesting cases\n\n >>> fuse_slice(slice(1000, 2000), 10) # integers\n 1010\n\n >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))\n slice(1050, 1100, 10)\n\n >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists\n [1005, 1010, 1015]\n\n >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP\n None\n \"\"\"\n # None only works if the second side is a full slice\n if a is None and b == slice(None, None):\n return None\n\n # Replace None with 0 and one in start and step\n if isinstance(a, slice):\n a = normalize_slice(a)\n if isinstance(b, slice):\n b = normalize_slice(b)\n\n if isinstance(a, slice) and isinstance(b, int):\n if b < 0:\n raise NotImplementedError()\n return a.start + b*a.step\n\n if isinstance(a, slice) and isinstance(b, slice):\n start = a.start + a.step * b.start\n if b.stop is not None:\n stop = a.start + a.step * b.stop\n else:\n stop = None\n if a.stop is not None:\n if stop is not None:\n stop = min(a.stop, stop)\n else:\n stop = a.stop\n stop = stop\n step = a.step * b.step\n if step == 1:\n step = None\n return slice(start, stop, step)\n\n if isinstance(b, list):\n return [fuse_slice(a, bb) for bb in b]\n if isinstance(a, list) and isinstance(b, (int, slice)):\n return a[b]\n\n if isinstance(a, tuple) and not isinstance(b, tuple):\n b = (b,)\n\n # If given two tuples walk through both, being mindful of uneven sizes\n # and newaxes\n if isinstance(a, tuple) and isinstance(b, tuple):\n\n if (any(isinstance(item, list) for item in a) and\n any(isinstance(item, list) for item in b)):\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n\n j = 0\n result = list()\n for i in range(len(a)):\n # axis ceased to exist or we're 
out of b\n if isinstance(a[i], int) or j == len(b):\n result.append(a[i])\n continue\n while b[j] is None: # insert any Nones on the rhs\n result.append(None)\n j += 1\n result.append(fuse_slice(a[i], b[j])) # Common case\n j += 1\n while j < len(b): # anything leftover on the right?\n result.append(b[j])\n j += 1\n return tuple(result)\n raise NotImplementedError()\n", "path": "dask/array/optimization.py"}]}
| 3,427 | 908 |
gh_patches_debug_605
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1664
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.71
On the docket:
+ [x] Secure Pex against sha1 collision attacks. #1662
+ [x] Problems building venvs from certain distributions. #1656
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.70"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.70"
+__version__ = "2.1.71"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.70\"\n+__version__ = \"2.1.71\"\n", "issue": "Release 2.1.71\nOn the docket:\r\n+ [x] Secure Pex against sha1 collision attacks. #1662 \r\n+ [x] Problems building venvs from certain distributions. #1656\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.70\"\n", "path": "pex/version.py"}]}
| 634 | 96 |
gh_patches_debug_881
|
rasdani/github-patches
|
git_diff
|
python__peps-3263
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Infra: Check Sphinx warnings on CI
This is similar to what we have in the CPython repo, most recently https://github.com/python/cpython/pull/106460, and will help us gradually remove Sphinx warnings and avoid new ones being introduced.
It checks three things:
1. If a file previously had no warnings (not listed in `.nitignore`), and new ones are introduced, it fails
* -> To prevent regressions
2. If a file previously had warnings (it's listed in `.nitignore`), but now has none, it fails and tells us to remove it from `.nitignore`
* To help us incrementally improve over time
3. If a file previously had warnings (it's listed in `.nitignore`), and still has warnings, it doesn't fail, but it will annotate the PR to show the warning
* To make them more visible, and give us the opportunity to fix them
I've intentionally kept the code and layout as close as possible to the CPython version (see https://github.com/python/cpython/tree/main/Doc/tools) for easier future maintenance.
<!-- readthedocs-preview pep-previews start -->
----
:books: Documentation preview :books:: https://pep-previews--3213.org.readthedocs.build/
<!-- readthedocs-preview pep-previews end -->
</issue>
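For readers unfamiliar with the mechanism, a minimal standalone sketch of the three-way comparison described in the issue above (the file name, warning format and function name are assumptions for illustration; the real CPython tooling differs in detail):

```python
# Hypothetical sketch of the .nitignore comparison; not the actual CI script.
from pathlib import Path

def check_warnings(warning_lines, nitignore_path="Doc/tools/.nitignore"):
    """Split Sphinx warning output into regressions / now-clean / known-bad files."""
    ignored = {
        line.strip()
        for line in Path(nitignore_path).read_text().splitlines()
        if line.strip() and not line.startswith("#")
    }
    files_with_warnings = {line.split(":", 1)[0] for line in warning_lines if ":" in line}

    new_regressions = files_with_warnings - ignored   # case 1: fail the build
    now_clean = ignored - files_with_warnings         # case 2: fail, ask to prune .nitignore
    still_warning = files_with_warnings & ignored     # case 3: only annotate the PR
    return new_regressions, now_clean, still_warning
```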
<code>
[start of conf.py]
1 # This file is placed in the public domain or under the
2 # CC0-1.0-Universal license, whichever is more permissive.
3
4 """Configuration for building PEPs using Sphinx."""
5
6 from pathlib import Path
7 import sys
8
9 sys.path.append(str(Path(".").absolute()))
10
11 # -- Project information -----------------------------------------------------
12
13 project = "PEPs"
14 master_doc = "contents"
15
16 # -- General configuration ---------------------------------------------------
17
18 # Add any Sphinx extension module names here, as strings.
19 extensions = [
20 "pep_sphinx_extensions",
21 "sphinx.ext.intersphinx",
22 "sphinx.ext.githubpages",
23 ]
24
25 # The file extensions of source files. Sphinx uses these suffixes as sources.
26 source_suffix = {
27 ".rst": "pep",
28 ".txt": "pep",
29 }
30
31 # List of patterns (relative to source dir) to ignore when looking for source files.
32 include_patterns = [
33 # Required for Sphinx
34 "contents.rst",
35 # PEP files
36 "pep-????.rst",
37 "pep-????.txt",
38 # PEP ancillary files
39 "pep-????/*.rst",
40 # Documentation
41 "docs/*.rst",
42 ]
43 exclude_patterns = [
44 # PEP Template
45 "pep-0012/pep-NNNN.rst",
46 ]
47
48 # Intersphinx configuration
49 intersphinx_mapping = {
50 'python': ('https://docs.python.org/3/', None),
51 'packaging': ('https://packaging.python.org/en/latest/', None),
52 'devguide': ('https://devguide.python.org/', None),
53 'py3.11': ('https://docs.python.org/3.11/', None),
54 'py3.12': ('https://docs.python.org/3.12/', None),
55 }
56 intersphinx_disabled_reftypes = []
57
58 # -- Options for HTML output -------------------------------------------------
59
60 # HTML output settings
61 html_math_renderer = "maths_to_html" # Maths rendering
62
63 # Theme settings
64 html_theme_path = ["pep_sphinx_extensions"]
65 html_theme = "pep_theme" # The actual theme directory (child of html_theme_path)
66 html_use_index = False # Disable index (we use PEP 0)
67 html_style = "" # must be defined here or in theme.conf, but is unused
68 html_permalinks = False # handled in the PEPContents transform
69 html_baseurl = "https://peps.python.org" # to create the CNAME file
70 gettext_auto_build = False # speed-ups
71
72 templates_path = ["pep_sphinx_extensions/pep_theme/templates"] # Theme template relative paths from `confdir`
73
[end of conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conf.py b/conf.py
--- a/conf.py
+++ b/conf.py
@@ -45,6 +45,9 @@
"pep-0012/pep-NNNN.rst",
]
+# Warn on missing references
+nitpicky = True
+
# Intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
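As a side note on the change above: once `nitpicky` is enabled, Sphinx also allows silencing individual known-bad references; a minimal, illustrative `conf.py` fragment (the ignore entry is made up, not taken from this repository):

```python
# conf.py — illustrative follow-up only
nitpicky = True
nitpick_ignore = [
    ("py:class", "some.module.UndocumentedClass"),  # hypothetical reference to silence
]
```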
|
{"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -45,6 +45,9 @@\n \"pep-0012/pep-NNNN.rst\",\n ]\n \n+# Warn on missing references\n+nitpicky = True\n+\n # Intersphinx configuration\n intersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n", "issue": "Infra: Check Sphinx warnings on CI\nThis is similar to what we have in the CPython repo, most recently: https://github.com/python/cpython/pull/106460, and will help us gradually remove Sphinx warnings, and avoid new ones being introduces.\r\n\r\nIt checks three things:\r\n\r\n1. If a file previously had no warnings (not listed in `.nitignore`), and new ones are introduced, it fails\r\n * -> To prevent regressions\r\n\r\n2. If a file previously had warnings (it's lsited in `.nitignore`), but now has none, it fails and tells us to remove it from `.nitignore`\r\n * To help us incrementally improve over time\r\n\r\n3. If a file previously had warnings (it's listed in `.nitignore`), and still has warnings, it doesn't fail, but it will annotate the PR to show the warning\r\n * To make them more visible, and give us the opportunity to fix them\r\n\r\nI've intentionally kept the code and layout as close as possible to the CPython version (see https://github.com/python/cpython/tree/main/Doc/tools) for easier future maintenance.\r\n\r\n\r\n\r\n<!-- readthedocs-preview pep-previews start -->\r\n----\n:books: Documentation preview :books:: https://pep-previews--3213.org.readthedocs.build/\n\r\n<!-- readthedocs-preview pep-previews end -->\n", "before_files": [{"content": "# This file is placed in the public domain or under the\n# CC0-1.0-Universal license, whichever is more permissive.\n\n\"\"\"Configuration for building PEPs using Sphinx.\"\"\"\n\nfrom pathlib import Path\nimport sys\n\nsys.path.append(str(Path(\".\").absolute()))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"PEPs\"\nmaster_doc = \"contents\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings.\nextensions = [\n \"pep_sphinx_extensions\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.githubpages\",\n]\n\n# The file extensions of source files. 
Sphinx uses these suffixes as sources.\nsource_suffix = {\n \".rst\": \"pep\",\n \".txt\": \"pep\",\n}\n\n# List of patterns (relative to source dir) to ignore when looking for source files.\ninclude_patterns = [\n # Required for Sphinx\n \"contents.rst\",\n # PEP files\n \"pep-????.rst\",\n \"pep-????.txt\",\n # PEP ancillary files\n \"pep-????/*.rst\",\n # Documentation\n \"docs/*.rst\",\n]\nexclude_patterns = [\n # PEP Template\n \"pep-0012/pep-NNNN.rst\",\n]\n\n# Intersphinx configuration\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'packaging': ('https://packaging.python.org/en/latest/', None),\n 'devguide': ('https://devguide.python.org/', None),\n 'py3.11': ('https://docs.python.org/3.11/', None),\n 'py3.12': ('https://docs.python.org/3.12/', None),\n}\nintersphinx_disabled_reftypes = []\n\n# -- Options for HTML output -------------------------------------------------\n\n# HTML output settings\nhtml_math_renderer = \"maths_to_html\" # Maths rendering\n\n# Theme settings\nhtml_theme_path = [\"pep_sphinx_extensions\"]\nhtml_theme = \"pep_theme\" # The actual theme directory (child of html_theme_path)\nhtml_use_index = False # Disable index (we use PEP 0)\nhtml_style = \"\" # must be defined here or in theme.conf, but is unused\nhtml_permalinks = False # handled in the PEPContents transform\nhtml_baseurl = \"https://peps.python.org\" # to create the CNAME file\ngettext_auto_build = False # speed-ups\n\ntemplates_path = [\"pep_sphinx_extensions/pep_theme/templates\"] # Theme template relative paths from `confdir`\n", "path": "conf.py"}]}
| 1,525 | 92 |
gh_patches_debug_32502
|
rasdani/github-patches
|
git_diff
|
TileDB-Inc__TileDB-Py-263
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
multi_index not accepting tuples
The [UDF apply docs](https://docs.tiledb.com/cloud/client-api/serverless-udfs#multi-index-usage) imply that tuples and slices are interchangeable. The standard API, however, throws an exception on tuples (for both tiledb: and s3:)
```
>>> import tiledb, tiledb.cloud
>>> A = tiledb.DenseArray("tiledb://TileDB-Inc/quickstart_dense", ctx=tiledb.cloud.Ctx())
>>> A.multi_index[[(1,2), 4], [slice(1,4)]]['a']
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 121, in __getitem__
ranges = self.getitem_ranges(idx)
File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 109, in getitem_ranges
subranges = sel_to_subranges(sel)
File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 60, in sel_to_subranges
raise TypeError("Unsupported selection ")
TypeError: Unsupported selection
```
It would be great if multi_index and UDF apply accepted the same index types.
</issue>
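For illustration only, a small standalone sketch of how tuple selections can be normalised to the same inclusive `(start, stop)` subranges that scalars and slices produce (the function name mirrors `sel_to_subranges`, but this is not the library code):

```python
import numpy as np

def to_subranges(dim_sel):
    """Sketch: normalise scalars, slices and (start, stop) tuples to inclusive ranges."""
    if not isinstance(dim_sel, list):
        dim_sel = [dim_sel]
    subranges = []
    for sel in dim_sel:
        if isinstance(sel, tuple):            # already an inclusive (start, stop) pair
            subranges.append(sel)
        elif isinstance(sel, slice):
            subranges.append((sel.start, sel.stop))
        elif np.isscalar(sel):
            subranges.append((sel, sel))
        else:
            raise TypeError("Unsupported selection")
    return tuple(subranges)

print(to_subranges([(1, 2), 4]))   # ((1, 2), (4, 4))
```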
<code>
[start of tiledb/multirange_indexing.py]
1 import tiledb
2 from tiledb import Array, ArraySchema
3 import os, numpy as np
4 import sys, weakref
5
6 try:
7 from tiledb.libtiledb import multi_index
8 except:
9 from tiledb.indexing import multi_index
10
11 def _index_as_tuple(idx):
12 """Forces scalar index objects to a tuple representation"""
13 if isinstance(idx, tuple):
14 return idx
15 return (idx,)
16
17 def mr_dense_result_shape(ranges, base_shape = None):
18 # assumptions: len(ranges) matches number of dims
19 if base_shape is not None:
20 assert len(ranges) == len(base_shape), "internal error: mismatched shapes"
21
22 new_shape = list()
23 for i,rr in enumerate(ranges):
24 if rr != ():
25 m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr))
26 new_shape.append(np.sum(m))
27 else:
28 if base_shape is None:
29 raise ValueError("Missing required base_shape for whole-dimension slices")
30 # empty range covers dimension
31 new_shape.append(base_shape[i])
32
33 return tuple(new_shape)
34
35 def mr_dense_result_numel(ranges):
36 return np.prod(mr_dense_result_shape(ranges))
37
38 def sel_to_subranges(dim_sel):
39 if isinstance(dim_sel, list):
40 dim_sel = tuple(dim_sel)
41 elif not isinstance(dim_sel, tuple):
42 dim_sel = (dim_sel,)
43
44 subranges = list()
45 for range in dim_sel:
46 if np.isscalar(range):
47 subranges.append( (range, range) )
48 elif isinstance(range, slice):
49 if range.step is not None:
50 raise ValueError("Stepped slice ranges are not supported")
51 elif range.start is None and range.stop is None:
52 # ':' full slice
53 pass
54 else:
55 subranges.append( (range.start, range.stop) )
56 elif isinstance(range, list):
57 for el in range:
58 subranges.append( (el, el) )
59 else:
60 raise TypeError("Unsupported selection ")
61
62 return tuple(subranges)
63
64
65 class MultiRangeIndexer(object):
66 """
67 Implements multi-range / outer / orthogonal indexing.
68
69 """
70 # for cython
71 # comment out for Python 2 :/
72 #array: Array
73 #schema: ArraySchema
74 #def __init__(self, array: Array, query = None):
75
76 def __init__(self, array, query = None):
77 if not issubclass(type(array), tiledb.Array):
78 raise ValueError("Internal error: MultiRangeIndexer expected tiledb.Array")
79 self.array_ref = weakref.ref(array)
80 self.schema = array.schema
81 self.query = query
82
83 @property
84 def array(self):
85 assert self.array_ref() is not None, \
86 "Internal error: invariant violation (indexing call w/ dead array_ref)"
87 return self.array_ref()
88
89 @classmethod
90 def __test_init__(cls, array):
91 """
92 Internal helper method for testing getitem range calculation.
93 :param array:
94 :return:
95 """
96 m = cls.__new__(cls)
97 m.array_ref = weakref.ref(array)
98 m.schema = array.schema
99 m.query = None
100 return m
101
102 def getitem_ranges(self, idx):
103 dom = self.schema.domain
104 ndim = dom.ndim
105 idx = _index_as_tuple(idx)
106
107 ranges = list()
108 for i,sel in enumerate(idx):
109 subranges = sel_to_subranges(sel)
110 ranges.append(subranges)
111
112 # extend the list to ndim
113 if len(ranges) < ndim:
114 ranges.extend([ tuple() for _ in range(ndim-len(ranges))])
115
116 rval = tuple(ranges)
117 return rval
118
119 def __getitem__(self, idx):
120 # implements multi-range / outer / orthogonal indexing
121 ranges = self.getitem_ranges(idx)
122
123 dom = self.schema.domain
124 attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))
125
126 coords = None
127 if self.query is not None:
128 # if we are called via Query object, then we need to respect Query semantics
129 attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all
130 coords = self.query.coords
131
132 # TODO order
133 result_dict = multi_index(
134 self.array,
135 attr_names,
136 ranges,
137 coords=coords
138 )
139
140 if self.schema.sparse:
141 return result_dict
142 else:
143 result_shape = mr_dense_result_shape(ranges, self.schema.shape)
144 for arr in result_dict.values():
145 # TODO check/test layout
146 arr.shape = result_shape
147 return result_dict
[end of tiledb/multirange_indexing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tiledb/multirange_indexing.py b/tiledb/multirange_indexing.py
--- a/tiledb/multirange_indexing.py
+++ b/tiledb/multirange_indexing.py
@@ -8,11 +8,6 @@
except:
from tiledb.indexing import multi_index
-def _index_as_tuple(idx):
- """Forces scalar index objects to a tuple representation"""
- if isinstance(idx, tuple):
- return idx
- return (idx,)
def mr_dense_result_shape(ranges, base_shape = None):
# assumptions: len(ranges) matches number of dims
@@ -36,11 +31,6 @@
return np.prod(mr_dense_result_shape(ranges))
def sel_to_subranges(dim_sel):
- if isinstance(dim_sel, list):
- dim_sel = tuple(dim_sel)
- elif not isinstance(dim_sel, tuple):
- dim_sel = (dim_sel,)
-
subranges = list()
for range in dim_sel:
if np.isscalar(range):
@@ -53,6 +43,8 @@
pass
else:
subranges.append( (range.start, range.stop) )
+ elif isinstance(range, tuple):
+ subranges.extend((range,))
elif isinstance(range, list):
for el in range:
subranges.append( (el, el) )
@@ -102,10 +94,16 @@
def getitem_ranges(self, idx):
dom = self.schema.domain
ndim = dom.ndim
- idx = _index_as_tuple(idx)
+
+ if isinstance(idx, tuple):
+ idx = list(idx)
+ else:
+ idx = [idx]
ranges = list()
for i,sel in enumerate(idx):
+ if not isinstance(sel, list):
+ sel = [sel]
subranges = sel_to_subranges(sel)
ranges.append(subranges)
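With a change along these lines applied, the call from the issue report would be expected to succeed; the snippet below simply restates that usage (it requires network access and TileDB Cloud credentials):

```python
# Usage taken from the report, expected to work once tuples are accepted.
import tiledb, tiledb.cloud

A = tiledb.DenseArray("tiledb://TileDB-Inc/quickstart_dense", ctx=tiledb.cloud.Ctx())
data = A.multi_index[[(1, 2), 4], [slice(1, 4)]]["a"]
```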
|
{"golden_diff": "diff --git a/tiledb/multirange_indexing.py b/tiledb/multirange_indexing.py\n--- a/tiledb/multirange_indexing.py\n+++ b/tiledb/multirange_indexing.py\n@@ -8,11 +8,6 @@\n except:\n from tiledb.indexing import multi_index\n \n-def _index_as_tuple(idx):\n- \"\"\"Forces scalar index objects to a tuple representation\"\"\"\n- if isinstance(idx, tuple):\n- return idx\n- return (idx,)\n \n def mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n@@ -36,11 +31,6 @@\n return np.prod(mr_dense_result_shape(ranges))\n \n def sel_to_subranges(dim_sel):\n- if isinstance(dim_sel, list):\n- dim_sel = tuple(dim_sel)\n- elif not isinstance(dim_sel, tuple):\n- dim_sel = (dim_sel,)\n-\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n@@ -53,6 +43,8 @@\n pass\n else:\n subranges.append( (range.start, range.stop) )\n+ elif isinstance(range, tuple):\n+ subranges.extend((range,))\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n@@ -102,10 +94,16 @@\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n- idx = _index_as_tuple(idx)\n+\n+ if isinstance(idx, tuple):\n+ idx = list(idx)\n+ else:\n+ idx = [idx]\n \n ranges = list()\n for i,sel in enumerate(idx):\n+ if not isinstance(sel, list):\n+ sel = [sel]\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n", "issue": "multi_index not accepting tuples\nThe [UDF apply docs](https://docs.tiledb.com/cloud/client-api/serverless-udfs#multi-index-usage) imply the tuples and slices are interchangeable. The standard API throws an exception on tuples (for both tiledb: and s3:)\r\n\r\n\r\n```\r\n>>> import tiledb, tiledb.cloud\r\n>>> A = tiledb.DenseArray(\"tiledb://TileDB-Inc/quickstart_dense\", ctx=tiledb.cloud.Ctx())\r\n>>> A.multi_index[[(1,2), 4], [slice(1,4)]]['a']\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 121, in __getitem__\r\n ranges = self.getitem_ranges(idx)\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 109, in getitem_ranges\r\n subranges = sel_to_subranges(sel)\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 60, in sel_to_subranges\r\n raise TypeError(\"Unsupported selection \")\r\nTypeError: Unsupported selection \r\n```\r\n\r\nIt would be great if multi_index and UDF apply accepted the same index types.\n", "before_files": [{"content": "import tiledb\nfrom tiledb import Array, ArraySchema\nimport os, numpy as np\nimport sys, weakref\n\ntry:\n from tiledb.libtiledb import multi_index\nexcept:\n from tiledb.indexing import multi_index\n\ndef _index_as_tuple(idx):\n \"\"\"Forces scalar index objects to a tuple representation\"\"\"\n if isinstance(idx, tuple):\n return idx\n return (idx,)\n\ndef mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n if base_shape is not None:\n assert len(ranges) == len(base_shape), \"internal error: mismatched shapes\"\n\n new_shape = list()\n for i,rr in enumerate(ranges):\n if rr != ():\n m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr))\n new_shape.append(np.sum(m))\n else:\n if base_shape is None:\n raise ValueError(\"Missing required base_shape for whole-dimension slices\")\n # empty range covers dimension\n new_shape.append(base_shape[i])\n\n return tuple(new_shape)\n\ndef 
mr_dense_result_numel(ranges):\n return np.prod(mr_dense_result_shape(ranges))\n\ndef sel_to_subranges(dim_sel):\n if isinstance(dim_sel, list):\n dim_sel = tuple(dim_sel)\n elif not isinstance(dim_sel, tuple):\n dim_sel = (dim_sel,)\n\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n subranges.append( (range, range) )\n elif isinstance(range, slice):\n if range.step is not None:\n raise ValueError(\"Stepped slice ranges are not supported\")\n elif range.start is None and range.stop is None:\n # ':' full slice\n pass\n else:\n subranges.append( (range.start, range.stop) )\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n else:\n raise TypeError(\"Unsupported selection \")\n\n return tuple(subranges)\n\n\nclass MultiRangeIndexer(object):\n \"\"\"\n Implements multi-range / outer / orthogonal indexing.\n\n \"\"\"\n # for cython\n # comment out for Python 2 :/\n #array: Array\n #schema: ArraySchema\n #def __init__(self, array: Array, query = None):\n\n def __init__(self, array, query = None):\n if not issubclass(type(array), tiledb.Array):\n raise ValueError(\"Internal error: MultiRangeIndexer expected tiledb.Array\")\n self.array_ref = weakref.ref(array)\n self.schema = array.schema\n self.query = query\n\n @property\n def array(self):\n assert self.array_ref() is not None, \\\n \"Internal error: invariant violation (indexing call w/ dead array_ref)\"\n return self.array_ref()\n\n @classmethod\n def __test_init__(cls, array):\n \"\"\"\n Internal helper method for testing getitem range calculation.\n :param array:\n :return:\n \"\"\"\n m = cls.__new__(cls)\n m.array_ref = weakref.ref(array)\n m.schema = array.schema\n m.query = None\n return m\n\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n idx = _index_as_tuple(idx)\n\n ranges = list()\n for i,sel in enumerate(idx):\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n\n # extend the list to ndim\n if len(ranges) < ndim:\n ranges.extend([ tuple() for _ in range(ndim-len(ranges))])\n\n rval = tuple(ranges)\n return rval\n\n def __getitem__(self, idx):\n # implements multi-range / outer / orthogonal indexing\n ranges = self.getitem_ranges(idx)\n\n dom = self.schema.domain\n attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))\n\n coords = None\n if self.query is not None:\n # if we are called via Query object, then we need to respect Query semantics\n attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all\n coords = self.query.coords\n\n # TODO order\n result_dict = multi_index(\n self.array,\n attr_names,\n ranges,\n coords=coords\n )\n\n if self.schema.sparse:\n return result_dict\n else:\n result_shape = mr_dense_result_shape(ranges, self.schema.shape)\n for arr in result_dict.values():\n # TODO check/test layout\n arr.shape = result_shape\n return result_dict", "path": "tiledb/multirange_indexing.py"}]}
| 2,213 | 427 |
gh_patches_debug_7533
|
rasdani/github-patches
|
git_diff
|
stephenmcd__mezzanine-780
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SuspiciousOperation in asset_proxy URLs
Since switching our site from HTTP to HTTPS, I've been getting the following error on all of TinyMCE's dialog windows.
```
SuspiciousOperation: Attempted access to '/static/grappelli/tinymce/jscripts/tiny_mce/themes/advanced/source_editor.htm' denied.
Stacktrace (most recent call last):
File "django/core/handlers/base.py", line 115, in get_response
response = callback(request, *callback_args, **callback_kwargs)
File "django/contrib/admin/views/decorators.py", line 17, in _checklogin
return view_func(request, *args, **kwargs)
File "mezzanine/core/views.py", line 143, in static_proxy
path = finders.find(url)
File "django/contrib/staticfiles/finders.py", line 239, in find
result = finder.find(path, all=all)
File "django/contrib/staticfiles/finders.py", line 147, in find
match = self.find_in_app(app, path)
File "django/contrib/staticfiles/finders.py", line 166, in find_in_app
if storage.exists(path):
File "django/core/files/storage.py", line 243, in exists
return os.path.exists(self.path(name))
File "django/core/files/storage.py", line 259, in path
raise SuspiciousOperation("Attempted access to '%s' denied." % name)
```
The URLs that are causing them look like this:
```
https://www.example.com/asset_proxy/?u=https://www.example.com/static/grappelli/tinymce/jscripts/tiny_mce/themes/advanced/source_editor.htm
```
</issue>
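One reading of the traceback is that the absolute HTTPS URL is only partially stripped, so the path handed to `finders.find` keeps a leading slash (or a stale prefix) and `django.core.files.storage` rejects it as an attempt to escape the static root. A minimal standalone sketch of prefix stripping in the order the fix below uses, with illustrative values:

```python
def relative_static_path(url, static_url, host, generic_host):
    """Sketch: reduce the proxied asset URL to a path finders.find() can resolve."""
    for prefix in (static_url, host, generic_host, "/"):
        if url.startswith(prefix):
            url = url.replace(prefix, "", 1)
    return url

# With an absolute STATIC_URL, the whole prefix is removed in one step:
print(relative_static_path(
    "https://www.example.com/static/grappelli/tinymce/themes/advanced/source_editor.htm",
    "https://www.example.com/static/",
    "https://www.example.com",
    "//www.example.com",
))  # grappelli/tinymce/themes/advanced/source_editor.htm
```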
<code>
[start of mezzanine/core/views.py]
1 import os
2 from urlparse import urljoin, urlparse
3
4 from django.contrib import admin
5 from django.contrib.admin.views.decorators import staff_member_required
6 from django.contrib.admin.options import ModelAdmin
7 from django.contrib.staticfiles import finders
8 from django.core.exceptions import PermissionDenied
9 from django.core.urlresolvers import reverse
10 from django.db.models import get_model
11 from django.http import (HttpResponse, HttpResponseServerError,
12 HttpResponseNotFound)
13 from django.shortcuts import redirect
14 from django.template import RequestContext
15 from django.template.loader import get_template
16 from django.utils.translation import ugettext_lazy as _
17 from django.views.decorators.csrf import requires_csrf_token
18
19 from mezzanine.conf import settings
20 from mezzanine.core.forms import get_edit_form
21 from mezzanine.core.models import Displayable, SitePermission
22 from mezzanine.utils.cache import add_cache_bypass
23 from mezzanine.utils.views import is_editable, paginate, render, set_cookie
24 from mezzanine.utils.sites import has_site_permission
25
26
27 def set_device(request, device=""):
28 """
29 Sets a device name in a cookie when a user explicitly wants to go
30 to the site for a particular device (eg mobile).
31 """
32 response = redirect(add_cache_bypass(request.GET.get("next") or "/"))
33 set_cookie(response, "mezzanine-device", device, 60 * 60 * 24 * 365)
34 return response
35
36
37 @staff_member_required
38 def set_site(request):
39 """
40 Put the selected site ID into the session - posted to from
41 the "Select site" drop-down in the header of the admin. The
42 site ID is then used in favour of the current request's
43 domain in ``mezzanine.core.managers.CurrentSiteManager``.
44 """
45 site_id = int(request.GET["site_id"])
46 if not request.user.is_superuser:
47 try:
48 SitePermission.objects.get(user=request.user, sites=site_id)
49 except SitePermission.DoesNotExist:
50 raise PermissionDenied
51 request.session["site_id"] = site_id
52 admin_url = reverse("admin:index")
53 next = request.GET.get("next") or admin_url
54 # Don't redirect to a change view for an object that won't exist
55 # on the selected site - go to its list view instead.
56 if next.startswith(admin_url):
57 parts = next.split("/")
58 if len(parts) > 4 and parts[4].isdigit():
59 next = "/".join(parts[:4])
60 return redirect(next)
61
62
63 def direct_to_template(request, template, extra_context=None, **kwargs):
64 """
65 Replacement for Django's ``direct_to_template`` that uses
66 ``TemplateResponse`` via ``mezzanine.utils.views.render``.
67 """
68 context = extra_context or {}
69 context["params"] = kwargs
70 for (key, value) in context.items():
71 if callable(value):
72 context[key] = value()
73 return render(request, template, context)
74
75
76 @staff_member_required
77 def edit(request):
78 """
79 Process the inline editing form.
80 """
81 model = get_model(request.POST["app"], request.POST["model"])
82 obj = model.objects.get(id=request.POST["id"])
83 form = get_edit_form(obj, request.POST["fields"], data=request.POST,
84 files=request.FILES)
85 if not (is_editable(obj, request) and has_site_permission(request.user)):
86 response = _("Permission denied")
87 elif form.is_valid():
88 form.save()
89 model_admin = ModelAdmin(model, admin.site)
90 message = model_admin.construct_change_message(request, form, None)
91 model_admin.log_change(request, obj, message)
92 response = ""
93 else:
94 response = form.errors.values()[0][0]
95 return HttpResponse(unicode(response))
96
97
98 def search(request, template="search_results.html"):
99 """
100 Display search results. Takes an optional "contenttype" GET parameter
101 in the form "app-name.ModelName" to limit search results to a single model.
102 """
103 settings.use_editable()
104 query = request.GET.get("q", "")
105 page = request.GET.get("page", 1)
106 per_page = settings.SEARCH_PER_PAGE
107 max_paging_links = settings.MAX_PAGING_LINKS
108 try:
109 search_model = get_model(*request.GET.get("type", "").split(".", 1))
110 if not issubclass(search_model, Displayable):
111 raise TypeError
112 except TypeError:
113 search_model = Displayable
114 search_type = _("Everything")
115 else:
116 search_type = search_model._meta.verbose_name_plural.capitalize()
117 results = search_model.objects.search(query, for_user=request.user)
118 paginated = paginate(results, page, per_page, max_paging_links)
119 context = {"query": query, "results": paginated,
120 "search_type": search_type}
121 return render(request, template, context)
122
123
124 @staff_member_required
125 def static_proxy(request):
126 """
127 Serves TinyMCE plugins inside the inline popups and the uploadify
128 SWF, as these are normally static files, and will break with
129 cross-domain JavaScript errors if ``STATIC_URL`` is an external
130 host. URL for the file is passed in via querystring in the inline
131 popup plugin template.
132 """
133 # Get the relative URL after STATIC_URL.
134 url = request.GET["u"]
135 protocol = "http" if not request.is_secure() else "https"
136 host = protocol + "://" + request.get_host()
137 generic_host = "//" + request.get_host()
138 for prefix in (host, generic_host, settings.STATIC_URL):
139 if url.startswith(prefix):
140 url = url.replace(prefix, "", 1)
141 response = ""
142 mimetype = ""
143 path = finders.find(url)
144 if path:
145 if isinstance(path, (list, tuple)):
146 path = path[0]
147 with open(path, "rb") as f:
148 response = f.read()
149 mimetype = "application/octet-stream"
150 if url.endswith(".htm"):
151 # Inject <base href="{{ STATIC_URL }}"> into TinyMCE
152 # plugins, since the path static files in these won't be
153 # on the same domain.
154 mimetype = "text/html"
155 static_url = settings.STATIC_URL + os.path.split(url)[0] + "/"
156 if not urlparse(static_url).scheme:
157 static_url = urljoin(host, static_url)
158 base_tag = "<base href='%s'>" % static_url
159 response = response.replace("<head>", "<head>" + base_tag)
160 return HttpResponse(response, mimetype=mimetype)
161
162
163 @requires_csrf_token
164 def page_not_found(request, template_name="errors/404.html"):
165 """
166 Mimics Django's 404 handler but with a different template path.
167 """
168 context = RequestContext(request, {
169 "STATIC_URL": settings.STATIC_URL,
170 "request_path": request.path,
171 })
172 t = get_template(template_name)
173 return HttpResponseNotFound(t.render(context))
174
175
176 @requires_csrf_token
177 def server_error(request, template_name="errors/500.html"):
178 """
179 Mimics Django's error handler but adds ``STATIC_URL`` to the
180 context.
181 """
182 context = RequestContext(request, {"STATIC_URL": settings.STATIC_URL})
183 t = get_template(template_name)
184 return HttpResponseServerError(t.render(context))
185
[end of mezzanine/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mezzanine/core/views.py b/mezzanine/core/views.py
--- a/mezzanine/core/views.py
+++ b/mezzanine/core/views.py
@@ -135,7 +135,7 @@
protocol = "http" if not request.is_secure() else "https"
host = protocol + "://" + request.get_host()
generic_host = "//" + request.get_host()
- for prefix in (host, generic_host, settings.STATIC_URL):
+ for prefix in (settings.STATIC_URL, host, generic_host, '/'):
if url.startswith(prefix):
url = url.replace(prefix, "", 1)
response = ""
|
{"golden_diff": "diff --git a/mezzanine/core/views.py b/mezzanine/core/views.py\n--- a/mezzanine/core/views.py\n+++ b/mezzanine/core/views.py\n@@ -135,7 +135,7 @@\n protocol = \"http\" if not request.is_secure() else \"https\"\n host = protocol + \"://\" + request.get_host()\n generic_host = \"//\" + request.get_host()\n- for prefix in (host, generic_host, settings.STATIC_URL):\n+ for prefix in (settings.STATIC_URL, host, generic_host, '/'):\n if url.startswith(prefix):\n url = url.replace(prefix, \"\", 1)\n response = \"\"\n", "issue": "SuspiciousOperation in asset_proxy URLs\nSince switching our site from HTTP to HTTPS, I've been getting the following error on all of TinyMCE's dialog windows.\n\n```\nSuspiciousOperation: Attempted access to '/static/grappelli/tinymce/jscripts/tiny_mce/themes/advanced/source_editor.htm' denied.\n\nStacktrace (most recent call last):\n\n File \"django/core/handlers/base.py\", line 115, in get_response\n response = callback(request, *callback_args, **callback_kwargs)\n File \"django/contrib/admin/views/decorators.py\", line 17, in _checklogin\n return view_func(request, *args, **kwargs)\n File \"mezzanine/core/views.py\", line 143, in static_proxy\n path = finders.find(url)\n File \"django/contrib/staticfiles/finders.py\", line 239, in find\n result = finder.find(path, all=all)\n File \"django/contrib/staticfiles/finders.py\", line 147, in find\n match = self.find_in_app(app, path)\n File \"django/contrib/staticfiles/finders.py\", line 166, in find_in_app\n if storage.exists(path):\n File \"django/core/files/storage.py\", line 243, in exists\n return os.path.exists(self.path(name))\n File \"django/core/files/storage.py\", line 259, in path\n raise SuspiciousOperation(\"Attempted access to '%s' denied.\" % name)\n```\n\nThe URLs that are causing them look like this:\n\n```\nhttps://www.example.com/asset_proxy/?u=https://www.example.com/static/grappelli/tinymce/jscripts/tiny_mce/themes/advanced/source_editor.htm\n```\n\n", "before_files": [{"content": "import os\nfrom urlparse import urljoin, urlparse\n\nfrom django.contrib import admin\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.admin.options import ModelAdmin\nfrom django.contrib.staticfiles import finders\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import get_model\nfrom django.http import (HttpResponse, HttpResponseServerError,\n HttpResponseNotFound)\nfrom django.shortcuts import redirect\nfrom django.template import RequestContext\nfrom django.template.loader import get_template\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.csrf import requires_csrf_token\n\nfrom mezzanine.conf import settings\nfrom mezzanine.core.forms import get_edit_form\nfrom mezzanine.core.models import Displayable, SitePermission\nfrom mezzanine.utils.cache import add_cache_bypass\nfrom mezzanine.utils.views import is_editable, paginate, render, set_cookie\nfrom mezzanine.utils.sites import has_site_permission\n\n\ndef set_device(request, device=\"\"):\n \"\"\"\n Sets a device name in a cookie when a user explicitly wants to go\n to the site for a particular device (eg mobile).\n \"\"\"\n response = redirect(add_cache_bypass(request.GET.get(\"next\") or \"/\"))\n set_cookie(response, \"mezzanine-device\", device, 60 * 60 * 24 * 365)\n return response\n\n\n@staff_member_required\ndef set_site(request):\n \"\"\"\n Put the selected site ID into the session - posted to 
from\n the \"Select site\" drop-down in the header of the admin. The\n site ID is then used in favour of the current request's\n domain in ``mezzanine.core.managers.CurrentSiteManager``.\n \"\"\"\n site_id = int(request.GET[\"site_id\"])\n if not request.user.is_superuser:\n try:\n SitePermission.objects.get(user=request.user, sites=site_id)\n except SitePermission.DoesNotExist:\n raise PermissionDenied\n request.session[\"site_id\"] = site_id\n admin_url = reverse(\"admin:index\")\n next = request.GET.get(\"next\") or admin_url\n # Don't redirect to a change view for an object that won't exist\n # on the selected site - go to its list view instead.\n if next.startswith(admin_url):\n parts = next.split(\"/\")\n if len(parts) > 4 and parts[4].isdigit():\n next = \"/\".join(parts[:4])\n return redirect(next)\n\n\ndef direct_to_template(request, template, extra_context=None, **kwargs):\n \"\"\"\n Replacement for Django's ``direct_to_template`` that uses\n ``TemplateResponse`` via ``mezzanine.utils.views.render``.\n \"\"\"\n context = extra_context or {}\n context[\"params\"] = kwargs\n for (key, value) in context.items():\n if callable(value):\n context[key] = value()\n return render(request, template, context)\n\n\n@staff_member_required\ndef edit(request):\n \"\"\"\n Process the inline editing form.\n \"\"\"\n model = get_model(request.POST[\"app\"], request.POST[\"model\"])\n obj = model.objects.get(id=request.POST[\"id\"])\n form = get_edit_form(obj, request.POST[\"fields\"], data=request.POST,\n files=request.FILES)\n if not (is_editable(obj, request) and has_site_permission(request.user)):\n response = _(\"Permission denied\")\n elif form.is_valid():\n form.save()\n model_admin = ModelAdmin(model, admin.site)\n message = model_admin.construct_change_message(request, form, None)\n model_admin.log_change(request, obj, message)\n response = \"\"\n else:\n response = form.errors.values()[0][0]\n return HttpResponse(unicode(response))\n\n\ndef search(request, template=\"search_results.html\"):\n \"\"\"\n Display search results. Takes an optional \"contenttype\" GET parameter\n in the form \"app-name.ModelName\" to limit search results to a single model.\n \"\"\"\n settings.use_editable()\n query = request.GET.get(\"q\", \"\")\n page = request.GET.get(\"page\", 1)\n per_page = settings.SEARCH_PER_PAGE\n max_paging_links = settings.MAX_PAGING_LINKS\n try:\n search_model = get_model(*request.GET.get(\"type\", \"\").split(\".\", 1))\n if not issubclass(search_model, Displayable):\n raise TypeError\n except TypeError:\n search_model = Displayable\n search_type = _(\"Everything\")\n else:\n search_type = search_model._meta.verbose_name_plural.capitalize()\n results = search_model.objects.search(query, for_user=request.user)\n paginated = paginate(results, page, per_page, max_paging_links)\n context = {\"query\": query, \"results\": paginated,\n \"search_type\": search_type}\n return render(request, template, context)\n\n\n@staff_member_required\ndef static_proxy(request):\n \"\"\"\n Serves TinyMCE plugins inside the inline popups and the uploadify\n SWF, as these are normally static files, and will break with\n cross-domain JavaScript errors if ``STATIC_URL`` is an external\n host. 
URL for the file is passed in via querystring in the inline\n popup plugin template.\n \"\"\"\n # Get the relative URL after STATIC_URL.\n url = request.GET[\"u\"]\n protocol = \"http\" if not request.is_secure() else \"https\"\n host = protocol + \"://\" + request.get_host()\n generic_host = \"//\" + request.get_host()\n for prefix in (host, generic_host, settings.STATIC_URL):\n if url.startswith(prefix):\n url = url.replace(prefix, \"\", 1)\n response = \"\"\n mimetype = \"\"\n path = finders.find(url)\n if path:\n if isinstance(path, (list, tuple)):\n path = path[0]\n with open(path, \"rb\") as f:\n response = f.read()\n mimetype = \"application/octet-stream\"\n if url.endswith(\".htm\"):\n # Inject <base href=\"{{ STATIC_URL }}\"> into TinyMCE\n # plugins, since the path static files in these won't be\n # on the same domain.\n mimetype = \"text/html\"\n static_url = settings.STATIC_URL + os.path.split(url)[0] + \"/\"\n if not urlparse(static_url).scheme:\n static_url = urljoin(host, static_url)\n base_tag = \"<base href='%s'>\" % static_url\n response = response.replace(\"<head>\", \"<head>\" + base_tag)\n return HttpResponse(response, mimetype=mimetype)\n\n\n@requires_csrf_token\ndef page_not_found(request, template_name=\"errors/404.html\"):\n \"\"\"\n Mimics Django's 404 handler but with a different template path.\n \"\"\"\n context = RequestContext(request, {\n \"STATIC_URL\": settings.STATIC_URL,\n \"request_path\": request.path,\n })\n t = get_template(template_name)\n return HttpResponseNotFound(t.render(context))\n\n\n@requires_csrf_token\ndef server_error(request, template_name=\"errors/500.html\"):\n \"\"\"\n Mimics Django's error handler but adds ``STATIC_URL`` to the\n context.\n \"\"\"\n context = RequestContext(request, {\"STATIC_URL\": settings.STATIC_URL})\n t = get_template(template_name)\n return HttpResponseServerError(t.render(context))\n", "path": "mezzanine/core/views.py"}]}
| 2,911 | 144 |
gh_patches_debug_13647
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1161
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Starlette/FastAPI python agent setup examples result in TypeError
When following the [example](https://www.elastic.co/guide/en/apm/agent/python/current/starlette-support.html#starlette-fastapi) of setting up the Elastic APM agent with Python + FastAPI, we find that we can't use `make_apm_client` as described, because it has a required positional argument, `config`. The documentation, however, says that we should be able to call `make_apm_client` with no arguments, in which case it should look for environment variables and/or use defaults.
**To Reproduce**
Follow the example exactly:
```python
from fastapi import FastAPI
from elasticapm.contrib.starlette import make_apm_client, ElasticAPM
apm = make_apm_client()
# Client fails to start: TypeError: make_apm_client() missing 1 required positional argument: 'config'
app = FastAPI()
app.add_middleware(ElasticAPM, client=apm)
```
**Environment (please complete the following information)**
- OS: Mac, Linux, Ubuntu
- Python version: 3.8.8
- Framework and version [e.g. Django 2.1]: fastapi==0.65.1
- APM Server version: latest
- Agent version: 6.2.2
</issue>
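The documented no-argument call only works if `config` is optional; the fragment below is a minimal sketch of that signature shape (it merges settings rather than building a real client, so names and behaviour are illustrative only):

```python
from typing import Dict, Optional

def make_apm_client(config: Optional[Dict] = None, **defaults) -> dict:
    """Sketch only: accept a missing config dict and fall back to keyword defaults."""
    merged = dict(defaults)
    merged.update(config or {})
    return merged

settings = make_apm_client()                                  # no positional argument needed
settings_with_config = make_apm_client({"SERVICE_NAME": "myapp"})
```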
<code>
[start of elasticapm/contrib/starlette/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 from __future__ import absolute_import
33
34 import starlette
35 from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
36 from starlette.requests import Request
37 from starlette.responses import Response
38 from starlette.routing import Match, Mount
39 from starlette.types import ASGIApp
40
41 import elasticapm
42 import elasticapm.instrumentation.control
43 from elasticapm.base import Client
44 from elasticapm.conf import constants
45 from elasticapm.contrib.asyncio.traces import set_context
46 from elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response
47 from elasticapm.utils.disttracing import TraceParent
48 from elasticapm.utils.logging import get_logger
49
50 logger = get_logger("elasticapm.errors.client")
51
52
53 def make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:
54 """Builds ElasticAPM client.
55
56 Args:
57 config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.
58 client_cls (Client): Must be Client or its child.
59 **defaults: Additional parameters for Client. See `elasticapm.base.Client`
60
61 Returns:
62 Client
63 """
64 if "framework_name" not in defaults:
65 defaults["framework_name"] = "starlette"
66 defaults["framework_version"] = starlette.__version__
67
68 return client_cls(config, **defaults)
69
70
71 class ElasticAPM(BaseHTTPMiddleware):
72 """
73 Starlette / FastAPI middleware for Elastic APM capturing.
74
75 >>> elasticapm = make_apm_client({
76 >>> 'SERVICE_NAME': 'myapp',
77 >>> 'DEBUG': True,
78 >>> 'SERVER_URL': 'http://localhost:8200',
79 >>> 'CAPTURE_HEADERS': True,
80 >>> 'CAPTURE_BODY': 'all'
81 >>> })
82
83 >>> app.add_middleware(ElasticAPM, client=elasticapm)
84
85 Pass an arbitrary APP_NAME and SECRET_TOKEN::
86
87 >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')
88
89 Pass an explicit client::
90
91 >>> elasticapm = ElasticAPM(app, client=client)
92
93 Automatically configure logging::
94
95 >>> elasticapm = ElasticAPM(app, logging=True)
96
97 Capture an exception::
98
99 >>> try:
100 >>> 1 / 0
101 >>> except ZeroDivisionError:
102 >>> elasticapm.capture_exception()
103
104 Capture a message::
105
106 >>> elasticapm.capture_message('hello, world!')
107 """
108
109 def __init__(self, app: ASGIApp, client: Client):
110 """
111
112 Args:
113 app (ASGIApp): Starlette app
114 client (Client): ElasticAPM Client
115 """
116 self.client = client
117
118 if self.client.config.instrument and self.client.config.enabled:
119 elasticapm.instrumentation.control.instrument()
120
121 super().__init__(app)
122
123 async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
124 """Processes the whole request APM capturing.
125
126 Args:
127 request (Request)
128 call_next (RequestResponseEndpoint): Next request process in Starlette.
129
130 Returns:
131 Response
132 """
133 await self._request_started(request)
134
135 try:
136 response = await call_next(request)
137 elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)
138 except Exception:
139 await self.capture_exception(
140 context={"request": await get_data_from_request(request, self.client.config, constants.ERROR)}
141 )
142 elasticapm.set_transaction_result("HTTP 5xx", override=False)
143 elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)
144 elasticapm.set_context({"status_code": 500}, "response")
145
146 raise
147 else:
148 await self._request_finished(response)
149 finally:
150 self.client.end_transaction()
151
152 return response
153
154 async def capture_exception(self, *args, **kwargs):
155 """Captures your exception.
156
157 Args:
158 *args:
159 **kwargs:
160 """
161 self.client.capture_exception(*args, **kwargs)
162
163 async def capture_message(self, *args, **kwargs):
164 """Captures your message.
165
166 Args:
167 *args: Whatever
168 **kwargs: Whatever
169 """
170 self.client.capture_message(*args, **kwargs)
171
172 async def _request_started(self, request: Request):
173 """Captures the begin of the request processing to APM.
174
175 Args:
176 request (Request)
177 """
178 # When we consume the body, we replace the streaming mechanism with
179 # a mocked version -- this workaround came from
180 # https://github.com/encode/starlette/issues/495#issuecomment-513138055
181 # and we call the workaround here to make sure that regardless of
182 # `capture_body` settings, we will have access to the body if we need it.
183 if self.client.config.capture_body != "off":
184 await get_body(request)
185
186 if not self.client.should_ignore_url(request.url.path):
187 trace_parent = TraceParent.from_headers(dict(request.headers))
188 self.client.begin_transaction("request", trace_parent=trace_parent)
189
190 await set_context(
191 lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), "request"
192 )
193 transaction_name = self.get_route_name(request) or request.url.path
194 elasticapm.set_transaction_name("{} {}".format(request.method, transaction_name), override=False)
195
196 async def _request_finished(self, response: Response):
197 """Captures the end of the request processing to APM.
198
199 Args:
200 response (Response)
201 """
202 await set_context(
203 lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), "response"
204 )
205
206 result = "HTTP {}xx".format(response.status_code // 100)
207 elasticapm.set_transaction_result(result, override=False)
208
209 def get_route_name(self, request: Request) -> str:
210 app = request.app
211 scope = request.scope
212 routes = app.routes
213 route_name = self._get_route_name(scope, routes)
214
215 # Starlette magically redirects requests if the path matches a route name with a trailing slash
216 # appended or removed. To not spam the transaction names list, we do the same here and put these
217 # redirects all in the same "redirect trailing slashes" transaction name
218 if not route_name and app.router.redirect_slashes and scope["path"] != "/":
219 redirect_scope = dict(scope)
220 if scope["path"].endswith("/"):
221 redirect_scope["path"] = scope["path"][:-1]
222 trim = True
223 else:
224 redirect_scope["path"] = scope["path"] + "/"
225 trim = False
226
227 route_name = self._get_route_name(redirect_scope, routes)
228 if route_name is not None:
229 route_name = route_name + "/" if trim else route_name[:-1]
230 return route_name
231
232 def _get_route_name(self, scope, routes, route_name=None):
233 for route in routes:
234 match, child_scope = route.matches(scope)
235 if match == Match.FULL:
236 route_name = route.path
237 child_scope = {**scope, **child_scope}
238 if isinstance(route, Mount):
239 child_route_name = self._get_route_name(child_scope, route.routes, route_name)
240 if child_route_name is None:
241 route_name = None
242 else:
243 route_name += child_route_name
244 return route_name
245 elif match == Match.PARTIAL and route_name is None:
246 route_name = route.path
247
[end of elasticapm/contrib/starlette/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py
--- a/elasticapm/contrib/starlette/__init__.py
+++ b/elasticapm/contrib/starlette/__init__.py
@@ -31,6 +31,8 @@
from __future__ import absolute_import
+from typing import Dict, Optional
+
import starlette
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
@@ -50,7 +52,7 @@
logger = get_logger("elasticapm.errors.client")
-def make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:
+def make_apm_client(config: Optional[Dict] = None, client_cls=Client, **defaults) -> Client:
"""Builds ElasticAPM client.
Args:
|
{"golden_diff": "diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py\n--- a/elasticapm/contrib/starlette/__init__.py\n+++ b/elasticapm/contrib/starlette/__init__.py\n@@ -31,6 +31,8 @@\n \n from __future__ import absolute_import\n \n+from typing import Dict, Optional\n+\n import starlette\n from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\n from starlette.requests import Request\n@@ -50,7 +52,7 @@\n logger = get_logger(\"elasticapm.errors.client\")\n \n \n-def make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:\n+def make_apm_client(config: Optional[Dict] = None, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n \n Args:\n", "issue": "Starlette/FastAPI python agent setup examples result in TypeError\nWhen following the [example](https://www.elastic.co/guide/en/apm/agent/python/current/starlette-support.html#starlette-fastapi) of setting up the Elastic APM agent with python + FastAPI, we find that we can't use `make_apm_client` as described as it has a required positional argument, `config`. The documentation however says that we should be able to call `make_apm_client` with no arguments and in that case it should look for environment variables and/or use defaults. \r\n\r\n**To Reproduce**\r\nFollow the example exactly: \r\n```python\r\nfrom fastapi import FastAPI\r\nfrom elasticapm.contrib.starlette import make_apm_client, ElasticAPM\r\n\r\napm = make_apm_client()\r\n# Client fails to start due to TypeError: TypeError: make_apm_client() missing 1 required positional argument: 'config'\r\n\r\napp = FastAPI()\r\napp.add_middleware(ElasticAPM, client=apm)\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Mac, Linux, Ubuntu\r\n- Python version: 3.8.8\r\n- Framework and version [e.g. Django 2.1]: fastapi==0.65.1\r\n- APM Server version: latest\r\n- Agent version: 6.2.2\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nimport starlette\nfrom starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Match, Mount\nfrom starlette.types import ASGIApp\n\nimport elasticapm\nimport elasticapm.instrumentation.control\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.asyncio.traces import set_context\nfrom elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.errors.client\")\n\n\ndef make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n\n Args:\n config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.\n client_cls (Client): Must be Client or its child.\n **defaults: Additional parameters for Client. See `elasticapm.base.Client`\n\n Returns:\n Client\n \"\"\"\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"starlette\"\n defaults[\"framework_version\"] = starlette.__version__\n\n return client_cls(config, **defaults)\n\n\nclass ElasticAPM(BaseHTTPMiddleware):\n \"\"\"\n Starlette / FastAPI middleware for Elastic APM capturing.\n\n >>> elasticapm = make_apm_client({\n >>> 'SERVICE_NAME': 'myapp',\n >>> 'DEBUG': True,\n >>> 'SERVER_URL': 'http://localhost:8200',\n >>> 'CAPTURE_HEADERS': True,\n >>> 'CAPTURE_BODY': 'all'\n >>> })\n\n >>> app.add_middleware(ElasticAPM, client=elasticapm)\n\n Pass an arbitrary APP_NAME and SECRET_TOKEN::\n\n >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')\n\n Pass an explicit client::\n\n >>> elasticapm = ElasticAPM(app, client=client)\n\n Automatically configure logging::\n\n >>> elasticapm = ElasticAPM(app, logging=True)\n\n Capture an exception::\n\n >>> try:\n >>> 1 / 0\n >>> except ZeroDivisionError:\n >>> elasticapm.capture_exception()\n\n Capture a message::\n\n >>> elasticapm.capture_message('hello, world!')\n \"\"\"\n\n def __init__(self, app: ASGIApp, client: Client):\n \"\"\"\n\n Args:\n app (ASGIApp): Starlette app\n client (Client): ElasticAPM Client\n \"\"\"\n self.client = client\n\n if self.client.config.instrument and self.client.config.enabled:\n elasticapm.instrumentation.control.instrument()\n\n super().__init__(app)\n\n async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:\n \"\"\"Processes the whole request APM capturing.\n\n Args:\n request (Request)\n call_next (RequestResponseEndpoint): Next request process in Starlette.\n\n Returns:\n Response\n \"\"\"\n await self._request_started(request)\n\n try:\n response = await call_next(request)\n elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)\n except Exception:\n await self.capture_exception(\n context={\"request\": await 
get_data_from_request(request, self.client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n raise\n else:\n await self._request_finished(response)\n finally:\n self.client.end_transaction()\n\n return response\n\n async def capture_exception(self, *args, **kwargs):\n \"\"\"Captures your exception.\n\n Args:\n *args:\n **kwargs:\n \"\"\"\n self.client.capture_exception(*args, **kwargs)\n\n async def capture_message(self, *args, **kwargs):\n \"\"\"Captures your message.\n\n Args:\n *args: Whatever\n **kwargs: Whatever\n \"\"\"\n self.client.capture_message(*args, **kwargs)\n\n async def _request_started(self, request: Request):\n \"\"\"Captures the begin of the request processing to APM.\n\n Args:\n request (Request)\n \"\"\"\n # When we consume the body, we replace the streaming mechanism with\n # a mocked version -- this workaround came from\n # https://github.com/encode/starlette/issues/495#issuecomment-513138055\n # and we call the workaround here to make sure that regardless of\n # `capture_body` settings, we will have access to the body if we need it.\n if self.client.config.capture_body != \"off\":\n await get_body(request)\n\n if not self.client.should_ignore_url(request.url.path):\n trace_parent = TraceParent.from_headers(dict(request.headers))\n self.client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n await set_context(\n lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), \"request\"\n )\n transaction_name = self.get_route_name(request) or request.url.path\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, transaction_name), override=False)\n\n async def _request_finished(self, response: Response):\n \"\"\"Captures the end of the request processing to APM.\n\n Args:\n response (Response)\n \"\"\"\n await set_context(\n lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), \"response\"\n )\n\n result = \"HTTP {}xx\".format(response.status_code // 100)\n elasticapm.set_transaction_result(result, override=False)\n\n def get_route_name(self, request: Request) -> str:\n app = request.app\n scope = request.scope\n routes = app.routes\n route_name = self._get_route_name(scope, routes)\n\n # Starlette magically redirects requests if the path matches a route name with a trailing slash\n # appended or removed. 
To not spam the transaction names list, we do the same here and put these\n # redirects all in the same \"redirect trailing slashes\" transaction name\n if not route_name and app.router.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = scope[\"path\"][:-1]\n trim = True\n else:\n redirect_scope[\"path\"] = scope[\"path\"] + \"/\"\n trim = False\n\n route_name = self._get_route_name(redirect_scope, routes)\n if route_name is not None:\n route_name = route_name + \"/\" if trim else route_name[:-1]\n return route_name\n\n def _get_route_name(self, scope, routes, route_name=None):\n for route in routes:\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n route_name = route.path\n child_scope = {**scope, **child_scope}\n if isinstance(route, Mount):\n child_route_name = self._get_route_name(child_scope, route.routes, route_name)\n if child_route_name is None:\n route_name = None\n else:\n route_name += child_route_name\n return route_name\n elif match == Match.PARTIAL and route_name is None:\n route_name = route.path\n", "path": "elasticapm/contrib/starlette/__init__.py"}]}
| 3,454 | 198 |
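For context on the record above: once `config` defaults to `None`, the zero-argument call described in the agent's documentation works again. The snippet below is only a sketch of that usage, assuming configuration is supplied through `ELASTIC_APM_*` environment variables or client defaults (as the issue states); it is not taken from the repository.

```python
from fastapi import FastAPI
from elasticapm.contrib.starlette import make_apm_client, ElasticAPM

# With the patched signature make_apm_client(config: Optional[Dict] = None, ...)
# no positional config dict is required any more.
apm = make_apm_client()

app = FastAPI()
app.add_middleware(ElasticAPM, client=apm)
```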
gh_patches_debug_3260
|
rasdani/github-patches
|
git_diff
|
getredash__redash-5623
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Loading schema for Sqlite DB with "Order" column name fails
### Issue Summary
I added a Sqlite Database which has an column with the name `Order`.
When I try to create a query, the error `Schema refresh failed.` comes up.
### Steps to Reproduce
1. Add an Sqlite Database which has a column with the name `Order`
2. Try to create a query
3. Get the error `Schema refresh failed.`
### Technical details:
* Redash Version: cloned from master
* Browser/OS: Brave Browser & Ubuntu 18.1
* How did you install Redash: built from source
</issue>
<code>
[start of redash/query_runner/sqlite.py]
1 import logging
2 import sqlite3
3
4 from redash.query_runner import BaseSQLQueryRunner, register, JobTimeoutException
5 from redash.utils import json_dumps, json_loads
6
7 logger = logging.getLogger(__name__)
8
9
10 class Sqlite(BaseSQLQueryRunner):
11 noop_query = "pragma quick_check"
12
13 @classmethod
14 def configuration_schema(cls):
15 return {
16 "type": "object",
17 "properties": {"dbpath": {"type": "string", "title": "Database Path"}},
18 "required": ["dbpath"],
19 }
20
21 @classmethod
22 def type(cls):
23 return "sqlite"
24
25 def __init__(self, configuration):
26 super(Sqlite, self).__init__(configuration)
27
28 self._dbpath = self.configuration["dbpath"]
29
30 def _get_tables(self, schema):
31 query_table = "select tbl_name from sqlite_master where type='table'"
32 query_columns = "PRAGMA table_info(%s)"
33
34 results, error = self.run_query(query_table, None)
35
36 if error is not None:
37 raise Exception("Failed getting schema.")
38
39 results = json_loads(results)
40
41 for row in results["rows"]:
42 table_name = row["tbl_name"]
43 schema[table_name] = {"name": table_name, "columns": []}
44 results_table, error = self.run_query(query_columns % (table_name,), None)
45 if error is not None:
46 raise Exception("Failed getting schema.")
47
48 results_table = json_loads(results_table)
49 for row_column in results_table["rows"]:
50 schema[table_name]["columns"].append(row_column["name"])
51
52 return list(schema.values())
53
54 def run_query(self, query, user):
55 connection = sqlite3.connect(self._dbpath)
56
57 cursor = connection.cursor()
58
59 try:
60 cursor.execute(query)
61
62 if cursor.description is not None:
63 columns = self.fetch_columns([(i[0], None) for i in cursor.description])
64 rows = [
65 dict(zip((column["name"] for column in columns), row))
66 for row in cursor
67 ]
68
69 data = {"columns": columns, "rows": rows}
70 error = None
71 json_data = json_dumps(data)
72 else:
73 error = "Query completed but it returned no data."
74 json_data = None
75 except (KeyboardInterrupt, JobTimeoutException):
76 connection.cancel()
77 raise
78 finally:
79 connection.close()
80 return json_data, error
81
82
83 register(Sqlite)
84
[end of redash/query_runner/sqlite.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py
--- a/redash/query_runner/sqlite.py
+++ b/redash/query_runner/sqlite.py
@@ -29,7 +29,7 @@
def _get_tables(self, schema):
query_table = "select tbl_name from sqlite_master where type='table'"
- query_columns = "PRAGMA table_info(%s)"
+ query_columns = "PRAGMA table_info(\"%s\")"
results, error = self.run_query(query_table, None)
|
{"golden_diff": "diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py\n--- a/redash/query_runner/sqlite.py\n+++ b/redash/query_runner/sqlite.py\n@@ -29,7 +29,7 @@\n \n def _get_tables(self, schema):\n query_table = \"select tbl_name from sqlite_master where type='table'\"\n- query_columns = \"PRAGMA table_info(%s)\"\n+ query_columns = \"PRAGMA table_info(\\\"%s\\\")\"\n \n results, error = self.run_query(query_table, None)\n", "issue": "Loading schema for Sqlite DB with \"Order\" column name fails\n### Issue Summary\r\n\r\nI added a Sqlite Database which has an column with the name `Order`.\r\nWhen I try to create a query, the error `Schema refresh failed.` comes up.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Add an Sqlite Database which has a column with the name `Order`\r\n2. Try to create a query\r\n3. Get the error `Schema refresh failed.`\r\n\r\n\r\n### Technical details:\r\n\r\n* Redash Version: cloned from master\r\n* Browser/OS: Brave Browser & Ubuntu 18.1\r\n* How did you install Redash: built from source\r\n\n", "before_files": [{"content": "import logging\nimport sqlite3\n\nfrom redash.query_runner import BaseSQLQueryRunner, register, JobTimeoutException\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass Sqlite(BaseSQLQueryRunner):\n noop_query = \"pragma quick_check\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\"dbpath\": {\"type\": \"string\", \"title\": \"Database Path\"}},\n \"required\": [\"dbpath\"],\n }\n\n @classmethod\n def type(cls):\n return \"sqlite\"\n\n def __init__(self, configuration):\n super(Sqlite, self).__init__(configuration)\n\n self._dbpath = self.configuration[\"dbpath\"]\n\n def _get_tables(self, schema):\n query_table = \"select tbl_name from sqlite_master where type='table'\"\n query_columns = \"PRAGMA table_info(%s)\"\n\n results, error = self.run_query(query_table, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n table_name = row[\"tbl_name\"]\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n results_table, error = self.run_query(query_columns % (table_name,), None)\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results_table = json_loads(results_table)\n for row_column in results_table[\"rows\"]:\n schema[table_name][\"columns\"].append(row_column[\"name\"])\n\n return list(schema.values())\n\n def run_query(self, query, user):\n connection = sqlite3.connect(self._dbpath)\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n\n if cursor.description is not None:\n columns = self.fetch_columns([(i[0], None) for i in cursor.description])\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n error = \"Query completed but it returned no data.\"\n json_data = None\n except (KeyboardInterrupt, JobTimeoutException):\n connection.cancel()\n raise\n finally:\n connection.close()\n return json_data, error\n\n\nregister(Sqlite)\n", "path": "redash/query_runner/sqlite.py"}]}
| 1,365 | 120 |
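A short aside on the quoting change in the record above: `Order` is a reserved word in SQLite, so the unquoted `PRAGMA table_info(%s)` template fails for such identifiers. The sketch below is illustrative only (the in-memory database and table are assumptions, not taken from the report):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute('CREATE TABLE "Order" (id INTEGER)')

# Unquoted, the reserved word breaks the statement and the schema refresh fails:
#     conn.execute("PRAGMA table_info(Order)")   # raises sqlite3.OperationalError

# Quoted, as in the patched query_columns template, it works:
print(conn.execute('PRAGMA table_info("Order")').fetchall())
```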
gh_patches_debug_35067
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-3276
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RSS feed gives no information about packages build
Since commit 60dc56c the RSS feed doesn't give information about the packages affected in a build.
Example from [Fedora30 Stable - RSS](https://bodhi.fedoraproject.org/rss/updates/?releases=F30&status=stable):
```xml
<item>
<title>FEDORA-2019-59d394e0fd</title>
<link>https://bodhi.fedoraproject.org/updates/createrepo_c-0.14.1-1.fc30</link>
<description>- Update to 0.14.1
- Add --pkgorigins mode for Koji
- Correct pkg count in headers if there were invalid pkgs (RhBug:1596211)
- Prevent exiting with 0 if errors occur while finalizing repodata.
</description>
<pubDate>Fri, 24 May 2019 12:20:49 +0000</pubDate>
</item>
```
Also the link is invalid as in #3248
Looking at the [Fedora30 Stable - Web UI](https://bodhi.fedoraproject.org/updates/?releases=F30&status=stable), it seems that it should be:
```
<title>createrepo_c-0.14.1-1.fc30</title>
<link>https://bodhi.fedoraproject.org/updates/FEDORA-2019-59d394e0fd</link>
```
</issue>
<code>
[start of bodhi/server/renderers.py]
1 # Copyright © 2014-2019 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """Define special view renderers, such as RSS."""
19 import logging
20 import operator
21
22 from pytz import utc
23 from feedgen.feed import FeedGenerator
24 from pyramid.exceptions import HTTPBadRequest
25
26
27 log = logging.getLogger(__name__)
28
29
30 def rss(info):
31 """
32 Return a RSS renderer.
33
34 Args:
35 info (pyramid.renderers.RendererHelper): Unused.
36 Returns:
37 function: A function that can be used to render a RSS view.
38 """
39 def render(data, system):
40 """
41 Render the given data as an RSS view.
42
43 If the request's content type is set to the default, this function will change it to
44 application/rss+xml.
45
46 Args:
47 data (dict): A dictionary describing the information to be rendered. The information can
48 be different types of objects, such as updates, users, comments, or overrides.
49 system (pyramid.events.BeforeRender): Used to get the current request.
50 Returns:
51 str: An RSS document representing the given data.
52 """
53 request = system.get('request')
54 if request is not None:
55 response = request.response
56 ct = response.content_type
57 if ct == response.default_content_type:
58 response.content_type = 'application/rss+xml'
59
60 if 'updates' in data:
61 key = 'updates'
62 feed_title = 'Released updates'
63 elif 'users' in data:
64 key = 'users'
65 feed_title = 'Bodhi users'
66 elif 'comments' in data:
67 key = 'comments'
68 feed_title = 'User comments'
69 elif 'overrides' in data:
70 key = 'overrides'
71 feed_title = 'Update overrides'
72 else:
73 # This is a request we don't know how to render. Let's return BadRequest and log.
74 log.debug('Unable to render RSS feed for data: %s', data)
75 # See if we have a request so we can set a code without raising an Exception
76 if request is not None:
77 response.status = HTTPBadRequest.code
78 return 'Invalid RSS feed request'
79 else:
80 raise HTTPBadRequest('Invalid RSS feed request')
81
82 feed_description_list = []
83 for k in request.GET.keys():
84 feed_description_list.append('%s(%s)' % (k, request.GET[k]))
85 if feed_description_list:
86 feed_description = 'Filtered on: ' + ', '.join(feed_description_list)
87 else:
88 feed_description = "All %s" % (key)
89
90 feed = FeedGenerator()
91 feed.title(feed_title)
92 feed.link(href=request.url, rel='self')
93 feed.description(feed_description)
94 feed.language('en')
95
96 def linker(route, param, key):
97 def link_dict(obj):
98 return dict(href=request.route_url(route, **{param: obj[key]}))
99 return link_dict
100
101 getters = {
102 'updates': {
103 'title': operator.itemgetter('alias'),
104 'link': linker('update', 'id', 'alias'),
105 'description': operator.itemgetter('notes'),
106 'pubDate': lambda obj: utc.localize(obj['date_submitted']),
107 },
108 'users': {
109 'title': operator.itemgetter('name'),
110 'link': linker('user', 'name', 'name'),
111 'description': operator.itemgetter('name'),
112 },
113 'comments': {
114 'title': operator.itemgetter('rss_title'),
115 'link': linker('comment', 'id', 'id'),
116 'description': operator.itemgetter('text'),
117 'pubDate': lambda obj: utc.localize(obj['timestamp']),
118 },
119 'overrides': {
120 'title': operator.itemgetter('nvr'),
121 'link': linker('override', 'nvr', 'nvr'),
122 'description': operator.itemgetter('notes'),
123 'pubDate': lambda obj: utc.localize(obj['submission_date']),
124 },
125 }
126
127 for value in data[key]:
128 feed_item = feed.add_item()
129 for name, getter in getters[key].items():
130 # Because we have to use methods to fill feed entry attributes,
131 # it's done by getting methods by name and calling them
132 # on the same line.
133 getattr(feed_item, name)(getter(value))
134
135 return feed.rss_str()
136
137 return render
138
[end of bodhi/server/renderers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bodhi/server/renderers.py b/bodhi/server/renderers.py
--- a/bodhi/server/renderers.py
+++ b/bodhi/server/renderers.py
@@ -23,6 +23,8 @@
from feedgen.feed import FeedGenerator
from pyramid.exceptions import HTTPBadRequest
+from bodhi.server.util import markup
+
log = logging.getLogger(__name__)
@@ -98,11 +100,44 @@
return dict(href=request.route_url(route, **{param: obj[key]}))
return link_dict
+ def describe_update(alias, notes, builds):
+ """
+ Wrap calls to operator.itemgetter to retrieve notes and builds list.
+
+ Methods are used to fill feed entry values, so we must use a wrapper
+ to get an HTML formatted description from the `notes` and the `builds`
+ properties of the update.
+
+ For example:
+ getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds'))
+ description_value = getter(update_data)
+
+ Args:
+ alias (operator.itemgetter): A callable object which returns update alias
+ as string.
+ notes (operator.itemgetter): A callable object which returns update notes
+ as string.
+ builds (operator.itemgetter): A callable object which returns a list of builds
+ associated to the update.
+ Returns:
+ function: A function which accepts a dict representing an update as parameter.
+ """
+ def describe(*args, **kwargs):
+ text = f'# {alias(*args, **kwargs)}\n'
+ text += f'## Packages in this update:\n'
+ for p in builds(*args, **kwargs):
+ text += f'* {p.nvr}\n'
+ text += f'## Update description:\n{notes(*args, **kwargs)}'
+ return markup(None, text)
+ return describe
+
getters = {
'updates': {
- 'title': operator.itemgetter('alias'),
+ 'title': operator.itemgetter('title'),
'link': linker('update', 'id', 'alias'),
- 'description': operator.itemgetter('notes'),
+ 'description': describe_update(operator.itemgetter('alias'),
+ operator.itemgetter('notes'),
+ operator.itemgetter('builds')),
'pubDate': lambda obj: utc.localize(obj['date_submitted']),
},
'users': {
|
{"golden_diff": "diff --git a/bodhi/server/renderers.py b/bodhi/server/renderers.py\n--- a/bodhi/server/renderers.py\n+++ b/bodhi/server/renderers.py\n@@ -23,6 +23,8 @@\n from feedgen.feed import FeedGenerator\n from pyramid.exceptions import HTTPBadRequest\n \n+from bodhi.server.util import markup\n+\n \n log = logging.getLogger(__name__)\n \n@@ -98,11 +100,44 @@\n return dict(href=request.route_url(route, **{param: obj[key]}))\n return link_dict\n \n+ def describe_update(alias, notes, builds):\n+ \"\"\"\n+ Wrap calls to operator.itemgetter to retrieve notes and builds list.\n+\n+ Methods are used to fill feed entry values, so we must use a wrapper\n+ to get an HTML formatted description from the `notes` and the `builds`\n+ properties of the update.\n+\n+ For example:\n+ getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds'))\n+ description_value = getter(update_data)\n+\n+ Args:\n+ alias (operator.itemgetter): A callable object which returns update alias\n+ as string.\n+ notes (operator.itemgetter): A callable object which returns update notes\n+ as string.\n+ builds (operator.itemgetter): A callable object which returns a list of builds\n+ associated to the update.\n+ Returns:\n+ function: A function which accepts a dict representing an update as parameter.\n+ \"\"\"\n+ def describe(*args, **kwargs):\n+ text = f'# {alias(*args, **kwargs)}\\n'\n+ text += f'## Packages in this update:\\n'\n+ for p in builds(*args, **kwargs):\n+ text += f'* {p.nvr}\\n'\n+ text += f'## Update description:\\n{notes(*args, **kwargs)}'\n+ return markup(None, text)\n+ return describe\n+\n getters = {\n 'updates': {\n- 'title': operator.itemgetter('alias'),\n+ 'title': operator.itemgetter('title'),\n 'link': linker('update', 'id', 'alias'),\n- 'description': operator.itemgetter('notes'),\n+ 'description': describe_update(operator.itemgetter('alias'),\n+ operator.itemgetter('notes'),\n+ operator.itemgetter('builds')),\n 'pubDate': lambda obj: utc.localize(obj['date_submitted']),\n },\n 'users': {\n", "issue": "RSS feed gives no information about packages build\nSince commit 60dc56c that RSS feed doesn't give information about the packages affected in a build.\r\nExample from [Fedora30 Stable - RSS](https://bodhi.fedoraproject.org/rss/updates/?releases=F30&status=stable) :\r\n```xml\r\n<item>\r\n <title>FEDORA-2019-59d394e0fd</title>\r\n <link>https://bodhi.fedoraproject.org/updates/createrepo_c-0.14.1-1.fc30</link>\r\n <description>- Update to 0.14.1\r\n- Add --pkgorigins mode for Koji\r\n- Correct pkg count in headers if there were invalid pkgs (RhBug:1596211)\r\n- Prevent exiting with 0 if errors occur while finalizing repodata.\r\n </description>\r\n <pubDate>Fri, 24 May 2019 12:20:49 +0000</pubDate>\r\n</item>\r\n```\r\n\r\nAlso the link is invalid as in #3248\r\n\r\nLooking to the [Fedora30 Stable - Web UI](https://bodhi.fedoraproject.org/updates/?releases=F30&status=stable) it seems that it should be:\r\n```\r\n <title>createrepo_c-0.14.1-1.fc30</title>\r\n <link>https://bodhi.fedoraproject.org/updates/FEDORA-2019-59d394e0fd</link>\r\n```\n", "before_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but 
WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Define special view renderers, such as RSS.\"\"\"\nimport logging\nimport operator\n\nfrom pytz import utc\nfrom feedgen.feed import FeedGenerator\nfrom pyramid.exceptions import HTTPBadRequest\n\n\nlog = logging.getLogger(__name__)\n\n\ndef rss(info):\n \"\"\"\n Return a RSS renderer.\n\n Args:\n info (pyramid.renderers.RendererHelper): Unused.\n Returns:\n function: A function that can be used to render a RSS view.\n \"\"\"\n def render(data, system):\n \"\"\"\n Render the given data as an RSS view.\n\n If the request's content type is set to the default, this function will change it to\n application/rss+xml.\n\n Args:\n data (dict): A dictionary describing the information to be rendered. The information can\n be different types of objects, such as updates, users, comments, or overrides.\n system (pyramid.events.BeforeRender): Used to get the current request.\n Returns:\n str: An RSS document representing the given data.\n \"\"\"\n request = system.get('request')\n if request is not None:\n response = request.response\n ct = response.content_type\n if ct == response.default_content_type:\n response.content_type = 'application/rss+xml'\n\n if 'updates' in data:\n key = 'updates'\n feed_title = 'Released updates'\n elif 'users' in data:\n key = 'users'\n feed_title = 'Bodhi users'\n elif 'comments' in data:\n key = 'comments'\n feed_title = 'User comments'\n elif 'overrides' in data:\n key = 'overrides'\n feed_title = 'Update overrides'\n else:\n # This is a request we don't know how to render. 
Let's return BadRequest and log.\n log.debug('Unable to render RSS feed for data: %s', data)\n # See if we have a request so we can set a code without raising an Exception\n if request is not None:\n response.status = HTTPBadRequest.code\n return 'Invalid RSS feed request'\n else:\n raise HTTPBadRequest('Invalid RSS feed request')\n\n feed_description_list = []\n for k in request.GET.keys():\n feed_description_list.append('%s(%s)' % (k, request.GET[k]))\n if feed_description_list:\n feed_description = 'Filtered on: ' + ', '.join(feed_description_list)\n else:\n feed_description = \"All %s\" % (key)\n\n feed = FeedGenerator()\n feed.title(feed_title)\n feed.link(href=request.url, rel='self')\n feed.description(feed_description)\n feed.language('en')\n\n def linker(route, param, key):\n def link_dict(obj):\n return dict(href=request.route_url(route, **{param: obj[key]}))\n return link_dict\n\n getters = {\n 'updates': {\n 'title': operator.itemgetter('alias'),\n 'link': linker('update', 'id', 'alias'),\n 'description': operator.itemgetter('notes'),\n 'pubDate': lambda obj: utc.localize(obj['date_submitted']),\n },\n 'users': {\n 'title': operator.itemgetter('name'),\n 'link': linker('user', 'name', 'name'),\n 'description': operator.itemgetter('name'),\n },\n 'comments': {\n 'title': operator.itemgetter('rss_title'),\n 'link': linker('comment', 'id', 'id'),\n 'description': operator.itemgetter('text'),\n 'pubDate': lambda obj: utc.localize(obj['timestamp']),\n },\n 'overrides': {\n 'title': operator.itemgetter('nvr'),\n 'link': linker('override', 'nvr', 'nvr'),\n 'description': operator.itemgetter('notes'),\n 'pubDate': lambda obj: utc.localize(obj['submission_date']),\n },\n }\n\n for value in data[key]:\n feed_item = feed.add_item()\n for name, getter in getters[key].items():\n # Because we have to use methods to fill feed entry attributes,\n # it's done by getting methods by name and calling them\n # on the same line.\n getattr(feed_item, name)(getter(value))\n\n return feed.rss_str()\n\n return render\n", "path": "bodhi/server/renderers.py"}]}
| 2,306 | 536 |
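To make the `describe_update` getter in the record above concrete, here is a plain-string version of what it builds (the alias and build name come from the issue's example; the real code additionally runs the result through `markup()`):

```python
def describe(alias, notes, builds):
    text = f"# {alias}\n"
    text += "## Packages in this update:\n"
    for nvr in builds:
        text += f"* {nvr}\n"
    text += f"## Update description:\n{notes}"
    return text

print(describe("FEDORA-2019-59d394e0fd", "- Update to 0.14.1", ["createrepo_c-0.14.1-1.fc30"]))
```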
gh_patches_debug_36153
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1556
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Python 2 syntax from events examples in comments
# Description
In `pyhf.events` there are two examples of Python 2 syntax being used for
https://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L46-L53
and
https://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L69-L87
These examples should be updated to use Python 3 syntax.
Also the examples are wrong themselves. For example, the first example should be
```python
>>> import pyhf
>>> @pyhf.events.subscribe('myevent')
... def test(a,b):
... print(a+b)
...
>>> pyhf.events.trigger("myevent")(1,2)
3
```
</issue>
<code>
[start of src/pyhf/events.py]
1 import weakref
2 from functools import wraps
3
4 __events = {}
5 __disabled_events = set()
6
7 __all__ = [
8 "Callables",
9 "disable",
10 "enable",
11 "noop",
12 "register",
13 "subscribe",
14 "trigger",
15 ]
16
17
18 def __dir__():
19 return __all__
20
21
22 def noop(*args, **kwargs):
23 pass
24
25
26 class Callables:
27 def __init__(self):
28 self._callbacks = []
29
30 @property
31 def callbacks(self):
32 """
33 Get the current list of living callbacks.
34 """
35 self._flush()
36 return self._callbacks
37
38 def append(self, callback):
39 """
40 Append a new bound method as a callback to the list of callables.
41 """
42 try:
43 # methods
44 callback_ref = weakref.ref(callback.__func__), weakref.ref(
45 callback.__self__
46 )
47 except AttributeError:
48 callback_ref = weakref.ref(callback), None
49 self._callbacks.append(callback_ref)
50
51 def _flush(self):
52 """
53 Flush the list of callbacks with those who are weakly-referencing deleted objects.
54
55 Note: must interact with the self._callbacks directly, and not
56 self.callbacks, to avoid infinite recursion.
57 """
58 _callbacks = []
59 for func, arg in self._callbacks:
60 if arg is not None:
61 arg_ref = arg()
62 if arg_ref is None:
63 continue
64 _callbacks.append((func, arg))
65 self._callbacks = _callbacks
66
67 def __call__(self, *args, **kwargs):
68 for func, arg in self.callbacks:
69 # weakref: needs to be de-ref'd first before calling
70 if arg is not None:
71 func()(arg(), *args, **kwargs)
72 else:
73 func()(*args, **kwargs)
74
75 def __iter__(self):
76 return iter(self.callbacks)
77
78 def __getitem__(self, index):
79 return self.callbacks[index]
80
81 def __len__(self):
82 return len(self.callbacks)
83
84 def __repr__(self):
85 return f"Callables({self.callbacks})"
86
87
88 def subscribe(event):
89 """
90 Subscribe a function or object method as a callback to an event.
91
92 Note: this is meant to be used as a decorator.
93
94 Args:
95 event (:obj:`str`): The name of the event to subscribe to.
96
97 Returns:
98 :obj:`function`: Decorated function.
99 """
100 # Example:
101 #
102 # >>> @pyhf.events.subscribe('myevent')
103 # ... def test(a,b):
104 # ... print a+b
105 # ...
106 # >>> pyhf.events.trigger_myevent(1,2)
107 # 3
108 global __events
109
110 def __decorator(func):
111 __events.setdefault(event, Callables()).append(func)
112 return func
113
114 return __decorator
115
116
117 def register(event):
118 """
119 Register a function or object method to trigger an event. This creates two
120 events: ``{event_name}::before`` and ``{event_name}::after``.
121
122 Note: this is meant to be used as a decorator.
123
124 Args:
125 event (:obj:`str`): The name of the event to subscribe to.
126
127 Returns:
128 :obj:`function`: Decorated function.
129
130 """
131 # Examples:
132 #
133 # >>> @pyhf.events.register('test_func')
134 # ... def test(a,b):
135 # ... print a+b
136 # ...
137 # >>> @pyhf.events.subscribe('test_func::before')
138 # ... def precall():
139 # ... print 'before call'
140 # ...
141 # >>> @pyhf.events.subscribe('test_func::after')
142 # ... def postcall():
143 # ... print 'after call'
144 # ...
145 # >>> test(1,2)
146 # "before call"
147 # 3
148 # "after call"
149 # >>>
150
151 def _register(func):
152 @wraps(func)
153 def register_wrapper(*args, **kwargs):
154 trigger(f"{event:s}::before")()
155 result = func(*args, **kwargs)
156 trigger(f"{event:s}::after")()
157 return result
158
159 return register_wrapper
160
161 return _register
162
163
164 def trigger(event):
165 """
166 Trigger an event if not disabled.
167 """
168 global __events, __disabled_events, noop
169 is_noop = bool(event in __disabled_events or event not in __events)
170 return noop if is_noop else __events.get(event)
171
172
173 def disable(event):
174 """
175 Disable an event from firing.
176 """
177 global __disabled_events
178 __disabled_events.add(event)
179
180
181 def enable(event):
182 """
183 Enable an event to be fired if disabled.
184 """
185 global __disabled_events
186 __disabled_events.remove(event)
187
[end of src/pyhf/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pyhf/events.py b/src/pyhf/events.py
--- a/src/pyhf/events.py
+++ b/src/pyhf/events.py
@@ -89,22 +89,27 @@
"""
Subscribe a function or object method as a callback to an event.
- Note: this is meant to be used as a decorator.
+ .. note::
+
+ This is meant to be used as a decorator.
Args:
event (:obj:`str`): The name of the event to subscribe to.
Returns:
:obj:`function`: Decorated function.
+
+ Example:
+ >>> import pyhf
+ >>> @pyhf.events.subscribe("myevent")
+ ... def test(a, b):
+ ... print(a + b)
+ ...
+ >>> pyhf.events.trigger("myevent")(1, 2)
+ 3
+
"""
- # Example:
- #
- # >>> @pyhf.events.subscribe('myevent')
- # ... def test(a,b):
- # ... print a+b
- # ...
- # >>> pyhf.events.trigger_myevent(1,2)
- # 3
+
global __events
def __decorator(func):
@@ -119,7 +124,9 @@
Register a function or object method to trigger an event. This creates two
events: ``{event_name}::before`` and ``{event_name}::after``.
- Note: this is meant to be used as a decorator.
+ .. note::
+
+ This is meant to be used as a decorator.
Args:
event (:obj:`str`): The name of the event to subscribe to.
@@ -127,26 +134,26 @@
Returns:
:obj:`function`: Decorated function.
+ Example:
+ >>> import pyhf
+ >>> @pyhf.events.register("test_func")
+ ... def test(a, b):
+ ... print(a + b)
+ ...
+ >>> @pyhf.events.subscribe("test_func::before")
+ ... def precall():
+ ... print("before call")
+ ...
+ >>> @pyhf.events.subscribe("test_func::after")
+ ... def postcall():
+ ... print("after call")
+ ...
+ >>> test(1, 2)
+ before call
+ 3
+ after call
+
"""
- # Examples:
- #
- # >>> @pyhf.events.register('test_func')
- # ... def test(a,b):
- # ... print a+b
- # ...
- # >>> @pyhf.events.subscribe('test_func::before')
- # ... def precall():
- # ... print 'before call'
- # ...
- # >>> @pyhf.events.subscribe('test_func::after')
- # ... def postcall():
- # ... print 'after call'
- # ...
- # >>> test(1,2)
- # "before call"
- # 3
- # "after call"
- # >>>
def _register(func):
@wraps(func)
|
{"golden_diff": "diff --git a/src/pyhf/events.py b/src/pyhf/events.py\n--- a/src/pyhf/events.py\n+++ b/src/pyhf/events.py\n@@ -89,22 +89,27 @@\n \"\"\"\n Subscribe a function or object method as a callback to an event.\n \n- Note: this is meant to be used as a decorator.\n+ .. note::\n+\n+ This is meant to be used as a decorator.\n \n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n \n Returns:\n :obj:`function`: Decorated function.\n+\n+ Example:\n+ >>> import pyhf\n+ >>> @pyhf.events.subscribe(\"myevent\")\n+ ... def test(a, b):\n+ ... print(a + b)\n+ ...\n+ >>> pyhf.events.trigger(\"myevent\")(1, 2)\n+ 3\n+\n \"\"\"\n- # Example:\n- #\n- # >>> @pyhf.events.subscribe('myevent')\n- # ... def test(a,b):\n- # ... print a+b\n- # ...\n- # >>> pyhf.events.trigger_myevent(1,2)\n- # 3\n+\n global __events\n \n def __decorator(func):\n@@ -119,7 +124,9 @@\n Register a function or object method to trigger an event. This creates two\n events: ``{event_name}::before`` and ``{event_name}::after``.\n \n- Note: this is meant to be used as a decorator.\n+ .. note::\n+\n+ This is meant to be used as a decorator.\n \n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n@@ -127,26 +134,26 @@\n Returns:\n :obj:`function`: Decorated function.\n \n+ Example:\n+ >>> import pyhf\n+ >>> @pyhf.events.register(\"test_func\")\n+ ... def test(a, b):\n+ ... print(a + b)\n+ ...\n+ >>> @pyhf.events.subscribe(\"test_func::before\")\n+ ... def precall():\n+ ... print(\"before call\")\n+ ...\n+ >>> @pyhf.events.subscribe(\"test_func::after\")\n+ ... def postcall():\n+ ... print(\"after call\")\n+ ...\n+ >>> test(1, 2)\n+ before call\n+ 3\n+ after call\n+\n \"\"\"\n- # Examples:\n- #\n- # >>> @pyhf.events.register('test_func')\n- # ... def test(a,b):\n- # ... print a+b\n- # ...\n- # >>> @pyhf.events.subscribe('test_func::before')\n- # ... def precall():\n- # ... print 'before call'\n- # ...\n- # >>> @pyhf.events.subscribe('test_func::after')\n- # ... def postcall():\n- # ... print 'after call'\n- # ...\n- # >>> test(1,2)\n- # \"before call\"\n- # 3\n- # \"after call\"\n- # >>>\n \n def _register(func):\n @wraps(func)\n", "issue": "Remove Python 2 syntax from events examples in comments\n# Description\r\n\r\nIn `pyhf.events` there are a two examples of Python 2 syntax being used for \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L46-L53\r\n\r\nand \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L69-L87\r\n\r\nThese examples should be updated to use Python 3 syntax.\r\n\r\nAlso the examples are wrong themselves. For example, the first example should be\r\n\r\n```python\r\n>>> import pyhf\r\n>>> @pyhf.events.subscribe('myevent')\r\n... def test(a,b):\r\n... print(a+b)\r\n... 
\r\n>>> pyhf.events.trigger(\"myevent\")(1,2)\r\n3\r\n```\n", "before_files": [{"content": "import weakref\nfrom functools import wraps\n\n__events = {}\n__disabled_events = set()\n\n__all__ = [\n \"Callables\",\n \"disable\",\n \"enable\",\n \"noop\",\n \"register\",\n \"subscribe\",\n \"trigger\",\n]\n\n\ndef __dir__():\n return __all__\n\n\ndef noop(*args, **kwargs):\n pass\n\n\nclass Callables:\n def __init__(self):\n self._callbacks = []\n\n @property\n def callbacks(self):\n \"\"\"\n Get the current list of living callbacks.\n \"\"\"\n self._flush()\n return self._callbacks\n\n def append(self, callback):\n \"\"\"\n Append a new bound method as a callback to the list of callables.\n \"\"\"\n try:\n # methods\n callback_ref = weakref.ref(callback.__func__), weakref.ref(\n callback.__self__\n )\n except AttributeError:\n callback_ref = weakref.ref(callback), None\n self._callbacks.append(callback_ref)\n\n def _flush(self):\n \"\"\"\n Flush the list of callbacks with those who are weakly-referencing deleted objects.\n\n Note: must interact with the self._callbacks directly, and not\n self.callbacks, to avoid infinite recursion.\n \"\"\"\n _callbacks = []\n for func, arg in self._callbacks:\n if arg is not None:\n arg_ref = arg()\n if arg_ref is None:\n continue\n _callbacks.append((func, arg))\n self._callbacks = _callbacks\n\n def __call__(self, *args, **kwargs):\n for func, arg in self.callbacks:\n # weakref: needs to be de-ref'd first before calling\n if arg is not None:\n func()(arg(), *args, **kwargs)\n else:\n func()(*args, **kwargs)\n\n def __iter__(self):\n return iter(self.callbacks)\n\n def __getitem__(self, index):\n return self.callbacks[index]\n\n def __len__(self):\n return len(self.callbacks)\n\n def __repr__(self):\n return f\"Callables({self.callbacks})\"\n\n\ndef subscribe(event):\n \"\"\"\n Subscribe a function or object method as a callback to an event.\n\n Note: this is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n \"\"\"\n # Example:\n #\n # >>> @pyhf.events.subscribe('myevent')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> pyhf.events.trigger_myevent(1,2)\n # 3\n global __events\n\n def __decorator(func):\n __events.setdefault(event, Callables()).append(func)\n return func\n\n return __decorator\n\n\ndef register(event):\n \"\"\"\n Register a function or object method to trigger an event. This creates two\n events: ``{event_name}::before`` and ``{event_name}::after``.\n\n Note: this is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n\n \"\"\"\n # Examples:\n #\n # >>> @pyhf.events.register('test_func')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> @pyhf.events.subscribe('test_func::before')\n # ... def precall():\n # ... print 'before call'\n # ...\n # >>> @pyhf.events.subscribe('test_func::after')\n # ... def postcall():\n # ... 
print 'after call'\n # ...\n # >>> test(1,2)\n # \"before call\"\n # 3\n # \"after call\"\n # >>>\n\n def _register(func):\n @wraps(func)\n def register_wrapper(*args, **kwargs):\n trigger(f\"{event:s}::before\")()\n result = func(*args, **kwargs)\n trigger(f\"{event:s}::after\")()\n return result\n\n return register_wrapper\n\n return _register\n\n\ndef trigger(event):\n \"\"\"\n Trigger an event if not disabled.\n \"\"\"\n global __events, __disabled_events, noop\n is_noop = bool(event in __disabled_events or event not in __events)\n return noop if is_noop else __events.get(event)\n\n\ndef disable(event):\n \"\"\"\n Disable an event from firing.\n \"\"\"\n global __disabled_events\n __disabled_events.add(event)\n\n\ndef enable(event):\n \"\"\"\n Enable an event to be fired if disabled.\n \"\"\"\n global __disabled_events\n __disabled_events.remove(event)\n", "path": "src/pyhf/events.py"}]}
| 2,264 | 715 |
gh_patches_debug_11482
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Capture details of Celery Chains and Chords
Celery has some more advanced features to join multiple jobs into one. The agent needs testing and investigation into how they can be best instrumented.
</issue>
<code>
[start of src/scout_apm/celery.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5
6 from celery.signals import before_task_publish, task_postrun, task_prerun
7
8 import scout_apm.core
9 from scout_apm.compat import datetime_to_timestamp
10 from scout_apm.core.tracked_request import TrackedRequest
11
12
13 def before_publish_callback(headers=None, properties=None, **kwargs):
14 if "scout_task_start" not in headers:
15 headers["scout_task_start"] = datetime_to_timestamp(dt.datetime.utcnow())
16
17
18 def prerun_callback(task=None, **kwargs):
19 tracked_request = TrackedRequest.instance()
20 tracked_request.mark_real_request()
21
22 start = getattr(task.request, "scout_task_start", None)
23 if start is not None:
24 now = datetime_to_timestamp(dt.datetime.utcnow())
25 try:
26 queue_time = now - start
27 except TypeError:
28 pass
29 else:
30 tracked_request.tag("queue_time", queue_time)
31
32 delivery_info = task.request.delivery_info
33 tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
34 tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
35 tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown"))
36 tracked_request.tag("queue", delivery_info.get("queue", "unknown"))
37
38 tracked_request.start_span(operation=("Job/" + task.name))
39
40
41 def postrun_callback(task=None, **kwargs):
42 tracked_request = TrackedRequest.instance()
43 tracked_request.stop_span()
44
45
46 def install():
47 installed = scout_apm.core.install()
48 if not installed:
49 return
50
51 before_task_publish.connect(before_publish_callback)
52 task_prerun.connect(prerun_callback)
53 task_postrun.connect(postrun_callback)
54
55
56 def uninstall():
57 before_task_publish.disconnect(before_publish_callback)
58 task_prerun.disconnect(prerun_callback)
59 task_postrun.disconnect(postrun_callback)
60
[end of src/scout_apm/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py
--- a/src/scout_apm/celery.py
+++ b/src/scout_apm/celery.py
@@ -29,6 +29,13 @@
else:
tracked_request.tag("queue_time", queue_time)
+ task_id = getattr(task.request, "id", None)
+ if task_id:
+ tracked_request.tag("task_id", task_id)
+ parent_task_id = getattr(task.request, "parent_id", None)
+ if parent_task_id:
+ tracked_request.tag("parent_task_id", parent_task_id)
+
delivery_info = task.request.delivery_info
tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
|
{"golden_diff": "diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py\n--- a/src/scout_apm/celery.py\n+++ b/src/scout_apm/celery.py\n@@ -29,6 +29,13 @@\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n \n+ task_id = getattr(task.request, \"id\", None)\n+ if task_id:\n+ tracked_request.tag(\"task_id\", task_id)\n+ parent_task_id = getattr(task.request, \"parent_id\", None)\n+ if parent_task_id:\n+ tracked_request.tag(\"parent_task_id\", parent_task_id)\n+\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n", "issue": "Capture details of Celery Chains and Chords\nCelery has some more advanced features to join multiple jobs into one. The agent needs testing and investigation into how they can be best instrumented.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\n\nfrom celery.signals import before_task_publish, task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.compat import datetime_to_timestamp\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\ndef before_publish_callback(headers=None, properties=None, **kwargs):\n if \"scout_task_start\" not in headers:\n headers[\"scout_task_start\"] = datetime_to_timestamp(dt.datetime.utcnow())\n\n\ndef prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n start = getattr(task.request, \"scout_task_start\", None)\n if start is not None:\n now = datetime_to_timestamp(dt.datetime.utcnow())\n try:\n queue_time = now - start\n except TypeError:\n pass\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n before_task_publish.connect(before_publish_callback)\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n before_task_publish.disconnect(before_publish_callback)\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}]}
| 1,117 | 190 |
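The `task_id` and `parent_task_id` tags added in the record above are what make chains and chords traceable: for a task queued by another task, Celery fills `request.parent_id` with the id of the task that sent it. The sketch below is illustrative only (the app name, broker, and task are assumptions, not part of the agent):

```python
from celery import Celery, chain

app = Celery("example", broker="memory://")

@app.task(bind=True)
def step(self, value):
    # In chain(step.s(1), step.s()), the second run's self.request.parent_id
    # is the first run's task id, which prerun_callback now records as the
    # "parent_task_id" tag next to "task_id".
    return value + 1

workflow = chain(step.s(1), step.s())
```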
gh_patches_debug_20353
|
rasdani/github-patches
|
git_diff
|
WeblateOrg__weblate-10604
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some languages don't have all strings available for translation
### Describe the issue
My project is here: https://hosted.weblate.org/projects/feeder/android-strings
A few languages, namely Polish, French, and Chinese (Simplified), are missing a dozen strings.
One example is the string `other_minutes` which is not available for translation in these languages.
I have tried re-scanning strings and similar with no change.
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar issues in this repository.
### Steps to reproduce the behavior
Not sure how to reproduce it, but it is happening here: https://hosted.weblate.org/projects/feeder/android-strings
Look at the string `other_minutes`; it is missing from Polish, French, and Chinese (Simplified).
### Expected behavior
All strings should be available for translation in all languages.
### Screenshots
_No response_
### Exception traceback
_No response_
### How do you run Weblate?
weblate.org service
### Weblate versions
_No response_
### Weblate deploy checks
_No response_
### Additional context
_No response_
</issue>
<code>
[start of weblate/addons/cleanup.py]
1 # Copyright © Michal Čihař <michal@weblate.org>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 from django.utils.translation import gettext_lazy
6
7 from weblate.addons.base import UpdateBaseAddon
8 from weblate.addons.events import EVENT_POST_COMMIT, EVENT_POST_UPDATE, EVENT_PRE_COMMIT
9 from weblate.trans.exceptions import FileParseError
10
11
12 class BaseCleanupAddon(UpdateBaseAddon):
13 @classmethod
14 def can_install(cls, component, user):
15 if not component.has_template():
16 return False
17 return super().can_install(component, user)
18
19
20 class CleanupAddon(BaseCleanupAddon):
21 name = "weblate.cleanup.generic"
22 verbose = gettext_lazy("Cleanup translation files")
23 description = gettext_lazy(
24 "Update all translation files to match the monolingual base file. "
25 "For most file formats, this means removing stale translation keys "
26 "no longer present in the base file."
27 )
28 icon = "eraser.svg"
29 events = (EVENT_PRE_COMMIT, EVENT_POST_UPDATE)
30
31 def update_translations(self, component, previous_head):
32 for translation in self.iterate_translations(component):
33 filenames = translation.store.cleanup_unused()
34 if filenames is None:
35 continue
36 self.extra_files.extend(filenames)
37 translation.store_hash()
38
39 def pre_commit(self, translation, author):
40 if translation.is_source and not translation.component.intermediate:
41 return
42 try:
43 filenames = translation.store.cleanup_unused()
44 except FileParseError:
45 return
46 if filenames is not None:
47 self.extra_files.extend(filenames)
48 translation.store_hash()
49
50
51 class RemoveBlankAddon(BaseCleanupAddon):
52 name = "weblate.cleanup.blank"
53 verbose = gettext_lazy("Remove blank strings")
54 description = gettext_lazy(
55 "Removes strings without a translation from translation files."
56 )
57 events = (EVENT_POST_COMMIT, EVENT_POST_UPDATE)
58 icon = "eraser.svg"
59
60 def update_translations(self, component, previous_head):
61 for translation in self.iterate_translations(component):
62 filenames = translation.store.cleanup_blank()
63 if filenames is None:
64 continue
65 self.extra_files.extend(filenames)
66 translation.store_hash()
67
68 def post_commit(self, component):
69 self.post_update(component, None, skip_push=True)
70
[end of weblate/addons/cleanup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/weblate/addons/cleanup.py b/weblate/addons/cleanup.py
--- a/weblate/addons/cleanup.py
+++ b/weblate/addons/cleanup.py
@@ -34,7 +34,7 @@
if filenames is None:
continue
self.extra_files.extend(filenames)
- translation.store_hash()
+ # Do not update hash here as this is just before parsing updated files
def pre_commit(self, translation, author):
if translation.is_source and not translation.component.intermediate:
@@ -63,7 +63,9 @@
if filenames is None:
continue
self.extra_files.extend(filenames)
- translation.store_hash()
+ # Do not update hash in post_update, only in post_commit
+ if previous_head == "weblate:post-commit":
+ translation.store_hash()
def post_commit(self, component):
- self.post_update(component, None, skip_push=True)
+ self.post_update(component, "weblate:post-commit", skip_push=True)
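The essential change above is that `post_commit` now passes the sentinel string `"weblate:post-commit"` through the `previous_head` argument, so the shared update routine can tell which event invoked it and only refreshes the stored hash on the post-commit path. Below is a stripped-down sketch of that dispatch pattern; the dictionaries and function bodies are invented stand-ins for Weblate's translation objects, not the real implementation.

```python
POST_COMMIT = "weblate:post-commit"

def update_translations(translations, previous_head):
    for translation in translations:
        translation["blank_strings_removed"] = True
        # Refresh the hash only when called from post_commit; after a VCS
        # update the files are about to be re-parsed, so skip it there.
        if previous_head == POST_COMMIT:
            translation["hash"] = "refreshed"

def post_update(translations, previous_head):
    update_translations(translations, previous_head)

def post_commit(translations):
    update_translations(translations, POST_COMMIT)

rows = [{"lang": "pl"}, {"lang": "fr"}]
post_update(rows, "0123abcd")  # simulated VCS revision: hash left alone
post_commit(rows)              # hash refreshed only here
print(rows)
```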
|
{"golden_diff": "diff --git a/weblate/addons/cleanup.py b/weblate/addons/cleanup.py\n--- a/weblate/addons/cleanup.py\n+++ b/weblate/addons/cleanup.py\n@@ -34,7 +34,7 @@\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n- translation.store_hash()\n+ # Do not update hash here as this is just before parsing updated files\n \n def pre_commit(self, translation, author):\n if translation.is_source and not translation.component.intermediate:\n@@ -63,7 +63,9 @@\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n- translation.store_hash()\n+ # Do not update hash in post_update, only in post_commit\n+ if previous_head == \"weblate:post-commit\":\n+ translation.store_hash()\n \n def post_commit(self, component):\n- self.post_update(component, None, skip_push=True)\n+ self.post_update(component, \"weblate:post-commit\", skip_push=True)\n", "issue": "Some languages don't have all strings available for translation\n### Describe the issue\n\nMy project is here: https://hosted.weblate.org/projects/feeder/android-strings\r\n\r\nA few languages Polish, French and Chinese (Simplified), are missing a dozen strings.\r\n\r\nOne example is the string `other_minutes` which is not available for translation in these languages.\r\n\r\nI have tried re-scanning strings and similar with no change.\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar issues in this repository.\n\n### Steps to reproduce the behavior\n\nNot sure how to reproduce it but it is happening here :https://hosted.weblate.org/projects/feeder/android-strings\r\n\r\nlook at string `other_minutes`, it is missing from Polish, French, and Chinese (Simplified)\n\n### Expected behavior\n\nAll strings should be available for translation in all languages.\n\n### Screenshots\n\n_No response_\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nweblate.org service\n\n### Weblate versions\n\n_No response_\n\n### Weblate deploy checks\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <michal@weblate.org>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.addons.base import UpdateBaseAddon\nfrom weblate.addons.events import EVENT_POST_COMMIT, EVENT_POST_UPDATE, EVENT_PRE_COMMIT\nfrom weblate.trans.exceptions import FileParseError\n\n\nclass BaseCleanupAddon(UpdateBaseAddon):\n @classmethod\n def can_install(cls, component, user):\n if not component.has_template():\n return False\n return super().can_install(component, user)\n\n\nclass CleanupAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.generic\"\n verbose = gettext_lazy(\"Cleanup translation files\")\n description = gettext_lazy(\n \"Update all translation files to match the monolingual base file. 
\"\n \"For most file formats, this means removing stale translation keys \"\n \"no longer present in the base file.\"\n )\n icon = \"eraser.svg\"\n events = (EVENT_PRE_COMMIT, EVENT_POST_UPDATE)\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_unused()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n def pre_commit(self, translation, author):\n if translation.is_source and not translation.component.intermediate:\n return\n try:\n filenames = translation.store.cleanup_unused()\n except FileParseError:\n return\n if filenames is not None:\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n\nclass RemoveBlankAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.blank\"\n verbose = gettext_lazy(\"Remove blank strings\")\n description = gettext_lazy(\n \"Removes strings without a translation from translation files.\"\n )\n events = (EVENT_POST_COMMIT, EVENT_POST_UPDATE)\n icon = \"eraser.svg\"\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_blank()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n def post_commit(self, component):\n self.post_update(component, None, skip_push=True)\n", "path": "weblate/addons/cleanup.py"}]}
| 1,422 | 230 |
gh_patches_debug_32100
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1420
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Simulator timestamps are broken in logs
Looks like gh-1411 didn't work properly. I'll try to find the fix shortly, but if we want to do a release before I do, we should revert it.
</issue>
<code>
[start of cocotb/log.py]
1 # Copyright (c) 2013, 2018 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """
29 Everything related to logging
30 """
31
32 import os
33 import sys
34 import logging
35 import warnings
36
37 from cocotb.utils import (
38 get_sim_time, get_time_from_sim_steps, want_color_output
39 )
40
41 import cocotb.ANSI as ANSI
42
43 if "COCOTB_REDUCED_LOG_FMT" in os.environ:
44 _suppress = True
45 else:
46 _suppress = False
47
48 # Column alignment
49 _LEVEL_CHARS = len("CRITICAL") # noqa
50 _RECORD_CHARS = 35 # noqa
51 _FILENAME_CHARS = 20 # noqa
52 _LINENO_CHARS = 4 # noqa
53 _FUNCNAME_CHARS = 31 # noqa
54
55
56 def default_config():
57 """ Apply the default cocotb log formatting to the root logger.
58
59 This hooks up the logger to write to stdout, using either
60 :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending
61 on whether colored output is requested. It also adds a
62 :class:`SimTimeContextFilter` filter so that
63 :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.
64
65 The logging level for cocotb logs is set based on the
66 :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.
67
68 If desired, this logging configuration can be overwritten by calling
69 ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by
70 manually resetting the root logger instance, for which examples can be
71 found online.
72 """
73 # construct an appropriate handler
74 hdlr = logging.StreamHandler(sys.stdout)
75 if want_color_output():
76 hdlr.setFormatter(SimColourLogFormatter())
77 else:
78 hdlr.setFormatter(SimLogFormatter())
79
80 filter = SimTimeContextFilter()
81
82 logging.setLoggerClass(SimBaseLog) # For backwards compatibility
83 logging.basicConfig()
84 logging.getLogger().handlers = [hdlr] # overwrite default handlers
85 logging.getLogger().filters = [filter]
86
87 # apply level settings for cocotb
88 log = logging.getLogger('cocotb')
89 level = os.getenv("COCOTB_LOG_LEVEL", "INFO")
90 try:
91 _default_log = getattr(logging, level)
92 except AttributeError:
93 log.error("Unable to set logging level to %r" % level)
94 _default_log = logging.INFO
95 log.setLevel(_default_log)
96
97 # Notify GPI of log level, which it uses as an optimization to avoid
98 # calling into Python.
99 if "COCOTB_SIM" in os.environ:
100 import simulator
101 simulator.log_level(_default_log)
102
103
104 class SimBaseLog(logging.getLoggerClass()):
105 """ This class only exists for backwards compatibility """
106
107 @property
108 def logger(self):
109 warnings.warn(
110 "the .logger attribute should not be used now that `SimLog` "
111 "returns a native logger instance directly.",
112 DeprecationWarning, stacklevel=2)
113 return self
114
115 @property
116 def colour(self):
117 warnings.warn(
118 "the .colour attribute may be removed in future, use the "
119 "equivalent `cocotb.utils.want_color_output()` instead",
120 DeprecationWarning, stacklevel=2)
121 return want_color_output()
122
123
124 # this used to be a class, hence the unusual capitalization
125 def SimLog(name, ident=None):
126 """ Like logging.getLogger, but append a numeric identifier to the name """
127 if ident is not None:
128 name = "%s.0x%x" % (name, ident)
129 return logging.getLogger(name)
130
131
132 class SimTimeContextFilter(logging.Filter):
133 """
134 A filter to inject simulator times into the log records.
135
136 This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.
137
138 This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.
139 """
140
141 # needed to make our docs render well
142 def __init__(self, *args, **kwargs):
143 """ See :class:`logging.Filter` for argument descriptions """
144 super().__init__(*args, **kwargs)
145
146 def filter(self, record):
147 try:
148 record.created_sim_time = get_sim_time()
149 except RecursionError:
150 # get_sim_time may try to log - if that happens, we can't
151 # attach a simulator time to this message.
152 record.created_sim_time = None
153 return True
154
155
156 class SimLogFormatter(logging.Formatter):
157 """Log formatter to provide consistent log message handling.
158
159 This will only add simulator timestamps if the logger object has a
160 :class:`SimTimeContextFilter` filter attached, which cocotb ensures by
161 default.
162 """
163
164 # Removes the arguments from the base class. Docstring needed to make
165 # sphinx happy.
166 def __init__(self):
167 """ Takes no arguments. """
168 super().__init__()
169
170 # Justify and truncate
171 @staticmethod
172 def ljust(string, chars):
173 if len(string) > chars:
174 return ".." + string[(chars - 2) * -1:]
175 return string.ljust(chars)
176
177 @staticmethod
178 def rjust(string, chars):
179 if len(string) > chars:
180 return ".." + string[(chars - 2) * -1:]
181 return string.rjust(chars)
182
183 def _format(self, level, record, msg, coloured=False):
184 sim_time = getattr(record, 'created_sim_time', None)
185 if sim_time is None:
186 sim_time_str = " -.--ns"
187 else:
188 time_ns = get_time_from_sim_steps(sim_time, 'ns')
189 sim_time_str = "{:6.2f}ns".format(time_ns)
190 prefix = sim_time_str.rjust(11) + ' ' + level + ' '
191 if not _suppress:
192 prefix += self.ljust(record.name, _RECORD_CHARS) + \
193 self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \
194 ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \
195 ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '
196
197 # these lines are copied from the builtin logger
198 if record.exc_info:
199 # Cache the traceback text to avoid converting it multiple times
200 # (it's constant anyway)
201 if not record.exc_text:
202 record.exc_text = self.formatException(record.exc_info)
203 if record.exc_text:
204 if msg[-1:] != "\n":
205 msg = msg + "\n"
206 msg = msg + record.exc_text
207
208 prefix_len = len(prefix)
209 if coloured:
210 prefix_len -= (len(level) - _LEVEL_CHARS)
211 pad = "\n" + " " * (prefix_len)
212 return prefix + pad.join(msg.split('\n'))
213
214 def format(self, record):
215 """Prettify the log output, annotate with simulation time"""
216
217 msg = record.getMessage()
218 level = record.levelname.ljust(_LEVEL_CHARS)
219
220 return self._format(level, record, msg)
221
222
223 class SimColourLogFormatter(SimLogFormatter):
224 """Log formatter to provide consistent log message handling."""
225
226 loglevel2colour = {
227 logging.DEBUG : "%s",
228 logging.INFO : ANSI.COLOR_INFO + "%s" + ANSI.COLOR_DEFAULT,
229 logging.WARNING : ANSI.COLOR_WARNING + "%s" + ANSI.COLOR_DEFAULT,
230 logging.ERROR : ANSI.COLOR_ERROR + "%s" + ANSI.COLOR_DEFAULT,
231 logging.CRITICAL: ANSI.COLOR_CRITICAL + "%s" + ANSI.COLOR_DEFAULT,
232 }
233
234 def format(self, record):
235 """Prettify the log output, annotate with simulation time"""
236
237 msg = record.getMessage()
238
239 # Need to colour each line in case coloring is applied in the message
240 msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
241 level = (SimColourLogFormatter.loglevel2colour[record.levelno] %
242 record.levelname.ljust(_LEVEL_CHARS))
243
244 return self._format(level, record, msg, coloured=True)
245
246
247 def _filter_from_c(logger_name, level):
248 return logging.getLogger(logger_name).isEnabledFor(level)
249
250
251 def _log_from_c(logger_name, level, filename, lineno, msg, function_name):
252 """
253 This is for use from the C world, and allows us to insert C stack
254 information.
255 """
256 logger = logging.getLogger(logger_name)
257 if logger.isEnabledFor(level):
258 record = logger.makeRecord(
259 logger.name,
260 level,
261 filename,
262 lineno,
263 msg,
264 None,
265 None,
266 function_name
267 )
268 logger.handle(record)
269
[end of cocotb/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cocotb/log.py b/cocotb/log.py
--- a/cocotb/log.py
+++ b/cocotb/log.py
@@ -72,17 +72,16 @@
"""
# construct an appropriate handler
hdlr = logging.StreamHandler(sys.stdout)
+ hdlr.addFilter(SimTimeContextFilter())
if want_color_output():
hdlr.setFormatter(SimColourLogFormatter())
else:
hdlr.setFormatter(SimLogFormatter())
- filter = SimTimeContextFilter()
logging.setLoggerClass(SimBaseLog) # For backwards compatibility
logging.basicConfig()
logging.getLogger().handlers = [hdlr] # overwrite default handlers
- logging.getLogger().filters = [filter]
# apply level settings for cocotb
log = logging.getLogger('cocotb')
@@ -139,9 +138,9 @@
"""
# needed to make our docs render well
- def __init__(self, *args, **kwargs):
- """ See :class:`logging.Filter` for argument descriptions """
- super().__init__(*args, **kwargs)
+ def __init__(self):
+ """ Takes no arguments """
+ super().__init__()
def filter(self, record):
try:
@@ -156,9 +155,9 @@
class SimLogFormatter(logging.Formatter):
"""Log formatter to provide consistent log message handling.
- This will only add simulator timestamps if the logger object has a
- :class:`SimTimeContextFilter` filter attached, which cocotb ensures by
- default.
+ This will only add simulator timestamps if the handler object this
+ formatter is attached to has a :class:`SimTimeContextFilter` filter
+ attached, which cocotb ensures by default.
"""
# Removes the arguments from the base class. Docstring needed to make
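The reason moving the filter works comes down to stdlib logging semantics: filters attached to a logger are only consulted for records created on that exact logger, not for records propagating up from child loggers, while filters attached to a handler run for every record the handler processes. A small standalone illustration of that behaviour follows; the logger name and the `stamp` attribute are invented for the example and are not cocotb code.

```python
import logging

class Stamp(logging.Filter):
    def filter(self, record):
        record.stamp = "12.34ns"  # stand-in for a simulator timestamp
        return True

handler = logging.StreamHandler()
handler.addFilter(Stamp())        # filter on the handler, as in the fix
handler.setFormatter(logging.Formatter("%(stamp)s %(name)s %(message)s"))

root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)

# This record is created on a child logger. A filter placed only on the
# root *logger* would never see it, but the handler-level filter does,
# so the formatter always finds record.stamp.
logging.getLogger("cocotb.regression").info("running test")
```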
|
{"golden_diff": "diff --git a/cocotb/log.py b/cocotb/log.py\n--- a/cocotb/log.py\n+++ b/cocotb/log.py\n@@ -72,17 +72,16 @@\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n+ hdlr.addFilter(SimTimeContextFilter())\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n \n- filter = SimTimeContextFilter()\n \n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n- logging.getLogger().filters = [filter]\n \n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n@@ -139,9 +138,9 @@\n \"\"\"\n \n # needed to make our docs render well\n- def __init__(self, *args, **kwargs):\n- \"\"\" See :class:`logging.Filter` for argument descriptions \"\"\"\n- super().__init__(*args, **kwargs)\n+ def __init__(self):\n+ \"\"\" Takes no arguments \"\"\"\n+ super().__init__()\n \n def filter(self, record):\n try:\n@@ -156,9 +155,9 @@\n class SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n \n- This will only add simulator timestamps if the logger object has a\n- :class:`SimTimeContextFilter` filter attached, which cocotb ensures by\n- default.\n+ This will only add simulator timestamps if the handler object this\n+ formatter is attached to has a :class:`SimTimeContextFilter` filter\n+ attached, which cocotb ensures by default.\n \"\"\"\n \n # Removes the arguments from the base class. Docstring needed to make\n", "issue": "Simulator timestamps are broken in logs\nLooks like gh-1411 didn't work properly. I'll try and find the fix shortly, but if we want to do a release before I do we should revert it.\n", "before_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport warnings\n\nfrom cocotb.utils import (\n get_sim_time, get_time_from_sim_steps, want_color_output\n)\n\nimport cocotb.ANSI as ANSI\n\nif \"COCOTB_REDUCED_LOG_FMT\" in os.environ:\n _suppress = True\nelse:\n _suppress = False\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n\ndef default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested. It also adds a\n :class:`SimTimeContextFilter` filter so that\n :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance, for which examples can be\n found online.\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n filter = SimTimeContextFilter()\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n logging.getLogger().filters = [filter]\n\n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError:\n log.error(\"Unable to set logging level to %r\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n if \"COCOTB_SIM\" in os.environ:\n import simulator\n simulator.log_level(_default_log)\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\" This class only exists for backwards compatibility \"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning, stacklevel=2)\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning, stacklevel=2)\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, ident=None):\n \"\"\" Like logging.getLogger, but append a numeric identifier to the name \"\"\"\n if ident is not None:\n name = \"%s.0x%x\" % 
(name, ident)\n return logging.getLogger(name)\n\n\nclass SimTimeContextFilter(logging.Filter):\n \"\"\"\n A filter to inject simulator times into the log records.\n\n This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.\n\n This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.\n \"\"\"\n\n # needed to make our docs render well\n def __init__(self, *args, **kwargs):\n \"\"\" See :class:`logging.Filter` for argument descriptions \"\"\"\n super().__init__(*args, **kwargs)\n\n def filter(self, record):\n try:\n record.created_sim_time = get_sim_time()\n except RecursionError:\n # get_sim_time may try to log - if that happens, we can't\n # attach a simulator time to this message.\n record.created_sim_time = None\n return True\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n\n This will only add simulator timestamps if the logger object has a\n :class:`SimTimeContextFilter` filter attached, which cocotb ensures by\n default.\n \"\"\"\n\n # Removes the arguments from the base class. Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\" Takes no arguments. \"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n sim_time = getattr(record, 'created_sim_time', None)\n if sim_time is None:\n sim_time_str = \" -.--ns\"\n else:\n time_ns = get_time_from_sim_steps(sim_time, 'ns')\n sim_time_str = \"{:6.2f}ns\".format(time_ns)\n prefix = sim_time_str.rjust(11) + ' ' + level + ' '\n if not _suppress:\n prefix += self.ljust(record.name, _RECORD_CHARS) + \\\n self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \\\n ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \\\n ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= (len(level) - _LEVEL_CHARS)\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split('\\n'))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.DEBUG : \"%s\",\n logging.INFO : ANSI.COLOR_INFO + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.WARNING : ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR : ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n\n # Need to colour each line in case coloring is applied in the message\n 
msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n level = (SimColourLogFormatter.loglevel2colour[record.levelno] %\n record.levelname.ljust(_LEVEL_CHARS))\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name,\n level,\n filename,\n lineno,\n msg,\n None,\n None,\n function_name\n )\n logger.handle(record)\n", "path": "cocotb/log.py"}]}
| 3,524 | 426 |
gh_patches_debug_3202
|
rasdani/github-patches
|
git_diff
|
hylang__hy-2190
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `project_urls` to `setup.py`
This would allow us to provide links to our GitHub repository etc. in a sidebar on PyPI.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import find_packages, setup
4 import fastentrypoints # Monkey-patches setuptools.
5
6 from get_version import __version__
7
8 os.chdir(os.path.split(os.path.abspath(__file__))[0])
9
10 PKG = "hy"
11
12 long_description = """Hy is a Python <--> Lisp layer. It helps
13 make things work nicer, and lets Python and the Hy lisp variant play
14 nice together. """
15
16 setup(
17 name=PKG,
18 version=__version__,
19 install_requires=[
20 'rply>=0.7.7',
21 'funcparserlib>=1.0.0a0',
22 'colorama',
23 'astor>=0.8 ; python_version < "3.9"',
24 ],
25 python_requires = '>= 3.7, <= 3.10',
26 entry_points={
27 'console_scripts': [
28 'hy = hy.cmdline:hy_main',
29 'hy3 = hy.cmdline:hy_main',
30 'hyc = hy.cmdline:hyc_main',
31 'hyc3 = hy.cmdline:hyc_main',
32 'hy2py = hy.cmdline:hy2py_main',
33 'hy2py3 = hy.cmdline:hy2py_main',
34 ]
35 },
36 packages=find_packages(exclude=['tests*']),
37 package_data={
38 'hy': ['*.hy', '__pycache__/*'],
39 'hy.contrib': ['*.hy', '__pycache__/*'],
40 'hy.core': ['*.hy', '__pycache__/*'],
41 'hy.extra': ['*.hy', '__pycache__/*'],
42 },
43 data_files=[
44 ('get_version', ['get_version.py'])
45 ],
46 author="Paul Tagliamonte",
47 author_email="tag@pault.ag",
48 long_description=long_description,
49 description='Lisp and Python love each other.',
50 license="Expat",
51 url="http://hylang.org/",
52 platforms=['any'],
53 classifiers=[
54 "Development Status :: 4 - Beta",
55 "Intended Audience :: Developers",
56 "License :: DFSG approved",
57 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
58 "Operating System :: OS Independent",
59 "Programming Language :: Lisp",
60 "Programming Language :: Python",
61 "Programming Language :: Python :: 3",
62 "Programming Language :: Python :: 3.7",
63 "Programming Language :: Python :: 3.8",
64 "Programming Language :: Python :: 3.9",
65 "Programming Language :: Python :: 3.10",
66 "Topic :: Software Development :: Code Generators",
67 "Topic :: Software Development :: Compilers",
68 "Topic :: Software Development :: Libraries",
69 ]
70 )
71
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,5 +66,9 @@
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries",
- ]
+ ],
+ project_urls={
+ "Documentation": "https://docs.hylang.org/",
+ "Source": "https://github.com/hylang/hy",
+ }
)
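For context, `project_urls` is a regular `setuptools.setup()` keyword that takes arbitrary label-to-URL pairs, and PyPI shows them as sidebar links next to the main `url`. A minimal standalone `setup.py` sketch is below; the package name and URLs are placeholders rather than Hy's actual metadata.

```python
from setuptools import setup

setup(
    name="example-pkg",          # placeholder project name
    version="0.0.1",
    url="https://example.org/",  # primary homepage shown by PyPI
    project_urls={
        "Documentation": "https://example.org/docs/",
        "Source": "https://github.com/example/example-pkg",
        "Issue Tracker": "https://github.com/example/example-pkg/issues",
    },
)
```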
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,5 +66,9 @@\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n- ]\n+ ],\n+ project_urls={\n+ \"Documentation\": \"https://docs.hylang.org/\",\n+ \"Source\": \"https://github.com/hylang/hy\",\n+ }\n )\n", "issue": "Add `project_urls` to `setup.py`\nThis would allow us to provide links to our GitHub repository etc. in a sidebar on PyPI.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\nimport fastentrypoints # Monkey-patches setuptools.\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=[\n 'rply>=0.7.7',\n 'funcparserlib>=1.0.0a0',\n 'colorama',\n 'astor>=0.8 ; python_version < \"3.9\"',\n ],\n python_requires = '>= 3.7, <= 3.10',\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy3 = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hyc3 = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py3 = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy': ['*.hy', '__pycache__/*'],\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"tag@pault.ag\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}]}
| 1,275 | 107 |
gh_patches_debug_22425
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-68
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HTML editor in create-text process not fully working
The HTML editor in the create-text process is not working. I currently can't add links to other websites or upload pictures ;)
<img width="803" alt="bildschirmfoto 2017-02-13 um 17 24 40" src="https://cloud.githubusercontent.com/assets/11075214/22891998/5bc735ac-f211-11e6-8766-a7588873b436.png">
</issue>
<code>
[start of meinberlin/urls.py]
1 """meinberlin URL Configuration."""
2
3 from allauth import urls as allauth_urls
4 from ckeditor_uploader import views as ck_views
5 from django.conf.urls import include
6 from django.conf.urls import url
7 from django.contrib import admin
8 from django.contrib.auth.decorators import login_required
9 from django.views.decorators.cache import never_cache
10 from django.views.i18n import javascript_catalog
11 from rest_framework import routers
12 from wagtail.wagtailadmin import urls as wagtailadmin_urls
13 from wagtail.wagtailcore import urls as wagtail_urls
14 from wagtail.wagtaildocs import urls as wagtaildocs_urls
15
16 from adhocracy4.comments.api import CommentViewSet
17 from adhocracy4.projects import urls as projects_urls
18 from adhocracy4.ratings.api import RatingViewSet
19
20 from apps.dashboard import urls as dashboard_urls
21 from apps.documents import urls as paragraph_urls
22 from apps.documents.api import DocumentViewSet
23 from apps.ideas import urls as ideas_urls
24
25 js_info_dict = {
26 'packages': ('adhocracy4.comments',),
27 }
28
29 router = routers.DefaultRouter()
30 router.register(r'ratings', RatingViewSet, base_name='ratings')
31 router.register(r'comments', CommentViewSet, base_name='comments')
32 router.register(r'documents', DocumentViewSet, base_name='documents')
33
34
35 urlpatterns = [
36 url(r'^django-admin/', include(admin.site.urls)),
37 url(r'^dashboard/', include(dashboard_urls)),
38
39 url(r'^admin/', include(wagtailadmin_urls)),
40 url(r'^accounts/', include(allauth_urls)),
41 url(r'^documents/', include(wagtaildocs_urls)),
42 url(r'^projects/', include(projects_urls)),
43
44 url(r'^ideas/', include(ideas_urls)),
45 url(r'^paragraphs/', include(paragraph_urls)),
46
47 url(r'^api/', include(router.urls)),
48
49 url(r'^upload/',
50 login_required(ck_views.upload), name='ckeditor_upload'),
51 url(r'^browse/',
52 never_cache(login_required(ck_views.browse)), name='ckeditor_browse'),
53
54 url(r'^jsi18n/$', javascript_catalog,
55 js_info_dict, name='javascript-catalog'),
56 url(r'', include(wagtail_urls)),
57 ]
58
[end of meinberlin/urls.py]
[start of meinberlin/settings/base.py]
1 """
2 Django settings for meinberlin project.
3
4 Generated by 'django-admin startproject' using Django 1.8.17.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.8/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.8/ref/settings/
11 """
12
13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
14 import os
15
16 PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17 BASE_DIR = os.path.dirname(PROJECT_DIR)
18
19 # Application definition
20
21 INSTALLED_APPS = (
22 'django.contrib.sites',
23 'django.contrib.admin',
24 'django.contrib.auth',
25 'django.contrib.contenttypes',
26 'django.contrib.sessions',
27 'django.contrib.messages',
28 'django.contrib.staticfiles',
29
30 'wagtail.wagtailforms',
31 'wagtail.wagtailredirects',
32 'wagtail.wagtailembeds',
33 'wagtail.wagtailsites',
34 'wagtail.wagtailusers',
35 'wagtail.wagtailsnippets',
36 'wagtail.wagtaildocs',
37 'wagtail.wagtailimages',
38 'wagtail.wagtailsearch',
39 'wagtail.wagtailadmin',
40 'wagtail.wagtailcore',
41 'wagtail.contrib.wagtailstyleguide',
42
43 'taggit', # wagtail dependency
44 'rest_framework',
45 'allauth',
46 'allauth.account',
47 'allauth.socialaccount',
48 'rules.apps.AutodiscoverRulesConfig',
49 'easy_thumbnails',
50 'ckeditor',
51 'ckeditor_uploader',
52
53 'adhocracy4.contrib.apps.ContribConfig',
54 'adhocracy4.organisations.apps.OrganisationsConfig',
55 'adhocracy4.projects.apps.ProjectsConfig',
56 'adhocracy4.phases.apps.PhasesConfig',
57 'adhocracy4.modules.apps.ModulesConfig',
58 'adhocracy4.ratings.apps.RatingsConfig',
59 'adhocracy4.comments.apps.CommentsConfig',
60
61 'apps.contrib.apps.Config',
62 'apps.cms.apps.Config',
63 'apps.users.apps.Config',
64 'apps.projects.apps.Config',
65 'apps.organisations.apps.Config',
66 'apps.dashboard.apps.Config',
67
68 'apps.ideas.apps.Config',
69 'apps.documents.apps.Config',
70 )
71
72 MIDDLEWARE_CLASSES = (
73 'django.contrib.sessions.middleware.SessionMiddleware',
74 'django.middleware.common.CommonMiddleware',
75 'django.middleware.csrf.CsrfViewMiddleware',
76 'django.contrib.auth.middleware.AuthenticationMiddleware',
77 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
78 'django.contrib.messages.middleware.MessageMiddleware',
79 'django.middleware.clickjacking.XFrameOptionsMiddleware',
80 'django.middleware.security.SecurityMiddleware',
81 'django.middleware.locale.LocaleMiddleware',
82
83 'wagtail.wagtailcore.middleware.SiteMiddleware',
84 'wagtail.wagtailredirects.middleware.RedirectMiddleware',
85 )
86
87 SITE_ID = 1
88
89 ROOT_URLCONF = 'meinberlin.urls'
90
91 LOCALE_PATHS = [os.path.join(PROJECT_DIR, 'locale')]
92
93 TEMPLATES = [
94 {
95 'BACKEND': 'django.template.backends.django.DjangoTemplates',
96 'DIRS': [
97 os.path.join(PROJECT_DIR, 'templates'),
98 ],
99 'APP_DIRS': True,
100 'OPTIONS': {
101 'context_processors': [
102 'django.template.context_processors.debug',
103 'django.template.context_processors.request',
104 'django.contrib.auth.context_processors.auth',
105 'django.contrib.messages.context_processors.messages',
106 ],
107 },
108 },
109 ]
110
111 WSGI_APPLICATION = 'meinberlin.wsgi.application'
112
113
114 # Database
115 # https://docs.djangoproject.com/en/1.8/ref/settings/#databases
116
117 DATABASES = {
118 'default': {
119 'ENGINE': 'django.db.backends.sqlite3',
120 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
121 }
122 }
123
124
125 # Internationalization
126 # https://docs.djangoproject.com/en/1.8/topics/i18n/
127
128 LANGUAGE_CODE = 'en-us'
129
130 TIME_ZONE = 'UTC'
131
132 USE_I18N = True
133
134 USE_L10N = True
135
136 USE_TZ = True
137
138
139 # Static files (CSS, JavaScript, Images)
140 # https://docs.djangoproject.com/en/1.8/howto/static-files/
141
142 STATICFILES_DIRS = [
143 os.path.join(PROJECT_DIR, 'static'),
144 ]
145
146 STATIC_ROOT = os.path.join(BASE_DIR, 'static')
147 STATIC_URL = '/static/'
148
149 IMAGE_ALIASES = {
150 '*': {
151 'max_size': 5*10**6,
152 'fileformats': ('image/png', 'image/jpeg', 'image/gif')
153 },
154 'heroimage': {'min_resolution': (1300, 600)},
155 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},
156 'avatar': {'min_resolution': (200, 200)},
157 'idea_image': {'min_resolution': (800, 200)},
158 }
159
160 ALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')
161
162
163 # Wagtail settings
164
165 WAGTAIL_SITE_NAME = 'meinberlin'
166
167 # Base URL to use when referring to full URLs within the Wagtail admin backend -
168 # e.g. in notification emails. Don't include '/admin' or a trailing slash
169 BASE_URL = 'http://localhost:8000'
170
171 # Authentication
172
173 AUTH_USER_MODEL = 'meinberlin_users.User'
174
175 AUTHENTICATION_BACKENDS = (
176 'rules.permissions.ObjectPermissionBackend',
177 'django.contrib.auth.backends.ModelBackend',
178 'allauth.account.auth_backends.AuthenticationBackend',
179 )
180
181 ACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'
182 ACCOUNT_AUTHENTICATION_METHOD = 'email'
183 ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3
184 ACCOUNT_EMAIL_REQUIRED = True
185 ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
186 ACCOUNT_USERNAME_REQUIRED = True
187 ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10
188 ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds
189 ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
190 ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
191
192 LOGIN_REDIRECT_URL = '/'
193
194 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
195
196
197 # ckeditor
198
199 CKEDITOR_UPLOAD_PATH = "uploads/"
200 CKEDITOR_RESTRICT_BY_USER = True
201 CKEDITOR_ALLOW_NONIMAGE_FILES = False
202
203 CKEDITOR_CONFIGS = {
204 'default': {
205 'width': '100%',
206 'toolbar': 'Custom',
207 'toolbar_Custom': [
208 ['Bold', 'Italic', 'Underline'],
209 ['NumberedList', 'BulletedList'],
210 ['Link', 'Unlink']
211 ]
212 },
213 'image-editor': {
214 'width': '100%',
215 'toolbar': 'Custom',
216 'toolbar_Custom': [
217 ['Bold', 'Italic', 'Underline'],
218 ['Image'],
219 ['NumberedList', 'BulletedList'],
220 ['Link', 'Unlink']
221 ]
222 }
223 }
224
225 BLEACH_LIST = {
226 'default' : {
227 'tags': ['p','strong','em','u','ol','li','ul','a'],
228 'attributes': {
229 'a': ['href', 'rel'],
230 },
231 },
232 'image-editor': {
233 'tags': ['p','strong','em','u','ol','li','ul','a','img'],
234 'attributes': {
235 'a': ['href', 'rel'],
236 'img': ['src', 'alt', 'style']
237 },
238 'styles': [
239 'float',
240 'margin',
241 'padding',
242 'width',
243 'height',
244 'margin-bottom',
245 'margin-top',
246 'margin-left',
247 'margin-right',
248 ],
249 }
250 }
251
252
253 # adhocracy4
254
255 A4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'
256
257 A4_RATEABLES = (
258 ('meinberlin_ideas', 'idea'),
259 )
260
261 A4_COMMENTABLES = (
262 ('meinberlin_ideas', 'idea'),
263 )
264
[end of meinberlin/settings/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/meinberlin/settings/base.py b/meinberlin/settings/base.py
--- a/meinberlin/settings/base.py
+++ b/meinberlin/settings/base.py
@@ -146,6 +146,9 @@
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
+MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
+MEDIA_URL = '/media/'
+
IMAGE_ALIASES = {
'*': {
'max_size': 5*10**6,
diff --git a/meinberlin/urls.py b/meinberlin/urls.py
--- a/meinberlin/urls.py
+++ b/meinberlin/urls.py
@@ -2,6 +2,7 @@
from allauth import urls as allauth_urls
from ckeditor_uploader import views as ck_views
+from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
@@ -55,3 +56,12 @@
js_info_dict, name='javascript-catalog'),
url(r'', include(wagtail_urls)),
]
+
+if settings.DEBUG:
+ from django.conf.urls.static import static
+ from django.contrib.staticfiles.urls import staticfiles_urlpatterns
+
+ # Serve static and media locally
+ urlpatterns += staticfiles_urlpatterns()
+ urlpatterns += static(settings.MEDIA_URL,
+ document_root=settings.MEDIA_ROOT)
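The second hunk uses Django's `django.conf.urls.static.static()` helper, which returns an empty pattern list unless `DEBUG` is on and the prefix is a local path, so keeping it in `urls.py` is harmless in production. Here is a self-contained sketch of that pattern; it configures throwaway settings inline so it can run outside a real project (assuming Django is installed), which a normal `urls.py` would not do.

```python
from django.conf import settings

if not settings.configured:
    # Throwaway configuration so the snippet runs on its own.
    settings.configure(DEBUG=True, MEDIA_URL="/media/", MEDIA_ROOT="/tmp/media")

from django.conf.urls.static import static

urlpatterns = []  # the project's real URL patterns would go here

if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

print(urlpatterns)  # one serve() pattern with DEBUG=True, none otherwise
```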
|
{"golden_diff": "diff --git a/meinberlin/settings/base.py b/meinberlin/settings/base.py\n--- a/meinberlin/settings/base.py\n+++ b/meinberlin/settings/base.py\n@@ -146,6 +146,9 @@\n STATIC_ROOT = os.path.join(BASE_DIR, 'static')\n STATIC_URL = '/static/'\n \n+MEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n+MEDIA_URL = '/media/'\n+\n IMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\ndiff --git a/meinberlin/urls.py b/meinberlin/urls.py\n--- a/meinberlin/urls.py\n+++ b/meinberlin/urls.py\n@@ -2,6 +2,7 @@\n \n from allauth import urls as allauth_urls\n from ckeditor_uploader import views as ck_views\n+from django.conf import settings\n from django.conf.urls import include\n from django.conf.urls import url\n from django.contrib import admin\n@@ -55,3 +56,12 @@\n js_info_dict, name='javascript-catalog'),\n url(r'', include(wagtail_urls)),\n ]\n+\n+if settings.DEBUG:\n+ from django.conf.urls.static import static\n+ from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n+\n+ # Serve static and media locally\n+ urlpatterns += staticfiles_urlpatterns()\n+ urlpatterns += static(settings.MEDIA_URL,\n+ document_root=settings.MEDIA_ROOT)\n", "issue": "HTML editor in create-text process not fully working\nThe HTML editor in create-text-process not working. I currently can't add links to other website or upload pictures ;)\r\n\r\n<img width=\"803\" alt=\"bildschirmfoto 2017-02-13 um 17 24 40\" src=\"https://cloud.githubusercontent.com/assets/11075214/22891998/5bc735ac-f211-11e6-8766-a7588873b436.png\">\r\n\n", "before_files": [{"content": "\"\"\"meinberlin URL Configuration.\"\"\"\n\nfrom allauth import urls as allauth_urls\nfrom ckeditor_uploader import views as ck_views\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import never_cache\nfrom django.views.i18n import javascript_catalog\nfrom rest_framework import routers\nfrom wagtail.wagtailadmin import urls as wagtailadmin_urls\nfrom wagtail.wagtailcore import urls as wagtail_urls\nfrom wagtail.wagtaildocs import urls as wagtaildocs_urls\n\nfrom adhocracy4.comments.api import CommentViewSet\nfrom adhocracy4.projects import urls as projects_urls\nfrom adhocracy4.ratings.api import RatingViewSet\n\nfrom apps.dashboard import urls as dashboard_urls\nfrom apps.documents import urls as paragraph_urls\nfrom apps.documents.api import DocumentViewSet\nfrom apps.ideas import urls as ideas_urls\n\njs_info_dict = {\n 'packages': ('adhocracy4.comments',),\n}\n\nrouter = routers.DefaultRouter()\nrouter.register(r'ratings', RatingViewSet, base_name='ratings')\nrouter.register(r'comments', CommentViewSet, base_name='comments')\nrouter.register(r'documents', DocumentViewSet, base_name='documents')\n\n\nurlpatterns = [\n url(r'^django-admin/', include(admin.site.urls)),\n url(r'^dashboard/', include(dashboard_urls)),\n\n url(r'^admin/', include(wagtailadmin_urls)),\n url(r'^accounts/', include(allauth_urls)),\n url(r'^documents/', include(wagtaildocs_urls)),\n url(r'^projects/', include(projects_urls)),\n\n url(r'^ideas/', include(ideas_urls)),\n url(r'^paragraphs/', include(paragraph_urls)),\n\n url(r'^api/', include(router.urls)),\n\n url(r'^upload/',\n login_required(ck_views.upload), name='ckeditor_upload'),\n url(r'^browse/',\n never_cache(login_required(ck_views.browse)), name='ckeditor_browse'),\n\n url(r'^jsi18n/$', javascript_catalog,\n js_info_dict, name='javascript-catalog'),\n url(r'', 
include(wagtail_urls)),\n]\n", "path": "meinberlin/urls.py"}, {"content": "\"\"\"\nDjango settings for meinberlin project.\n\nGenerated by 'django-admin startproject' using Django 1.8.17.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'taggit', # wagtail dependency\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n\n 'adhocracy4.contrib.apps.ContribConfig',\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n\n 'apps.contrib.apps.Config',\n 'apps.cms.apps.Config',\n 'apps.users.apps.Config',\n 'apps.projects.apps.Config',\n 'apps.organisations.apps.Config',\n 'apps.dashboard.apps.Config',\n\n 'apps.ideas.apps.Config',\n 'apps.documents.apps.Config',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'meinberlin.urls'\n\nLOCALE_PATHS = [os.path.join(PROJECT_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'meinberlin.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = 
True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1300, 600)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = 'meinberlin'\n\n# Base URL to use when referring to full URLs within the Wagtail admin backend -\n# e.g. in notification emails. Don't include '/admin' or a trailing slash\nBASE_URL = 'http://localhost:8000'\n\n# Authentication\n\nAUTH_USER_MODEL = 'meinberlin_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\n\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# ckeditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'\n\nA4_RATEABLES = (\n ('meinberlin_ideas', 'idea'),\n)\n\nA4_COMMENTABLES = (\n ('meinberlin_ideas', 'idea'),\n)\n", "path": "meinberlin/settings/base.py"}]}
| 3,695 | 318 |
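A note on the meinberlin record that closes above: its golden diff adds `MEDIA_ROOT`/`MEDIA_URL` and serves media files locally when `DEBUG` is on, which is what lets CKEditor image uploads (and links back to them) resolve. A minimal sketch of that standard Django development pattern; the module layout here is an assumption for illustration, not the project's exact files:

```python
# settings.py (development): uploaded files land in BASE_DIR/media and are served at /media/
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

# urls.py: only the development server should serve these directly
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns

urlpatterns = [
    # ... regular URL patterns ...
]

if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()                  # static assets
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)  # user uploads, e.g. CKEditor images
```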
gh_patches_debug_14832 | rasdani/github-patches | git_diff | saulpw__visidata-1784 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[capture-col] When nothing is captured, "IndexError: list index out of range" is thrown
_Minor user exp bug_
**Small description**
When using `capture-col` on a column and the regex does not capture any values, the below error is thrown. No new column is created (as expected)
**Expected result**
Just to show the "no columns to add" error. Preferably, change the error to something more informative such as "no matches found, didn't add column"
**Actual result with screenshot**
```
Traceback (most recent call last):
File "/Users/geekscrapy7/lib/python3.7/site-packages/visidata/threads.py", line 200, in _toplevelTryFunc
t.status = func(*args, **kwargs)
File "/Users/geekscrapy7/lib/python3.7/site-packages/visidata/features/regex.py", line 77, in addRegexColumns
vs.addColumnAtCursor(*cols.values())
File "/Users/geekscrapy7/lib/python3.7/site-packages/visidata/sheets.py", line 473, in addColumnAtCursor
firstnewcol = [c for c in cols if not c.hidden][0]
IndexError: list index out of range
```
I presume this error is from an insufficient catch of an exception and it is attempting to create the column even though there are no groups to show.
**Steps to reproduce with sample data and a .vd**
`# echo -e 'helloooo\nthere' | vd -f txt --config=/dev/null --visidata_dir=/dev/null`
```tsv
sheet col row longname input keystrokes comment
- text capture-col (ABC) ; add new column from capture groups of regex; requires example row
```
**Additional context**
v2.12dev
</issue>
<code>
[start of visidata/features/regex.py]
1 import re
2 import random
3
4 from visidata import asyncthread, options, vd
5 from visidata import VisiData, BaseSheet, Sheet, Column, Progress
6
7
8 @Sheet.api
9 def setSubst(sheet, cols, rows):
10 if not rows:
11 vd.warning('no %s selected' % sheet.rowtype)
12 return
13 modified = 'column' if len(cols) == 1 else 'columns'
14 rex = vd.input("transform %s by regex: " % modified, type="regex-subst")
15 setValuesFromRegex(cols, rows, rex)
16
17
18 vd.option('regex_flags', 'I', 'flags to pass to re.compile() [AILMSUX]', replay=True)
19 vd.option('regex_maxsplit', 0, 'maxsplit to pass to regex.split', replay=True)
20
21 @VisiData.api
22 def makeRegexSplitter(vd, regex, origcol):
23 return lambda row, regex=regex, origcol=origcol, maxsplit=options.regex_maxsplit: regex.split(origcol.getDisplayValue(row), maxsplit=maxsplit)
24
25 @VisiData.api
26 def makeRegexMatcher(vd, regex, origcol):
27 def _regexMatcher(row):
28 m = regex.search(origcol.getDisplayValue(row))
29 if m:
30 return m.groupdict() if m.groupdict() else m.groups()
31 return _regexMatcher
32
33
34 @Sheet.api
35 def RegexColumn(vs, regexMaker, origcol, regexstr):
36 regex = re.compile(regexstr, vs.regex_flags())
37 func = regexMaker(regex, origcol)
38 return Column(origcol.name+'_re',
39 getter=lambda col,row,func=func: func(row),
40 origCol=origcol)
41
42
43 @Sheet.api
44 @asyncthread
45 def addRegexColumns(vs, regexMaker, origcol, regexstr):
46 regexstr or vd.fail('regex required')
47
48 regex = re.compile(regexstr, vs.regex_flags())
49
50 func = regexMaker(regex, origcol)
51
52 cols = {}
53 ncols = 0 # number of new columns added already
54 for r in Progress(vs.getSampleRows()):
55 try:
56 m = func(r)
57 if not m:
58 continue
59 except Exception as e:
60 vd.exceptionCaught(e)
61
62 if isinstance(m, dict):
63 for name in m:
64 if name in cols:
65 continue
66 cols[name] = Column(origcol.name+'_'+str(name),
67 getter=lambda col,row,name=name,func=func: func(row)[name],
68 origCol=origcol)
69 elif isinstance(m, (tuple, list)):
70 for _ in range(len(m)-len(cols)):
71 cols[len(cols)] = Column(origcol.name+'_re'+str(len(cols)),
72 getter=lambda col,row,i=len(cols),func=func: func(row)[i],
73 origCol=origcol)
74 else:
75 raise TypeError("addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a "+type(m).__name__)
76
77 vs.addColumnAtCursor(*cols.values())
78
79
80 @VisiData.api
81 def regexTransform(vd, origcol, instr):
82 before, after = vd.parse_sed_transform(instr)
83 return lambda col,row,origcol=origcol,before=before,after=after,flags=origcol.sheet.regex_flags(): re.sub(before, after, origcol.getDisplayValue(row), flags=flags)
84
85
86 @VisiData.api
87 def parse_sed_transform(vd, instr):
88 i = indexWithEscape(instr, '/')
89 if i is None:
90 return instr, ''
91 else:
92 return instr[:i], instr[i+1:]
93
94
95 def indexWithEscape(s, char, escape_char='\\'):
96 i=0
97 while i < len(s):
98 if s[i] == escape_char:
99 i += 1
100 elif s[i] == char:
101 return i
102 i += 1
103
104 return None
105
106
107 @asyncthread
108 def setValuesFromRegex(cols, rows, rex):
109 transforms = [vd.regexTransform(col, rex) for col in cols]
110 vd.addUndoSetValues(cols, rows)
111 for r in Progress(rows, 'replacing'):
112 for col, transform in zip(cols, transforms):
113 col.setValueSafe(r, transform(col, r))
114 for col in cols:
115 col.recalc()
116
117
118 @BaseSheet.api
119 def regex_flags(sheet):
120 'Return flags to pass to regex functions from options'
121 return sum(getattr(re, f.upper()) for f in sheet.options.regex_flags)
122
123
124 Sheet.addCommand(':', 'split-col', 'addRegexColumns(makeRegexSplitter, cursorCol, input("split regex: ", type="regex-split"))', 'Add new columns from regex split')
125 Sheet.addCommand(';', 'capture-col', 'addRegexColumns(makeRegexMatcher, cursorCol, input("capture regex: ", type="regex-capture"))', 'add new column from capture groups of regex; requires example row')
126 Sheet.addCommand('', 'addcol-split', 'addColumnAtCursor(RegexColumn(makeRegexSplitter, cursorCol, input("split regex: ", type="regex-split")))', 'Add column split by regex')
127 Sheet.addCommand('', 'addcol-capture', 'addColumnAtCursor(RegexColumn(makeRegexMatcher, cursorCol, input("capture regex: ", type="regex-capture")))', 'Add column captured by regex')
128 Sheet.addCommand('*', 'addcol-subst', 'addColumnAtCursor(Column(cursorCol.name + "_re", getter=regexTransform(cursorCol, input("transform column by regex: ", type="regex-subst"))))', 'add column derived from current column, replacing regex with subst (may include \1 backrefs)')
129 Sheet.addCommand('g*', 'setcol-subst', 'setSubst([cursorCol], someSelectedRows)', 'regex/subst - modify selected rows in current column, replacing regex with subst, (may include backreferences \\1 etc)')
130 Sheet.addCommand('gz*', 'setcol-subst-all', 'setSubst(visibleCols, someSelectedRows)', 'modify selected rows in all visible columns, replacing regex with subst (may include \\1 backrefs)')
131
132
133 vd.addMenuItems('''
134 Edit > Modify > selected cells > regex substitution > setcol-subst
135 Column > Add column > capture by regex > addcol-capture
136 Column > Add column > split by regex > addcol-split
137 Column > Add column > subst by regex > addcol-subst
138 Column > Split > split-col
139 Row > Select > by regex > current column > select-col-regex
140 Row > Select > by regex > all columns > select-cols-regex
141 Row > Unselect > by regex > current column > unselect-col-regex
142 Row > Unselect > by regex > all columns > unselect-cols-regex
143 ''')
144
[end of visidata/features/regex.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/visidata/features/regex.py b/visidata/features/regex.py
--- a/visidata/features/regex.py
+++ b/visidata/features/regex.py
@@ -24,6 +24,8 @@
@VisiData.api
def makeRegexMatcher(vd, regex, origcol):
+ if not regex.groups:
+ vd.fail('specify a capture group') #1778
def _regexMatcher(row):
m = regex.search(origcol.getDisplayValue(row))
if m:
@@ -74,6 +76,10 @@
else:
raise TypeError("addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a "+type(m).__name__)
+ if not cols:
+ vd.warning("no regex matches found, didn't add column")
+ return
+
vs.addColumnAtCursor(*cols.values())
|
{"golden_diff": "diff --git a/visidata/features/regex.py b/visidata/features/regex.py\n--- a/visidata/features/regex.py\n+++ b/visidata/features/regex.py\n@@ -24,6 +24,8 @@\n \n @VisiData.api\n def makeRegexMatcher(vd, regex, origcol):\n+ if not regex.groups:\n+ vd.fail('specify a capture group') #1778\n def _regexMatcher(row):\n m = regex.search(origcol.getDisplayValue(row))\n if m:\n@@ -74,6 +76,10 @@\n else:\n raise TypeError(\"addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a \"+type(m).__name__)\n \n+ if not cols:\n+ vd.warning(\"no regex matches found, didn't add column\")\n+ return\n+\n vs.addColumnAtCursor(*cols.values())\n", "issue": "[capture-col] When nothing is captured, \"IndexError: list index out of range\" is thrown\n_Minor user exp bug_\r\n\r\n**Small description**\r\nWhen using `capture-col` on a column and the regex does not capture any values, the below error is thrown. No new column is created (as expected)\r\n\r\n**Expected result**\r\nJust to show the \"no columns to add\" error. Preferably, change the error to something more informative such as \"no matches found, didn't add column\"\r\n\r\n**Actual result with screenshot**\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/geekscrapy7/lib/python3.7/site-packages/visidata/threads.py\", line 200, in _toplevelTryFunc\r\n t.status = func(*args, **kwargs)\r\n File \"/Users/geekscrapy7/lib/python3.7/site-packages/visidata/features/regex.py\", line 77, in addRegexColumns\r\n vs.addColumnAtCursor(*cols.values())\r\n File \"/Users/geekscrapy7/lib/python3.7/site-packages/visidata/sheets.py\", line 473, in addColumnAtCursor\r\n firstnewcol = [c for c in cols if not c.hidden][0]\r\nIndexError: list index out of range\r\n```\r\nI presume this error is from an insufficient catch of an exception and it is attempting to create the column even though there are no groups to show.\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n`# echo -e 'helloooo\\nthere' | vd -f txt --config=/dev/null --visidata_dir=/dev/null`\r\n\r\n```tsv\r\nsheet\tcol\trow\tlongname\tinput\tkeystrokes\tcomment\r\n-\ttext\t\tcapture-col\t(ABC)\t;\tadd new column from capture groups of regex; requires example row\r\n```\r\n\r\n**Additional context**\r\nv2.12dev\n", "before_files": [{"content": "import re\nimport random\n\nfrom visidata import asyncthread, options, vd\nfrom visidata import VisiData, BaseSheet, Sheet, Column, Progress\n\n\n@Sheet.api\ndef setSubst(sheet, cols, rows):\n if not rows:\n vd.warning('no %s selected' % sheet.rowtype)\n return\n modified = 'column' if len(cols) == 1 else 'columns'\n rex = vd.input(\"transform %s by regex: \" % modified, type=\"regex-subst\")\n setValuesFromRegex(cols, rows, rex)\n\n\nvd.option('regex_flags', 'I', 'flags to pass to re.compile() [AILMSUX]', replay=True)\nvd.option('regex_maxsplit', 0, 'maxsplit to pass to regex.split', replay=True)\n\n@VisiData.api\ndef makeRegexSplitter(vd, regex, origcol):\n return lambda row, regex=regex, origcol=origcol, maxsplit=options.regex_maxsplit: regex.split(origcol.getDisplayValue(row), maxsplit=maxsplit)\n\n@VisiData.api\ndef makeRegexMatcher(vd, regex, origcol):\n def _regexMatcher(row):\n m = regex.search(origcol.getDisplayValue(row))\n if m:\n return m.groupdict() if m.groupdict() else m.groups()\n return _regexMatcher\n\n\n@Sheet.api\ndef RegexColumn(vs, regexMaker, origcol, regexstr):\n regex = re.compile(regexstr, vs.regex_flags())\n func = regexMaker(regex, origcol)\n return Column(origcol.name+'_re',\n getter=lambda 
col,row,func=func: func(row),\n origCol=origcol)\n\n\n@Sheet.api\n@asyncthread\ndef addRegexColumns(vs, regexMaker, origcol, regexstr):\n regexstr or vd.fail('regex required')\n\n regex = re.compile(regexstr, vs.regex_flags())\n\n func = regexMaker(regex, origcol)\n\n cols = {}\n ncols = 0 # number of new columns added already\n for r in Progress(vs.getSampleRows()):\n try:\n m = func(r)\n if not m:\n continue\n except Exception as e:\n vd.exceptionCaught(e)\n\n if isinstance(m, dict):\n for name in m:\n if name in cols:\n continue\n cols[name] = Column(origcol.name+'_'+str(name),\n getter=lambda col,row,name=name,func=func: func(row)[name],\n origCol=origcol)\n elif isinstance(m, (tuple, list)):\n for _ in range(len(m)-len(cols)):\n cols[len(cols)] = Column(origcol.name+'_re'+str(len(cols)),\n getter=lambda col,row,i=len(cols),func=func: func(row)[i],\n origCol=origcol)\n else:\n raise TypeError(\"addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a \"+type(m).__name__)\n\n vs.addColumnAtCursor(*cols.values())\n\n\n@VisiData.api\ndef regexTransform(vd, origcol, instr):\n before, after = vd.parse_sed_transform(instr)\n return lambda col,row,origcol=origcol,before=before,after=after,flags=origcol.sheet.regex_flags(): re.sub(before, after, origcol.getDisplayValue(row), flags=flags)\n\n\n@VisiData.api\ndef parse_sed_transform(vd, instr):\n i = indexWithEscape(instr, '/')\n if i is None:\n return instr, ''\n else:\n return instr[:i], instr[i+1:]\n\n\ndef indexWithEscape(s, char, escape_char='\\\\'):\n i=0\n while i < len(s):\n if s[i] == escape_char:\n i += 1\n elif s[i] == char:\n return i\n i += 1\n\n return None\n\n\n@asyncthread\ndef setValuesFromRegex(cols, rows, rex):\n transforms = [vd.regexTransform(col, rex) for col in cols]\n vd.addUndoSetValues(cols, rows)\n for r in Progress(rows, 'replacing'):\n for col, transform in zip(cols, transforms):\n col.setValueSafe(r, transform(col, r))\n for col in cols:\n col.recalc()\n\n\n@BaseSheet.api\ndef regex_flags(sheet):\n 'Return flags to pass to regex functions from options'\n return sum(getattr(re, f.upper()) for f in sheet.options.regex_flags)\n\n\nSheet.addCommand(':', 'split-col', 'addRegexColumns(makeRegexSplitter, cursorCol, input(\"split regex: \", type=\"regex-split\"))', 'Add new columns from regex split')\nSheet.addCommand(';', 'capture-col', 'addRegexColumns(makeRegexMatcher, cursorCol, input(\"capture regex: \", type=\"regex-capture\"))', 'add new column from capture groups of regex; requires example row')\nSheet.addCommand('', 'addcol-split', 'addColumnAtCursor(RegexColumn(makeRegexSplitter, cursorCol, input(\"split regex: \", type=\"regex-split\")))', 'Add column split by regex')\nSheet.addCommand('', 'addcol-capture', 'addColumnAtCursor(RegexColumn(makeRegexMatcher, cursorCol, input(\"capture regex: \", type=\"regex-capture\")))', 'Add column captured by regex')\nSheet.addCommand('*', 'addcol-subst', 'addColumnAtCursor(Column(cursorCol.name + \"_re\", getter=regexTransform(cursorCol, input(\"transform column by regex: \", type=\"regex-subst\"))))', 'add column derived from current column, replacing regex with subst (may include \\1 backrefs)')\nSheet.addCommand('g*', 'setcol-subst', 'setSubst([cursorCol], someSelectedRows)', 'regex/subst - modify selected rows in current column, replacing regex with subst, (may include backreferences \\\\1 etc)')\nSheet.addCommand('gz*', 'setcol-subst-all', 'setSubst(visibleCols, someSelectedRows)', 'modify selected rows in all visible columns, replacing regex with subst 
(may include \\\\1 backrefs)')\n\n\nvd.addMenuItems('''\n Edit > Modify > selected cells > regex substitution > setcol-subst\n Column > Add column > capture by regex > addcol-capture\n Column > Add column > split by regex > addcol-split\n Column > Add column > subst by regex > addcol-subst\n Column > Split > split-col\n Row > Select > by regex > current column > select-col-regex\n Row > Select > by regex > all columns > select-cols-regex\n Row > Unselect > by regex > current column > unselect-col-regex\n Row > Unselect > by regex > all columns > unselect-cols-regex\n''')\n", "path": "visidata/features/regex.py"}]}
| 2,714 | 200 |
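The visidata fix above adds two guards: `vd.fail('specify a capture group')` when the pattern has no capture groups at all, and a warning plus early return when the regex matches nothing in the sample rows. The same logic in a self-contained form, using only the standard `re` module; function and message names are illustrative, not VisiData's API:

```python
import re

def plan_capture_columns(regexstr, sample_values):
    """Return the capture-group column names, or None when there is nothing to add."""
    regex = re.compile(regexstr)

    # Guard 1: a pattern without capture groups can never yield a column.
    if not regex.groups:
        raise ValueError('specify a capture group')

    names = set()
    for value in sample_values:
        m = regex.search(value)
        if m:
            # named groups if any, otherwise positional indices 0..n-1
            names.update(m.groupdict() or range(len(m.groups())))

    # Guard 2: nothing matched the sample rows, so warn instead of indexing an empty list.
    if not names:
        print("no regex matches found, didn't add column")
        return None
    return sorted(str(n) for n in names)


print(plan_capture_columns(r'(?P<word>[a-z]+)', ['helloooo', 'there']))  # ['word']
print(plan_capture_columns(r'(ABC)', ['helloooo', 'there']))             # warning, then None
```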
gh_patches_debug_39739 | rasdani/github-patches | git_diff | streamlink__streamlink-1878 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problem with live.russia.tv
I have a problem with the live.russia.tv plugin:
```
#SERVICE 4097:0:1:0:0:0:224:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/76:Москва 24 HD
#DESCRIPTION Москва 24 HD
#SERVICE 4097:0:1:0:0:0:449:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/1:Rossija 1 HD
#DESCRIPTION Rossija 1 HD
#SERVICE 4097:0:1:0:0:0:445:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/82:Rossija RTR HD
#DESCRIPTION Rossija RTR HD
#SERVICE 4097:0:1:0:0:0:447:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/3:Rossija 24 HD
#DESCRIPTION Rossija 24 HD
```
The channels do not work in Streamlink; from a PC (in the browser) the channels play fine.
</issue>
<code>
[start of src/streamlink/plugins/live_russia_tv.py]
1 import re
2 from streamlink.plugin import Plugin
3 from streamlink.plugin.api import http
4 from streamlink.stream import HLSStream
5
6 class LiveRussia(Plugin):
7 url_re = re.compile(r"https?://(?:www.)?live.russia.tv/index/index/channel_id/")
8 iframe_re = re.compile(r"""<iframe[^>]*src=["']([^'"]+)["'][^>]*>""")
9 stream_re = re.compile(r"""window.pl.data.*m3u8":"(.*)"}.*};""")
10
11 @classmethod
12 def can_handle_url(cls, url):
13 return cls.url_re.match(url) is not None
14
15 def _get_streams(self):
16 res = http.get(self.url)
17 iframe_result = re.search(self.iframe_re, res.text)
18
19 if not iframe_result:
20 self.logger.error("The requested content is unavailable.")
21 return
22
23 res = http.get(iframe_result.group(1))
24 stream_url_result = re.search(self.stream_re, res.text)
25
26 if not stream_url_result:
27 self.logger.error("The requested content is unavailable.")
28 return
29
30 return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))
31
32
33 __plugin__ = LiveRussia
[end of src/streamlink/plugins/live_russia_tv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/live_russia_tv.py b/src/streamlink/plugins/live_russia_tv.py
--- a/src/streamlink/plugins/live_russia_tv.py
+++ b/src/streamlink/plugins/live_russia_tv.py
@@ -1,33 +1,81 @@
+import logging
import re
+
from streamlink.plugin import Plugin
-from streamlink.plugin.api import http
-from streamlink.stream import HLSStream
+from streamlink.plugin.api import http, validate
+from streamlink.plugin.api.utils import itertags
+from streamlink.stream import HLSStream, HTTPStream
+
+log = logging.getLogger(__name__)
+
class LiveRussia(Plugin):
- url_re = re.compile(r"https?://(?:www.)?live.russia.tv/index/index/channel_id/")
- iframe_re = re.compile(r"""<iframe[^>]*src=["']([^'"]+)["'][^>]*>""")
- stream_re = re.compile(r"""window.pl.data.*m3u8":"(.*)"}.*};""")
+ url_re = re.compile(r"https?://(?:www\.|live\.)?russia.tv")
+ _data_re = re.compile(r"""window\.pl\.data\.([\w_]+)\s*=\s*['"]?(.*?)['"]?;""")
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
+ def _get_iframe_url(self, url):
+ res = http.get(url)
+ for iframe in itertags(res.text, 'iframe'):
+ src = iframe.attributes.get("src")
+ if src:
+ return src
+
+ def _get_stream_info_url(self, url):
+ data = {}
+ res = http.get(url)
+ for m in self._data_re.finditer(res.text):
+ data[m.group(1)] = m.group(2)
+
+ log.debug("Got pl_data={0}".format(data))
+
+ if data:
+ if data["isVod"] == '0':
+ return "https:{domain}/iframe/datalive/id/{id}/sid/{sid}".format(**data)
+ else:
+ return "https:{domain}/iframe/datavideo/id/{id}/sid/{sid}".format(**data)
+
def _get_streams(self):
- res = http.get(self.url)
- iframe_result = re.search(self.iframe_re, res.text)
+ iframe_url = self._get_iframe_url(self.url)
+
+ if iframe_url:
+ log.debug("Found iframe URL={0}".format(iframe_url))
+ info_url = self._get_stream_info_url(iframe_url)
+
+ if info_url:
+ log.debug("Getting info from URL: {0}".format(info_url))
+ res = http.get(info_url, headers={"Referer": iframe_url})
+ data = http.json(res)
+
+ if data['status'] == 200:
+ for media in data['data']['playlist']['medialist']:
+ if media['errors']:
+ log.error(media['errors'].replace('\n', '').replace('\r', ''))
+
+ for media_type in media.get('sources', []):
+
+ if media_type == "m3u8":
+ hls_url = media['sources'][media_type]['auto']
+ for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
+ yield s
+
+ if media_type == "http":
+ for pix, url in media['sources'][media_type].items():
+ yield "{0}p".format(pix), HTTPStream(self.session, url)
+ else:
+ log.error("An error occurred: {0}".format(data['errors'].replace('\n', '').replace('\r', '')))
+ else:
+ log.error("Unable to get stream info URL")
+ else:
+ log.error("Could not find video iframe")
+
- if not iframe_result:
- self.logger.error("The requested content is unavailable.")
- return
- res = http.get(iframe_result.group(1))
- stream_url_result = re.search(self.stream_re, res.text)
- if not stream_url_result:
- self.logger.error("The requested content is unavailable.")
- return
- return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))
-__plugin__ = LiveRussia
\ No newline at end of file
+__plugin__ = LiveRussia
|
{"golden_diff": "diff --git a/src/streamlink/plugins/live_russia_tv.py b/src/streamlink/plugins/live_russia_tv.py\n--- a/src/streamlink/plugins/live_russia_tv.py\n+++ b/src/streamlink/plugins/live_russia_tv.py\n@@ -1,33 +1,81 @@\n+import logging\n import re\n+\n from streamlink.plugin import Plugin\n-from streamlink.plugin.api import http\n-from streamlink.stream import HLSStream\n+from streamlink.plugin.api import http, validate\n+from streamlink.plugin.api.utils import itertags\n+from streamlink.stream import HLSStream, HTTPStream\n+\n+log = logging.getLogger(__name__)\n+\n \n class LiveRussia(Plugin):\n- url_re = re.compile(r\"https?://(?:www.)?live.russia.tv/index/index/channel_id/\")\n- iframe_re = re.compile(r\"\"\"<iframe[^>]*src=[\"']([^'\"]+)[\"'][^>]*>\"\"\")\n- stream_re = re.compile(r\"\"\"window.pl.data.*m3u8\":\"(.*)\"}.*};\"\"\")\n+ url_re = re.compile(r\"https?://(?:www\\.|live\\.)?russia.tv\")\n+ _data_re = re.compile(r\"\"\"window\\.pl\\.data\\.([\\w_]+)\\s*=\\s*['\"]?(.*?)['\"]?;\"\"\")\n \n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n \n+ def _get_iframe_url(self, url):\n+ res = http.get(url)\n+ for iframe in itertags(res.text, 'iframe'):\n+ src = iframe.attributes.get(\"src\")\n+ if src:\n+ return src\n+\n+ def _get_stream_info_url(self, url):\n+ data = {}\n+ res = http.get(url)\n+ for m in self._data_re.finditer(res.text):\n+ data[m.group(1)] = m.group(2)\n+\n+ log.debug(\"Got pl_data={0}\".format(data))\n+\n+ if data:\n+ if data[\"isVod\"] == '0':\n+ return \"https:{domain}/iframe/datalive/id/{id}/sid/{sid}\".format(**data)\n+ else:\n+ return \"https:{domain}/iframe/datavideo/id/{id}/sid/{sid}\".format(**data)\n+\n def _get_streams(self):\n- res = http.get(self.url)\n- iframe_result = re.search(self.iframe_re, res.text)\n+ iframe_url = self._get_iframe_url(self.url)\n+\n+ if iframe_url:\n+ log.debug(\"Found iframe URL={0}\".format(iframe_url))\n+ info_url = self._get_stream_info_url(iframe_url)\n+\n+ if info_url:\n+ log.debug(\"Getting info from URL: {0}\".format(info_url))\n+ res = http.get(info_url, headers={\"Referer\": iframe_url})\n+ data = http.json(res)\n+\n+ if data['status'] == 200:\n+ for media in data['data']['playlist']['medialist']:\n+ if media['errors']:\n+ log.error(media['errors'].replace('\\n', '').replace('\\r', ''))\n+\n+ for media_type in media.get('sources', []):\n+\n+ if media_type == \"m3u8\":\n+ hls_url = media['sources'][media_type]['auto']\n+ for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n+ yield s\n+\n+ if media_type == \"http\":\n+ for pix, url in media['sources'][media_type].items():\n+ yield \"{0}p\".format(pix), HTTPStream(self.session, url)\n+ else:\n+ log.error(\"An error occurred: {0}\".format(data['errors'].replace('\\n', '').replace('\\r', '')))\n+ else:\n+ log.error(\"Unable to get stream info URL\")\n+ else:\n+ log.error(\"Could not find video iframe\")\n+\n \n- if not iframe_result:\n- self.logger.error(\"The requested content is unavailable.\")\n- return\n \n- res = http.get(iframe_result.group(1))\n- stream_url_result = re.search(self.stream_re, res.text)\n \n- if not stream_url_result:\n- self.logger.error(\"The requested content is unavailable.\")\n- return\n \n- return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))\n \n \n-__plugin__ = LiveRussia\n\\ No newline at end of file\n+__plugin__ = LiveRussia\n", "issue": "Problem with live.russia.tv\nI have Problem with the Plugin live.russia.tv : \r\n```\r\n#SERVICE 
4097:0:1:0:0:0:224:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/76:\u041c\u043e\u0441\u043a\u0432\u0430 24 HD\r\n#DESCRIPTION \u041c\u043e\u0441\u043a\u0432\u0430 24 HD\r\n#SERVICE 4097:0:1:0:0:0:449:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/1:Rossija 1 HD\r\n#DESCRIPTION Rossija 1 HD\r\n#SERVICE 4097:0:1:0:0:0:445:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/82:Rossija RTR HD\r\n#DESCRIPTION Rossija RTR HD\r\n#SERVICE 4097:0:1:0:0:0:447:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/3:Rossija 24 HD\r\n#DESCRIPTION Rossija 24 HD\r\n```\r\nThe Channels not working on streamlink - from PC work the channels ok.\n", "before_files": [{"content": "import re\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\n\nclass LiveRussia(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?live.russia.tv/index/index/channel_id/\")\n iframe_re = re.compile(r\"\"\"<iframe[^>]*src=[\"']([^'\"]+)[\"'][^>]*>\"\"\")\n stream_re = re.compile(r\"\"\"window.pl.data.*m3u8\":\"(.*)\"}.*};\"\"\")\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url)\n iframe_result = re.search(self.iframe_re, res.text)\n\n if not iframe_result:\n self.logger.error(\"The requested content is unavailable.\")\n return\n\n res = http.get(iframe_result.group(1))\n stream_url_result = re.search(self.stream_re, res.text)\n\n if not stream_url_result:\n self.logger.error(\"The requested content is unavailable.\")\n return\n\n return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))\n\n\n__plugin__ = LiveRussia", "path": "src/streamlink/plugins/live_russia_tv.py"}]}
| 1,219 | 980 |
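The rewritten plugin above works in three steps: find the player `<iframe>` on the channel page, read the `window.pl.data.*` variables out of the iframe HTML, then query the `datalive`/`datavideo` endpoint for the actual stream URLs. A rough standalone sketch of that flow with `requests` and `re`; the URL shape and JSON field names are copied from the diff, but treat the site structure as an assumption rather than a tested client:

```python
import re
import requests

IFRAME_RE = re.compile(r'<iframe[^>]*src=["\']([^"\']+)["\']')
PL_DATA_RE = re.compile(r"""window\.pl\.data\.(\w+)\s*=\s*['"]?(.*?)['"]?;""")

def get_stream_urls(channel_page_url):
    page = requests.get(channel_page_url).text
    iframe = IFRAME_RE.search(page)
    if not iframe:
        return []

    iframe_url = iframe.group(1)
    data = dict(PL_DATA_RE.findall(requests.get(iframe_url).text))
    if not data:
        return []

    # live streams use the "datalive" endpoint, VOD uses "datavideo"
    kind = 'datalive' if data.get('isVod') == '0' else 'datavideo'
    info_url = 'https:{0}/iframe/{1}/id/{2}/sid/{3}'.format(
        data['domain'], kind, data['id'], data['sid'])
    info = requests.get(info_url, headers={'Referer': iframe_url}).json()

    urls = []
    if info.get('status') == 200:
        for media in info['data']['playlist']['medialist']:
            sources = media.get('sources', {})
            if 'm3u8' in sources:
                urls.append(sources['m3u8']['auto'])       # HLS master playlist
            urls.extend(sources.get('http', {}).values())  # progressive HTTP variants
    return urls
```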
gh_patches_debug_27786 | rasdani/github-patches | git_diff | gammapy__gammapy-1033 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problem of fit with sherpa
Hi @cdeil and @joleroi ,
I've got some problem to do a spectral fit with sherpa on CTA simulated data. I don't really understand why it's happening. Everything worked fine ~2 weeks ago.
Here is a small example to illustrate the problem:
```
import astropy.units as u
from gammapy.spectrum.models import PowerLaw
from gammapy.scripts.cta_utils import CTAObservationSimulation, Target, ObservationParameters
from gammapy.scripts import CTAPerf
# observation parameters
alpha = 0.2 * u.Unit('')
livetime = 100 * u.h
emin = 0.05 * u.TeV
emax = 5. * u.TeV
obs_param = ObservationParameters(alpha=alpha,
livetime=livetime,
emin=emin,
emax=emax)
# target
pwl = PowerLaw(amplitude=1e-11 * u.Unit('1/(s cm2 TeV)'),
index=2.,
reference=1. * u.TeV)
target = Target(name='bob', model=pwl)
# perf
dir_south = './CTA-Performance-South-20170323/'
perf = CTAPerf.read('$GAMMAPY_EXTRA/datasets/cta/perf_prod2/point_like_non_smoothed/South_5h.fits.gz')
# simu
obs_id = 0
simu = CTAObservationSimulation.simulate_obs(perf=perf,
target=target,
obs_param=obs_param,
obs_id=obs_id)
# simu output
print(simu)
stats = simu.stats_table()
print(stats[['energy_min', 'energy_max', 'background', 'excess', 'sigma']])
simu.write(outdir='./', use_sherpa=True)
from sherpa.astro.datastack import DataStack
import sherpa.astro.datastack as sh
from sherpa_model import Absorption
simu_output_dir = './'
filename = simu_output_dir + 'pha_obs' + str(obs_id) + '.fits'
sh.load_data(id=obs_id, filename=filename)
sherpa_model = 'powlaw1d.pwl'
sh.set_source(obs_id, sherpa_model)
pwl.ampl = 1.e-12 * u.Unit('1/(s cm2 TeV)').to('1/(s cm2 keV)')
pwl.ref = 1 * u.TeV.to('keV')
pwl.gamma = 2.
sh.notice(emin.to('keV')*0.99,emax.to('keV')*1.01)
print('JLK: threshold for spectra: ({},{})'.format(emin.to('keV')*0.99,emax.to('keV')*1.01))
sh.set_stat('WStat')
sh.fit()
sh.conf()
```
which results in NaN statistic values and breaks the fit:
```
Dataset = 0
Method = levmar
Statistic = wstat
Initial fit statistic = nan
Final fit statistic = nan at function evaluation 33
Data points = 12
Degrees of freedom = 10
Reduced statistic = nan
Change in statistic = nan
pwl.gamma 2
pwl.ampl 1e-21
```
Do you have any idea what could cause that? OGIP format writing or something like that?
Thanks in advance ++
</issue>
<code>
[start of gammapy/scripts/cta_utils.py]
1 from __future__ import absolute_import, division, print_function, unicode_literals
2 import numpy as np
3 import astropy.units as u
4 from ..spectrum import SpectrumObservation
5 from ..spectrum.utils import CountsPredictor
6 from ..spectrum.core import PHACountsSpectrum
7 from ..utils.random import get_random_state
8
9 __all__ = [
10 'Target',
11 'ObservationParameters',
12 'CTAObservationSimulation',
13 ]
14
15
16 class Target(object):
17 """Observation target information.
18
19 Parameters
20 ----------
21 name : `str`
22 Name of the source
23 model : `~gammapy.spectrum.models.SpectralModel`
24 Model of the source
25 """
26
27 def __init__(self, name=None,
28 model=None):
29 self.name = name
30 self.model = model
31
32 def __str__(self):
33 """Target report (`str`)."""
34 ss = '*** Target parameters ***\n'
35 ss += 'Name={}\n'.format(self.name)
36 for par in self.model.parameters.parameters:
37 ss += '{}={} {}\n'.format(par.name, str(par.value), par.unit)
38 return ss
39
40 def from_fermi_lat_catalogue(name):
41 raise NotImplementedError
42
43
44 class ObservationParameters(object):
45 """Container for observation parameters.
46
47 Parameters
48 ----------
49 alpha : `~astropy.units.Quantity`
50 Normalisation between ON and OFF regions
51 livetime : `~astropy.units.Quantity`
52 Observation time
53 emin : `~astropy.units.Quantity`
54 Minimal energy for simulation
55 emax : `~astropy.units.Quantity`
56 Maximal energy for simulation
57 """
58
59 def __init__(self, alpha=None, livetime=None,
60 emin=None, emax=None):
61 self.alpha = alpha
62 self.livetime = livetime
63 self.emin = emin
64 self.emax = emax
65
66 def __str__(self):
67 """Observation summary report (`str`)."""
68 ss = '*** Observation parameters summary ***\n'
69 ss += 'alpha={} [{}]\n'.format(self.alpha.value, self.alpha.unit)
70 ss += 'livetime={} [{}]\n'.format(self.livetime.value,
71 self.livetime.unit)
72 ss += 'emin={} [{}]\n'.format(self.emin.value, self.emin.unit)
73 ss += 'emax={} [{}]\n'.format(self.emax.value, self.emax.unit)
74 return ss
75
76
77 class CTAObservationSimulation(object):
78 """Simulate observation for one IRF and target.
79
80 TODO : Should be merge with `~gammapy.spectrum.SpectrumSimulation`
81
82 Parameters
83 ----------
84 perf : `~gammapy.scripts.CTAPerf`
85 CTA performance
86 target : `~gammapy.scripts.Target`
87 Source
88 """
89
90 @staticmethod
91 def simulate_obs(perf, target, obs_param, obs_id=0):
92 """
93 Simulate observation with given parameters
94
95 Parameters
96 ----------
97 perf : `~gammapy.scripts.CTAPerf`
98 CTA performance
99 target : `~gammapy.scripts.Target`
100 Source
101 obs_param : `~gammapy.scripts.ObservationParameters`
102 Observation parameters
103 obs_id : `int`, optional
104 Observation Id
105 """
106 livetime = obs_param.livetime
107 alpha = obs_param.alpha.value
108 emin = obs_param.emin
109 emax = obs_param.emax
110
111 model = target.model
112
113 # Compute expected counts
114 reco_energy = perf.bkg.energy
115 bkg_rate_values = perf.bkg.data.data * livetime.to('s')
116 predicted_counts = CountsPredictor(model=model,
117 aeff=perf.aeff,
118 livetime=livetime,
119 edisp=perf.rmf)
120 predicted_counts.run()
121 npred = predicted_counts.npred
122 # set negative values to zero (interpolation issue)
123 idx = np.where(npred.data.data < 0.)
124 npred.data.data[idx] = 0
125
126 # Randomise counts
127 rand = get_random_state('random-seed')
128 on_counts = rand.poisson(npred.data.data.value) # excess
129 bkg_counts = rand.poisson(bkg_rate_values.value) # bkg in ON region
130 off_counts = rand.poisson(
131 bkg_rate_values.value / alpha) # bkg in OFF region
132
133 on_counts += bkg_counts # evts in ON region
134
135 meta = dict(EXPOSURE=livetime.to('s').value)
136
137 on_vector = PHACountsSpectrum(
138 data=on_counts,
139 backscal=1,
140 energy_lo=reco_energy.lo,
141 energy_hi=reco_energy.hi,
142 meta=meta,
143 )
144
145 off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo,
146 energy_hi=reco_energy.hi,
147 data=off_counts,
148 backscal=1. / alpha,
149 is_bkg=True,
150 )
151
152 obs = SpectrumObservation(on_vector=on_vector,
153 off_vector=off_vector,
154 aeff=perf.aeff,
155 edisp=perf.rmf)
156 obs.obs_id = obs_id
157
158 # Set threshold according to the closest energy reco from bkg bins
159 idx_min = np.abs(reco_energy.lo - emin).argmin()
160 idx_max = np.abs(reco_energy.lo - emax).argmin()
161 obs.lo_threshold = reco_energy.lo[idx_min]
162 obs.hi_threshold = reco_energy.lo[idx_max]
163
164 return obs
165
166 @staticmethod
167 def plot_simu(simu, target):
168 import matplotlib.pyplot as plt
169 fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
170 figsize=(10, 5))
171
172 # Spectrum plot
173 energy_range = [0.01 * u.TeV, 100 * u.TeV]
174 target.model.plot(ax=ax1, energy_range=energy_range,
175 label='Model')
176 plt.text(0.55, 0.65, target.__str__(),
177 style='italic', transform=ax1.transAxes, fontsize=7,
178 bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10})
179 ax1.set_xlim([energy_range[0].value, energy_range[1].value])
180 ax1.set_ylim(1.e-17, 1.e-5)
181 ax1.grid(which='both')
182 ax1.legend(loc=0)
183
184 # Counts plot
185 on_off = simu.on_vector.data.data.value
186 off = 1. / simu.off_vector.backscal * simu.off_vector.data.data.value
187 excess = on_off - off
188 bins = simu.on_vector.energy.lo.value
189 x = simu.on_vector.energy.nodes.value
190 ax2.hist(x, bins=bins, weights=on_off,
191 facecolor='blue', alpha=1, label='ON')
192 ax2.hist(x, bins=bins, weights=off,
193 facecolor='green', alpha=1, label='OFF')
194 ax2.hist(x, bins=bins, weights=excess,
195 facecolor='red', alpha=1, label='EXCESS')
196 ax2.legend(loc='best')
197 ax2.set_xscale('log')
198 ax2.set_xlabel('Energy [TeV]')
199 ax2.set_ylabel('Expected counts')
200 ax2.set_xlim([energy_range[0].value, energy_range[1].value])
201 ax2.set_ylim([0.0001, on_off.max() * (1 + 0.05)])
202 ax2.vlines(simu.lo_threshold.value, 0, 1.1 * on_off.max(),
203 linestyles='dashed')
204 ax2.grid(which='both')
205 plt.text(0.55, 0.05, simu.__str__(),
206 style='italic', transform=ax2.transAxes, fontsize=7,
207 bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10})
208 plt.tight_layout()
209
[end of gammapy/scripts/cta_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gammapy/scripts/cta_utils.py b/gammapy/scripts/cta_utils.py
--- a/gammapy/scripts/cta_utils.py
+++ b/gammapy/scripts/cta_utils.py
@@ -1,4 +1,4 @@
-from __future__ import absolute_import, division, print_function, unicode_literals
+
import numpy as np
import astropy.units as u
from ..spectrum import SpectrumObservation
@@ -132,22 +132,21 @@
on_counts += bkg_counts # evts in ON region
- meta = dict(EXPOSURE=livetime.to('s').value)
-
on_vector = PHACountsSpectrum(
data=on_counts,
backscal=1,
energy_lo=reco_energy.lo,
energy_hi=reco_energy.hi,
- meta=meta,
)
+ on_vector.livetime = livetime
off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo,
energy_hi=reco_energy.hi,
data=off_counts,
backscal=1. / alpha,
is_bkg=True,
)
+ off_vector.livetime = livetime
obs = SpectrumObservation(on_vector=on_vector,
off_vector=off_vector,
|
{"golden_diff": "diff --git a/gammapy/scripts/cta_utils.py b/gammapy/scripts/cta_utils.py\n--- a/gammapy/scripts/cta_utils.py\n+++ b/gammapy/scripts/cta_utils.py\n@@ -1,4 +1,4 @@\n-from __future__ import absolute_import, division, print_function, unicode_literals\n+\n import numpy as np\n import astropy.units as u\n from ..spectrum import SpectrumObservation\n@@ -132,22 +132,21 @@\n \n on_counts += bkg_counts # evts in ON region\n \n- meta = dict(EXPOSURE=livetime.to('s').value)\n-\n on_vector = PHACountsSpectrum(\n data=on_counts,\n backscal=1,\n energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n- meta=meta,\n )\n \n+ on_vector.livetime = livetime\n off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n data=off_counts,\n backscal=1. / alpha,\n is_bkg=True,\n )\n+ off_vector.livetime = livetime\n \n obs = SpectrumObservation(on_vector=on_vector,\n off_vector=off_vector,\n", "issue": "Problem of fit with sherpa\nHi @cdeil and @joleroi , \r\nI've got some problem to do a spectral fit with sherpa on CTA simulated data. I don't really understand why it's happening. Everything worked fine ~2 weeks ago.\r\n\r\nHere is a small example to illustrate the problem: \r\n```\r\nimport astropy.units as u\r\n\r\nfrom gammapy.spectrum.models import PowerLaw\r\nfrom gammapy.scripts.cta_utils import CTAObservationSimulation, Target, ObservationParameters\r\n\r\nfrom gammapy.scripts import CTAPerf\r\n\r\n# observation parameters\r\nalpha = 0.2 * u.Unit('')\r\nlivetime = 100 * u.h\r\nemin = 0.05 * u.TeV\r\nemax = 5. * u.TeV\r\nobs_param = ObservationParameters(alpha=alpha,\r\n livetime=livetime,\r\n emin=emin,\r\n emax=emax)\r\n\r\n# target\r\npwl = PowerLaw(amplitude=1e-11 * u.Unit('1/(s cm2 TeV)'),\r\n index=2.,\r\n reference=1. * u.TeV)\r\ntarget = Target(name='bob', model=pwl)\r\n\r\n# perf\r\ndir_south = './CTA-Performance-South-20170323/'\r\nperf = CTAPerf.read('$GAMMAPY_EXTRA/datasets/cta/perf_prod2/point_like_non_smoothed/South_5h.fits.gz')\r\n\r\n# simu\r\nobs_id = 0\r\nsimu = CTAObservationSimulation.simulate_obs(perf=perf,\r\n target=target,\r\n obs_param=obs_param,\r\n obs_id=obs_id)\r\n\r\n# simu output\r\nprint(simu)\r\nstats = simu.stats_table()\r\nprint(stats[['energy_min', 'energy_max', 'background', 'excess', 'sigma']])\r\nsimu.write(outdir='./', use_sherpa=True)\r\n\r\n\r\nfrom sherpa.astro.datastack import DataStack\r\nimport sherpa.astro.datastack as sh\r\nfrom sherpa_model import Absorption\r\n\r\nsimu_output_dir = './'\r\nfilename = simu_output_dir + 'pha_obs' + str(obs_id) + '.fits'\r\nsh.load_data(id=obs_id, filename=filename)\r\nsherpa_model = 'powlaw1d.pwl'\r\nsh.set_source(obs_id, sherpa_model)\r\n \r\npwl.ampl = 1.e-12 * u.Unit('1/(s cm2 TeV)').to('1/(s cm2 keV)')\r\npwl.ref = 1 * u.TeV.to('keV')\r\npwl.gamma = 2.\r\n\r\nsh.notice(emin.to('keV')*0.99,emax.to('keV')*1.01)\r\nprint('JLK: threshold for spectra: ({},{})'.format(emin.to('keV')*0.99,emax.to('keV')*1.01))\r\n\r\nsh.set_stat('WStat')\r\nsh.fit()\r\nsh.conf()\r\n```\r\n\r\nwhich results in weird NaN values and implies problem for the fit:\r\n```\r\nDataset = 0\r\nMethod = levmar\r\nStatistic = wstat\r\nInitial fit statistic = nan\r\nFinal fit statistic = nan at function evaluation 33\r\nData points = 12\r\nDegrees of freedom = 10\r\nReduced statistic = nan\r\nChange in statistic = nan\r\n pwl.gamma 2 \r\n pwl.ampl 1e-21 \r\n```\r\n \r\nDo you have any idea what could cause that? 
OGIP format writting or something like that?\r\n\r\nThanks in advance ++\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport astropy.units as u\nfrom ..spectrum import SpectrumObservation\nfrom ..spectrum.utils import CountsPredictor\nfrom ..spectrum.core import PHACountsSpectrum\nfrom ..utils.random import get_random_state\n\n__all__ = [\n 'Target',\n 'ObservationParameters',\n 'CTAObservationSimulation',\n]\n\n\nclass Target(object):\n \"\"\"Observation target information.\n\n Parameters\n ----------\n name : `str`\n Name of the source\n model : `~gammapy.spectrum.models.SpectralModel`\n Model of the source\n \"\"\"\n\n def __init__(self, name=None,\n model=None):\n self.name = name\n self.model = model\n\n def __str__(self):\n \"\"\"Target report (`str`).\"\"\"\n ss = '*** Target parameters ***\\n'\n ss += 'Name={}\\n'.format(self.name)\n for par in self.model.parameters.parameters:\n ss += '{}={} {}\\n'.format(par.name, str(par.value), par.unit)\n return ss\n\n def from_fermi_lat_catalogue(name):\n raise NotImplementedError\n\n\nclass ObservationParameters(object):\n \"\"\"Container for observation parameters.\n\n Parameters\n ----------\n alpha : `~astropy.units.Quantity`\n Normalisation between ON and OFF regions\n livetime : `~astropy.units.Quantity`\n Observation time\n emin : `~astropy.units.Quantity`\n Minimal energy for simulation\n emax : `~astropy.units.Quantity`\n Maximal energy for simulation\n \"\"\"\n\n def __init__(self, alpha=None, livetime=None,\n emin=None, emax=None):\n self.alpha = alpha\n self.livetime = livetime\n self.emin = emin\n self.emax = emax\n\n def __str__(self):\n \"\"\"Observation summary report (`str`).\"\"\"\n ss = '*** Observation parameters summary ***\\n'\n ss += 'alpha={} [{}]\\n'.format(self.alpha.value, self.alpha.unit)\n ss += 'livetime={} [{}]\\n'.format(self.livetime.value,\n self.livetime.unit)\n ss += 'emin={} [{}]\\n'.format(self.emin.value, self.emin.unit)\n ss += 'emax={} [{}]\\n'.format(self.emax.value, self.emax.unit)\n return ss\n\n\nclass CTAObservationSimulation(object):\n \"\"\"Simulate observation for one IRF and target.\n\n TODO : Should be merge with `~gammapy.spectrum.SpectrumSimulation`\n\n Parameters\n ----------\n perf : `~gammapy.scripts.CTAPerf`\n CTA performance\n target : `~gammapy.scripts.Target`\n Source\n \"\"\"\n\n @staticmethod\n def simulate_obs(perf, target, obs_param, obs_id=0):\n \"\"\"\n Simulate observation with given parameters\n\n Parameters\n ----------\n perf : `~gammapy.scripts.CTAPerf`\n CTA performance\n target : `~gammapy.scripts.Target`\n Source\n obs_param : `~gammapy.scripts.ObservationParameters`\n Observation parameters\n obs_id : `int`, optional\n Observation Id\n \"\"\"\n livetime = obs_param.livetime\n alpha = obs_param.alpha.value\n emin = obs_param.emin\n emax = obs_param.emax\n\n model = target.model\n\n # Compute expected counts\n reco_energy = perf.bkg.energy\n bkg_rate_values = perf.bkg.data.data * livetime.to('s')\n predicted_counts = CountsPredictor(model=model,\n aeff=perf.aeff,\n livetime=livetime,\n edisp=perf.rmf)\n predicted_counts.run()\n npred = predicted_counts.npred\n # set negative values to zero (interpolation issue)\n idx = np.where(npred.data.data < 0.)\n npred.data.data[idx] = 0\n\n # Randomise counts\n rand = get_random_state('random-seed')\n on_counts = rand.poisson(npred.data.data.value) # excess\n bkg_counts = rand.poisson(bkg_rate_values.value) # bkg in ON region\n off_counts = 
rand.poisson(\n bkg_rate_values.value / alpha) # bkg in OFF region\n\n on_counts += bkg_counts # evts in ON region\n\n meta = dict(EXPOSURE=livetime.to('s').value)\n\n on_vector = PHACountsSpectrum(\n data=on_counts,\n backscal=1,\n energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n meta=meta,\n )\n\n off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n data=off_counts,\n backscal=1. / alpha,\n is_bkg=True,\n )\n\n obs = SpectrumObservation(on_vector=on_vector,\n off_vector=off_vector,\n aeff=perf.aeff,\n edisp=perf.rmf)\n obs.obs_id = obs_id\n\n # Set threshold according to the closest energy reco from bkg bins\n idx_min = np.abs(reco_energy.lo - emin).argmin()\n idx_max = np.abs(reco_energy.lo - emax).argmin()\n obs.lo_threshold = reco_energy.lo[idx_min]\n obs.hi_threshold = reco_energy.lo[idx_max]\n\n return obs\n\n @staticmethod\n def plot_simu(simu, target):\n import matplotlib.pyplot as plt\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,\n figsize=(10, 5))\n\n # Spectrum plot\n energy_range = [0.01 * u.TeV, 100 * u.TeV]\n target.model.plot(ax=ax1, energy_range=energy_range,\n label='Model')\n plt.text(0.55, 0.65, target.__str__(),\n style='italic', transform=ax1.transAxes, fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10})\n ax1.set_xlim([energy_range[0].value, energy_range[1].value])\n ax1.set_ylim(1.e-17, 1.e-5)\n ax1.grid(which='both')\n ax1.legend(loc=0)\n\n # Counts plot\n on_off = simu.on_vector.data.data.value\n off = 1. / simu.off_vector.backscal * simu.off_vector.data.data.value\n excess = on_off - off\n bins = simu.on_vector.energy.lo.value\n x = simu.on_vector.energy.nodes.value\n ax2.hist(x, bins=bins, weights=on_off,\n facecolor='blue', alpha=1, label='ON')\n ax2.hist(x, bins=bins, weights=off,\n facecolor='green', alpha=1, label='OFF')\n ax2.hist(x, bins=bins, weights=excess,\n facecolor='red', alpha=1, label='EXCESS')\n ax2.legend(loc='best')\n ax2.set_xscale('log')\n ax2.set_xlabel('Energy [TeV]')\n ax2.set_ylabel('Expected counts')\n ax2.set_xlim([energy_range[0].value, energy_range[1].value])\n ax2.set_ylim([0.0001, on_off.max() * (1 + 0.05)])\n ax2.vlines(simu.lo_threshold.value, 0, 1.1 * on_off.max(),\n linestyles='dashed')\n ax2.grid(which='both')\n plt.text(0.55, 0.05, simu.__str__(),\n style='italic', transform=ax2.transAxes, fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10})\n plt.tight_layout()\n", "path": "gammapy/scripts/cta_utils.py"}]}
| 3,611 | 286 |
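The golden diff for the record above points at the exposure: passing `EXPOSURE` through `meta=` was evidently not reaching the OGIP header of the written PHA files, which would explain the NaN wstat statistic, and the fix sets `livetime` directly on both count spectra instead. Condensed, the patched part of `simulate_obs()` reads roughly as follows (an excerpt-style sketch: the surrounding variables come from the function shown above, so this is not a standalone script):

```python
on_vector = PHACountsSpectrum(
    data=on_counts,
    backscal=1,
    energy_lo=reco_energy.lo,
    energy_hi=reco_energy.hi,
)
on_vector.livetime = livetime    # exposure is now carried by the ON spectrum itself

off_vector = PHACountsSpectrum(
    data=off_counts,
    backscal=1. / alpha,
    energy_lo=reco_energy.lo,
    energy_hi=reco_energy.hi,
    is_bkg=True,
)
off_vector.livetime = livetime   # and by the OFF spectrum

obs = SpectrumObservation(on_vector=on_vector, off_vector=off_vector,
                          aeff=perf.aeff, edisp=perf.rmf)
```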
gh_patches_debug_11877 | rasdani/github-patches | git_diff | CTFd__CTFd-1048 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Import will crash CTFd
<!--
If this is a bug report please fill out the template below.
If this is a feature request please describe the behavior that you'd like to see.
-->
**Environment**:
- CTFd Version/Commit: 2.1.3
- Operating System: ubuntu 18.04
- Web Browser and Version: Opera 60.0.3255.170
**What happened?**
trying to import db (zip file)
**What did you expect to happen?**
it would import db (zip file)
**How to reproduce your issue**
**Any associated stack traces or error logs**
Failed to disable foreign key checks. Continuing.
Error: No support for ALTER of constraints in SQLite dialect
I believe it's Alembic's fault
</issue>
<code>
[start of migrations/versions/b5551cd26764_add_captain_column_to_teams.py]
1 """Add captain column to Teams
2
3 Revision ID: b5551cd26764
4 Revises: 4e4d5a9ea000
5 Create Date: 2019-04-12 00:29:08.021141
6
7 """
8 from CTFd.models import db
9 from alembic import op
10 import sqlalchemy as sa
11 from sqlalchemy.sql import text, table, column, and_
12
13 # revision identifiers, used by Alembic.
14 revision = 'b5551cd26764'
15 down_revision = '4e4d5a9ea000'
16 branch_labels = None
17 depends_on = None
18
19 teams_table = table('teams',
20 column('id', db.Integer),
21 column('captain_id', db.Integer),
22 )
23
24 users_table = table('users',
25 column('id', db.Integer),
26 column('team_id', db.Integer),
27 )
28
29
30 def upgrade():
31 # ### commands auto generated by Alembic - please adjust! ###
32 op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))
33 op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])
34
35 connection = op.get_bind()
36 for team in connection.execute(teams_table.select()):
37 users = connection.execute(
38 users_table.select().where(users_table.c.team_id == team.id).order_by(users_table.c.id).limit(1)
39 )
40 for user in users:
41 connection.execute(
42 teams_table.update().where(
43 teams_table.c.id == team.id
44 ).values(
45 captain_id=user.id
46 )
47 )
48 # ### end Alembic commands ###
49
50
51 def downgrade():
52 # ### commands auto generated by Alembic - please adjust! ###
53 op.drop_constraint('team_captain_id', 'teams', type_='foreignkey')
54 op.drop_column('teams', 'captain_id')
55 # ### end Alembic commands ###
56
[end of migrations/versions/b5551cd26764_add_captain_column_to_teams.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py
--- a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py
+++ b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py
@@ -30,7 +30,11 @@
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))
- op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])
+
+ bind = op.get_bind()
+ url = str(bind.engine.url)
+ if url.startswith('sqlite') is False:
+ op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])
connection = op.get_bind()
for team in connection.execute(teams_table.select()):
|
{"golden_diff": "diff --git a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n--- a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n+++ b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n@@ -30,7 +30,11 @@\n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))\n- op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n+\n+ bind = op.get_bind()\n+ url = str(bind.engine.url)\n+ if url.startswith('sqlite') is False:\n+ op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n \n connection = op.get_bind()\n for team in connection.execute(teams_table.select()):\n", "issue": "import will crash ctfd\n<!--\r\nIf this is a bug report please fill out the template below.\r\n\r\nIf this is a feature request please describe the behavior that you'd like to see.\r\n-->\r\n\r\n**Environment**:\r\n\r\n - CTFd Version/Commit: 2.1.3\r\n - Operating System: ubuntu 18.04\r\n - Web Browser and Version: Opera 60.0.3255.170\r\n\r\n**What happened?**\r\ntrying to import db (zip file)\r\n**What did you expect to happen?**\r\nit would import db (zip file)\r\n**How to reproduce your issue**\r\n\r\n**Any associated stack traces or error logs**\r\nFailed to disable foreign key checks. Continuing.\r\nError: No support for ALTER of constraints in SQLite dialect\r\n\r\nI believe it's Alembic fault \n", "before_files": [{"content": "\"\"\"Add captain column to Teams\n\nRevision ID: b5551cd26764\nRevises: 4e4d5a9ea000\nCreate Date: 2019-04-12 00:29:08.021141\n\n\"\"\"\nfrom CTFd.models import db\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.sql import text, table, column, and_\n\n# revision identifiers, used by Alembic.\nrevision = 'b5551cd26764'\ndown_revision = '4e4d5a9ea000'\nbranch_labels = None\ndepends_on = None\n\nteams_table = table('teams',\n column('id', db.Integer),\n column('captain_id', db.Integer),\n)\n\nusers_table = table('users',\n column('id', db.Integer),\n column('team_id', db.Integer),\n)\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))\n op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n\n connection = op.get_bind()\n for team in connection.execute(teams_table.select()):\n users = connection.execute(\n users_table.select().where(users_table.c.team_id == team.id).order_by(users_table.c.id).limit(1)\n )\n for user in users:\n connection.execute(\n teams_table.update().where(\n teams_table.c.id == team.id\n ).values(\n captain_id=user.id\n )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('team_captain_id', 'teams', type_='foreignkey')\n op.drop_column('teams', 'captain_id')\n # ### end Alembic commands ###\n", "path": "migrations/versions/b5551cd26764_add_captain_column_to_teams.py"}]}
| 1,283 | 257 |
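The CTFd patch above works because SQLite's ALTER TABLE cannot add a constraint to an existing table, which is exactly what the reported "No support for ALTER of constraints in SQLite dialect" error means; the migration now simply skips the foreign-key DDL when the bind is SQLite. The patch inspects the engine URL, and checking the dialect name is an equivalent guard (Alembic's `op.batch_alter_table` would be the heavier alternative if the constraint were actually required on SQLite). A minimal migration-module sketch of the same pattern, reusing the table and constraint names from the patch purely for illustration:

```python
# Dialect-guarded Alembic upgrade step: only emit constraint DDL on backends
# that can ALTER it in. Names mirror the patch above; this is a sketch, not
# the CTFd migration itself.
import sqlalchemy as sa
from alembic import op


def upgrade():
    op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))

    bind = op.get_bind()
    if bind.dialect.name != 'sqlite':  # SQLite cannot ALTER in a constraint
        op.create_foreign_key('team_captain_id', 'teams', 'users',
                              ['captain_id'], ['id'])
```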
gh_patches_debug_7915
|
rasdani/github-patches
|
git_diff
|
psychopy__psychopy-3457
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows seg fault when defaultView set to Runner
</issue>
<code>
[start of psychopy/app/pavlovia_ui/menu.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # Part of the PsychoPy library
5 # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
6 # Distributed under the terms of the GNU General Public License (GPL).
7
8 import wx
9 import requests
10
11 from psychopy import logging
12 from .. import dialogs
13 from .functions import logInPavlovia
14 from psychopy.app.pavlovia_ui.project import syncProject
15 from .search import SearchFrame
16 from .project import ProjectEditor
17 from psychopy.localization import _translate
18 from psychopy.projects import pavlovia
19
20
21 class PavloviaMenu(wx.Menu):
22 app = None
23 appData = None
24 currentUser = None
25 knownUsers = None
26 searchDlg = None
27
28 def __init__(self, parent):
29 wx.Menu.__init__(self)
30 self.parent = parent # type: BuilderFrame
31 PavloviaMenu.app = parent.app
32 keys = self.app.keys
33 # from prefs fetch info about prev usernames and projects
34 PavloviaMenu.appData = self.app.prefs.appData['projects']
35
36 # item = self.Append(wx.ID_ANY, _translate("Tell me more..."))
37 # parent.Bind(wx.EVT_MENU, self.onAbout, id=item.GetId())
38
39 PavloviaMenu.knownUsers = pavlovia.knownUsers
40
41 # sub-menu for usernames and login
42 self.userMenu = wx.Menu()
43 # if a user was previously logged in then set them as current
44 lastPavUser = PavloviaMenu.appData['pavloviaUser']
45 if pavlovia.knownUsers and (lastPavUser not in pavlovia.knownUsers):
46 lastPavUser = None
47 # if lastPavUser and not PavloviaMenu.currentUser:
48 # self.setUser(PavloviaMenu.appData['pavloviaUser'])
49 for name in self.knownUsers:
50 self.addToSubMenu(name, self.userMenu, self.onSetUser)
51 self.userMenu.AppendSeparator()
52 self.loginBtn = self.userMenu.Append(wx.ID_ANY,
53 _translate("Log in to Pavlovia...\t{}")
54 .format(keys['pavlovia_logIn']))
55 parent.Bind(wx.EVT_MENU, self.onLogInPavlovia, id=self.loginBtn.GetId())
56 self.AppendSubMenu(self.userMenu, _translate("User"))
57
58 # search
59 self.searchBtn = self.Append(wx.ID_ANY,
60 _translate("Search Pavlovia\t{}")
61 .format(keys['projectsFind']))
62 parent.Bind(wx.EVT_MENU, self.onSearch, id=self.searchBtn.GetId())
63
64 # new
65 self.newBtn = self.Append(wx.ID_ANY,
66 _translate("New...\t{}").format(keys['projectsNew']))
67 parent.Bind(wx.EVT_MENU, self.onNew, id=self.newBtn.GetId())
68
69 self.syncBtn = self.Append(wx.ID_ANY,
70 _translate("Sync\t{}").format(keys['projectsSync']))
71 parent.Bind(wx.EVT_MENU, self.onSync, id=self.syncBtn.GetId())
72
73 def addToSubMenu(self, name, menu, function):
74 item = menu.Append(wx.ID_ANY, name)
75 self.parent.Bind(wx.EVT_MENU, function, id=item.GetId())
76
77 def onAbout(self, event):
78 wx.GetApp().followLink(event)
79
80 def onSetUser(self, event):
81 user = self.userMenu.GetLabelText(event.GetId())
82 self.setUser(user)
83
84 def setUser(self, user=None):
85
86 if user is None and PavloviaMenu.appData['pavloviaUser']:
87 user = PavloviaMenu.appData['pavloviaUser']
88
89 if user in [PavloviaMenu.currentUser, None]:
90 return # nothing to do here. Move along please.
91
92 PavloviaMenu.currentUser = user
93 PavloviaMenu.appData['pavloviaUser'] = user
94 if user in pavlovia.knownUsers:
95 token = pavlovia.knownUsers[user]['token']
96 try:
97 pavlovia.getCurrentSession().setToken(token)
98 except requests.exceptions.ConnectionError:
99 logging.warning("Tried to log in to Pavlovia but no network "
100 "connection")
101 return
102 else:
103 if hasattr(self, 'onLogInPavlovia'):
104 self.onLogInPavlovia()
105
106 if PavloviaMenu.searchDlg:
107 PavloviaMenu.searchDlg.updateUserProjs()
108
109 def onSync(self, event):
110 retVal = syncProject(parent=self.parent, project=self.parent.project)
111 if hasattr(self.parent, 'gitFeedback'):
112 self.parent.gitFeedback(retVal)
113
114 def onSearch(self, event):
115 PavloviaMenu.searchDlg = SearchFrame(app=self.parent.app)
116 PavloviaMenu.searchDlg.Show()
117
118 def onLogInPavlovia(self, event=None):
119 logInPavlovia(parent=self.parent)
120
121 def onNew(self, event):
122 """Create a new project
123 """
124 if pavlovia.getCurrentSession().user.username:
125 projEditor = ProjectEditor()
126 if projEditor.ShowModal() == wx.ID_OK:
127 self.parent.project = projEditor.project
128 # do a first sync as well
129 retVal = syncProject(parent=self.parent, project=projEditor.project)
130 self.parent.gitFeedback(retVal)
131 else:
132 infoDlg = dialogs.MessageDialog(parent=None, type='Info',
133 message=_translate(
134 "You need to log in"
135 " to create a project"))
136 infoDlg.Show()
137
[end of psychopy/app/pavlovia_ui/menu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/psychopy/app/pavlovia_ui/menu.py b/psychopy/app/pavlovia_ui/menu.py
--- a/psychopy/app/pavlovia_ui/menu.py
+++ b/psychopy/app/pavlovia_ui/menu.py
@@ -82,9 +82,9 @@
self.setUser(user)
def setUser(self, user=None):
-
- if user is None and PavloviaMenu.appData['pavloviaUser']:
- user = PavloviaMenu.appData['pavloviaUser']
+ if PavloviaMenu.appData:
+ if user is None and PavloviaMenu.appData['pavloviaUser']:
+ user = PavloviaMenu.appData['pavloviaUser']
if user in [PavloviaMenu.currentUser, None]:
return # nothing to do here. Move along please.
|
{"golden_diff": "diff --git a/psychopy/app/pavlovia_ui/menu.py b/psychopy/app/pavlovia_ui/menu.py\n--- a/psychopy/app/pavlovia_ui/menu.py\n+++ b/psychopy/app/pavlovia_ui/menu.py\n@@ -82,9 +82,9 @@\n self.setUser(user)\n \n def setUser(self, user=None):\n-\n- if user is None and PavloviaMenu.appData['pavloviaUser']:\n- user = PavloviaMenu.appData['pavloviaUser']\n+ if PavloviaMenu.appData:\n+ if user is None and PavloviaMenu.appData['pavloviaUser']:\n+ user = PavloviaMenu.appData['pavloviaUser']\n \n if user in [PavloviaMenu.currentUser, None]:\n return # nothing to do here. Move along please.\n", "issue": "Windows seg fault when defaultView set to Runner\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport wx\nimport requests\n\nfrom psychopy import logging\nfrom .. import dialogs\nfrom .functions import logInPavlovia\nfrom psychopy.app.pavlovia_ui.project import syncProject\nfrom .search import SearchFrame\nfrom .project import ProjectEditor\nfrom psychopy.localization import _translate\nfrom psychopy.projects import pavlovia\n\n\nclass PavloviaMenu(wx.Menu):\n app = None\n appData = None\n currentUser = None\n knownUsers = None\n searchDlg = None\n\n def __init__(self, parent):\n wx.Menu.__init__(self)\n self.parent = parent # type: BuilderFrame\n PavloviaMenu.app = parent.app\n keys = self.app.keys\n # from prefs fetch info about prev usernames and projects\n PavloviaMenu.appData = self.app.prefs.appData['projects']\n\n # item = self.Append(wx.ID_ANY, _translate(\"Tell me more...\"))\n # parent.Bind(wx.EVT_MENU, self.onAbout, id=item.GetId())\n\n PavloviaMenu.knownUsers = pavlovia.knownUsers\n\n # sub-menu for usernames and login\n self.userMenu = wx.Menu()\n # if a user was previously logged in then set them as current\n lastPavUser = PavloviaMenu.appData['pavloviaUser']\n if pavlovia.knownUsers and (lastPavUser not in pavlovia.knownUsers):\n lastPavUser = None\n # if lastPavUser and not PavloviaMenu.currentUser:\n # self.setUser(PavloviaMenu.appData['pavloviaUser'])\n for name in self.knownUsers:\n self.addToSubMenu(name, self.userMenu, self.onSetUser)\n self.userMenu.AppendSeparator()\n self.loginBtn = self.userMenu.Append(wx.ID_ANY,\n _translate(\"Log in to Pavlovia...\\t{}\")\n .format(keys['pavlovia_logIn']))\n parent.Bind(wx.EVT_MENU, self.onLogInPavlovia, id=self.loginBtn.GetId())\n self.AppendSubMenu(self.userMenu, _translate(\"User\"))\n\n # search\n self.searchBtn = self.Append(wx.ID_ANY,\n _translate(\"Search Pavlovia\\t{}\")\n .format(keys['projectsFind']))\n parent.Bind(wx.EVT_MENU, self.onSearch, id=self.searchBtn.GetId())\n\n # new\n self.newBtn = self.Append(wx.ID_ANY,\n _translate(\"New...\\t{}\").format(keys['projectsNew']))\n parent.Bind(wx.EVT_MENU, self.onNew, id=self.newBtn.GetId())\n\n self.syncBtn = self.Append(wx.ID_ANY,\n _translate(\"Sync\\t{}\").format(keys['projectsSync']))\n parent.Bind(wx.EVT_MENU, self.onSync, id=self.syncBtn.GetId())\n\n def addToSubMenu(self, name, menu, function):\n item = menu.Append(wx.ID_ANY, name)\n self.parent.Bind(wx.EVT_MENU, function, id=item.GetId())\n\n def onAbout(self, event):\n wx.GetApp().followLink(event)\n\n def onSetUser(self, event):\n user = self.userMenu.GetLabelText(event.GetId())\n self.setUser(user)\n\n def setUser(self, user=None):\n\n if user is None and 
PavloviaMenu.appData['pavloviaUser']:\n user = PavloviaMenu.appData['pavloviaUser']\n\n if user in [PavloviaMenu.currentUser, None]:\n return # nothing to do here. Move along please.\n\n PavloviaMenu.currentUser = user\n PavloviaMenu.appData['pavloviaUser'] = user\n if user in pavlovia.knownUsers:\n token = pavlovia.knownUsers[user]['token']\n try:\n pavlovia.getCurrentSession().setToken(token)\n except requests.exceptions.ConnectionError:\n logging.warning(\"Tried to log in to Pavlovia but no network \"\n \"connection\")\n return\n else:\n if hasattr(self, 'onLogInPavlovia'):\n self.onLogInPavlovia()\n\n if PavloviaMenu.searchDlg:\n PavloviaMenu.searchDlg.updateUserProjs()\n\n def onSync(self, event):\n retVal = syncProject(parent=self.parent, project=self.parent.project)\n if hasattr(self.parent, 'gitFeedback'):\n self.parent.gitFeedback(retVal)\n\n def onSearch(self, event):\n PavloviaMenu.searchDlg = SearchFrame(app=self.parent.app)\n PavloviaMenu.searchDlg.Show()\n\n def onLogInPavlovia(self, event=None):\n logInPavlovia(parent=self.parent)\n\n def onNew(self, event):\n \"\"\"Create a new project\n \"\"\"\n if pavlovia.getCurrentSession().user.username:\n projEditor = ProjectEditor()\n if projEditor.ShowModal() == wx.ID_OK:\n self.parent.project = projEditor.project\n # do a first sync as well\n retVal = syncProject(parent=self.parent, project=projEditor.project)\n self.parent.gitFeedback(retVal)\n else:\n infoDlg = dialogs.MessageDialog(parent=None, type='Info',\n message=_translate(\n \"You need to log in\"\n \" to create a project\"))\n infoDlg.Show()\n", "path": "psychopy/app/pavlovia_ui/menu.py"}]}
| 2,077 | 195 |
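The PsychoPy fix above is a plain defensive guard: when the projects section of the application preferences has not been populated, which appears to be the case when Runner rather than Builder is the first window shown, `PavloviaMenu.appData` is falsy and the old code dereferenced it anyway while building the menu. A tiny stand-alone illustration of the guarded lookup; the names below are placeholders, not PsychoPy APIs:

```python
# Stand-in for PavloviaMenu.appData; None until preferences are loaded.
app_data = None


def set_user(user=None):
    # Consult stored preferences only when they exist, so a missing prefs
    # section becomes a no-op instead of a crash.
    if app_data:
        if user is None and app_data.get('pavloviaUser'):
            user = app_data['pavloviaUser']
    return user


print(set_user())  # None, even though app_data was never populated
```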
gh_patches_debug_13301
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-219
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Core Agent inheriting and holding open sockets from parent
Reported via Slack:
```
Mike O'Toole 9:12 AM
hey there -- are there any instructions available on installing/starting the core-agent separately from a django app?
cschneid:scout-orange: 9:13 AM
@Mike O'Toole - the binary gets downloaded to /tmp/scout_apm_core/… and has a help command.
Mike O'Toole 9:13 AM
we're running into an issue where the core-agent process seems to hang on to the ports that are uWSGI is binding when it first starts up. Then when we try to restart our uWSGI process, it tries to bind the ports again, and it can't because the core-agent process has still got them
cschneid:scout-orange: 9:14 AM
That is odd, it’s a totally different process, so shouldn’t ever have access to port file descriptors.
Mike O'Toole 9:15 AM
yeah -- that's what i thought too, but looking at lsof this is what im seeing:
core-agen 23301 chuck-uwsgi 8u IPv4 7817953 0t0 TCP *:3031 (LISTEN)
core-agen 23301 chuck-uwsgi 9u IPv4 7817954 0t0 TCP *:8080 (LISTEN)
cschneid:scout-orange: 9:15 AM
ok, well that’s not intended… :slightly_smiling_face:
Mike O'Toole 9:15 AM
3031/8080 are the ports that uWSGI is binding
cschneid:scout-orange: 9:16 AM
For now, you’re looking for core-agent start --socket-path <match the default, or the explicit config>
Mike O'Toole 9:16 AM
sweet, thanks
cschneid:scout-orange: 9:17 AM
the default socket path is:
def derive_socket_path(self):
return "{}/{}/core-agent.sock".format(
self.config.value("core_agent_dir"),
self.config.value("core_agent_full_name"),
which would be something like: /tmp/scout_apm_core/scout_apm_core-latest-x86_64-apple-darwin/core-agent.sock
I’m really confused by the core agent holding onto those ports. We launch via subprocess.check_call( ... which I suppose would maintain file descriptors?
If close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. (POSIX only). The default varies by platform: Always true on POSIX.
from the docs.
sounds like it should have closed the socket handles?
Mike O'Toole 9:22 AM
hmmm weird
cschneid:scout-orange: 9:22 AM
What version of python are you on? I can try to reproduce
Mike O'Toole 9:24 AM
python 2.7.6
ahh, here's someone having a related issue with uWSGI
https://stackoverflow.com/questions/48376664/uwsgi-subprocess-create-a-new-process-the-socket-close-wait
sounds like that config option is what we need
cschneid:scout-orange: 9:25 AM
ok, the docs I shared were from 3.5, I wonder if that behavior changed
```
</issue>
<code>
[start of src/scout_apm/core/core_agent_manager.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import hashlib
5 import json
6 import logging
7 import os
8 import subprocess
9 import tarfile
10 import time
11
12 import requests
13
14 from scout_apm.core.context import AgentContext
15
16 logger = logging.getLogger(__name__)
17
18
19 class CoreAgentManager(object):
20 def __init__(self):
21 self.core_agent_bin_path = None
22 self.core_agent_bin_version = None
23 self.core_agent_dir = "{}/{}".format(
24 AgentContext.instance.config.value("core_agent_dir"),
25 AgentContext.instance.config.value("core_agent_full_name"),
26 )
27 self.downloader = CoreAgentDownloader(
28 self.core_agent_dir,
29 AgentContext.instance.config.value("core_agent_full_name"),
30 )
31
32 def launch(self):
33 if not AgentContext.instance.config.value("core_agent_launch"):
34 logger.debug(
35 "Not attempting to launch Core Agent "
36 "due to 'core_agent_launch' setting."
37 )
38 return False
39
40 if not self.verify():
41 if not AgentContext.instance.config.value("core_agent_download"):
42 logger.debug(
43 "Not attempting to download Core Agent due "
44 "to 'core_agent_download' setting."
45 )
46 return False
47
48 self.download()
49
50 if not self.verify():
51 logger.debug("Failed to verify Core Agent. Not launching Core Agent.")
52 return False
53
54 return self.run()
55
56 def download(self):
57 self.downloader.download()
58
59 def run(self):
60 try:
61 subprocess.check_call(
62 self.agent_binary()
63 + self.daemonize_flag()
64 + self.log_level()
65 + self.log_file()
66 + self.config_file()
67 + self.socket_path()
68 )
69 except Exception:
70 # TODO detect failure of launch properly
71 logger.exception("Error running Core Agent")
72 return False
73 return True
74
75 def agent_binary(self):
76 return [self.core_agent_bin_path, "start"]
77
78 def daemonize_flag(self):
79 return ["--daemonize", "true"]
80
81 def socket_path(self):
82 socket_path = AgentContext.instance.config.value("socket_path")
83 return ["--socket", socket_path]
84
85 def log_level(self):
86 level = AgentContext.instance.config.value("log_level")
87 return ["--log-level", level]
88
89 def log_file(self):
90 path = AgentContext.instance.config.value("log_file")
91 if path is not None:
92 return ["--log-file", path]
93 else:
94 return []
95
96 def config_file(self):
97 path = AgentContext.instance.config.value("config_file")
98 if path is not None:
99 return ["--config-file", path]
100 else:
101 return []
102
103 def verify(self):
104 manifest = CoreAgentManifest(self.core_agent_dir + "/manifest.json")
105 if not manifest.is_valid():
106 logger.debug(
107 "Core Agent verification failed: CoreAgentManifest is not valid."
108 )
109 self.core_agent_bin_path = None
110 self.core_agent_bin_version = None
111 return False
112
113 bin_path = self.core_agent_dir + "/" + manifest.bin_name
114 if sha256_digest(bin_path) == manifest.sha256:
115 self.core_agent_bin_path = bin_path
116 self.core_agent_bin_version = manifest.bin_version
117 return True
118 else:
119 logger.debug("Core Agent verification failed: SHA mismatch.")
120 self.core_agent_bin_path = None
121 self.core_agent_bin_version = None
122 return False
123
124
125 class CoreAgentDownloader(object):
126 def __init__(self, download_destination, core_agent_full_name):
127 self.stale_download_secs = 120
128 self.destination = download_destination
129 self.core_agent_full_name = core_agent_full_name
130 self.package_location = self.destination + "/{}.tgz".format(
131 self.core_agent_full_name
132 )
133 self.download_lock_path = self.destination + "/download.lock"
134 self.download_lock_fd = None
135
136 def download(self):
137 self.create_core_agent_dir()
138 self.obtain_download_lock()
139 if self.download_lock_fd is not None:
140 try:
141 self.download_package()
142 self.untar()
143 except OSError:
144 logger.exception("Exception raised while downloading Core Agent")
145 finally:
146 self.release_download_lock()
147
148 def create_core_agent_dir(self):
149 try:
150 os.makedirs(
151 self.destination, AgentContext.instance.config.core_agent_permissions()
152 )
153 except OSError:
154 pass
155
156 def obtain_download_lock(self):
157 self.clean_stale_download_lock()
158 try:
159 self.download_lock_fd = os.open(
160 self.download_lock_path,
161 os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK,
162 )
163 except OSError as e:
164 logger.debug(
165 "Could not obtain download lock on %s: %r", self.download_lock_path, e
166 )
167 self.download_lock_fd = None
168
169 def clean_stale_download_lock(self):
170 try:
171 delta = time.time() - os.stat(self.download_lock_path).st_ctime
172 if delta > self.stale_download_secs:
173 logger.debug("Clearing stale download lock file.")
174 os.unlink(self.download_lock_path)
175 except OSError:
176 pass
177
178 def release_download_lock(self):
179 if self.download_lock_fd is not None:
180 os.unlink(self.download_lock_path)
181 os.close(self.download_lock_fd)
182
183 def download_package(self):
184 logger.debug("Downloading: %s to %s", self.full_url(), self.package_location)
185 req = requests.get(self.full_url(), stream=True)
186 with open(self.package_location, "wb") as f:
187 for chunk in req.iter_content(1024 * 1000):
188 f.write(chunk)
189
190 def untar(self):
191 t = tarfile.open(self.package_location, "r")
192 t.extractall(self.destination)
193
194 def full_url(self):
195 return "{root_url}/{core_agent_full_name}.tgz".format(
196 root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name
197 )
198
199 def root_url(self):
200 return AgentContext.instance.config.value("download_url")
201
202
203 class CoreAgentManifest(object):
204 def __init__(self, path):
205 self.manifest_path = path
206 self.bin_name = None
207 self.bin_version = None
208 self.sha256 = None
209 self.valid = False
210 try:
211 self.parse()
212 except (ValueError, TypeError, OSError, IOError) as e:
213 logger.debug("Error parsing Core Agent Manifest: %r", e)
214
215 def parse(self):
216 logger.debug("Parsing Core Agent manifest path: %s", self.manifest_path)
217 with open(self.manifest_path) as manifest_file:
218 self.raw = manifest_file.read()
219 self.json = json.loads(self.raw)
220 self.version = self.json["version"]
221 self.bin_version = self.json["core_agent_version"]
222 self.bin_name = self.json["core_agent_binary"]
223 self.sha256 = self.json["core_agent_binary_sha256"]
224 self.valid = True
225 logger.debug("Core Agent manifest json: %s", self.json)
226
227 def is_valid(self):
228 return self.valid
229
230
231 def sha256_digest(filename, block_size=65536):
232 try:
233 sha256 = hashlib.sha256()
234 with open(filename, "rb") as f:
235 for block in iter(lambda: f.read(block_size), b""):
236 sha256.update(block)
237 return sha256.hexdigest()
238 except OSError as e:
239 logger.debug("Error on digest: %r", e)
240 return None
241
[end of src/scout_apm/core/core_agent_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/scout_apm/core/core_agent_manager.py b/src/scout_apm/core/core_agent_manager.py
--- a/src/scout_apm/core/core_agent_manager.py
+++ b/src/scout_apm/core/core_agent_manager.py
@@ -59,12 +59,15 @@
def run(self):
try:
subprocess.check_call(
- self.agent_binary()
- + self.daemonize_flag()
- + self.log_level()
- + self.log_file()
- + self.config_file()
- + self.socket_path()
+ (
+ self.agent_binary()
+ + self.daemonize_flag()
+ + self.log_level()
+ + self.log_file()
+ + self.config_file()
+ + self.socket_path()
+ ),
+ close_fds=True,
)
except Exception:
# TODO detect failure of launch properly
|
{"golden_diff": "diff --git a/src/scout_apm/core/core_agent_manager.py b/src/scout_apm/core/core_agent_manager.py\n--- a/src/scout_apm/core/core_agent_manager.py\n+++ b/src/scout_apm/core/core_agent_manager.py\n@@ -59,12 +59,15 @@\n def run(self):\n try:\n subprocess.check_call(\n- self.agent_binary()\n- + self.daemonize_flag()\n- + self.log_level()\n- + self.log_file()\n- + self.config_file()\n- + self.socket_path()\n+ (\n+ self.agent_binary()\n+ + self.daemonize_flag()\n+ + self.log_level()\n+ + self.log_file()\n+ + self.config_file()\n+ + self.socket_path()\n+ ),\n+ close_fds=True,\n )\n except Exception:\n # TODO detect failure of launch properly\n", "issue": "Core Agent inheriting and holding open sockets from parent\nReported via Slack:\r\n\r\n```\r\n\r\nMike O'Toole 9:12 AM\r\nhey there -- are there any instructions available on installing/starting the core-agent separately from a django app?\r\ncschneid:scout-orange: 9:13 AM\r\n@Mike O'Toole - the binary gets downloaded to /tmp/scout_apm_core/\u2026 and has a help command.\r\nMike O'Toole 9:13 AM\r\nwe're running into an issue where the core-agent process seems to hang on to the ports that are uWSGI is binding when it first starts up. Then when we try to restart our uWSGI process, it tries to bind the ports again, and it can't because the core-agent process has still got them\r\ncschneid:scout-orange: 9:14 AM\r\nThat is odd, it\u2019s a totally different process, so shouldn\u2019t ever have access to port file descriptors.\r\nMike O'Toole 9:15 AM\r\nyeah -- that's what i thought too, but looking at lsof this is what im seeing:\r\ncore-agen 23301 chuck-uwsgi 8u IPv4 7817953 0t0 TCP *:3031 (LISTEN)\r\ncore-agen 23301 chuck-uwsgi 9u IPv4 7817954 0t0 TCP *:8080 (LISTEN)\r\ncschneid:scout-orange: 9:15 AM\r\nok, well that\u2019s not intended\u2026 :slightly_smiling_face:\r\nMike O'Toole 9:15 AM\r\n3031/8080 are the ports that uWSGI is binding\r\ncschneid:scout-orange: 9:16 AM\r\nFor now, you\u2019re looking for core-agent start --socket-path <match the default, or the explicit config> \r\nMike O'Toole 9:16 AM\r\nsweet, thanks\r\ncschneid:scout-orange: 9:17 AM\r\nthe default socket path is:\r\n def derive_socket_path(self):\r\n return \"{}/{}/core-agent.sock\".format(\r\n self.config.value(\"core_agent_dir\"),\r\n self.config.value(\"core_agent_full_name\"),\r\nwhich would be something like: /tmp/scout_apm_core/scout_apm_core-latest-x86_64-apple-darwin/core-agent.sock\r\nI\u2019m really confused by the core agent holding onto those ports. We launch via subprocess.check_call( ... which I suppose would maintain file descriptors?\r\nIf close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. (POSIX only). The default varies by platform: Always true on POSIX. \r\nfrom the docs.\r\nsounds like it should have closed the socket handles?\r\nMike O'Toole 9:22 AM\r\nhmmm weird\r\ncschneid:scout-orange: 9:22 AM\r\nWhat version of python are you on? 
I can try to reproduce\r\nMike O'Toole 9:24 AM\r\npython 2.7.6\r\nahh, here's someone having a related issue with uWSGI\r\n\r\nhttps://stackoverflow.com/questions/48376664/uwsgi-subprocess-create-a-new-process-the-socket-close-wait\r\n\r\nsounds like that config option is what we need\r\ncschneid:scout-orange: 9:25 AM\r\nok, the docs I shared were from 3.5, I wonder if that behavior changed\r\n```\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport hashlib\nimport json\nimport logging\nimport os\nimport subprocess\nimport tarfile\nimport time\n\nimport requests\n\nfrom scout_apm.core.context import AgentContext\n\nlogger = logging.getLogger(__name__)\n\n\nclass CoreAgentManager(object):\n def __init__(self):\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n self.core_agent_dir = \"{}/{}\".format(\n AgentContext.instance.config.value(\"core_agent_dir\"),\n AgentContext.instance.config.value(\"core_agent_full_name\"),\n )\n self.downloader = CoreAgentDownloader(\n self.core_agent_dir,\n AgentContext.instance.config.value(\"core_agent_full_name\"),\n )\n\n def launch(self):\n if not AgentContext.instance.config.value(\"core_agent_launch\"):\n logger.debug(\n \"Not attempting to launch Core Agent \"\n \"due to 'core_agent_launch' setting.\"\n )\n return False\n\n if not self.verify():\n if not AgentContext.instance.config.value(\"core_agent_download\"):\n logger.debug(\n \"Not attempting to download Core Agent due \"\n \"to 'core_agent_download' setting.\"\n )\n return False\n\n self.download()\n\n if not self.verify():\n logger.debug(\"Failed to verify Core Agent. Not launching Core Agent.\")\n return False\n\n return self.run()\n\n def download(self):\n self.downloader.download()\n\n def run(self):\n try:\n subprocess.check_call(\n self.agent_binary()\n + self.daemonize_flag()\n + self.log_level()\n + self.log_file()\n + self.config_file()\n + self.socket_path()\n )\n except Exception:\n # TODO detect failure of launch properly\n logger.exception(\"Error running Core Agent\")\n return False\n return True\n\n def agent_binary(self):\n return [self.core_agent_bin_path, \"start\"]\n\n def daemonize_flag(self):\n return [\"--daemonize\", \"true\"]\n\n def socket_path(self):\n socket_path = AgentContext.instance.config.value(\"socket_path\")\n return [\"--socket\", socket_path]\n\n def log_level(self):\n level = AgentContext.instance.config.value(\"log_level\")\n return [\"--log-level\", level]\n\n def log_file(self):\n path = AgentContext.instance.config.value(\"log_file\")\n if path is not None:\n return [\"--log-file\", path]\n else:\n return []\n\n def config_file(self):\n path = AgentContext.instance.config.value(\"config_file\")\n if path is not None:\n return [\"--config-file\", path]\n else:\n return []\n\n def verify(self):\n manifest = CoreAgentManifest(self.core_agent_dir + \"/manifest.json\")\n if not manifest.is_valid():\n logger.debug(\n \"Core Agent verification failed: CoreAgentManifest is not valid.\"\n )\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n bin_path = self.core_agent_dir + \"/\" + manifest.bin_name\n if sha256_digest(bin_path) == manifest.sha256:\n self.core_agent_bin_path = bin_path\n self.core_agent_bin_version = manifest.bin_version\n return True\n else:\n logger.debug(\"Core Agent verification failed: SHA mismatch.\")\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return 
False\n\n\nclass CoreAgentDownloader(object):\n def __init__(self, download_destination, core_agent_full_name):\n self.stale_download_secs = 120\n self.destination = download_destination\n self.core_agent_full_name = core_agent_full_name\n self.package_location = self.destination + \"/{}.tgz\".format(\n self.core_agent_full_name\n )\n self.download_lock_path = self.destination + \"/download.lock\"\n self.download_lock_fd = None\n\n def download(self):\n self.create_core_agent_dir()\n self.obtain_download_lock()\n if self.download_lock_fd is not None:\n try:\n self.download_package()\n self.untar()\n except OSError:\n logger.exception(\"Exception raised while downloading Core Agent\")\n finally:\n self.release_download_lock()\n\n def create_core_agent_dir(self):\n try:\n os.makedirs(\n self.destination, AgentContext.instance.config.core_agent_permissions()\n )\n except OSError:\n pass\n\n def obtain_download_lock(self):\n self.clean_stale_download_lock()\n try:\n self.download_lock_fd = os.open(\n self.download_lock_path,\n os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK,\n )\n except OSError as e:\n logger.debug(\n \"Could not obtain download lock on %s: %r\", self.download_lock_path, e\n )\n self.download_lock_fd = None\n\n def clean_stale_download_lock(self):\n try:\n delta = time.time() - os.stat(self.download_lock_path).st_ctime\n if delta > self.stale_download_secs:\n logger.debug(\"Clearing stale download lock file.\")\n os.unlink(self.download_lock_path)\n except OSError:\n pass\n\n def release_download_lock(self):\n if self.download_lock_fd is not None:\n os.unlink(self.download_lock_path)\n os.close(self.download_lock_fd)\n\n def download_package(self):\n logger.debug(\"Downloading: %s to %s\", self.full_url(), self.package_location)\n req = requests.get(self.full_url(), stream=True)\n with open(self.package_location, \"wb\") as f:\n for chunk in req.iter_content(1024 * 1000):\n f.write(chunk)\n\n def untar(self):\n t = tarfile.open(self.package_location, \"r\")\n t.extractall(self.destination)\n\n def full_url(self):\n return \"{root_url}/{core_agent_full_name}.tgz\".format(\n root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name\n )\n\n def root_url(self):\n return AgentContext.instance.config.value(\"download_url\")\n\n\nclass CoreAgentManifest(object):\n def __init__(self, path):\n self.manifest_path = path\n self.bin_name = None\n self.bin_version = None\n self.sha256 = None\n self.valid = False\n try:\n self.parse()\n except (ValueError, TypeError, OSError, IOError) as e:\n logger.debug(\"Error parsing Core Agent Manifest: %r\", e)\n\n def parse(self):\n logger.debug(\"Parsing Core Agent manifest path: %s\", self.manifest_path)\n with open(self.manifest_path) as manifest_file:\n self.raw = manifest_file.read()\n self.json = json.loads(self.raw)\n self.version = self.json[\"version\"]\n self.bin_version = self.json[\"core_agent_version\"]\n self.bin_name = self.json[\"core_agent_binary\"]\n self.sha256 = self.json[\"core_agent_binary_sha256\"]\n self.valid = True\n logger.debug(\"Core Agent manifest json: %s\", self.json)\n\n def is_valid(self):\n return self.valid\n\n\ndef sha256_digest(filename, block_size=65536):\n try:\n sha256 = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(block_size), b\"\"):\n sha256.update(block)\n return sha256.hexdigest()\n except OSError as e:\n logger.debug(\"Error on digest: %r\", e)\n return None\n", "path": "src/scout_apm/core/core_agent_manager.py"}]}
| 3,584 | 192 |
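The one-line cause behind the Scout patch above is file-descriptor inheritance: on Python 2.7 (the reporter's version), `subprocess.check_call` defaults to `close_fds=False`, so the daemonized core-agent launched from a uWSGI worker inherited the worker's listening sockets and kept ports 3031 and 8080 bound after the worker was restarted. Python 3.2+ already defaults to `close_fds=True`; passing it explicitly makes the behaviour identical on 2.7. A small self-contained sketch of the idea, with an arbitrary port and a trivial child command:

```python
# Why close_fds=True matters when the parent holds a listening socket.
# A long-lived child (such as a daemonized agent) would otherwise inherit
# the socket's descriptor and keep the port bound after the parent exits.
import socket
import subprocess
import sys

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(("127.0.0.1", 0))  # any free port
server.listen(5)

# close_fds=True closes every descriptor above 2 in the child, so the
# listening socket is not inherited.
subprocess.check_call(
    [sys.executable, "-c", "print('child ran')"],
    close_fds=True,
)

server.close()
```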
gh_patches_debug_11710
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-2317
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scrolling containers should be focusable by default
`ScrollHorizontal` and `ScrollVertical` should have `can_focus=True`.
Check this doesn't break any of the example apps.
</issue>
<code>
[start of src/textual/containers.py]
1 """
2 Container widgets for quick styling.
3
4 """
5
6
7 from .widget import Widget
8
9
10 class Container(Widget):
11 """Simple container widget, with vertical layout."""
12
13 DEFAULT_CSS = """
14 Container {
15 height: 1fr;
16 layout: vertical;
17 overflow: auto;
18 }
19 """
20
21
22 class Vertical(Widget):
23 """A container which arranges children vertically."""
24
25 DEFAULT_CSS = """
26 Vertical {
27 width: 1fr;
28 layout: vertical;
29 overflow: hidden hidden;
30 }
31 """
32
33
34 class VerticalScroll(Widget):
35 """A container which arranges children vertically, with an automatic vertical scrollbar."""
36
37 DEFAULT_CSS = """
38 VerticalScroll {
39 width: 1fr;
40 layout: vertical;
41 overflow-y: auto;
42 }
43 """
44
45
46 class Horizontal(Widget):
47 """A container which arranges children horizontally."""
48
49 DEFAULT_CSS = """
50 Horizontal {
51 height: 1fr;
52 layout: horizontal;
53 overflow: hidden hidden;
54 }
55 """
56
57
58 class HorizontalScroll(Widget):
59 """A container which arranges children horizontally, with an automatic horizontal scrollbar."""
60
61 DEFAULT_CSS = """
62 HorizontalScroll {
63 height: 1fr;
64 layout: horizontal;
65 overflow-x: auto;
66 }
67 """
68
69
70 class Center(Widget):
71 """A container which centers children horizontally."""
72
73 DEFAULT_CSS = """
74 Center {
75 align-horizontal: center;
76 height: auto;
77 width: 1fr;
78 }
79 """
80
81
82 class Middle(Widget):
83 """A container which aligns children vertically in the middle."""
84
85 DEFAULT_CSS = """
86 Middle {
87 align-vertical: middle;
88 width: auto;
89 height: 1fr;
90 }
91 """
92
93
94 class Grid(Widget):
95 """A container with grid alignment."""
96
97 DEFAULT_CSS = """
98 Grid {
99 height: 1fr;
100 layout: grid;
101 }
102 """
103
104
105 class Content(Widget, can_focus=True, can_focus_children=False):
106 """A container for content such as text."""
107
108 DEFAULT_CSS = """
109 VerticalScroll {
110 height: 1fr;
111 layout: vertical;
112 overflow-y: auto;
113 }
114 """
115
[end of src/textual/containers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/textual/containers.py b/src/textual/containers.py
--- a/src/textual/containers.py
+++ b/src/textual/containers.py
@@ -31,7 +31,7 @@
"""
-class VerticalScroll(Widget):
+class VerticalScroll(Widget, can_focus=True):
"""A container which arranges children vertically, with an automatic vertical scrollbar."""
DEFAULT_CSS = """
@@ -55,7 +55,7 @@
"""
-class HorizontalScroll(Widget):
+class HorizontalScroll(Widget, can_focus=True):
"""A container which arranges children horizontally, with an automatic horizontal scrollbar."""
DEFAULT_CSS = """
|
{"golden_diff": "diff --git a/src/textual/containers.py b/src/textual/containers.py\n--- a/src/textual/containers.py\n+++ b/src/textual/containers.py\n@@ -31,7 +31,7 @@\n \"\"\"\n \n \n-class VerticalScroll(Widget):\n+class VerticalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children vertically, with an automatic vertical scrollbar.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n@@ -55,7 +55,7 @@\n \"\"\"\n \n \n-class HorizontalScroll(Widget):\n+class HorizontalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children horizontally, with an automatic horizontal scrollbar.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n", "issue": "Scrolling containers should be focusable by default\n`ScrollHorizontal` and `ScrollVertical` should have `can_focus=True`.\n\nCheck this doesn't break any of the example apps.\n", "before_files": [{"content": "\"\"\"\nContainer widgets for quick styling.\n\n\"\"\"\n\n\nfrom .widget import Widget\n\n\nclass Container(Widget):\n \"\"\"Simple container widget, with vertical layout.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Container {\n height: 1fr;\n layout: vertical;\n overflow: auto;\n }\n \"\"\"\n\n\nclass Vertical(Widget):\n \"\"\"A container which arranges children vertically.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Vertical {\n width: 1fr;\n layout: vertical;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass VerticalScroll(Widget):\n \"\"\"A container which arranges children vertically, with an automatic vertical scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n width: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n\n\nclass Horizontal(Widget):\n \"\"\"A container which arranges children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Horizontal {\n height: 1fr;\n layout: horizontal;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass HorizontalScroll(Widget):\n \"\"\"A container which arranges children horizontally, with an automatic horizontal scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HorizontalScroll {\n height: 1fr;\n layout: horizontal;\n overflow-x: auto;\n }\n \"\"\"\n\n\nclass Center(Widget):\n \"\"\"A container which centers children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Center {\n align-horizontal: center;\n height: auto;\n width: 1fr;\n }\n \"\"\"\n\n\nclass Middle(Widget):\n \"\"\"A container which aligns children vertically in the middle.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Middle {\n align-vertical: middle;\n width: auto;\n height: 1fr;\n }\n \"\"\"\n\n\nclass Grid(Widget):\n \"\"\"A container with grid alignment.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Grid {\n height: 1fr;\n layout: grid;\n }\n \"\"\"\n\n\nclass Content(Widget, can_focus=True, can_focus_children=False):\n \"\"\"A container for content such as text.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n height: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n", "path": "src/textual/containers.py"}]}
| 1,279 | 144 |
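Two details are worth noting about the Textual patch above. First, the issue text names `ScrollHorizontal` and `ScrollVertical`, but the classes that actually exist in `containers.py`, and that the diff changes, are `HorizontalScroll` and `VerticalScroll`. Second, `can_focus=True` is passed as a class keyword (the same way `Content` is declared further down the file) rather than assigned in the class body; Python routes extra class keywords like this to `__init_subclass__` or the metaclass at class-creation time. A generic sketch of that mechanism, written from scratch rather than taken from Textual's real `Widget`:

```python
# Minimal re-implementation of the class-keyword pattern used for can_focus.
class Widget:
    can_focus = False

    def __init_subclass__(cls, can_focus=None, **kwargs):
        super().__init_subclass__(**kwargs)
        if can_focus is not None:
            cls.can_focus = can_focus


class VerticalScroll(Widget, can_focus=True):
    """Scrollable container that can now receive focus."""


print(VerticalScroll.can_focus)  # True
print(Widget.can_focus)          # False
```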
gh_patches_debug_5083
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-17744
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gcd
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/math.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
5
6
7 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
8 @to_ivy_arrays_and_back
9 def sin(x, name=None):
10 return ivy.sin(x)
11
12
13 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
14 @to_ivy_arrays_and_back
15 def cos(x, name=None):
16 return ivy.cos(x)
17
18
19 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
20 @to_ivy_arrays_and_back
21 def acos(x, name=None):
22 return ivy.acos(x)
23
24
25 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
26 @to_ivy_arrays_and_back
27 def cosh(x, name=None):
28 return ivy.cosh(x)
29
30
31 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
32 @to_ivy_arrays_and_back
33 def tanh(x, name=None):
34 return ivy.tanh(x)
35
36
37 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
38 @to_ivy_arrays_and_back
39 def acosh(x, name=None):
40 return ivy.acosh(x)
41
42
43 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
44 @to_ivy_arrays_and_back
45 def asin(x, name=None):
46 return ivy.asin(x)
47
48
49 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
50 @to_ivy_arrays_and_back
51 def log(x, name=None):
52 return ivy.log(x)
53
54
55 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
56 @to_ivy_arrays_and_back
57 def divide(x, y, name=None):
58 return ivy.divide(x, y)
59
60
61 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
62 @to_ivy_arrays_and_back
63 def abs(x, name=None):
64 return ivy.abs(x)
65
66
67 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
68 @to_ivy_arrays_and_back
69 def multiply(x, y, name=None):
70 return ivy.multiply(x, y)
71
72
73 @with_unsupported_dtypes(
74 {"2.5.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle"
75 )
76 @to_ivy_arrays_and_back
77 def add(x, y, name=None):
78 return ivy.add(x, y)
79
80
81 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
82 @to_ivy_arrays_and_back
83 def subtract(x, y, name=None):
84 return ivy.subtract(x, y)
85
86
87 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
88 @to_ivy_arrays_and_back
89 def sqrt(x, name=None):
90 return ivy.sqrt(x)
91
92
93 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
94 @to_ivy_arrays_and_back
95 def atanh(x, name=None):
96 return ivy.atanh(x)
97
98
99 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
100 @to_ivy_arrays_and_back
101 def atan(x, name=None):
102 return ivy.atan(x)
103
104
105 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
106 @to_ivy_arrays_and_back
107 def round(x, name=None):
108 return ivy.round(x)
109
110
111 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
112 @to_ivy_arrays_and_back
113 def ceil(x, name=None):
114 return ivy.ceil(x)
115
116
117 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
118 @to_ivy_arrays_and_back
119 def sinh(x, name=None):
120 return ivy.sinh(x)
121
122
123 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
124 @to_ivy_arrays_and_back
125 def pow(x, y, name=None):
126 return ivy.pow(x, y)
127
128
129 @with_unsupported_dtypes({"2.4.2 and below": ("int16", "float16")}, "paddle")
130 @to_ivy_arrays_and_back
131 def conj(x, name=None):
132 return ivy.conj(x)
133
134
135 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
136 @to_ivy_arrays_and_back
137 def floor(x, name=None):
138 return ivy.floor(x)
139
140
141 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
142 @to_ivy_arrays_and_back
143 def remainder(x, y, name=None):
144 return ivy.remainder(x, y)
145
146
147 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
148 @to_ivy_arrays_and_back
149 def log2(x, name=None):
150 return ivy.log2(x)
151
152
153 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
154 @to_ivy_arrays_and_back
155 def log1p(x, name=None):
156 return ivy.log1p(x)
157
158
159 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
160 @to_ivy_arrays_and_back
161 def rad2deg(x, name=None):
162 return ivy.rad2deg(x)
163
164
165 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
166 @to_ivy_arrays_and_back
167 def deg2rad(x, name=None):
168 return ivy.deg2rad(x)
169
170
171 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
172 @to_ivy_arrays_and_back
173 def gcd(x, y, name=None):
174 return ivy.gcd(x, y)
175
176
177 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
178 @to_ivy_arrays_and_back
179 def tan(x, name=None):
180 return ivy.tan(x)
181
182
183 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
184 @to_ivy_arrays_and_back
185 def atan2(x, y, name=None):
186 return ivy.atan2(x, y)
187
188
189 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
190 @to_ivy_arrays_and_back
191 def square(x, name=None):
192 return ivy.square(x)
193
194
195 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
196 @to_ivy_arrays_and_back
197 def sign(x, name=None):
198 return ivy.sign(x)
199
200
201 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
202 @to_ivy_arrays_and_back
203 def neg(x, name=None):
204 return ivy.negative(x)
205
206
207 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
208 @to_ivy_arrays_and_back
209 def exp(x, name=None):
210 return ivy.exp(x)
211
212
213 @with_supported_dtypes(
214 {
215 "2.4.2 and below": (
216 "float32",
217 "float64",
218 "int32",
219 "int64",
220 "complex64",
221 "complex128",
222 )
223 },
224 "paddle",
225 )
226 @to_ivy_arrays_and_back
227 def cumprod(x, dim=None, dtype=None, name=None):
228 return ivy.cumprod(x, axis=dim, dtype=dtype)
229
230
231 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
232 @to_ivy_arrays_and_back
233 def reciprocal(x, name=None):
234 return ivy.reciprocal(x)
235
236
237 @with_unsupported_dtypes({"2.5.0 and below": "bfloat16"}, "paddle")
238 @to_ivy_arrays_and_back
239 def fmin(x, y, name=None):
240 return ivy.fmin(x, y)
241
[end of ivy/functional/frontends/paddle/tensor/math.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py
--- a/ivy/functional/frontends/paddle/tensor/math.py
+++ b/ivy/functional/frontends/paddle/tensor/math.py
@@ -234,7 +234,14 @@
return ivy.reciprocal(x)
+@with_supported_dtypes({"2.5.0 and below": ("int32", "int64")}, "paddle")
+@to_ivy_arrays_and_back
+def gcd(x, y, name=None):
+ return ivy.gcd(x, y)
+
+
@with_unsupported_dtypes({"2.5.0 and below": "bfloat16"}, "paddle")
@to_ivy_arrays_and_back
def fmin(x, y, name=None):
return ivy.fmin(x, y)
+
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -234,7 +234,14 @@\n return ivy.reciprocal(x)\n \n \n+@with_supported_dtypes({\"2.5.0 and below\": (\"int32\", \"int64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def gcd(x, y, name=None):\n+ return ivy.gcd(x, y)\n+\n+ \n @with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n @to_ivy_arrays_and_back\n def fmin(x, y, name=None):\n return ivy.fmin(x, y)\n+\n", "issue": "gcd\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return 
ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": 
\"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]}
| 3,362 | 202 |
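The accepted Ivy patch above registers `gcd` with `with_supported_dtypes` limited to `int32` and `int64`, a whitelist, rather than the `with_unsupported_dtypes` float16/bfloat16 blacklist used by the draft `gcd` that already sits earlier in the file, presumably because gcd is only meaningful for integer tensors. For reference, here is the registered function in isolation, mirroring the imports already present at the top of `math.py`; this is just the patch restated as a self-contained snippet, not new API:

```python
# Paddle frontend wrapper for gcd: restrict dtypes to integers, convert the
# frontend tensors to ivy arrays, and delegate to the backend-agnostic ivy.gcd.
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back


@with_supported_dtypes({"2.5.0 and below": ("int32", "int64")}, "paddle")
@to_ivy_arrays_and_back
def gcd(x, y, name=None):
    return ivy.gcd(x, y)
```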
gh_patches_debug_14494
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-3163
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update code for when Engine metric qubits are formatted differently
There is currently code in calibration.py that looks at qubit ids that start with `q` and removes this `q` before getting the grid qubit. If the API no longer returns `q`-prefixed ids, this code should be removed; otherwise, if we are going to continue to support this, we should add it to `grid_qubit_from_proto`.
</issue>
<code>
[start of cirq/google/engine/calibration.py]
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Calibration wrapper for calibrations returned from the Quantum Engine."""
15
16 from collections import abc, defaultdict
17 import datetime
18
19 from typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING
20
21 from cirq import devices, vis
22 from cirq.google.api import v2
23
24 if TYPE_CHECKING:
25 import cirq
26
27
28 class Calibration(abc.Mapping):
29 """A convenience wrapper for calibrations that acts like a dictionary.
30
31 Calibrations act as dictionaries whose keys are the names of the metric,
32 and whose values are the metric values. The metric values themselves are
33 represented as a dictionary. These metric value dictionaries have
34 keys that are tuples of `cirq.GridQubit`s and values that are lists of the
35 metric values for those qubits. If a metric acts globally and is attached
36 to no specified number of qubits, the map will be from the empty tuple
37 to the metrics values.
38
39 Calibrations act just like a python dictionary. For example you can get
40 a list of all of the metric names using
41
42 `calibration.keys()`
43
44 and query a single value by looking up the name by index:
45
46 `calibration['t1']`
47
48 Attributes:
49 timestamp: The time that this calibration was run, in milliseconds since
50 the epoch.
51 """
52
53 def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:
54 self.timestamp = calibration.timestamp_ms
55 self._metric_dict = self._compute_metric_dict(calibration.metrics)
56
57 def _compute_metric_dict(
58 self, metrics: v2.metrics_pb2.MetricsSnapshot
59 ) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]:
60 results: Dict[str, Dict[Tuple[devices.
61 GridQubit, ...], Any]] = defaultdict(dict)
62 for metric in metrics:
63 name = metric.name
64 # Flatten the values to a list, removing keys containing type names
65 # (e.g. proto version of each value is {<type>: value}).
66 flat_values = [
67 getattr(v, v.WhichOneof('val')) for v in metric.values
68 ]
69 if metric.targets:
70 targets = [
71 t[1:] if t.startswith('q') else t for t in metric.targets
72 ]
73 # TODO: Remove when calibrations don't prepend this.
74 # Github issue: https://github.com/quantumlib/Cirq/issues/2963
75 qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)
76 results[name][qubits] = flat_values
77 else:
78 assert len(results[name]) == 0, (
79 'Only one metric of a given name can have no targets. '
80 'Found multiple for key {}'.format(name))
81 results[name][()] = flat_values
82 return results
83
84 def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]:
85 """Supports getting calibrations by index.
86
87 Calibration may be accessed by key:
88
89 `calibration['t1']`.
90
91 This returns a map from tuples of `cirq.GridQubit`s to a list of the
92 values of the metric. If there are no targets, the only key will only
93 be an empty tuple.
94 """
95 if not isinstance(key, str):
96 raise TypeError(
97 'Calibration metrics only have string keys. Key was {}'.format(
98 key))
99 if key not in self._metric_dict:
100 raise KeyError('Metric named {} not in calibration'.format(key))
101 return self._metric_dict[key]
102
103 def __iter__(self) -> Iterator:
104 return iter(self._metric_dict)
105
106 def __len__(self) -> int:
107 return len(self._metric_dict)
108
109 def __str__(self) -> str:
110
111 return 'Calibration(keys={})'.format(list(sorted(self.keys())))
112
113 def timestamp_str(self,
114 tz: Optional[datetime.tzinfo] = None,
115 timespec: str = 'auto') -> str:
116 """Return a string for the calibration timestamp.
117
118 Args:
119 tz: The timezone for the string. If None, the method uses the
120 platform's local date and time.
121 timespec: See datetime.isoformat for valid values.
122
123 Returns:
124 The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff.
125 """
126 dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz)
127 dt += datetime.timedelta(microseconds=self.timestamp % 1000000)
128 return dt.isoformat(sep=' ', timespec=timespec)
129
130 def heatmap(self, key: str) -> vis.Heatmap:
131 """Return a heatmap for metrics that target single qubits.
132
133 Args:
134 key: The metric key to return a heatmap for.
135
136 Returns:
137 A `cirq.Heatmap` for the metric.
138
139 Raises:
140 AssertionError if the heatmap is not for single qubits or the metric
141 values are not single floats.
142 """
143 metrics = self[key]
144 assert all(len(k) == 1 for k in metrics.keys()), (
145 'Heatmaps are only supported if all the targets in a metric'
146 ' are single qubits.')
147 assert all(len(k) == 1 for k in metrics.values()), (
148 'Heatmaps are only supported if all the values in a metric'
149 ' are single metric values.')
150 value_map = {qubit: value for (qubit,), (value,) in metrics.items()}
151 return vis.Heatmap(value_map)
152
[end of cirq/google/engine/calibration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cirq/google/engine/calibration.py b/cirq/google/engine/calibration.py
--- a/cirq/google/engine/calibration.py
+++ b/cirq/google/engine/calibration.py
@@ -67,12 +67,8 @@
getattr(v, v.WhichOneof('val')) for v in metric.values
]
if metric.targets:
- targets = [
- t[1:] if t.startswith('q') else t for t in metric.targets
- ]
- # TODO: Remove when calibrations don't prepend this.
- # Github issue: https://github.com/quantumlib/Cirq/issues/2963
- qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)
+ qubits = tuple(
+ v2.grid_qubit_from_proto_id(t) for t in metric.targets)
results[name][qubits] = flat_values
else:
assert len(results[name]) == 0, (
|
{"golden_diff": "diff --git a/cirq/google/engine/calibration.py b/cirq/google/engine/calibration.py\n--- a/cirq/google/engine/calibration.py\n+++ b/cirq/google/engine/calibration.py\n@@ -67,12 +67,8 @@\n getattr(v, v.WhichOneof('val')) for v in metric.values\n ]\n if metric.targets:\n- targets = [\n- t[1:] if t.startswith('q') else t for t in metric.targets\n- ]\n- # TODO: Remove when calibrations don't prepend this.\n- # Github issue: https://github.com/quantumlib/Cirq/issues/2963\n- qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)\n+ qubits = tuple(\n+ v2.grid_qubit_from_proto_id(t) for t in metric.targets)\n results[name][qubits] = flat_values\n else:\n assert len(results[name]) == 0, (\n", "issue": "Update code for when Engine metric qubits are formatted differently\nThere is currently code in calibration.py that looks at qubit ids that start with `q` and removes this `q` before getting the grid qubit. If the API no longer returns `q` prefixed code this should be removed, otherwise if we are going to continue to support this we should add it to `grid_qubit_from_proto`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Calibration wrapper for calibrations returned from the Quantum Engine.\"\"\"\n\nfrom collections import abc, defaultdict\nimport datetime\n\nfrom typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING\n\nfrom cirq import devices, vis\nfrom cirq.google.api import v2\n\nif TYPE_CHECKING:\n import cirq\n\n\nclass Calibration(abc.Mapping):\n \"\"\"A convenience wrapper for calibrations that acts like a dictionary.\n\n Calibrations act as dictionaries whose keys are the names of the metric,\n and whose values are the metric values. The metric values themselves are\n represented as a dictionary. These metric value dictionaries have\n keys that are tuples of `cirq.GridQubit`s and values that are lists of the\n metric values for those qubits. If a metric acts globally and is attached\n to no specified number of qubits, the map will be from the empty tuple\n to the metrics values.\n\n Calibrations act just like a python dictionary. For example you can get\n a list of all of the metric names using\n\n `calibration.keys()`\n\n and query a single value by looking up the name by index:\n\n `calibration['t1']`\n\n Attributes:\n timestamp: The time that this calibration was run, in milliseconds since\n the epoch.\n \"\"\"\n\n def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:\n self.timestamp = calibration.timestamp_ms\n self._metric_dict = self._compute_metric_dict(calibration.metrics)\n\n def _compute_metric_dict(\n self, metrics: v2.metrics_pb2.MetricsSnapshot\n ) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]:\n results: Dict[str, Dict[Tuple[devices.\n GridQubit, ...], Any]] = defaultdict(dict)\n for metric in metrics:\n name = metric.name\n # Flatten the values to a list, removing keys containing type names\n # (e.g. 
proto version of each value is {<type>: value}).\n flat_values = [\n getattr(v, v.WhichOneof('val')) for v in metric.values\n ]\n if metric.targets:\n targets = [\n t[1:] if t.startswith('q') else t for t in metric.targets\n ]\n # TODO: Remove when calibrations don't prepend this.\n # Github issue: https://github.com/quantumlib/Cirq/issues/2963\n qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)\n results[name][qubits] = flat_values\n else:\n assert len(results[name]) == 0, (\n 'Only one metric of a given name can have no targets. '\n 'Found multiple for key {}'.format(name))\n results[name][()] = flat_values\n return results\n\n def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]:\n \"\"\"Supports getting calibrations by index.\n\n Calibration may be accessed by key:\n\n `calibration['t1']`.\n\n This returns a map from tuples of `cirq.GridQubit`s to a list of the\n values of the metric. If there are no targets, the only key will only\n be an empty tuple.\n \"\"\"\n if not isinstance(key, str):\n raise TypeError(\n 'Calibration metrics only have string keys. Key was {}'.format(\n key))\n if key not in self._metric_dict:\n raise KeyError('Metric named {} not in calibration'.format(key))\n return self._metric_dict[key]\n\n def __iter__(self) -> Iterator:\n return iter(self._metric_dict)\n\n def __len__(self) -> int:\n return len(self._metric_dict)\n\n def __str__(self) -> str:\n\n return 'Calibration(keys={})'.format(list(sorted(self.keys())))\n\n def timestamp_str(self,\n tz: Optional[datetime.tzinfo] = None,\n timespec: str = 'auto') -> str:\n \"\"\"Return a string for the calibration timestamp.\n\n Args:\n tz: The timezone for the string. If None, the method uses the\n platform's local date and time.\n timespec: See datetime.isoformat for valid values.\n\n Returns:\n The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff.\n \"\"\"\n dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz)\n dt += datetime.timedelta(microseconds=self.timestamp % 1000000)\n return dt.isoformat(sep=' ', timespec=timespec)\n\n def heatmap(self, key: str) -> vis.Heatmap:\n \"\"\"Return a heatmap for metrics that target single qubits.\n\n Args:\n key: The metric key to return a heatmap for.\n\n Returns:\n A `cirq.Heatmap` for the metric.\n\n Raises:\n AssertionError if the heatmap is not for single qubits or the metric\n values are not single floats.\n \"\"\"\n metrics = self[key]\n assert all(len(k) == 1 for k in metrics.keys()), (\n 'Heatmaps are only supported if all the targets in a metric'\n ' are single qubits.')\n assert all(len(k) == 1 for k in metrics.values()), (\n 'Heatmaps are only supported if all the values in a metric'\n ' are single metric values.')\n value_map = {qubit: value for (qubit,), (value,) in metrics.items()}\n return vis.Heatmap(value_map)\n", "path": "cirq/google/engine/calibration.py"}]}
| 2,323 | 215 |
gh_patches_debug_14127
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-core-4998
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ini_file module doesn't match existing key when it is indented
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
`ini_file` module
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.1.1.0
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
N/A
##### SUMMARY
When a key in an INI file exists but is prefixed with whitespace, the `ini_file` module adds a new entry rather than updating the existing entry.
##### STEPS TO REPRODUCE
Playbook:
```
- hosts: localhost
gather_facts: no
tasks:
- ini_file:
dest: ./foo.ini
section: main
option: foo
value: baz
```
INI file:
```
[main]
foo = bar
```
##### EXPECTED RESULTS
```
[main]
foo = baz
```
##### ACTUAL RESULTS
```
[main]
foo = bar
foo = baz
```
</issue>
<code>
[start of files/ini_file.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
5 # (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
6 #
7 # This file is part of Ansible
8 #
9 # Ansible is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # Ansible is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
21 #
22
23 DOCUMENTATION = '''
24 ---
25 module: ini_file
26 short_description: Tweak settings in INI files
27 extends_documentation_fragment: files
28 description:
29 - Manage (add, remove, change) individual settings in an INI-style file without having
30 to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
31 sections if they don't exist.
32 - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
33 version_added: "0.9"
34 options:
35 dest:
36 description:
37 - Path to the INI-style file; this file is created if required
38 required: true
39 default: null
40 section:
41 description:
42 - Section name in INI file. This is added if C(state=present) automatically when
43 a single value is being set.
44 required: true
45 default: null
46 option:
47 description:
48 - if set (required for changing a I(value)), this is the name of the option.
49 - May be omitted if adding/removing a whole I(section).
50 required: false
51 default: null
52 value:
53 description:
54 - the string value to be associated with an I(option). May be omitted when removing an I(option).
55 required: false
56 default: null
57 backup:
58 description:
59 - Create a backup file including the timestamp information so you can get
60 the original file back if you somehow clobbered it incorrectly.
61 required: false
62 default: "no"
63 choices: [ "yes", "no" ]
64 others:
65 description:
66 - all arguments accepted by the M(file) module also work here
67 required: false
68 state:
69 description:
70 - If set to C(absent) the option or section will be removed if present instead of created.
71 required: false
72 default: "present"
73 choices: [ "present", "absent" ]
74 no_extra_spaces:
75 description:
76 - do not insert spaces before and after '=' symbol
77 required: false
78 default: false
79 version_added: "2.1"
80 notes:
81 - While it is possible to add an I(option) without specifying a I(value), this makes
82 no sense.
83 - A section named C(default) cannot be added by the module, but if it exists, individual
84 options within the section can be updated. (This is a limitation of Python's I(ConfigParser).)
85 Either use M(template) to create a base INI file with a C([default]) section, or use
86 M(lineinfile) to add the missing line.
87 requirements: [ ConfigParser ]
88 author:
89 - "Jan-Piet Mens (@jpmens)"
90 - "Ales Nosek (@noseka1)"
91 '''
92
93 EXAMPLES = '''
94 # Ensure "fav=lemonade is in section "[drinks]" in specified file
95 - ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes
96
97 - ini_file: dest=/etc/anotherconf
98 section=drinks
99 option=temperature
100 value=cold
101 backup=yes
102 '''
103
104 import os
105 import re
106
107 # ==============================================================
108 # match_opt
109
110 def match_opt(option, line):
111 option = re.escape(option)
112 return re.match('%s( |\t)*=' % option, line) \
113 or re.match('# *%s( |\t)*=' % option, line) \
114 or re.match('; *%s( |\t)*=' % option, line)
115
116 # ==============================================================
117 # match_active_opt
118
119 def match_active_opt(option, line):
120 option = re.escape(option)
121 return re.match('%s( |\t)*=' % option, line)
122
123 # ==============================================================
124 # do_ini
125
126 def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):
127
128
129 if not os.path.exists(filename):
130 try:
131 open(filename,'w').close()
132 except:
133 module.fail_json(msg="Destination file %s not writable" % filename)
134 ini_file = open(filename, 'r')
135 try:
136 ini_lines = ini_file.readlines()
137 # append a fake section line to simplify the logic
138 ini_lines.append('[')
139 finally:
140 ini_file.close()
141
142 within_section = not section
143 section_start = 0
144 changed = False
145 if no_extra_spaces:
146 assignment_format = '%s=%s\n'
147 else:
148 assignment_format = '%s = %s\n'
149
150 for index, line in enumerate(ini_lines):
151 if line.startswith('[%s]' % section):
152 within_section = True
153 section_start = index
154 elif line.startswith('['):
155 if within_section:
156 if state == 'present':
157 # insert missing option line at the end of the section
158 for i in range(index, 0, -1):
159 # search backwards for previous non-blank or non-comment line
160 if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
161 ini_lines.insert(i, assignment_format % (option, value))
162 changed = True
163 break
164 elif state == 'absent' and not option:
165 # remove the entire section
166 del ini_lines[section_start:index]
167 changed = True
168 break
169 else:
170 if within_section and option:
171 if state == 'present':
172 # change the existing option line
173 if match_opt(option, line):
174 newline = assignment_format % (option, value)
175 changed = ini_lines[index] != newline
176 ini_lines[index] = newline
177 if changed:
178 # remove all possible option occurences from the rest of the section
179 index = index + 1
180 while index < len(ini_lines):
181 line = ini_lines[index]
182 if line.startswith('['):
183 break
184 if match_active_opt(option, line):
185 del ini_lines[index]
186 else:
187 index = index + 1
188 break
189 else:
190 # comment out the existing option line
191 if match_active_opt(option, line):
192 ini_lines[index] = '#%s' % ini_lines[index]
193 changed = True
194 break
195
196 # remove the fake section line
197 del ini_lines[-1:]
198
199 if not within_section and option and state == 'present':
200 ini_lines.append('[%s]\n' % section)
201 ini_lines.append(assignment_format % (option, value))
202 changed = True
203
204
205 backup_file = None
206 if changed and not module.check_mode:
207 if backup:
208 backup_file = module.backup_local(filename)
209 ini_file = open(filename, 'w')
210 try:
211 ini_file.writelines(ini_lines)
212 finally:
213 ini_file.close()
214
215 return (changed, backup_file)
216
217 # ==============================================================
218 # main
219
220 def main():
221
222 module = AnsibleModule(
223 argument_spec = dict(
224 dest = dict(required=True),
225 section = dict(required=True),
226 option = dict(required=False),
227 value = dict(required=False),
228 backup = dict(default='no', type='bool'),
229 state = dict(default='present', choices=['present', 'absent']),
230 no_extra_spaces = dict(required=False, default=False, type='bool')
231 ),
232 add_file_common_args = True,
233 supports_check_mode = True
234 )
235
236 dest = os.path.expanduser(module.params['dest'])
237 section = module.params['section']
238 option = module.params['option']
239 value = module.params['value']
240 state = module.params['state']
241 backup = module.params['backup']
242 no_extra_spaces = module.params['no_extra_spaces']
243
244 (changed,backup_file) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)
245
246 file_args = module.load_file_common_arguments(module.params)
247 changed = module.set_fs_attributes_if_different(file_args, changed)
248
249 results = { 'changed': changed, 'msg': "OK", 'dest': dest }
250 if backup_file is not None:
251 results['backup_file'] = backup_file
252
253 # Mission complete
254 module.exit_json(**results)
255
256 # import module snippets
257 from ansible.module_utils.basic import *
258 if __name__ == '__main__':
259 main()
260
[end of files/ini_file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/files/ini_file.py b/files/ini_file.py
--- a/files/ini_file.py
+++ b/files/ini_file.py
@@ -109,7 +109,7 @@
def match_opt(option, line):
option = re.escape(option)
- return re.match('%s( |\t)*=' % option, line) \
+ return re.match(' *%s( |\t)*=' % option, line) \
or re.match('# *%s( |\t)*=' % option, line) \
or re.match('; *%s( |\t)*=' % option, line)
@@ -118,7 +118,7 @@
def match_active_opt(option, line):
option = re.escape(option)
- return re.match('%s( |\t)*=' % option, line)
+ return re.match(' *%s( |\t)*=' % option, line)
# ==============================================================
# do_ini
|
{"golden_diff": "diff --git a/files/ini_file.py b/files/ini_file.py\n--- a/files/ini_file.py\n+++ b/files/ini_file.py\n@@ -109,7 +109,7 @@\n \n def match_opt(option, line):\n option = re.escape(option)\n- return re.match('%s( |\\t)*=' % option, line) \\\n+ return re.match(' *%s( |\\t)*=' % option, line) \\\n or re.match('# *%s( |\\t)*=' % option, line) \\\n or re.match('; *%s( |\\t)*=' % option, line)\n \n@@ -118,7 +118,7 @@\n \n def match_active_opt(option, line):\n option = re.escape(option)\n- return re.match('%s( |\\t)*=' % option, line)\n+ return re.match(' *%s( |\\t)*=' % option, line)\n \n # ==============================================================\n # do_ini\n", "issue": "ini_file module doesn't match existing key when it is indented\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\n`ini_file` module\n##### ANSIBLE VERSION\n\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\n\n```\nansible 2.1.1.0\n```\n##### CONFIGURATION\n\nN/A\n##### OS / ENVIRONMENT\n\nN/A\n##### SUMMARY\n\nWhen a key in an INI file exists but is prefixed with whitespace, the `ini_file` module adds a new entry rather than updating the existing entry.\n##### STEPS TO REPRODUCE\n\nPlaybook:\n\n```\n- hosts: localhost\n gather_facts: no\n tasks:\n - ini_file:\n dest: ./foo.ini\n section: main\n option: foo\n value: baz\n```\n\nINI file:\n\n```\n[main]\n foo = bar\n```\n##### EXPECTED RESULTS\n\n```\n[main]\n foo = baz\n```\n##### ACTUAL RESULTS\n\n```\n[main]\n foo = bar\nfoo = baz\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>\n# (c) 2015, Ales Nosek <anosek.nosek () gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: ini_file\nshort_description: Tweak settings in INI files\nextends_documentation_fragment: files\ndescription:\n - Manage (add, remove, change) individual settings in an INI-style file without having\n to manage the file as a whole with, say, M(template) or M(assemble). Adds missing\n sections if they don't exist.\n - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.\nversion_added: \"0.9\"\noptions:\n dest:\n description:\n - Path to the INI-style file; this file is created if required\n required: true\n default: null\n section:\n description:\n - Section name in INI file. This is added if C(state=present) automatically when\n a single value is being set.\n required: true\n default: null\n option:\n description:\n - if set (required for changing a I(value)), this is the name of the option.\n - May be omitted if adding/removing a whole I(section).\n required: false\n default: null\n value:\n description:\n - the string value to be associated with an I(option). 
May be omitted when removing an I(option).\n required: false\n default: null\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n others:\n description:\n - all arguments accepted by the M(file) module also work here\n required: false\n state:\n description:\n - If set to C(absent) the option or section will be removed if present instead of created.\n required: false\n default: \"present\"\n choices: [ \"present\", \"absent\" ]\n no_extra_spaces:\n description:\n - do not insert spaces before and after '=' symbol\n required: false\n default: false\n version_added: \"2.1\"\nnotes:\n - While it is possible to add an I(option) without specifying a I(value), this makes\n no sense.\n - A section named C(default) cannot be added by the module, but if it exists, individual\n options within the section can be updated. (This is a limitation of Python's I(ConfigParser).)\n Either use M(template) to create a base INI file with a C([default]) section, or use\n M(lineinfile) to add the missing line.\nrequirements: [ ConfigParser ]\nauthor:\n - \"Jan-Piet Mens (@jpmens)\"\n - \"Ales Nosek (@noseka1)\"\n'''\n\nEXAMPLES = '''\n# Ensure \"fav=lemonade is in section \"[drinks]\" in specified file\n- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes\n\n- ini_file: dest=/etc/anotherconf\n section=drinks\n option=temperature\n value=cold\n backup=yes\n'''\n\nimport os\nimport re\n\n# ==============================================================\n# match_opt\n\ndef match_opt(option, line):\n option = re.escape(option)\n return re.match('%s( |\\t)*=' % option, line) \\\n or re.match('# *%s( |\\t)*=' % option, line) \\\n or re.match('; *%s( |\\t)*=' % option, line)\n\n# ==============================================================\n# match_active_opt\n\ndef match_active_opt(option, line):\n option = re.escape(option)\n return re.match('%s( |\\t)*=' % option, line)\n\n# ==============================================================\n# do_ini\n\ndef do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):\n\n\n if not os.path.exists(filename):\n try:\n open(filename,'w').close()\n except:\n module.fail_json(msg=\"Destination file %s not writable\" % filename)\n ini_file = open(filename, 'r')\n try:\n ini_lines = ini_file.readlines()\n # append a fake section line to simplify the logic\n ini_lines.append('[')\n finally:\n ini_file.close()\n\n within_section = not section\n section_start = 0\n changed = False\n if no_extra_spaces:\n assignment_format = '%s=%s\\n'\n else:\n assignment_format = '%s = %s\\n'\n\n for index, line in enumerate(ini_lines):\n if line.startswith('[%s]' % section):\n within_section = True\n section_start = index\n elif line.startswith('['):\n if within_section:\n if state == 'present':\n # insert missing option line at the end of the section\n for i in range(index, 0, -1):\n # search backwards for previous non-blank or non-comment line\n if not re.match(r'^[ \\t]*([#;].*)?$', ini_lines[i - 1]):\n ini_lines.insert(i, assignment_format % (option, value))\n changed = True\n break\n elif state == 'absent' and not option:\n # remove the entire section\n del ini_lines[section_start:index]\n changed = True\n break\n else:\n if within_section and option:\n if state == 'present':\n # change the existing option line\n if 
match_opt(option, line):\n newline = assignment_format % (option, value)\n changed = ini_lines[index] != newline\n ini_lines[index] = newline\n if changed:\n # remove all possible option occurences from the rest of the section\n index = index + 1\n while index < len(ini_lines):\n line = ini_lines[index]\n if line.startswith('['):\n break\n if match_active_opt(option, line):\n del ini_lines[index]\n else:\n index = index + 1\n break\n else:\n # comment out the existing option line\n if match_active_opt(option, line):\n ini_lines[index] = '#%s' % ini_lines[index]\n changed = True\n break\n\n # remove the fake section line\n del ini_lines[-1:]\n\n if not within_section and option and state == 'present':\n ini_lines.append('[%s]\\n' % section)\n ini_lines.append(assignment_format % (option, value))\n changed = True\n\n\n backup_file = None\n if changed and not module.check_mode:\n if backup:\n backup_file = module.backup_local(filename)\n ini_file = open(filename, 'w')\n try:\n ini_file.writelines(ini_lines)\n finally:\n ini_file.close()\n\n return (changed, backup_file)\n\n# ==============================================================\n# main\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = dict(\n dest = dict(required=True),\n section = dict(required=True),\n option = dict(required=False),\n value = dict(required=False),\n backup = dict(default='no', type='bool'),\n state = dict(default='present', choices=['present', 'absent']),\n no_extra_spaces = dict(required=False, default=False, type='bool')\n ),\n add_file_common_args = True,\n supports_check_mode = True\n )\n\n dest = os.path.expanduser(module.params['dest'])\n section = module.params['section']\n option = module.params['option']\n value = module.params['value']\n state = module.params['state']\n backup = module.params['backup']\n no_extra_spaces = module.params['no_extra_spaces']\n\n (changed,backup_file) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)\n\n file_args = module.load_file_common_arguments(module.params)\n changed = module.set_fs_attributes_if_different(file_args, changed)\n\n results = { 'changed': changed, 'msg': \"OK\", 'dest': dest }\n if backup_file is not None:\n results['backup_file'] = backup_file\n\n # Mission complete\n module.exit_json(**results)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "files/ini_file.py"}]}
| 3,447 | 211 |
gh_patches_debug_18922
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1664
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OpenID payload cache uses the wrong cache key
The `cache_key` becomes the same for every access token due to this bug: https://github.com/Kinto/kinto/blob/e1e0d6be0024418fd100210901f9d2ca06344fe1/kinto/plugins/openid/__init__.py#L51
No matter what the `hmac_tokens` variable is the `cache_key` always becomes `'openid:verify:%s'`.
</issue>
<code>
[start of setup.py]
1 import codecs
2 import os
3 from setuptools import setup, find_packages
4
5 # abspath here because setup.py may be __main__, in which case
6 # __file__ is not guaranteed to be absolute
7 here = os.path.abspath(os.path.dirname(__file__))
8
9
10 def read_file(filename):
11 """Open a related file and return its content."""
12 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
13 content = f.read()
14 return content
15
16
17 README = read_file('README.rst')
18 CHANGELOG = read_file('CHANGELOG.rst')
19 CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
20
21 REQUIREMENTS = [
22 'bcrypt',
23 'colander >= 1.4.0',
24 'cornice',
25 'cornice_swagger >= 0.5.1',
26 'dockerflow',
27 'jsonschema',
28 'jsonpatch',
29 'logging-color-formatter >= 1.0.1', # Message interpolations.
30 'python-dateutil',
31 'pyramid >= 1.9.1, < 2.0',
32 'pyramid_multiauth >= 0.8', # User on policy selected event.
33 'transaction',
34 # pyramid_tm changed the location of their tween in 2.x and one of
35 # our tests fails on 2.0.
36 'pyramid_tm >= 2.1',
37 'requests',
38 'waitress',
39 'ujson >= 1.35',
40 ]
41
42 POSTGRESQL_REQUIRES = [
43 'SQLAlchemy',
44 'psycopg2 > 2.5',
45 'zope.sqlalchemy',
46 ]
47
48 REDIS_REQUIRES = [
49 'kinto_redis'
50 ]
51
52 MEMCACHED_REQUIRES = [
53 'python-memcached'
54 ]
55
56 SETUP_REQUIRES = [
57 'pytest-runner'
58 ]
59
60 TEST_REQUIREMENTS = [
61 'bravado_core',
62 'pytest',
63 'WebTest'
64 ]
65
66 DEPENDENCY_LINKS = []
67
68 MONITORING_REQUIRES = [
69 'raven',
70 'statsd',
71 'newrelic',
72 'werkzeug',
73 ]
74
75 ENTRY_POINTS = {
76 'paste.app_factory': [
77 'main = kinto:main',
78 ],
79 'console_scripts': [
80 'kinto = kinto.__main__:main'
81 ],
82 }
83
84
85 setup(name='kinto',
86 version='9.1.1',
87 description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
88 long_description='{}\n\n{}\n\n{}'.format(README, CHANGELOG, CONTRIBUTORS),
89 license='Apache License (2.0)',
90 classifiers=[
91 'Programming Language :: Python',
92 'Programming Language :: Python :: 3',
93 'Programming Language :: Python :: 3.5',
94 'Programming Language :: Python :: 3.6',
95 'Programming Language :: Python :: Implementation :: CPython',
96 'Topic :: Internet :: WWW/HTTP',
97 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
98 'License :: OSI Approved :: Apache Software License'
99 ],
100 keywords='web sync json storage services',
101 author='Mozilla Services',
102 author_email='storage-team@mozilla.com',
103 url='https://github.com/Kinto/kinto',
104 packages=find_packages(),
105 package_data={'': ['*.rst', '*.py', '*.yaml']},
106 include_package_data=True,
107 zip_safe=False,
108 setup_requires=SETUP_REQUIRES,
109 tests_require=TEST_REQUIREMENTS,
110 install_requires=REQUIREMENTS,
111 extras_require={
112 'redis': REDIS_REQUIRES,
113 'memcached': MEMCACHED_REQUIRES,
114 'postgresql': POSTGRESQL_REQUIRES,
115 'monitoring': MONITORING_REQUIRES,
116 },
117 test_suite='tests',
118 dependency_links=DEPENDENCY_LINKS,
119 entry_points=ENTRY_POINTS)
120
[end of setup.py]
[start of kinto/plugins/openid/__init__.py]
1 import re
2
3 import requests
4 from pyramid import authentication as base_auth
5 from pyramid.interfaces import IAuthenticationPolicy
6 from zope.interface import implementer
7
8 from kinto.core import logger
9 from kinto.core import utils as core_utils
10 from kinto.core.openapi import OpenAPI
11
12 from .utils import fetch_openid_config
13
14
15 @implementer(IAuthenticationPolicy)
16 class OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):
17 def __init__(self, issuer, client_id, realm='Realm', **kwargs):
18 self.realm = realm
19 self.issuer = issuer
20 self.client_id = client_id
21 self.client_secret = kwargs.get('client_secret', '')
22 self.header_type = kwargs.get('header_type', 'Bearer')
23 self.userid_field = kwargs.get('userid_field', 'sub')
24 self.verification_ttl = int(kwargs.get('verification_ttl_seconds', 86400))
25
26 # Fetch OpenID config (at instantiation, ie. startup)
27 self.oid_config = fetch_openid_config(issuer)
28
29 self._jwt_keys = None
30
31 def unauthenticated_userid(self, request):
32 """Return the userid or ``None`` if token could not be verified.
33 """
34 settings = request.registry.settings
35 hmac_secret = settings['userid_hmac_secret']
36
37 authorization = request.headers.get('Authorization', '')
38 try:
39 authmeth, access_token = authorization.split(' ', 1)
40 except ValueError:
41 return None
42
43 if authmeth.lower() != self.header_type.lower():
44 return None
45
46 # XXX JWT Access token
47 # https://auth0.com/docs/tokens/access-token#access-token-format
48
49 # Check cache if these tokens were already verified.
50 hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)
51 cache_key = 'openid:verify:%s'.format(hmac_tokens)
52 payload = request.registry.cache.get(cache_key)
53 if payload is None:
54 # This can take some time.
55 payload = self._verify_token(access_token)
56 if payload is None:
57 return None
58 # Save for next time / refresh ttl.
59 request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)
60 # Extract meaningful field from userinfo (eg. email or sub)
61 return payload.get(self.userid_field)
62
63 def forget(self, request):
64 """A no-op. Credentials are sent on every request.
65 Return WWW-Authenticate Realm header for Bearer token.
66 """
67 return [('WWW-Authenticate', '%s realm="%s"' % (self.header_type, self.realm))]
68
69 def _verify_token(self, access_token):
70 uri = self.oid_config['userinfo_endpoint']
71 # Opaque access token string. Fetch user info from profile.
72 try:
73 resp = requests.get(uri, headers={'Authorization': 'Bearer ' + access_token})
74 resp.raise_for_status()
75 userprofile = resp.json()
76 return userprofile
77
78 except (requests.exceptions.HTTPError, ValueError, KeyError) as e:
79 logger.debug('Unable to fetch user profile from %s (%s)' % (uri, e))
80 return None
81
82
83 def includeme(config):
84 # Activate end-points.
85 config.scan('kinto.plugins.openid.views')
86
87 settings = config.get_settings()
88
89 openid_policies = []
90 for k, v in settings.items():
91 m = re.match('multiauth\.policy\.(.*)\.use', k)
92 if m:
93 if v.endswith('OpenIDConnectPolicy'):
94 openid_policies.append(m.group(1))
95
96 if len(openid_policies) == 0:
97 # Do not add the capability if no policy is configured.
98 return
99
100 providers_infos = []
101 for name in openid_policies:
102 issuer = settings['multiauth.policy.%s.issuer' % name]
103 openid_config = fetch_openid_config(issuer)
104
105 client_id = settings['multiauth.policy.%s.client_id' % name]
106 header_type = settings.get('multiauth.policy.%s.header_type', 'Bearer')
107
108 providers_infos.append({
109 'name': name,
110 'issuer': openid_config['issuer'],
111 'auth_path': '/openid/%s/login' % name,
112 'client_id': client_id,
113 'header_type': header_type,
114 'userinfo_endpoint': openid_config['userinfo_endpoint'],
115 })
116
117 OpenAPI.expose_authentication_method(name, {
118 'type': 'oauth2',
119 'authorizationUrl': openid_config['authorization_endpoint'],
120 })
121
122 config.add_api_capability(
123 'openid',
124 description='OpenID connect support.',
125 url='http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html',
126 providers=providers_infos)
127
[end of kinto/plugins/openid/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/plugins/openid/__init__.py b/kinto/plugins/openid/__init__.py
--- a/kinto/plugins/openid/__init__.py
+++ b/kinto/plugins/openid/__init__.py
@@ -48,7 +48,7 @@
# Check cache if these tokens were already verified.
hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)
- cache_key = 'openid:verify:%s'.format(hmac_tokens)
+ cache_key = 'openid:verify:{}'.format(hmac_tokens)
payload = request.registry.cache.get(cache_key)
if payload is None:
# This can take some time.
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -83,7 +83,7 @@
setup(name='kinto',
- version='9.1.1',
+ version='9.1.2',
description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
long_description='{}\n\n{}\n\n{}'.format(README, CHANGELOG, CONTRIBUTORS),
license='Apache License (2.0)',
|
{"golden_diff": "diff --git a/kinto/plugins/openid/__init__.py b/kinto/plugins/openid/__init__.py\n--- a/kinto/plugins/openid/__init__.py\n+++ b/kinto/plugins/openid/__init__.py\n@@ -48,7 +48,7 @@\n \n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n- cache_key = 'openid:verify:%s'.format(hmac_tokens)\n+ cache_key = 'openid:verify:{}'.format(hmac_tokens)\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -83,7 +83,7 @@\n \n \n setup(name='kinto',\n- version='9.1.1',\n+ version='9.1.2',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description='{}\\n\\n{}\\n\\n{}'.format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n", "issue": "OpenID payload cache uses the wrong cache key\nThe `cache_key` becomes the same for every access token due to this bug: https://github.com/Kinto/kinto/blob/e1e0d6be0024418fd100210901f9d2ca06344fe1/kinto/plugins/openid/__init__.py#L51\r\nNo matter what the `hmac_tokens` variable is the `cache_key` always becomes `'openid:verify:%s'`.\r\n\r\n\n", "before_files": [{"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\n# abspath here because setup.py may be __main__, in which case\n# __file__ is not guaranteed to be absolute\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'bcrypt',\n 'colander >= 1.4.0',\n 'cornice',\n 'cornice_swagger >= 0.5.1',\n 'dockerflow',\n 'jsonschema',\n 'jsonpatch',\n 'logging-color-formatter >= 1.0.1', # Message interpolations.\n 'python-dateutil',\n 'pyramid >= 1.9.1, < 2.0',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'transaction',\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n 'pyramid_tm >= 2.1',\n 'requests',\n 'waitress',\n 'ujson >= 1.35',\n]\n\nPOSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2 > 2.5',\n 'zope.sqlalchemy',\n]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nMEMCACHED_REQUIRES = [\n 'python-memcached'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'bravado_core',\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = []\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='9.1.1',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description='{}\\n\\n{}\\n\\n{}'.format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'License :: OSI Approved :: Apache Software License'\n ],\n keywords='web sync json storage services',\n author='Mozilla 
Services',\n author_email='storage-team@mozilla.com',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py', '*.yaml']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'memcached': MEMCACHED_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n },\n test_suite='tests',\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}, {"content": "import re\n\nimport requests\nfrom pyramid import authentication as base_auth\nfrom pyramid.interfaces import IAuthenticationPolicy\nfrom zope.interface import implementer\n\nfrom kinto.core import logger\nfrom kinto.core import utils as core_utils\nfrom kinto.core.openapi import OpenAPI\n\nfrom .utils import fetch_openid_config\n\n\n@implementer(IAuthenticationPolicy)\nclass OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):\n def __init__(self, issuer, client_id, realm='Realm', **kwargs):\n self.realm = realm\n self.issuer = issuer\n self.client_id = client_id\n self.client_secret = kwargs.get('client_secret', '')\n self.header_type = kwargs.get('header_type', 'Bearer')\n self.userid_field = kwargs.get('userid_field', 'sub')\n self.verification_ttl = int(kwargs.get('verification_ttl_seconds', 86400))\n\n # Fetch OpenID config (at instantiation, ie. startup)\n self.oid_config = fetch_openid_config(issuer)\n\n self._jwt_keys = None\n\n def unauthenticated_userid(self, request):\n \"\"\"Return the userid or ``None`` if token could not be verified.\n \"\"\"\n settings = request.registry.settings\n hmac_secret = settings['userid_hmac_secret']\n\n authorization = request.headers.get('Authorization', '')\n try:\n authmeth, access_token = authorization.split(' ', 1)\n except ValueError:\n return None\n\n if authmeth.lower() != self.header_type.lower():\n return None\n\n # XXX JWT Access token\n # https://auth0.com/docs/tokens/access-token#access-token-format\n\n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n cache_key = 'openid:verify:%s'.format(hmac_tokens)\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\n payload = self._verify_token(access_token)\n if payload is None:\n return None\n # Save for next time / refresh ttl.\n request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)\n # Extract meaningful field from userinfo (eg. email or sub)\n return payload.get(self.userid_field)\n\n def forget(self, request):\n \"\"\"A no-op. Credentials are sent on every request.\n Return WWW-Authenticate Realm header for Bearer token.\n \"\"\"\n return [('WWW-Authenticate', '%s realm=\"%s\"' % (self.header_type, self.realm))]\n\n def _verify_token(self, access_token):\n uri = self.oid_config['userinfo_endpoint']\n # Opaque access token string. 
Fetch user info from profile.\n try:\n resp = requests.get(uri, headers={'Authorization': 'Bearer ' + access_token})\n resp.raise_for_status()\n userprofile = resp.json()\n return userprofile\n\n except (requests.exceptions.HTTPError, ValueError, KeyError) as e:\n logger.debug('Unable to fetch user profile from %s (%s)' % (uri, e))\n return None\n\n\ndef includeme(config):\n # Activate end-points.\n config.scan('kinto.plugins.openid.views')\n\n settings = config.get_settings()\n\n openid_policies = []\n for k, v in settings.items():\n m = re.match('multiauth\\.policy\\.(.*)\\.use', k)\n if m:\n if v.endswith('OpenIDConnectPolicy'):\n openid_policies.append(m.group(1))\n\n if len(openid_policies) == 0:\n # Do not add the capability if no policy is configured.\n return\n\n providers_infos = []\n for name in openid_policies:\n issuer = settings['multiauth.policy.%s.issuer' % name]\n openid_config = fetch_openid_config(issuer)\n\n client_id = settings['multiauth.policy.%s.client_id' % name]\n header_type = settings.get('multiauth.policy.%s.header_type', 'Bearer')\n\n providers_infos.append({\n 'name': name,\n 'issuer': openid_config['issuer'],\n 'auth_path': '/openid/%s/login' % name,\n 'client_id': client_id,\n 'header_type': header_type,\n 'userinfo_endpoint': openid_config['userinfo_endpoint'],\n })\n\n OpenAPI.expose_authentication_method(name, {\n 'type': 'oauth2',\n 'authorizationUrl': openid_config['authorization_endpoint'],\n })\n\n config.add_api_capability(\n 'openid',\n description='OpenID connect support.',\n url='http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html',\n providers=providers_infos)\n", "path": "kinto/plugins/openid/__init__.py"}]}
| 3,018 | 253 |
gh_patches_debug_35158
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-527
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Restricting/redirecting output of commands to bot-commands for regular users
**Edit:** I've decided to make this a general issue, since the `!free` command turns out to be a bit disruptive as well. In python-discussions, there have just been four consecutive calls to `!free` and it looks very disruptive (see below).
My idea would be to create an easy way to redirect the output of certain commands, free and help, to bot commands for regular users, with the bot sending a temporary message to the user in channel redirecting them there. The bot could then delete that message (and, maybe, the invoking message as well) after a certain amount of time has passed.
**Four times !free in python-discussion:**

**Old message for context:**
Now the `!help` command works outside of bot-commands again, we may want to think about restricting the use/output to bot-commands for regular users. The help-embeds are quite sizeable and, therefore, quite disruptive in on-topic channels.
I want to propose to redirect the **output** of help (the help-embed) to bot-commands for non-staff members, prepended by a mention of the user invoking the command. In addition, we could display a small, non-disruptive embed in the original context channel redirecting the user to bot commands. I think this should also be the case for help-calls on specific commands, as the embeds for those can be quite sizeable as well (e.g., `!help site`).
Personally, I'd like this restriction to only apply to regular users, so staff can pull up help on a command quickly in channel and use help in the staff channels.
</issue>
<code>
[start of bot/cogs/free.py]
1 import logging
2 from datetime import datetime
3 from operator import itemgetter
4
5 from discord import Colour, Embed, Member, utils
6 from discord.ext.commands import Bot, Cog, Context, command
7
8 from bot.constants import Categories, Channels, Free, STAFF_ROLES
9 from bot.decorators import redirect_output
10
11 log = logging.getLogger(__name__)
12
13 TIMEOUT = Free.activity_timeout
14 RATE = Free.cooldown_rate
15 PER = Free.cooldown_per
16
17
18 class Free(Cog):
19 """Tries to figure out which help channels are free."""
20
21 PYTHON_HELP_ID = Categories.python_help
22
23 @command(name="free", aliases=('f',))
24 @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)
25 async def free(self, ctx: Context, user: Member = None, seek: int = 2) -> None:
26 """
27 Lists free help channels by likeliness of availability.
28
29 seek is used only when this command is invoked in a help channel.
30 You cannot override seek without mentioning a user first.
31
32 When seek is 2, we are avoiding considering the last active message
33 in a channel to be the one that invoked this command.
34
35 When seek is 3 or more, a user has been mentioned on the assumption
36 that they asked if the channel is free or they asked their question
37 in an active channel, and we want the message before that happened.
38 """
39 free_channels = []
40 python_help = utils.get(ctx.guild.categories, id=self.PYTHON_HELP_ID)
41
42 if user is not None and seek == 2:
43 seek = 3
44 elif not 0 < seek < 10:
45 seek = 3
46
47 # Iterate through all the help channels
48 # to check latest activity
49 for channel in python_help.channels:
50 # Seek further back in the help channel
51 # the command was invoked in
52 if channel.id == ctx.channel.id:
53 messages = await channel.history(limit=seek).flatten()
54 msg = messages[seek - 1]
55 # Otherwise get last message
56 else:
57 msg = await channel.history(limit=1).next() # noqa (False positive)
58
59 inactive = (datetime.utcnow() - msg.created_at).seconds
60 if inactive > TIMEOUT:
61 free_channels.append((inactive, channel))
62
63 embed = Embed()
64 embed.colour = Colour.blurple()
65 embed.title = "**Looking for a free help channel?**"
66
67 if user is not None:
68 embed.description = f"**Hey {user.mention}!**\n\n"
69 else:
70 embed.description = ""
71
72 # Display all potentially inactive channels
73 # in descending order of inactivity
74 if free_channels:
75 embed.description += "**The following channel{0} look{1} free:**\n\n**".format(
76 's' if len(free_channels) > 1 else '',
77 '' if len(free_channels) > 1 else 's'
78 )
79
80 # Sort channels in descending order by seconds
81 # Get position in list, inactivity, and channel object
82 # For each channel, add to embed.description
83 sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)
84 for i, (inactive, channel) in enumerate(sorted_channels, 1):
85 minutes, seconds = divmod(inactive, 60)
86 if minutes > 59:
87 hours, minutes = divmod(minutes, 60)
88 embed.description += f"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\n\n"
89 else:
90 embed.description += f"{i}. {channel.mention} inactive for {minutes}m{seconds}s\n\n"
91
92 embed.description += ("**\nThese channels aren't guaranteed to be free, "
93 "so use your best judgement and check for yourself.")
94 else:
95 embed.description = ("**Doesn't look like any channels are available right now. "
96 "You're welcome to check for yourself to be sure. "
97 "If all channels are truly busy, please be patient "
98 "as one will likely be available soon.**")
99
100 await ctx.send(embed=embed)
101
102
103 def setup(bot: Bot) -> None:
104 """Free cog load."""
105 bot.add_cog(Free())
106 log.info("Cog loaded: Free")
107
[end of bot/cogs/free.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/cogs/free.py b/bot/cogs/free.py
--- a/bot/cogs/free.py
+++ b/bot/cogs/free.py
@@ -72,30 +72,27 @@
# Display all potentially inactive channels
# in descending order of inactivity
if free_channels:
- embed.description += "**The following channel{0} look{1} free:**\n\n**".format(
- 's' if len(free_channels) > 1 else '',
- '' if len(free_channels) > 1 else 's'
- )
-
# Sort channels in descending order by seconds
# Get position in list, inactivity, and channel object
# For each channel, add to embed.description
sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)
- for i, (inactive, channel) in enumerate(sorted_channels, 1):
+
+ for (inactive, channel) in sorted_channels[:3]:
minutes, seconds = divmod(inactive, 60)
if minutes > 59:
hours, minutes = divmod(minutes, 60)
- embed.description += f"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\n\n"
+ embed.description += f"{channel.mention} **{hours}h {minutes}m {seconds}s** inactive\n"
else:
- embed.description += f"{i}. {channel.mention} inactive for {minutes}m{seconds}s\n\n"
+ embed.description += f"{channel.mention} **{minutes}m {seconds}s** inactive\n"
- embed.description += ("**\nThese channels aren't guaranteed to be free, "
- "so use your best judgement and check for yourself.")
+ embed.set_footer(text="Please confirm these channels are free before posting")
else:
- embed.description = ("**Doesn't look like any channels are available right now. "
- "You're welcome to check for yourself to be sure. "
- "If all channels are truly busy, please be patient "
- "as one will likely be available soon.**")
+ embed.description = (
+ "Doesn't look like any channels are available right now. "
+ "You're welcome to check for yourself to be sure. "
+ "If all channels are truly busy, please be patient "
+ "as one will likely be available soon."
+ )
await ctx.send(embed=embed)
|
{"golden_diff": "diff --git a/bot/cogs/free.py b/bot/cogs/free.py\n--- a/bot/cogs/free.py\n+++ b/bot/cogs/free.py\n@@ -72,30 +72,27 @@\n # Display all potentially inactive channels\n # in descending order of inactivity\n if free_channels:\n- embed.description += \"**The following channel{0} look{1} free:**\\n\\n**\".format(\n- 's' if len(free_channels) > 1 else '',\n- '' if len(free_channels) > 1 else 's'\n- )\n-\n # Sort channels in descending order by seconds\n # Get position in list, inactivity, and channel object\n # For each channel, add to embed.description\n sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)\n- for i, (inactive, channel) in enumerate(sorted_channels, 1):\n+\n+ for (inactive, channel) in sorted_channels[:3]:\n minutes, seconds = divmod(inactive, 60)\n if minutes > 59:\n hours, minutes = divmod(minutes, 60)\n- embed.description += f\"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\\n\\n\"\n+ embed.description += f\"{channel.mention} **{hours}h {minutes}m {seconds}s** inactive\\n\"\n else:\n- embed.description += f\"{i}. {channel.mention} inactive for {minutes}m{seconds}s\\n\\n\"\n+ embed.description += f\"{channel.mention} **{minutes}m {seconds}s** inactive\\n\"\n \n- embed.description += (\"**\\nThese channels aren't guaranteed to be free, \"\n- \"so use your best judgement and check for yourself.\")\n+ embed.set_footer(text=\"Please confirm these channels are free before posting\")\n else:\n- embed.description = (\"**Doesn't look like any channels are available right now. \"\n- \"You're welcome to check for yourself to be sure. \"\n- \"If all channels are truly busy, please be patient \"\n- \"as one will likely be available soon.**\")\n+ embed.description = (\n+ \"Doesn't look like any channels are available right now. \"\n+ \"You're welcome to check for yourself to be sure. \"\n+ \"If all channels are truly busy, please be patient \"\n+ \"as one will likely be available soon.\"\n+ )\n \n await ctx.send(embed=embed)\n", "issue": "Restricting/redirecting output of commands to bot-commands for regular users\n**Edit:** I've decided to make this a general issue, since the `!free` command turns out to be a bit disruptive as well. In python-discussions, there have just been four consecutive calls to `!free` and it looks very disruptive (see below).\r\n\r\nMy idea would be to create an easy way to redirect the output of certain commands, free and help, to bot commands for regular users, with the bot sending a temporary message to the user in channel redirecting them there. The bot could then delete that message (and, maybe, the invoking message as well) after a certain amount of time has passed.\r\n\r\n**Four times !free in python-discussion:**\r\n\r\n\r\n**Old message for context:**\r\nNow the `!help` command works outside of bot-commands again, we may want to think about restricting the use/output to bot-commands for regular users. The help-embeds are quite sizeable and, therefore, quite disruptive in on-topic channels.\r\n\r\nI want to propose to redirect the **output** of help (the help-embed) to bot-commands for non-staff members, prepended by a mention of the user invoking the command. In addition, we could display a small, non-disruptive embed in the original context channel redirecting the user to bot commands. 
I think this should also be the case for help-calls on specific commands, as the embeds for those can be quite sizeable as well (e.g., `!help site`).\r\n\r\nPersonally, I'd like this restriction to only apply to regular users, so staff can pull up help on a command quickly in channel and use help in the staff channels.\n", "before_files": [{"content": "import logging\nfrom datetime import datetime\nfrom operator import itemgetter\n\nfrom discord import Colour, Embed, Member, utils\nfrom discord.ext.commands import Bot, Cog, Context, command\n\nfrom bot.constants import Categories, Channels, Free, STAFF_ROLES\nfrom bot.decorators import redirect_output\n\nlog = logging.getLogger(__name__)\n\nTIMEOUT = Free.activity_timeout\nRATE = Free.cooldown_rate\nPER = Free.cooldown_per\n\n\nclass Free(Cog):\n \"\"\"Tries to figure out which help channels are free.\"\"\"\n\n PYTHON_HELP_ID = Categories.python_help\n\n @command(name=\"free\", aliases=('f',))\n @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)\n async def free(self, ctx: Context, user: Member = None, seek: int = 2) -> None:\n \"\"\"\n Lists free help channels by likeliness of availability.\n\n seek is used only when this command is invoked in a help channel.\n You cannot override seek without mentioning a user first.\n\n When seek is 2, we are avoiding considering the last active message\n in a channel to be the one that invoked this command.\n\n When seek is 3 or more, a user has been mentioned on the assumption\n that they asked if the channel is free or they asked their question\n in an active channel, and we want the message before that happened.\n \"\"\"\n free_channels = []\n python_help = utils.get(ctx.guild.categories, id=self.PYTHON_HELP_ID)\n\n if user is not None and seek == 2:\n seek = 3\n elif not 0 < seek < 10:\n seek = 3\n\n # Iterate through all the help channels\n # to check latest activity\n for channel in python_help.channels:\n # Seek further back in the help channel\n # the command was invoked in\n if channel.id == ctx.channel.id:\n messages = await channel.history(limit=seek).flatten()\n msg = messages[seek - 1]\n # Otherwise get last message\n else:\n msg = await channel.history(limit=1).next() # noqa (False positive)\n\n inactive = (datetime.utcnow() - msg.created_at).seconds\n if inactive > TIMEOUT:\n free_channels.append((inactive, channel))\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.title = \"**Looking for a free help channel?**\"\n\n if user is not None:\n embed.description = f\"**Hey {user.mention}!**\\n\\n\"\n else:\n embed.description = \"\"\n\n # Display all potentially inactive channels\n # in descending order of inactivity\n if free_channels:\n embed.description += \"**The following channel{0} look{1} free:**\\n\\n**\".format(\n 's' if len(free_channels) > 1 else '',\n '' if len(free_channels) > 1 else 's'\n )\n\n # Sort channels in descending order by seconds\n # Get position in list, inactivity, and channel object\n # For each channel, add to embed.description\n sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)\n for i, (inactive, channel) in enumerate(sorted_channels, 1):\n minutes, seconds = divmod(inactive, 60)\n if minutes > 59:\n hours, minutes = divmod(minutes, 60)\n embed.description += f\"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\\n\\n\"\n else:\n embed.description += f\"{i}. 
{channel.mention} inactive for {minutes}m{seconds}s\\n\\n\"\n\n embed.description += (\"**\\nThese channels aren't guaranteed to be free, \"\n \"so use your best judgement and check for yourself.\")\n else:\n embed.description = (\"**Doesn't look like any channels are available right now. \"\n \"You're welcome to check for yourself to be sure. \"\n \"If all channels are truly busy, please be patient \"\n \"as one will likely be available soon.**\")\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Free cog load.\"\"\"\n bot.add_cog(Free())\n log.info(\"Cog loaded: Free\")\n", "path": "bot/cogs/free.py"}]}
| 2,121 | 551 |
gh_patches_debug_2246
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-3408
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
st2kv does not resolve in Jinja cast expression
In the rule:
` {{ "2" | int }} - 2` Dah
`{{ trigger.count | int }} -> 2` OK
`{{ st2kv.system.count | int }} -> 0` Wrong.
</issue>
<code>
[start of st2common/st2common/services/keyvalues.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from st2common import log as logging
17
18 from st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE
19 from st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE
20 from st2common.constants.keyvalue import ALLOWED_SCOPES
21 from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR
22 from st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException
23 from st2common.models.system.keyvalue import UserKeyReference
24 from st2common.persistence.keyvalue import KeyValuePair
25
26 __all__ = [
27 'get_kvp_for_name',
28 'get_values_for_names',
29
30 'KeyValueLookup',
31 'UserKeyValueLookup'
32 ]
33
34 LOG = logging.getLogger(__name__)
35
36
37 def get_kvp_for_name(name):
38 try:
39 kvp_db = KeyValuePair.get_by_name(name)
40 except ValueError:
41 kvp_db = None
42
43 return kvp_db
44
45
46 def get_values_for_names(names, default_value=None):
47 """
48 Retrieve values for the provided key names (multi get).
49
50 If a KeyValuePair objects for a particular name doesn't exist, the dictionary will contain
51 default_value for that name.
52
53 :rtype: ``dict``
54 """
55 result = {}
56 kvp_dbs = KeyValuePair.get_by_names(names=names)
57
58 name_to_kvp_db_map = {}
59 for kvp_db in kvp_dbs:
60 name_to_kvp_db_map[kvp_db.name] = kvp_db.value
61
62 for name in names:
63 result[name] = name_to_kvp_db_map.get(name, default_value)
64
65 return result
66
67
68 class KeyValueLookup(object):
69
70 def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE):
71 if not scope:
72 scope = FULL_SYSTEM_SCOPE
73
74 if scope == SYSTEM_SCOPE:
75 scope = FULL_SYSTEM_SCOPE
76
77 self._prefix = prefix
78 self._key_prefix = key_prefix or ''
79 self._value_cache = cache or {}
80 self._scope = scope
81
82 def __str__(self):
83 return self._value_cache[self._key_prefix]
84
85 def __getitem__(self, key):
86 return self._get(key)
87
88 def __getattr__(self, name):
89 return self._get(name)
90
91 def _get(self, name):
92 # get the value for this key and save in value_cache
93 if self._key_prefix:
94 key = '%s.%s' % (self._key_prefix, name)
95 else:
96 key = name
97
98 if self._prefix:
99 kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])
100 else:
101 kvp_key = key
102
103 value = self._get_kv(kvp_key)
104 self._value_cache[key] = value
105 # return a KeyValueLookup as response since the lookup may not be complete e.g. if
106 # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,
107 # will expect to do a dictionary style lookup for key_base and key_value as subsequent
108 # calls. Saving the value in cache avoids extra DB calls.
109 return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache,
110 scope=self._scope)
111
112 def _get_kv(self, key):
113 scope = self._scope
114 LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key)
115 kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)
116 if kvp:
117 LOG.debug('Got value %s from datastore.', kvp.value)
118 return kvp.value if kvp else ''
119
120
121 class UserKeyValueLookup(object):
122
123 def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE):
124 if not scope:
125 scope = FULL_USER_SCOPE
126
127 if scope == USER_SCOPE:
128 scope = FULL_USER_SCOPE
129
130 self._prefix = prefix
131 self._key_prefix = key_prefix or ''
132 self._value_cache = cache or {}
133 self._user = user
134 self._scope = scope
135
136 def __str__(self):
137 return self._value_cache[self._key_prefix]
138
139 def __getitem__(self, key):
140 return self._get(key)
141
142 def __getattr__(self, name):
143 return self._get(name)
144
145 def _get(self, name):
146 # get the value for this key and save in value_cache
147 if self._key_prefix:
148 key = '%s.%s' % (self._key_prefix, name)
149 else:
150 key = UserKeyReference(name=name, user=self._user).ref
151
152 if self._prefix:
153 kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])
154 else:
155 kvp_key = key
156
157 value = self._get_kv(kvp_key)
158 self._value_cache[key] = value
159 # return a KeyValueLookup as response since the lookup may not be complete e.g. if
160 # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,
161 # will expect to do a dictionary style lookup for key_base and key_value as subsequent
162 # calls. Saving the value in cache avoids extra DB calls.
163 return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key,
164 cache=self._value_cache, scope=self._scope)
165
166 def _get_kv(self, key):
167 scope = self._scope
168 kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)
169 return kvp.value if kvp else ''
170
171
172 def get_key_reference(scope, name, user=None):
173 """
174 Given a key name and user this method returns a new name (string ref)
175 to address the key value pair in the context of that user.
176
177 :param user: User to whom key belongs.
178 :type name: ``str``
179
180 :param name: Original name of the key.
181 :type name: ``str``
182
183 :rtype: ``str``
184 """
185 if (scope == SYSTEM_SCOPE or scope == FULL_SYSTEM_SCOPE):
186 return name
187 elif (scope == USER_SCOPE or scope == FULL_USER_SCOPE):
188 if not user:
189 raise InvalidUserException('A valid user must be specified for user key ref.')
190 return UserKeyReference(name=name, user=user).ref
191 else:
192 raise InvalidScopeException('Scope "%s" is not valid. Allowed scopes are %s.' %
193 (scope, ALLOWED_SCOPES))
194
[end of st2common/st2common/services/keyvalues.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2common/st2common/services/keyvalues.py b/st2common/st2common/services/keyvalues.py
--- a/st2common/st2common/services/keyvalues.py
+++ b/st2common/st2common/services/keyvalues.py
@@ -82,6 +82,12 @@
def __str__(self):
return self._value_cache[self._key_prefix]
+ def __int__(self):
+ return int(float(self))
+
+ def __float__(self):
+ return float(str(self))
+
def __getitem__(self, key):
return self._get(key)
|
{"golden_diff": "diff --git a/st2common/st2common/services/keyvalues.py b/st2common/st2common/services/keyvalues.py\n--- a/st2common/st2common/services/keyvalues.py\n+++ b/st2common/st2common/services/keyvalues.py\n@@ -82,6 +82,12 @@\n def __str__(self):\n return self._value_cache[self._key_prefix]\n \n+ def __int__(self):\n+ return int(float(self))\n+\n+ def __float__(self):\n+ return float(str(self))\n+\n def __getitem__(self, key):\n return self._get(key)\n", "issue": "st2kv does not resolve in Jinja cast expression\nIn the rule: \r\n` {{ \"2\" | int }} - 2` Dah\r\n`{{ trigger.count | int }} -> 2` OK\r\n`{{ st2kv.system.count | int }} -> 0` Wrong.\r\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom st2common import log as logging\n\nfrom st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE\nfrom st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE\nfrom st2common.constants.keyvalue import ALLOWED_SCOPES\nfrom st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR\nfrom st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException\nfrom st2common.models.system.keyvalue import UserKeyReference\nfrom st2common.persistence.keyvalue import KeyValuePair\n\n__all__ = [\n 'get_kvp_for_name',\n 'get_values_for_names',\n\n 'KeyValueLookup',\n 'UserKeyValueLookup'\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_kvp_for_name(name):\n try:\n kvp_db = KeyValuePair.get_by_name(name)\n except ValueError:\n kvp_db = None\n\n return kvp_db\n\n\ndef get_values_for_names(names, default_value=None):\n \"\"\"\n Retrieve values for the provided key names (multi get).\n\n If a KeyValuePair objects for a particular name doesn't exist, the dictionary will contain\n default_value for that name.\n\n :rtype: ``dict``\n \"\"\"\n result = {}\n kvp_dbs = KeyValuePair.get_by_names(names=names)\n\n name_to_kvp_db_map = {}\n for kvp_db in kvp_dbs:\n name_to_kvp_db_map[kvp_db.name] = kvp_db.value\n\n for name in names:\n result[name] = name_to_kvp_db_map.get(name, default_value)\n\n return result\n\n\nclass KeyValueLookup(object):\n\n def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE):\n if not scope:\n scope = FULL_SYSTEM_SCOPE\n\n if scope == SYSTEM_SCOPE:\n scope = FULL_SYSTEM_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = name\n\n if 
self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. Saving the value in cache avoids extra DB calls.\n return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache,\n scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key)\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n if kvp:\n LOG.debug('Got value %s from datastore.', kvp.value)\n return kvp.value if kvp else ''\n\n\nclass UserKeyValueLookup(object):\n\n def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE):\n if not scope:\n scope = FULL_USER_SCOPE\n\n if scope == USER_SCOPE:\n scope = FULL_USER_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._user = user\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = UserKeyReference(name=name, user=self._user).ref\n\n if self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. Saving the value in cache avoids extra DB calls.\n return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key,\n cache=self._value_cache, scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n return kvp.value if kvp else ''\n\n\ndef get_key_reference(scope, name, user=None):\n \"\"\"\n Given a key name and user this method returns a new name (string ref)\n to address the key value pair in the context of that user.\n\n :param user: User to whom key belongs.\n :type name: ``str``\n\n :param name: Original name of the key.\n :type name: ``str``\n\n :rtype: ``str``\n \"\"\"\n if (scope == SYSTEM_SCOPE or scope == FULL_SYSTEM_SCOPE):\n return name\n elif (scope == USER_SCOPE or scope == FULL_USER_SCOPE):\n if not user:\n raise InvalidUserException('A valid user must be specified for user key ref.')\n return UserKeyReference(name=name, user=user).ref\n else:\n raise InvalidScopeException('Scope \"%s\" is not valid. Allowed scopes are %s.' %\n (scope, ALLOWED_SCOPES))\n", "path": "st2common/st2common/services/keyvalues.py"}]}
| 2,644 | 132 |
gh_patches_debug_7723
|
rasdani/github-patches
|
git_diff
|
numpy__numpy-5879
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_broadcast_shape is broken when 32 < nargs
xref https://github.com/numpy/numpy/pull/5371
``` python
>>> np.__version__
'1.10.0.dev0+30e3d41'
>>> from numpy.lib.stride_tricks import _broadcast_shape
>>> lst = [np.ones((5, 7, 11))] * 32 + [np.ones((2, 3))] * 32
>>> _broadcast_shape(*lst)
(2, 3)
>>> _broadcast_shape(*lst[::-1]) # process the list in reverse order
(5, 7, 11)
```
In [this line](https://github.com/numpy/numpy/blob/05b5335ecf25e59477956b4f85b9a8edbdf71bcc/numpy/lib/stride_tricks.py#L123) `b` is ultimately converted to an array of shape `1 x 1`; so it will broadcast with the rest of args regardless of their shape.
</issue>
<code>
[start of numpy/lib/stride_tricks.py]
1 """
2 Utilities that manipulate strides to achieve desirable effects.
3
4 An explanation of strides can be found in the "ndarray.rst" file in the
5 NumPy reference guide.
6
7 """
8 from __future__ import division, absolute_import, print_function
9
10 import numpy as np
11
12 __all__ = ['broadcast_to', 'broadcast_arrays']
13
14
15 class DummyArray(object):
16 """Dummy object that just exists to hang __array_interface__ dictionaries
17 and possibly keep alive a reference to a base array.
18 """
19
20 def __init__(self, interface, base=None):
21 self.__array_interface__ = interface
22 self.base = base
23
24
25 def _maybe_view_as_subclass(original_array, new_array):
26 if type(original_array) is not type(new_array):
27 # if input was an ndarray subclass and subclasses were OK,
28 # then view the result as that subclass.
29 new_array = new_array.view(type=type(original_array))
30 # Since we have done something akin to a view from original_array, we
31 # should let the subclass finalize (if it has it implemented, i.e., is
32 # not None).
33 if new_array.__array_finalize__:
34 new_array.__array_finalize__(original_array)
35 return new_array
36
37
38 def as_strided(x, shape=None, strides=None, subok=False):
39 """ Make an ndarray from the given array with the given shape and strides.
40 """
41 # first convert input to array, possibly keeping subclass
42 x = np.array(x, copy=False, subok=subok)
43 interface = dict(x.__array_interface__)
44 if shape is not None:
45 interface['shape'] = tuple(shape)
46 if strides is not None:
47 interface['strides'] = tuple(strides)
48 array = np.asarray(DummyArray(interface, base=x))
49
50 if array.dtype.fields is None and x.dtype.fields is not None:
51 # This should only happen if x.dtype is [('', 'Vx')]
52 array.dtype = x.dtype
53
54 return _maybe_view_as_subclass(x, array)
55
56
57 def _broadcast_to(array, shape, subok, readonly):
58 shape = tuple(shape) if np.iterable(shape) else (shape,)
59 array = np.array(array, copy=False, subok=subok)
60 if not shape and array.shape:
61 raise ValueError('cannot broadcast a non-scalar to a scalar array')
62 if any(size < 0 for size in shape):
63 raise ValueError('all elements of broadcast shape must be non-'
64 'negative')
65 broadcast = np.nditer(
66 (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
67 op_flags=['readonly'], itershape=shape, order='C').itviews[0]
68 result = _maybe_view_as_subclass(array, broadcast)
69 if not readonly and array.flags.writeable:
70 result.flags.writeable = True
71 return result
72
73
74 def broadcast_to(array, shape, subok=False):
75 """Broadcast an array to a new shape.
76
77 Parameters
78 ----------
79 array : array_like
80 The array to broadcast.
81 shape : tuple
82 The shape of the desired array.
83 subok : bool, optional
84 If True, then sub-classes will be passed-through, otherwise
85 the returned array will be forced to be a base-class array (default).
86
87 Returns
88 -------
89 broadcast : array
90 A readonly view on the original array with the given shape. It is
91 typically not contiguous. Furthermore, more than one element of a
92 broadcasted array may refer to a single memory location.
93
94 Raises
95 ------
96 ValueError
97 If the array is not compatible with the new shape according to NumPy's
98 broadcasting rules.
99
100 Notes
101 -----
102 .. versionadded:: 1.10.0
103
104 Examples
105 --------
106 >>> x = np.array([1, 2, 3])
107 >>> np.broadcast_to(x, (3, 3))
108 array([[1, 2, 3],
109 [1, 2, 3],
110 [1, 2, 3]])
111 """
112 return _broadcast_to(array, shape, subok=subok, readonly=True)
113
114
115 def _broadcast_shape(*args):
116 """Returns the shape of the ararys that would result from broadcasting the
117 supplied arrays against each other.
118 """
119 if not args:
120 raise ValueError('must provide at least one argument')
121 if len(args) == 1:
122 # a single argument does not work with np.broadcast
123 return np.asarray(args[0]).shape
124 # use the old-iterator because np.nditer does not handle size 0 arrays
125 # consistently
126 b = np.broadcast(*args[:32])
127 # unfortunately, it cannot handle 32 or more arguments directly
128 for pos in range(32, len(args), 31):
129 b = np.broadcast(b, *args[pos:(pos + 31)])
130 return b.shape
131
132
133 def broadcast_arrays(*args, **kwargs):
134 """
135 Broadcast any number of arrays against each other.
136
137 Parameters
138 ----------
139 `*args` : array_likes
140 The arrays to broadcast.
141
142 subok : bool, optional
143 If True, then sub-classes will be passed-through, otherwise
144 the returned arrays will be forced to be a base-class array (default).
145
146 Returns
147 -------
148 broadcasted : list of arrays
149 These arrays are views on the original arrays. They are typically
150 not contiguous. Furthermore, more than one element of a
151 broadcasted array may refer to a single memory location. If you
152 need to write to the arrays, make copies first.
153
154 Examples
155 --------
156 >>> x = np.array([[1,2,3]])
157 >>> y = np.array([[1],[2],[3]])
158 >>> np.broadcast_arrays(x, y)
159 [array([[1, 2, 3],
160 [1, 2, 3],
161 [1, 2, 3]]), array([[1, 1, 1],
162 [2, 2, 2],
163 [3, 3, 3]])]
164
165 Here is a useful idiom for getting contiguous copies instead of
166 non-contiguous views.
167
168 >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
169 [array([[1, 2, 3],
170 [1, 2, 3],
171 [1, 2, 3]]), array([[1, 1, 1],
172 [2, 2, 2],
173 [3, 3, 3]])]
174
175 """
176 # nditer is not used here to avoid the limit of 32 arrays.
177 # Otherwise, something like the following one-liner would suffice:
178 # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
179 # order='C').itviews
180
181 subok = kwargs.pop('subok', False)
182 if kwargs:
183 raise TypeError('broadcast_arrays() got an unexpected keyword '
184 'argument {}'.format(kwargs.pop()))
185 args = [np.array(_m, copy=False, subok=subok) for _m in args]
186
187 shape = _broadcast_shape(*args)
188
189 if all(array.shape == shape for array in args):
190 # Common case where nothing needs to be broadcasted.
191 return args
192
193 # TODO: consider making the results of broadcast_arrays readonly to match
194 # broadcast_to. This will require a deprecation cycle.
195 return [_broadcast_to(array, shape, subok=subok, readonly=False)
196 for array in args]
197
[end of numpy/lib/stride_tricks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -126,6 +126,10 @@
b = np.broadcast(*args[:32])
# unfortunately, it cannot handle 32 or more arguments directly
for pos in range(32, len(args), 31):
+ # ironically, np.broadcast does not properly handle np.broadcast
+ # objects (it treats them as scalars)
+ # use broadcasting to avoid allocating the full array
+ b = broadcast_to(0, b.shape)
b = np.broadcast(b, *args[pos:(pos + 31)])
return b.shape
|
{"golden_diff": "diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py\n--- a/numpy/lib/stride_tricks.py\n+++ b/numpy/lib/stride_tricks.py\n@@ -126,6 +126,10 @@\n b = np.broadcast(*args[:32])\n # unfortunately, it cannot handle 32 or more arguments directly\n for pos in range(32, len(args), 31):\n+ # ironically, np.broadcast does not properly handle np.broadcast\n+ # objects (it treats them as scalars)\n+ # use broadcasting to avoid allocating the full array\n+ b = broadcast_to(0, b.shape)\n b = np.broadcast(b, *args[pos:(pos + 31)])\n return b.shape\n", "issue": "_broadcast_shape is broken when 32 < nargs\nxref https://github.com/numpy/numpy/pull/5371\n\n``` python\n>>> np.__version__\n'1.10.0.dev0+30e3d41'\n>>> from numpy.lib.stride_tricks import _broadcast_shape\n>>> lst = [np.ones((5, 7, 11))] * 32 + [np.ones((2, 3))] * 32\n>>> _broadcast_shape(*lst)\n(2, 3)\n>>> _broadcast_shape(*lst[::-1]) # process the list in reverse order\n(5, 7, 11)\n```\n\nIn [this line](https://github.com/numpy/numpy/blob/05b5335ecf25e59477956b4f85b9a8edbdf71bcc/numpy/lib/stride_tricks.py#L123) `b` is ultimately converted to an array of shape `1 x 1`; so it will broadcast with the rest of args regardless of their shape.\n\n", "before_files": [{"content": "\"\"\"\nUtilities that manipulate strides to achieve desirable effects.\n\nAn explanation of strides can be found in the \"ndarray.rst\" file in the\nNumPy reference guide.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\n__all__ = ['broadcast_to', 'broadcast_arrays']\n\n\nclass DummyArray(object):\n \"\"\"Dummy object that just exists to hang __array_interface__ dictionaries\n and possibly keep alive a reference to a base array.\n \"\"\"\n\n def __init__(self, interface, base=None):\n self.__array_interface__ = interface\n self.base = base\n\n\ndef _maybe_view_as_subclass(original_array, new_array):\n if type(original_array) is not type(new_array):\n # if input was an ndarray subclass and subclasses were OK,\n # then view the result as that subclass.\n new_array = new_array.view(type=type(original_array))\n # Since we have done something akin to a view from original_array, we\n # should let the subclass finalize (if it has it implemented, i.e., is\n # not None).\n if new_array.__array_finalize__:\n new_array.__array_finalize__(original_array)\n return new_array\n\n\ndef as_strided(x, shape=None, strides=None, subok=False):\n \"\"\" Make an ndarray from the given array with the given shape and strides.\n \"\"\"\n # first convert input to array, possibly keeping subclass\n x = np.array(x, copy=False, subok=subok)\n interface = dict(x.__array_interface__)\n if shape is not None:\n interface['shape'] = tuple(shape)\n if strides is not None:\n interface['strides'] = tuple(strides)\n array = np.asarray(DummyArray(interface, base=x))\n\n if array.dtype.fields is None and x.dtype.fields is not None:\n # This should only happen if x.dtype is [('', 'Vx')]\n array.dtype = x.dtype\n\n return _maybe_view_as_subclass(x, array)\n\n\ndef _broadcast_to(array, shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=False, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if any(size < 0 for size in shape):\n raise ValueError('all elements of broadcast shape must be non-'\n 'negative')\n broadcast = np.nditer(\n (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],\n 
op_flags=['readonly'], itershape=shape, order='C').itviews[0]\n result = _maybe_view_as_subclass(array, broadcast)\n if not readonly and array.flags.writeable:\n result.flags.writeable = True\n return result\n\n\ndef broadcast_to(array, shape, subok=False):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned array will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n ValueError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n\n Notes\n -----\n .. versionadded:: 1.10.0\n\n Examples\n --------\n >>> x = np.array([1, 2, 3])\n >>> np.broadcast_to(x, (3, 3))\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]])\n \"\"\"\n return _broadcast_to(array, shape, subok=subok, readonly=True)\n\n\ndef _broadcast_shape(*args):\n \"\"\"Returns the shape of the ararys that would result from broadcasting the\n supplied arrays against each other.\n \"\"\"\n if not args:\n raise ValueError('must provide at least one argument')\n if len(args) == 1:\n # a single argument does not work with np.broadcast\n return np.asarray(args[0]).shape\n # use the old-iterator because np.nditer does not handle size 0 arrays\n # consistently\n b = np.broadcast(*args[:32])\n # unfortunately, it cannot handle 32 or more arguments directly\n for pos in range(32, len(args), 31):\n b = np.broadcast(b, *args[pos:(pos + 31)])\n return b.shape\n\n\ndef broadcast_arrays(*args, **kwargs):\n \"\"\"\n Broadcast any number of arrays against each other.\n\n Parameters\n ----------\n `*args` : array_likes\n The arrays to broadcast.\n\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned arrays will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcasted : list of arrays\n These arrays are views on the original arrays. They are typically\n not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location. 
If you\n need to write to the arrays, make copies first.\n\n Examples\n --------\n >>> x = np.array([[1,2,3]])\n >>> y = np.array([[1],[2],[3]])\n >>> np.broadcast_arrays(x, y)\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n Here is a useful idiom for getting contiguous copies instead of\n non-contiguous views.\n\n >>> [np.array(a) for a in np.broadcast_arrays(x, y)]\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n \"\"\"\n # nditer is not used here to avoid the limit of 32 arrays.\n # Otherwise, something like the following one-liner would suffice:\n # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],\n # order='C').itviews\n\n subok = kwargs.pop('subok', False)\n if kwargs:\n raise TypeError('broadcast_arrays() got an unexpected keyword '\n 'argument {}'.format(kwargs.pop()))\n args = [np.array(_m, copy=False, subok=subok) for _m in args]\n\n shape = _broadcast_shape(*args)\n\n if all(array.shape == shape for array in args):\n # Common case where nothing needs to be broadcasted.\n return args\n\n # TODO: consider making the results of broadcast_arrays readonly to match\n # broadcast_to. This will require a deprecation cycle.\n return [_broadcast_to(array, shape, subok=subok, readonly=False)\n for array in args]\n", "path": "numpy/lib/stride_tricks.py"}]}
| 2,899 | 173 |
gh_patches_debug_11955
|
rasdani/github-patches
|
git_diff
|
MongoEngine__mongoengine-1430
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop Python 2.6 support
For the most relevant discussion about the topic, see #1294.
Plan:
1. In the upcoming minor release, I'm going to include `warnings.warn(msg, DeprecationWarning)`. with the message saying "Python v2.6 support is deprecated and is going to be dropped entirely in the upcoming v0.11.0 release. Update your Python version if you want to have access to the latest features and bug fixes in MongoEngine."
2. In v0.11.0 (most likely shipped with #1428), I'll update the way we do dict comprehensions and other relics of the past, thus making it truly incompatible with v2.6.
Cc @lafrech @gukoff
</issue>
<code>
[start of mongoengine/python_support.py]
1 """Helper functions and types to aid with Python 2.5 - 3 support."""
2
3 import sys
4 import pymongo
5
6
7 if pymongo.version_tuple[0] < 3:
8 IS_PYMONGO_3 = False
9 else:
10 IS_PYMONGO_3 = True
11
12 PY3 = sys.version_info[0] == 3
13
14 if PY3:
15 import codecs
16 from io import BytesIO as StringIO
17
18 # return s converted to binary. b('test') should be equivalent to b'test'
19 def b(s):
20 return codecs.latin_1_encode(s)[0]
21
22 bin_type = bytes
23 txt_type = str
24 else:
25 try:
26 from cStringIO import StringIO
27 except ImportError:
28 from StringIO import StringIO
29
30 # Conversion to binary only necessary in Python 3
31 def b(s):
32 return s
33
34 bin_type = str
35 txt_type = unicode
36
37 str_types = (bin_type, txt_type)
38
[end of mongoengine/python_support.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mongoengine/python_support.py b/mongoengine/python_support.py
--- a/mongoengine/python_support.py
+++ b/mongoengine/python_support.py
@@ -1,9 +1,22 @@
-"""Helper functions and types to aid with Python 2.5 - 3 support."""
+"""Helper functions and types to aid with Python 2.6 - 3 support."""
import sys
+import warnings
+
import pymongo
+# Show a deprecation warning for people using Python v2.6
+# TODO remove in mongoengine v0.11.0
+if sys.version_info[0] == 2 and sys.version_info[1] == 6:
+ warnings.warn(
+ 'Python v2.6 support is deprecated and is going to be dropped '
+ 'entirely in the upcoming v0.11.0 release. Update your Python '
+ 'version if you want to have access to the latest features and '
+ 'bug fixes in MongoEngine.',
+ DeprecationWarning
+ )
+
if pymongo.version_tuple[0] < 3:
IS_PYMONGO_3 = False
else:
|
{"golden_diff": "diff --git a/mongoengine/python_support.py b/mongoengine/python_support.py\n--- a/mongoengine/python_support.py\n+++ b/mongoengine/python_support.py\n@@ -1,9 +1,22 @@\n-\"\"\"Helper functions and types to aid with Python 2.5 - 3 support.\"\"\"\n+\"\"\"Helper functions and types to aid with Python 2.6 - 3 support.\"\"\"\n \n import sys\n+import warnings\n+\n import pymongo\n \n \n+# Show a deprecation warning for people using Python v2.6\n+# TODO remove in mongoengine v0.11.0\n+if sys.version_info[0] == 2 and sys.version_info[1] == 6:\n+ warnings.warn(\n+ 'Python v2.6 support is deprecated and is going to be dropped '\n+ 'entirely in the upcoming v0.11.0 release. Update your Python '\n+ 'version if you want to have access to the latest features and '\n+ 'bug fixes in MongoEngine.',\n+ DeprecationWarning\n+ )\n+\n if pymongo.version_tuple[0] < 3:\n IS_PYMONGO_3 = False\n else:\n", "issue": "Drop Python 2.6 support\nFor the most relevant discussion about the topic, see #1294.\r\n\r\nPlan:\r\n1. In the upcoming minor release, I'm going to include `warnings.warn(msg, DeprecationWarning)`. with the message saying \"Python v2.6 support is deprecated and is going to be dropped entirely in the upcoming v0.11.0 release. Update your Python version if you want to have access to the latest features and bug fixes in MongoEngine.\"\r\n2. In v0.11.0 (most likely shipped with #1428), I'll update the way we do dict comprehensions and other relics of the past, thus making it truly incompatible with v2.6.\r\n\r\nCc @lafrech @gukoff \n", "before_files": [{"content": "\"\"\"Helper functions and types to aid with Python 2.5 - 3 support.\"\"\"\n\nimport sys\nimport pymongo\n\n\nif pymongo.version_tuple[0] < 3:\n IS_PYMONGO_3 = False\nelse:\n IS_PYMONGO_3 = True\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n import codecs\n from io import BytesIO as StringIO\n\n # return s converted to binary. b('test') should be equivalent to b'test'\n def b(s):\n return codecs.latin_1_encode(s)[0]\n\n bin_type = bytes\n txt_type = str\nelse:\n try:\n from cStringIO import StringIO\n except ImportError:\n from StringIO import StringIO\n\n # Conversion to binary only necessary in Python 3\n def b(s):\n return s\n\n bin_type = str\n txt_type = unicode\n\nstr_types = (bin_type, txt_type)\n", "path": "mongoengine/python_support.py"}]}
| 978 | 253 |
gh_patches_debug_11989
|
rasdani/github-patches
|
git_diff
|
sagemath__sage-36173
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unoptimal memory complexity of `sage.matrix.berlekamp`
The code here is unoptimal:
https://github.com/sagemath/sage/blob/6695becb762aebab78ef47d0fb12eae52be5d79d/src/sage/matrix/berlekamp_massey.py#L90-L98
For example, the following code uses a lot of memory:
```python
sage: from sage.matrix.berlekamp_massey import berlekamp_massey
sage: p = next_prime(2**64)
sage: ls = [GF(p).random_element() for _ in range(20000)]
sage: berlekamp_massey(ls);
```
To be more specific, the dictionaries are not necessarily and only `f[j - 2]` and `f[j - 1]` are used every time, same for `s`. So they can be stored as temporary variables.
### Additional Information
I am fixing it.
### Checklist
- [X] I have searched the existing issues for a bug report that matches the one I want to file, without success.
- [X] I have read the documentation and troubleshoot guide
</issue>
<code>
[start of src/sage/matrix/berlekamp_massey.py]
1 """
2 Minimal Polynomials of Linear Recurrence Sequences
3
4 AUTHORS:
5
6 - William Stein
7 """
8 # ****************************************************************************
9 # Copyright (C) 2005 William Stein <wstein@gmail.com>
10 #
11 # Distributed under the terms of the GNU General Public License (GPL)
12 #
13 # This code is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 # General Public License for more details.
17 #
18 # The full text of the GPL is available at:
19 #
20 # https://www.gnu.org/licenses/
21 # ****************************************************************************
22
23 import sage.rings.rational_field
24
25
26 def berlekamp_massey(a):
27 r"""
28 Use the Berlekamp-Massey algorithm to find the minimal polynomial
29 of a linear recurrence sequence `a`.
30
31 The minimal polynomial of a linear recurrence `\{a_r\}` is
32 by definition the unique monic polynomial `g`, such that if
33 `\{a_r\}` satisfies a linear recurrence
34 `a_{j+k} + b_{j-1} a_{j-1+k} + \cdots + b_0 a_k=0`
35 (for all `k\geq 0`), then `g` divides the
36 polynomial `x^j + \sum_{i=0}^{j-1} b_i x^i`.
37
38 INPUT:
39
40 - ``a`` -- a list of even length of elements of a field (or domain)
41
42 OUTPUT:
43
44 the minimal polynomial of the sequence, as a polynomial over the
45 field in which the entries of `a` live
46
47 .. WARNING::
48
49 The result is only guaranteed to be correct on the full
50 sequence if there exists a linear recurrence of length less
51 than half the length of `a`.
52
53 EXAMPLES::
54
55 sage: from sage.matrix.berlekamp_massey import berlekamp_massey
56 sage: berlekamp_massey([1,2,1,2,1,2])
57 x^2 - 1
58 sage: berlekamp_massey([GF(7)(1), 19, 1, 19])
59 x^2 + 6
60 sage: berlekamp_massey([2,2,1,2,1,191,393,132])
61 x^4 - 36727/11711*x^3 + 34213/5019*x^2 + 7024942/35133*x - 335813/1673
62 sage: berlekamp_massey(prime_range(2, 38)) # needs sage.libs.pari
63 x^6 - 14/9*x^5 - 7/9*x^4 + 157/54*x^3 - 25/27*x^2 - 73/18*x + 37/9
64
65 TESTS::
66
67 sage: berlekamp_massey("banana")
68 Traceback (most recent call last):
69 ...
70 TypeError: argument must be a list or tuple
71 sage: berlekamp_massey([1,2,5])
72 Traceback (most recent call last):
73 ...
74 ValueError: argument must have an even number of terms
75 """
76 if not isinstance(a, (list, tuple)):
77 raise TypeError("argument must be a list or tuple")
78 if len(a) % 2:
79 raise ValueError("argument must have an even number of terms")
80
81 M = len(a) // 2
82
83 try:
84 K = a[0].parent().fraction_field()
85 except AttributeError:
86 K = sage.rings.rational_field.RationalField()
87 R = K['x']
88 x = R.gen()
89
90 f = {-1: R(a), 0: x**(2 * M)}
91 s = {-1: 1, 0: 0}
92 j = 0
93 while f[j].degree() >= M:
94 j += 1
95 qj, f[j] = f[j - 2].quo_rem(f[j - 1])
96 s[j] = s[j - 2] - qj * s[j - 1]
97 t = s[j].reverse()
98 return ~(t[t.degree()]) * t # make monic (~ is inverse in python)
99
[end of src/sage/matrix/berlekamp_massey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sage/matrix/berlekamp_massey.py b/src/sage/matrix/berlekamp_massey.py
--- a/src/sage/matrix/berlekamp_massey.py
+++ b/src/sage/matrix/berlekamp_massey.py
@@ -84,15 +84,11 @@
K = a[0].parent().fraction_field()
except AttributeError:
K = sage.rings.rational_field.RationalField()
- R = K['x']
- x = R.gen()
-
- f = {-1: R(a), 0: x**(2 * M)}
- s = {-1: 1, 0: 0}
- j = 0
- while f[j].degree() >= M:
- j += 1
- qj, f[j] = f[j - 2].quo_rem(f[j - 1])
- s[j] = s[j - 2] - qj * s[j - 1]
- t = s[j].reverse()
- return ~(t[t.degree()]) * t # make monic (~ is inverse in python)
+
+ R, x = K['x'].objgen()
+ f0, f1 = R(a), x**(2 * M)
+ s0, s1 = 1, 0
+ while f1.degree() >= M:
+ f0, (q, f1) = f1, f0.quo_rem(f1)
+ s0, s1 = s1, s0 - q * s1
+ return s1.reverse().monic()
|
{"golden_diff": "diff --git a/src/sage/matrix/berlekamp_massey.py b/src/sage/matrix/berlekamp_massey.py\n--- a/src/sage/matrix/berlekamp_massey.py\n+++ b/src/sage/matrix/berlekamp_massey.py\n@@ -84,15 +84,11 @@\n K = a[0].parent().fraction_field()\n except AttributeError:\n K = sage.rings.rational_field.RationalField()\n- R = K['x']\n- x = R.gen()\n-\n- f = {-1: R(a), 0: x**(2 * M)}\n- s = {-1: 1, 0: 0}\n- j = 0\n- while f[j].degree() >= M:\n- j += 1\n- qj, f[j] = f[j - 2].quo_rem(f[j - 1])\n- s[j] = s[j - 2] - qj * s[j - 1]\n- t = s[j].reverse()\n- return ~(t[t.degree()]) * t # make monic (~ is inverse in python)\n+\n+ R, x = K['x'].objgen()\n+ f0, f1 = R(a), x**(2 * M)\n+ s0, s1 = 1, 0\n+ while f1.degree() >= M:\n+ f0, (q, f1) = f1, f0.quo_rem(f1)\n+ s0, s1 = s1, s0 - q * s1\n+ return s1.reverse().monic()\n", "issue": "Unoptimal memory complexity of `sage.matrix.berlekamp`\nThe code here is unoptimal:\r\n\r\nhttps://github.com/sagemath/sage/blob/6695becb762aebab78ef47d0fb12eae52be5d79d/src/sage/matrix/berlekamp_massey.py#L90-L98\r\n\r\nFor example, the following code uses a lot of memory:\r\n\r\n```python\r\nsage: from sage.matrix.berlekamp_massey import berlekamp_massey\r\nsage: p = next_prime(2**64)\r\nsage: ls = [GF(p).random_element() for _ in range(20000)]\r\nsage: berlekamp_massey(ls);\r\n```\r\n\r\nTo be more specific, the dictionaries are not necessarily and only `f[j - 2]` and `f[j - 1]` are used every time, same for `s`. So they can be stored as temporary variables.\r\n\r\n### Additional Information\r\n\r\nI am fixing it.\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the existing issues for a bug report that matches the one I want to file, without success.\r\n- [X] I have read the documentation and troubleshoot guide\n", "before_files": [{"content": "\"\"\"\nMinimal Polynomials of Linear Recurrence Sequences\n\nAUTHORS:\n\n- William Stein\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2005 William Stein <wstein@gmail.com>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport sage.rings.rational_field\n\n\ndef berlekamp_massey(a):\n r\"\"\"\n Use the Berlekamp-Massey algorithm to find the minimal polynomial\n of a linear recurrence sequence `a`.\n\n The minimal polynomial of a linear recurrence `\\{a_r\\}` is\n by definition the unique monic polynomial `g`, such that if\n `\\{a_r\\}` satisfies a linear recurrence\n `a_{j+k} + b_{j-1} a_{j-1+k} + \\cdots + b_0 a_k=0`\n (for all `k\\geq 0`), then `g` divides the\n polynomial `x^j + \\sum_{i=0}^{j-1} b_i x^i`.\n\n INPUT:\n\n - ``a`` -- a list of even length of elements of a field (or domain)\n\n OUTPUT:\n\n the minimal polynomial of the sequence, as a polynomial over the\n field in which the entries of `a` live\n\n .. 
WARNING::\n\n The result is only guaranteed to be correct on the full\n sequence if there exists a linear recurrence of length less\n than half the length of `a`.\n\n EXAMPLES::\n\n sage: from sage.matrix.berlekamp_massey import berlekamp_massey\n sage: berlekamp_massey([1,2,1,2,1,2])\n x^2 - 1\n sage: berlekamp_massey([GF(7)(1), 19, 1, 19])\n x^2 + 6\n sage: berlekamp_massey([2,2,1,2,1,191,393,132])\n x^4 - 36727/11711*x^3 + 34213/5019*x^2 + 7024942/35133*x - 335813/1673\n sage: berlekamp_massey(prime_range(2, 38)) # needs sage.libs.pari\n x^6 - 14/9*x^5 - 7/9*x^4 + 157/54*x^3 - 25/27*x^2 - 73/18*x + 37/9\n\n TESTS::\n\n sage: berlekamp_massey(\"banana\")\n Traceback (most recent call last):\n ...\n TypeError: argument must be a list or tuple\n sage: berlekamp_massey([1,2,5])\n Traceback (most recent call last):\n ...\n ValueError: argument must have an even number of terms\n \"\"\"\n if not isinstance(a, (list, tuple)):\n raise TypeError(\"argument must be a list or tuple\")\n if len(a) % 2:\n raise ValueError(\"argument must have an even number of terms\")\n\n M = len(a) // 2\n\n try:\n K = a[0].parent().fraction_field()\n except AttributeError:\n K = sage.rings.rational_field.RationalField()\n R = K['x']\n x = R.gen()\n\n f = {-1: R(a), 0: x**(2 * M)}\n s = {-1: 1, 0: 0}\n j = 0\n while f[j].degree() >= M:\n j += 1\n qj, f[j] = f[j - 2].quo_rem(f[j - 1])\n s[j] = s[j - 2] - qj * s[j - 1]\n t = s[j].reverse()\n return ~(t[t.degree()]) * t # make monic (~ is inverse in python)\n", "path": "src/sage/matrix/berlekamp_massey.py"}]}
| 1,988 | 362 |