problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.1k
25.4k
golden_diff
stringlengths
145
5.13k
verification_info
stringlengths
582
39.1k
num_tokens
int64
271
4.1k
num_tokens_diff
int64
47
1.02k
gh_patches_debug_29276
rasdani/github-patches
git_diff
wagtail__wagtail-365
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Searching images without match is not displayed correctly (minor) When searching for images, if you enter a query string which will not return resuls you will get the message Sorry, no images match "" (without the query string inside the ""). Beyon this, search works fine. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/wagtailsearch/views/editorspicks.py` Content: ``` 1 from django.shortcuts import render, redirect, get_object_or_404 2 from django.contrib.auth.decorators import permission_required 3 from django.contrib import messages 4 5 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger 6 from django.utils.translation import ugettext as _ 7 from django.views.decorators.vary import vary_on_headers 8 9 from wagtail.wagtailsearch import models, forms 10 from wagtail.wagtailadmin.forms import SearchForm 11 12 13 @permission_required('wagtailadmin.access_admin') 14 @vary_on_headers('X-Requested-With') 15 def index(request): 16 page = request.GET.get('p', 1) 17 query_string = request.GET.get('q', "") 18 19 queries = models.Query.objects.filter(editors_picks__isnull=False).distinct() 20 21 # Search 22 if query_string: 23 queries = queries.filter(query_string__icontains=query_string) 24 25 # Pagination 26 paginator = Paginator(queries, 20) 27 try: 28 queries = paginator.page(page) 29 except PageNotAnInteger: 30 queries = paginator.page(1) 31 except EmptyPage: 32 queries = paginator.page(paginator.num_pages) 33 34 if request.is_ajax(): 35 return render(request, "wagtailsearch/editorspicks/results.html", { 36 'queries': queries, 37 'query_string': query_string, 38 }) 39 else: 40 return render(request, 'wagtailsearch/editorspicks/index.html', { 41 'queries': queries, 42 'query_string': query_string, 43 'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_("Search editor's picks")), 44 }) 45 46 47 def save_editorspicks(query, new_query, editors_pick_formset): 48 # Save 49 if editors_pick_formset.is_valid(): 50 # Set sort_order 51 for i, form in enumerate(editors_pick_formset.ordered_forms): 52 form.instance.sort_order = i 53 54 editors_pick_formset.save() 55 56 # If query was changed, move all editors picks to the new query 57 if query != new_query: 58 editors_pick_formset.get_queryset().update(query=new_query) 59 60 return True 61 else: 62 return False 63 64 65 @permission_required('wagtailadmin.access_admin') 66 def add(request): 67 if request.POST: 68 # Get query 69 query_form = forms.QueryForm(request.POST) 70 if query_form.is_valid(): 71 query = models.Query.get(query_form['query_string'].value()) 72 73 # Save editors picks 74 editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query) 75 if save_editorspicks(query, query, editors_pick_formset): 76 messages.success(request, _("Editor's picks for '{0}' created.").format(query)) 77 return redirect('wagtailsearch_editorspicks_index') 78 else: 79 if len(editors_pick_formset.non_form_errors()): 80 messages.error(request, " ".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted) 81 else: 82 messages.error(request, _("Recommendations have not been created due to errors")) # specific errors will be displayed within form fields 83 else: 84 editors_pick_formset = forms.EditorsPickFormSet() 85 else: 86 query_form = forms.QueryForm() 87 editors_pick_formset = forms.EditorsPickFormSet() 88 89 return render(request, 'wagtailsearch/editorspicks/add.html', { 90 'query_form': query_form, 91 'editors_pick_formset': editors_pick_formset, 92 }) 93 94 95 @permission_required('wagtailadmin.access_admin') 96 def edit(request, query_id): 97 query = get_object_or_404(models.Query, id=query_id) 98 99 if request.POST: 100 # Get query 101 query_form = forms.QueryForm(request.POST) 102 # and the recommendations 103 editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query) 104 105 if query_form.is_valid(): 106 new_query = models.Query.get(query_form['query_string'].value()) 107 108 # Save editors picks 109 if save_editorspicks(query, new_query, editors_pick_formset): 110 messages.success(request, _("Editor's picks for '{0}' updated.").format(new_query)) 111 return redirect('wagtailsearch_editorspicks_index') 112 else: 113 if len(editors_pick_formset.non_form_errors()): 114 messages.error(request, " ".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted) 115 else: 116 messages.error(request, _("Recommendations have not been saved due to errors")) # specific errors will be displayed within form fields 117 118 else: 119 query_form = forms.QueryForm(initial=dict(query_string=query.query_string)) 120 editors_pick_formset = forms.EditorsPickFormSet(instance=query) 121 122 return render(request, 'wagtailsearch/editorspicks/edit.html', { 123 'query_form': query_form, 124 'editors_pick_formset': editors_pick_formset, 125 'query': query, 126 }) 127 128 129 @permission_required('wagtailadmin.access_admin') 130 def delete(request, query_id): 131 query = get_object_or_404(models.Query, id=query_id) 132 133 if request.POST: 134 query.editors_picks.all().delete() 135 messages.success(request, _("Editor's picks deleted.")) 136 return redirect('wagtailsearch_editorspicks_index') 137 138 return render(request, 'wagtailsearch/editorspicks/confirm_delete.html', { 139 'query': query, 140 }) 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/wagtailsearch/views/editorspicks.py b/wagtail/wagtailsearch/views/editorspicks.py --- a/wagtail/wagtailsearch/views/editorspicks.py +++ b/wagtail/wagtailsearch/views/editorspicks.py @@ -13,6 +13,7 @@ @permission_required('wagtailadmin.access_admin') @vary_on_headers('X-Requested-With') def index(request): + is_searching = False page = request.GET.get('p', 1) query_string = request.GET.get('q', "") @@ -21,6 +22,7 @@ # Search if query_string: queries = queries.filter(query_string__icontains=query_string) + is_searching = True # Pagination paginator = Paginator(queries, 20) @@ -33,11 +35,13 @@ if request.is_ajax(): return render(request, "wagtailsearch/editorspicks/results.html", { + 'is_searching': is_searching, 'queries': queries, 'query_string': query_string, }) else: return render(request, 'wagtailsearch/editorspicks/index.html', { + 'is_searching': is_searching, 'queries': queries, 'query_string': query_string, 'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_("Search editor's picks")),
{"golden_diff": "diff --git a/wagtail/wagtailsearch/views/editorspicks.py b/wagtail/wagtailsearch/views/editorspicks.py\n--- a/wagtail/wagtailsearch/views/editorspicks.py\n+++ b/wagtail/wagtailsearch/views/editorspicks.py\n@@ -13,6 +13,7 @@\n @permission_required('wagtailadmin.access_admin')\n @vary_on_headers('X-Requested-With')\n def index(request):\n+ is_searching = False\n page = request.GET.get('p', 1)\n query_string = request.GET.get('q', \"\")\n \n@@ -21,6 +22,7 @@\n # Search\n if query_string:\n queries = queries.filter(query_string__icontains=query_string)\n+ is_searching = True\n \n # Pagination\n paginator = Paginator(queries, 20)\n@@ -33,11 +35,13 @@\n \n if request.is_ajax():\n return render(request, \"wagtailsearch/editorspicks/results.html\", {\n+ 'is_searching': is_searching,\n 'queries': queries,\n 'query_string': query_string,\n })\n else:\n return render(request, 'wagtailsearch/editorspicks/index.html', {\n+ 'is_searching': is_searching,\n 'queries': queries,\n 'query_string': query_string,\n 'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_(\"Search editor's picks\")),\n", "issue": "Searching images without match is not displayed correctly (minor)\nWhen searching for images, if you enter a query string which will not return resuls you will get the message Sorry, no images match \"\" (without the query string inside the \"\"). Beyon this, search works fine.\n\n", "before_files": [{"content": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib import messages\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.vary import vary_on_headers\n\nfrom wagtail.wagtailsearch import models, forms\nfrom wagtail.wagtailadmin.forms import SearchForm\n\n\n@permission_required('wagtailadmin.access_admin')\n@vary_on_headers('X-Requested-With')\ndef index(request):\n page = request.GET.get('p', 1)\n query_string = request.GET.get('q', \"\")\n\n queries = models.Query.objects.filter(editors_picks__isnull=False).distinct()\n\n # Search\n if query_string:\n queries = queries.filter(query_string__icontains=query_string)\n\n # Pagination\n paginator = Paginator(queries, 20)\n try:\n queries = paginator.page(page)\n except PageNotAnInteger:\n queries = paginator.page(1)\n except EmptyPage:\n queries = paginator.page(paginator.num_pages)\n\n if request.is_ajax():\n return render(request, \"wagtailsearch/editorspicks/results.html\", {\n 'queries': queries,\n 'query_string': query_string,\n })\n else:\n return render(request, 'wagtailsearch/editorspicks/index.html', {\n 'queries': queries,\n 'query_string': query_string,\n 'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_(\"Search editor's picks\")),\n })\n\n\ndef save_editorspicks(query, new_query, editors_pick_formset):\n # Save\n if editors_pick_formset.is_valid():\n # Set sort_order\n for i, form in enumerate(editors_pick_formset.ordered_forms):\n form.instance.sort_order = i\n\n editors_pick_formset.save()\n\n # If query was changed, move all editors picks to the new query\n if query != new_query:\n editors_pick_formset.get_queryset().update(query=new_query)\n\n return True\n else:\n return False\n\n\n@permission_required('wagtailadmin.access_admin')\ndef add(request):\n if request.POST:\n # Get query\n query_form = forms.QueryForm(request.POST)\n if query_form.is_valid():\n query = models.Query.get(query_form['query_string'].value())\n\n # Save editors picks\n editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)\n if save_editorspicks(query, query, editors_pick_formset):\n messages.success(request, _(\"Editor's picks for '{0}' created.\").format(query))\n return redirect('wagtailsearch_editorspicks_index')\n else:\n if len(editors_pick_formset.non_form_errors()):\n messages.error(request, \" \".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)\n else:\n messages.error(request, _(\"Recommendations have not been created due to errors\")) # specific errors will be displayed within form fields\n else:\n editors_pick_formset = forms.EditorsPickFormSet()\n else:\n query_form = forms.QueryForm()\n editors_pick_formset = forms.EditorsPickFormSet()\n\n return render(request, 'wagtailsearch/editorspicks/add.html', {\n 'query_form': query_form,\n 'editors_pick_formset': editors_pick_formset,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef edit(request, query_id):\n query = get_object_or_404(models.Query, id=query_id)\n\n if request.POST:\n # Get query\n query_form = forms.QueryForm(request.POST)\n # and the recommendations\n editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)\n\n if query_form.is_valid():\n new_query = models.Query.get(query_form['query_string'].value())\n\n # Save editors picks\n if save_editorspicks(query, new_query, editors_pick_formset):\n messages.success(request, _(\"Editor's picks for '{0}' updated.\").format(new_query))\n return redirect('wagtailsearch_editorspicks_index')\n else:\n if len(editors_pick_formset.non_form_errors()):\n messages.error(request, \" \".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)\n else:\n messages.error(request, _(\"Recommendations have not been saved due to errors\")) # specific errors will be displayed within form fields\n\n else:\n query_form = forms.QueryForm(initial=dict(query_string=query.query_string))\n editors_pick_formset = forms.EditorsPickFormSet(instance=query)\n\n return render(request, 'wagtailsearch/editorspicks/edit.html', {\n 'query_form': query_form,\n 'editors_pick_formset': editors_pick_formset,\n 'query': query,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef delete(request, query_id):\n query = get_object_or_404(models.Query, id=query_id)\n\n if request.POST:\n query.editors_picks.all().delete()\n messages.success(request, _(\"Editor's picks deleted.\"))\n return redirect('wagtailsearch_editorspicks_index')\n\n return render(request, 'wagtailsearch/editorspicks/confirm_delete.html', {\n 'query': query,\n })\n", "path": "wagtail/wagtailsearch/views/editorspicks.py"}], "after_files": [{"content": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib import messages\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.vary import vary_on_headers\n\nfrom wagtail.wagtailsearch import models, forms\nfrom wagtail.wagtailadmin.forms import SearchForm\n\n\n@permission_required('wagtailadmin.access_admin')\n@vary_on_headers('X-Requested-With')\ndef index(request):\n is_searching = False\n page = request.GET.get('p', 1)\n query_string = request.GET.get('q', \"\")\n\n queries = models.Query.objects.filter(editors_picks__isnull=False).distinct()\n\n # Search\n if query_string:\n queries = queries.filter(query_string__icontains=query_string)\n is_searching = True\n\n # Pagination\n paginator = Paginator(queries, 20)\n try:\n queries = paginator.page(page)\n except PageNotAnInteger:\n queries = paginator.page(1)\n except EmptyPage:\n queries = paginator.page(paginator.num_pages)\n\n if request.is_ajax():\n return render(request, \"wagtailsearch/editorspicks/results.html\", {\n 'is_searching': is_searching,\n 'queries': queries,\n 'query_string': query_string,\n })\n else:\n return render(request, 'wagtailsearch/editorspicks/index.html', {\n 'is_searching': is_searching,\n 'queries': queries,\n 'query_string': query_string,\n 'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_(\"Search editor's picks\")),\n })\n\n\ndef save_editorspicks(query, new_query, editors_pick_formset):\n # Save\n if editors_pick_formset.is_valid():\n # Set sort_order\n for i, form in enumerate(editors_pick_formset.ordered_forms):\n form.instance.sort_order = i\n\n editors_pick_formset.save()\n\n # If query was changed, move all editors picks to the new query\n if query != new_query:\n editors_pick_formset.get_queryset().update(query=new_query)\n\n return True\n else:\n return False\n\n\n@permission_required('wagtailadmin.access_admin')\ndef add(request):\n if request.POST:\n # Get query\n query_form = forms.QueryForm(request.POST)\n if query_form.is_valid():\n query = models.Query.get(query_form['query_string'].value())\n\n # Save editors picks\n editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)\n if save_editorspicks(query, query, editors_pick_formset):\n messages.success(request, _(\"Editor's picks for '{0}' created.\").format(query))\n return redirect('wagtailsearch_editorspicks_index')\n else:\n if len(editors_pick_formset.non_form_errors()):\n messages.error(request, \" \".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)\n else:\n messages.error(request, _(\"Recommendations have not been created due to errors\")) # specific errors will be displayed within form fields\n else:\n editors_pick_formset = forms.EditorsPickFormSet()\n else:\n query_form = forms.QueryForm()\n editors_pick_formset = forms.EditorsPickFormSet()\n\n return render(request, 'wagtailsearch/editorspicks/add.html', {\n 'query_form': query_form,\n 'editors_pick_formset': editors_pick_formset,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef edit(request, query_id):\n query = get_object_or_404(models.Query, id=query_id)\n\n if request.POST:\n # Get query\n query_form = forms.QueryForm(request.POST)\n # and the recommendations\n editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)\n\n if query_form.is_valid():\n new_query = models.Query.get(query_form['query_string'].value())\n\n # Save editors picks\n if save_editorspicks(query, new_query, editors_pick_formset):\n messages.success(request, _(\"Editor's picks for '{0}' updated.\").format(new_query))\n return redirect('wagtailsearch_editorspicks_index')\n else:\n if len(editors_pick_formset.non_form_errors()):\n messages.error(request, \" \".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)\n else:\n messages.error(request, _(\"Recommendations have not been saved due to errors\")) # specific errors will be displayed within form fields\n\n else:\n query_form = forms.QueryForm(initial=dict(query_string=query.query_string))\n editors_pick_formset = forms.EditorsPickFormSet(instance=query)\n\n return render(request, 'wagtailsearch/editorspicks/edit.html', {\n 'query_form': query_form,\n 'editors_pick_formset': editors_pick_formset,\n 'query': query,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef delete(request, query_id):\n query = get_object_or_404(models.Query, id=query_id)\n\n if request.POST:\n query.editors_picks.all().delete()\n messages.success(request, _(\"Editor's picks deleted.\"))\n return redirect('wagtailsearch_editorspicks_index')\n\n return render(request, 'wagtailsearch/editorspicks/confirm_delete.html', {\n 'query': query,\n })\n", "path": "wagtail/wagtailsearch/views/editorspicks.py"}]}
1,820
326
gh_patches_debug_17026
rasdani/github-patches
git_diff
CTFd__CTFd-530
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade to Font Awesome 5 CTFd 1.1 is meant to be a major front end improvement. Font Awesome 5 is out and I think if there's a time to make this change it would be now. https://fontawesome.com/how-to-use/upgrading-from-4 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/views.py` Content: ``` 1 import os 2 import re 3 4 from flask import current_app as app, render_template, request, redirect, abort, jsonify, url_for, session, Blueprint, Response, send_file 5 from flask.helpers import safe_join 6 from jinja2.exceptions import TemplateNotFound 7 from passlib.hash import bcrypt_sha256 8 9 from CTFd.models import db, Teams, Solves, Awards, Files, Pages 10 from CTFd.utils import cache, markdown 11 from CTFd import utils 12 13 views = Blueprint('views', __name__) 14 15 16 @views.route('/setup', methods=['GET', 'POST']) 17 def setup(): 18 # with app.app_context(): 19 # admin = Teams.query.filter_by(admin=True).first() 20 21 if not utils.is_setup(): 22 if not session.get('nonce'): 23 session['nonce'] = utils.sha512(os.urandom(10)) 24 if request.method == 'POST': 25 ctf_name = request.form['ctf_name'] 26 ctf_name = utils.set_config('ctf_name', ctf_name) 27 28 # CSS 29 css = utils.set_config('start', '') 30 31 # Admin user 32 name = request.form['name'] 33 email = request.form['email'] 34 password = request.form['password'] 35 admin = Teams(name, email, password) 36 admin.admin = True 37 admin.banned = True 38 39 # Index page 40 41 index = """<div class="row"> 42 <div class="col-md-6 offset-md-3"> 43 <img class="w-100 mx-auto d-block" style="max-width: 500px;padding: 50px;padding-top: 14vh;" src="themes/core/static/img/logo.png" /> 44 <h3 class="text-center"> 45 <p>A cool CTF platform from <a href="https://ctfd.io">ctfd.io</a></p> 46 <p>Follow us on social media:</p> 47 <a href="https://twitter.com/ctfdio"><i class="fa fa-twitter fa-2x" aria-hidden="true"></i></a>&nbsp; 48 <a href="https://facebook.com/ctfdio"><i class="fa fa-facebook-official fa-2x" aria-hidden="true"></i></a>&nbsp; 49 <a href="https://github.com/ctfd"><i class="fa fa-github fa-2x" aria-hidden="true"></i></a> 50 </h3> 51 <br> 52 <h4 class="text-center"> 53 <a href="admin">Click here</a> to login and setup your CTF 54 </h4> 55 </div> 56 </div>""".format(request.script_root) 57 58 page = Pages(title=None, route='index', html=index, draft=False) 59 60 # max attempts per challenge 61 max_tries = utils.set_config('max_tries', 0) 62 63 # Start time 64 start = utils.set_config('start', None) 65 end = utils.set_config('end', None) 66 freeze = utils.set_config('freeze', None) 67 68 # Challenges cannot be viewed by unregistered users 69 view_challenges_unregistered = utils.set_config('view_challenges_unregistered', None) 70 71 # Allow/Disallow registration 72 prevent_registration = utils.set_config('prevent_registration', None) 73 74 # Verify emails 75 verify_emails = utils.set_config('verify_emails', None) 76 77 mail_server = utils.set_config('mail_server', None) 78 mail_port = utils.set_config('mail_port', None) 79 mail_tls = utils.set_config('mail_tls', None) 80 mail_ssl = utils.set_config('mail_ssl', None) 81 mail_username = utils.set_config('mail_username', None) 82 mail_password = utils.set_config('mail_password', None) 83 mail_useauth = utils.set_config('mail_useauth', None) 84 85 setup = utils.set_config('setup', True) 86 87 db.session.add(page) 88 db.session.add(admin) 89 db.session.commit() 90 91 session['username'] = admin.name 92 session['id'] = admin.id 93 session['admin'] = admin.admin 94 session['nonce'] = utils.sha512(os.urandom(10)) 95 96 db.session.close() 97 app.setup = False 98 with app.app_context(): 99 cache.clear() 100 101 return redirect(url_for('views.static_html')) 102 return render_template('setup.html', nonce=session.get('nonce')) 103 return redirect(url_for('views.static_html')) 104 105 106 # Custom CSS handler 107 @views.route('/static/user.css') 108 def custom_css(): 109 return Response(utils.get_config('css'), mimetype='text/css') 110 111 112 # Static HTML files 113 @views.route("/", defaults={'template': 'index'}) 114 @views.route("/<path:template>") 115 def static_html(template): 116 page = utils.get_page(template) 117 if page is None: 118 try: 119 return render_template('%s.html' % template) 120 except TemplateNotFound: 121 abort(404) 122 else: 123 if page.auth_required and utils.authed() is False: 124 return redirect(url_for('auth.login', next=request.path)) 125 126 return render_template('page.html', content=markdown(page.html)) 127 128 129 @views.route('/teams', defaults={'page': '1'}) 130 @views.route('/teams/<int:page>') 131 def teams(page): 132 if utils.get_config('workshop_mode'): 133 abort(404) 134 page = abs(int(page)) 135 results_per_page = 50 136 page_start = results_per_page * (page - 1) 137 page_end = results_per_page * (page - 1) + results_per_page 138 139 if utils.get_config('verify_emails'): 140 count = Teams.query.filter_by(verified=True, banned=False).count() 141 teams = Teams.query.filter_by(verified=True, banned=False).slice(page_start, page_end).all() 142 else: 143 count = Teams.query.filter_by(banned=False).count() 144 teams = Teams.query.filter_by(banned=False).slice(page_start, page_end).all() 145 pages = int(count / results_per_page) + (count % results_per_page > 0) 146 return render_template('teams.html', teams=teams, team_pages=pages, curr_page=page) 147 148 149 @views.route('/team', methods=['GET']) 150 def private_team(): 151 if utils.authed(): 152 teamid = session['id'] 153 154 freeze = utils.get_config('freeze') 155 user = Teams.query.filter_by(id=teamid).first_or_404() 156 solves = Solves.query.filter_by(teamid=teamid) 157 awards = Awards.query.filter_by(teamid=teamid) 158 159 place = user.place() 160 score = user.score() 161 162 if freeze: 163 freeze = utils.unix_time_to_utc(freeze) 164 if teamid != session.get('id'): 165 solves = solves.filter(Solves.date < freeze) 166 awards = awards.filter(Awards.date < freeze) 167 168 solves = solves.all() 169 awards = awards.all() 170 171 return render_template('team.html', solves=solves, awards=awards, team=user, score=score, place=place, score_frozen=utils.is_scoreboard_frozen()) 172 else: 173 return redirect(url_for('auth.login')) 174 175 176 @views.route('/team/<int:teamid>', methods=['GET', 'POST']) 177 def team(teamid): 178 if utils.get_config('workshop_mode'): 179 abort(404) 180 181 if utils.get_config('view_scoreboard_if_utils.authed') and not utils.authed(): 182 return redirect(url_for('auth.login', next=request.path)) 183 errors = [] 184 freeze = utils.get_config('freeze') 185 user = Teams.query.filter_by(id=teamid).first_or_404() 186 solves = Solves.query.filter_by(teamid=teamid) 187 awards = Awards.query.filter_by(teamid=teamid) 188 189 place = user.place() 190 score = user.score() 191 192 if freeze: 193 freeze = utils.unix_time_to_utc(freeze) 194 if teamid != session.get('id'): 195 solves = solves.filter(Solves.date < freeze) 196 awards = awards.filter(Awards.date < freeze) 197 198 solves = solves.all() 199 awards = awards.all() 200 201 db.session.close() 202 203 if utils.hide_scores() and teamid != session.get('id'): 204 errors.append('Scores are currently hidden') 205 206 if errors: 207 return render_template('team.html', team=user, errors=errors) 208 209 if request.method == 'GET': 210 return render_template('team.html', solves=solves, awards=awards, team=user, score=score, place=place, score_frozen=utils.is_scoreboard_frozen()) 211 elif request.method == 'POST': 212 json = {'solves': []} 213 for x in solves: 214 json['solves'].append({'id': x.id, 'chal': x.chalid, 'team': x.teamid}) 215 return jsonify(json) 216 217 218 @views.route('/profile', methods=['POST', 'GET']) 219 def profile(): 220 if utils.authed(): 221 if request.method == "POST": 222 errors = [] 223 224 name = request.form.get('name').strip() 225 email = request.form.get('email').strip() 226 website = request.form.get('website').strip() 227 affiliation = request.form.get('affiliation').strip() 228 country = request.form.get('country').strip() 229 230 user = Teams.query.filter_by(id=session['id']).first() 231 232 if not utils.get_config('prevent_name_change'): 233 names = Teams.query.filter_by(name=name).first() 234 name_len = len(request.form['name']) == 0 235 236 emails = Teams.query.filter_by(email=email).first() 237 valid_email = utils.check_email_format(email) 238 239 if utils.check_email_format(name) is True: 240 errors.append('Team name cannot be an email address') 241 242 if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \ 243 (not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)): 244 errors.append("Your old password doesn't match what we have.") 245 if not valid_email: 246 errors.append("That email doesn't look right") 247 if not utils.get_config('prevent_name_change') and names and name != session['username']: 248 errors.append('That team name is already taken') 249 if emails and emails.id != session['id']: 250 errors.append('That email has already been used') 251 if not utils.get_config('prevent_name_change') and name_len: 252 errors.append('Pick a longer team name') 253 if website.strip() and not utils.validate_url(website): 254 errors.append("That doesn't look like a valid URL") 255 256 if len(errors) > 0: 257 return render_template('profile.html', name=name, email=email, website=website, 258 affiliation=affiliation, country=country, errors=errors) 259 else: 260 team = Teams.query.filter_by(id=session['id']).first() 261 if team.name != name: 262 if not utils.get_config('prevent_name_change'): 263 team.name = name 264 session['username'] = team.name 265 if team.email != email.lower(): 266 team.email = email.lower() 267 if utils.get_config('verify_emails'): 268 team.verified = False 269 270 if 'password' in request.form.keys() and not len(request.form['password']) == 0: 271 team.password = bcrypt_sha256.encrypt(request.form.get('password')) 272 team.website = website 273 team.affiliation = affiliation 274 team.country = country 275 db.session.commit() 276 db.session.close() 277 return redirect(url_for('views.profile')) 278 else: 279 user = Teams.query.filter_by(id=session['id']).first() 280 name = user.name 281 email = user.email 282 website = user.website 283 affiliation = user.affiliation 284 country = user.country 285 prevent_name_change = utils.get_config('prevent_name_change') 286 confirm_email = utils.get_config('verify_emails') and not user.verified 287 return render_template('profile.html', name=name, email=email, website=website, affiliation=affiliation, 288 country=country, prevent_name_change=prevent_name_change, confirm_email=confirm_email) 289 else: 290 return redirect(url_for('auth.login')) 291 292 293 @views.route('/files', defaults={'path': ''}) 294 @views.route('/files/<path:path>') 295 def file_handler(path): 296 f = Files.query.filter_by(location=path).first_or_404() 297 if f.chal: 298 if not utils.is_admin(): 299 if not utils.ctftime(): 300 if utils.view_after_ctf() and utils.ctf_started(): 301 pass 302 else: 303 abort(403) 304 upload_folder = os.path.join(app.root_path, app.config['UPLOAD_FOLDER']) 305 return send_file(safe_join(upload_folder, f.location)) 306 307 308 @views.route('/themes/<theme>/static/<path:path>') 309 def themes_handler(theme, path): 310 filename = safe_join(app.root_path, 'themes', theme, 'static', path) 311 if os.path.isfile(filename): 312 return send_file(filename) 313 else: 314 abort(404) 315 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/views.py b/CTFd/views.py --- a/CTFd/views.py +++ b/CTFd/views.py @@ -44,9 +44,9 @@ <h3 class="text-center"> <p>A cool CTF platform from <a href="https://ctfd.io">ctfd.io</a></p> <p>Follow us on social media:</p> - <a href="https://twitter.com/ctfdio"><i class="fa fa-twitter fa-2x" aria-hidden="true"></i></a>&nbsp; - <a href="https://facebook.com/ctfdio"><i class="fa fa-facebook-official fa-2x" aria-hidden="true"></i></a>&nbsp; - <a href="https://github.com/ctfd"><i class="fa fa-github fa-2x" aria-hidden="true"></i></a> + <a href="https://twitter.com/ctfdio"><i class="fab fa-twitter fa-2x" aria-hidden="true"></i></a>&nbsp; + <a href="https://facebook.com/ctfdio"><i class="fab fa-facebook fa-2x" aria-hidden="true"></i></a>&nbsp; + <a href="https://github.com/ctfd"><i class="fab fa-github fa-2x" aria-hidden="true"></i></a> </h3> <br> <h4 class="text-center">
{"golden_diff": "diff --git a/CTFd/views.py b/CTFd/views.py\n--- a/CTFd/views.py\n+++ b/CTFd/views.py\n@@ -44,9 +44,9 @@\n <h3 class=\"text-center\">\n <p>A cool CTF platform from <a href=\"https://ctfd.io\">ctfd.io</a></p>\n <p>Follow us on social media:</p>\n- <a href=\"https://twitter.com/ctfdio\"><i class=\"fa fa-twitter fa-2x\" aria-hidden=\"true\"></i></a>&nbsp;\n- <a href=\"https://facebook.com/ctfdio\"><i class=\"fa fa-facebook-official fa-2x\" aria-hidden=\"true\"></i></a>&nbsp;\n- <a href=\"https://github.com/ctfd\"><i class=\"fa fa-github fa-2x\" aria-hidden=\"true\"></i></a>\n+ <a href=\"https://twitter.com/ctfdio\"><i class=\"fab fa-twitter fa-2x\" aria-hidden=\"true\"></i></a>&nbsp;\n+ <a href=\"https://facebook.com/ctfdio\"><i class=\"fab fa-facebook fa-2x\" aria-hidden=\"true\"></i></a>&nbsp;\n+ <a href=\"https://github.com/ctfd\"><i class=\"fab fa-github fa-2x\" aria-hidden=\"true\"></i></a>\n </h3>\n <br>\n <h4 class=\"text-center\">\n", "issue": "Upgrade to Font Awesome 5\nCTFd 1.1 is meant to be a major front end improvement. Font Awesome 5 is out and I think if there's a time to make this change it would be now. \r\n\r\nhttps://fontawesome.com/how-to-use/upgrading-from-4\n", "before_files": [{"content": "import os\nimport re\n\nfrom flask import current_app as app, render_template, request, redirect, abort, jsonify, url_for, session, Blueprint, Response, send_file\nfrom flask.helpers import safe_join\nfrom jinja2.exceptions import TemplateNotFound\nfrom passlib.hash import bcrypt_sha256\n\nfrom CTFd.models import db, Teams, Solves, Awards, Files, Pages\nfrom CTFd.utils import cache, markdown\nfrom CTFd import utils\n\nviews = Blueprint('views', __name__)\n\n\n@views.route('/setup', methods=['GET', 'POST'])\ndef setup():\n # with app.app_context():\n # admin = Teams.query.filter_by(admin=True).first()\n\n if not utils.is_setup():\n if not session.get('nonce'):\n session['nonce'] = utils.sha512(os.urandom(10))\n if request.method == 'POST':\n ctf_name = request.form['ctf_name']\n ctf_name = utils.set_config('ctf_name', ctf_name)\n\n # CSS\n css = utils.set_config('start', '')\n\n # Admin user\n name = request.form['name']\n email = request.form['email']\n password = request.form['password']\n admin = Teams(name, email, password)\n admin.admin = True\n admin.banned = True\n\n # Index page\n\n index = \"\"\"<div class=\"row\">\n <div class=\"col-md-6 offset-md-3\">\n <img class=\"w-100 mx-auto d-block\" style=\"max-width: 500px;padding: 50px;padding-top: 14vh;\" src=\"themes/core/static/img/logo.png\" />\n <h3 class=\"text-center\">\n <p>A cool CTF platform from <a href=\"https://ctfd.io\">ctfd.io</a></p>\n <p>Follow us on social media:</p>\n <a href=\"https://twitter.com/ctfdio\"><i class=\"fa fa-twitter fa-2x\" aria-hidden=\"true\"></i></a>&nbsp;\n <a href=\"https://facebook.com/ctfdio\"><i class=\"fa fa-facebook-official fa-2x\" aria-hidden=\"true\"></i></a>&nbsp;\n <a href=\"https://github.com/ctfd\"><i class=\"fa fa-github fa-2x\" aria-hidden=\"true\"></i></a>\n </h3>\n <br>\n <h4 class=\"text-center\">\n <a href=\"admin\">Click here</a> to login and setup your CTF\n </h4>\n </div>\n</div>\"\"\".format(request.script_root)\n\n page = Pages(title=None, route='index', html=index, draft=False)\n\n # max attempts per challenge\n max_tries = utils.set_config('max_tries', 0)\n\n # Start time\n start = utils.set_config('start', None)\n end = utils.set_config('end', None)\n freeze = utils.set_config('freeze', None)\n\n # Challenges cannot be viewed by unregistered users\n view_challenges_unregistered = utils.set_config('view_challenges_unregistered', None)\n\n # Allow/Disallow registration\n prevent_registration = utils.set_config('prevent_registration', None)\n\n # Verify emails\n verify_emails = utils.set_config('verify_emails', None)\n\n mail_server = utils.set_config('mail_server', None)\n mail_port = utils.set_config('mail_port', None)\n mail_tls = utils.set_config('mail_tls', None)\n mail_ssl = utils.set_config('mail_ssl', None)\n mail_username = utils.set_config('mail_username', None)\n mail_password = utils.set_config('mail_password', None)\n mail_useauth = utils.set_config('mail_useauth', None)\n\n setup = utils.set_config('setup', True)\n\n db.session.add(page)\n db.session.add(admin)\n db.session.commit()\n\n session['username'] = admin.name\n session['id'] = admin.id\n session['admin'] = admin.admin\n session['nonce'] = utils.sha512(os.urandom(10))\n\n db.session.close()\n app.setup = False\n with app.app_context():\n cache.clear()\n\n return redirect(url_for('views.static_html'))\n return render_template('setup.html', nonce=session.get('nonce'))\n return redirect(url_for('views.static_html'))\n\n\n# Custom CSS handler\n@views.route('/static/user.css')\ndef custom_css():\n return Response(utils.get_config('css'), mimetype='text/css')\n\n\n# Static HTML files\n@views.route(\"/\", defaults={'template': 'index'})\n@views.route(\"/<path:template>\")\ndef static_html(template):\n page = utils.get_page(template)\n if page is None:\n try:\n return render_template('%s.html' % template)\n except TemplateNotFound:\n abort(404)\n else:\n if page.auth_required and utils.authed() is False:\n return redirect(url_for('auth.login', next=request.path))\n\n return render_template('page.html', content=markdown(page.html))\n\n\n@views.route('/teams', defaults={'page': '1'})\n@views.route('/teams/<int:page>')\ndef teams(page):\n if utils.get_config('workshop_mode'):\n abort(404)\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n if utils.get_config('verify_emails'):\n count = Teams.query.filter_by(verified=True, banned=False).count()\n teams = Teams.query.filter_by(verified=True, banned=False).slice(page_start, page_end).all()\n else:\n count = Teams.query.filter_by(banned=False).count()\n teams = Teams.query.filter_by(banned=False).slice(page_start, page_end).all()\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n return render_template('teams.html', teams=teams, team_pages=pages, curr_page=page)\n\n\n@views.route('/team', methods=['GET'])\ndef private_team():\n if utils.authed():\n teamid = session['id']\n\n freeze = utils.get_config('freeze')\n user = Teams.query.filter_by(id=teamid).first_or_404()\n solves = Solves.query.filter_by(teamid=teamid)\n awards = Awards.query.filter_by(teamid=teamid)\n\n place = user.place()\n score = user.score()\n\n if freeze:\n freeze = utils.unix_time_to_utc(freeze)\n if teamid != session.get('id'):\n solves = solves.filter(Solves.date < freeze)\n awards = awards.filter(Awards.date < freeze)\n\n solves = solves.all()\n awards = awards.all()\n\n return render_template('team.html', solves=solves, awards=awards, team=user, score=score, place=place, score_frozen=utils.is_scoreboard_frozen())\n else:\n return redirect(url_for('auth.login'))\n\n\n@views.route('/team/<int:teamid>', methods=['GET', 'POST'])\ndef team(teamid):\n if utils.get_config('workshop_mode'):\n abort(404)\n\n if utils.get_config('view_scoreboard_if_utils.authed') and not utils.authed():\n return redirect(url_for('auth.login', next=request.path))\n errors = []\n freeze = utils.get_config('freeze')\n user = Teams.query.filter_by(id=teamid).first_or_404()\n solves = Solves.query.filter_by(teamid=teamid)\n awards = Awards.query.filter_by(teamid=teamid)\n\n place = user.place()\n score = user.score()\n\n if freeze:\n freeze = utils.unix_time_to_utc(freeze)\n if teamid != session.get('id'):\n solves = solves.filter(Solves.date < freeze)\n awards = awards.filter(Awards.date < freeze)\n\n solves = solves.all()\n awards = awards.all()\n\n db.session.close()\n\n if utils.hide_scores() and teamid != session.get('id'):\n errors.append('Scores are currently hidden')\n\n if errors:\n return render_template('team.html', team=user, errors=errors)\n\n if request.method == 'GET':\n return render_template('team.html', solves=solves, awards=awards, team=user, score=score, place=place, score_frozen=utils.is_scoreboard_frozen())\n elif request.method == 'POST':\n json = {'solves': []}\n for x in solves:\n json['solves'].append({'id': x.id, 'chal': x.chalid, 'team': x.teamid})\n return jsonify(json)\n\n\n@views.route('/profile', methods=['POST', 'GET'])\ndef profile():\n if utils.authed():\n if request.method == \"POST\":\n errors = []\n\n name = request.form.get('name').strip()\n email = request.form.get('email').strip()\n website = request.form.get('website').strip()\n affiliation = request.form.get('affiliation').strip()\n country = request.form.get('country').strip()\n\n user = Teams.query.filter_by(id=session['id']).first()\n\n if not utils.get_config('prevent_name_change'):\n names = Teams.query.filter_by(name=name).first()\n name_len = len(request.form['name']) == 0\n\n emails = Teams.query.filter_by(email=email).first()\n valid_email = utils.check_email_format(email)\n\n if utils.check_email_format(name) is True:\n errors.append('Team name cannot be an email address')\n\n if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \\\n (not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):\n errors.append(\"Your old password doesn't match what we have.\")\n if not valid_email:\n errors.append(\"That email doesn't look right\")\n if not utils.get_config('prevent_name_change') and names and name != session['username']:\n errors.append('That team name is already taken')\n if emails and emails.id != session['id']:\n errors.append('That email has already been used')\n if not utils.get_config('prevent_name_change') and name_len:\n errors.append('Pick a longer team name')\n if website.strip() and not utils.validate_url(website):\n errors.append(\"That doesn't look like a valid URL\")\n\n if len(errors) > 0:\n return render_template('profile.html', name=name, email=email, website=website,\n affiliation=affiliation, country=country, errors=errors)\n else:\n team = Teams.query.filter_by(id=session['id']).first()\n if team.name != name:\n if not utils.get_config('prevent_name_change'):\n team.name = name\n session['username'] = team.name\n if team.email != email.lower():\n team.email = email.lower()\n if utils.get_config('verify_emails'):\n team.verified = False\n\n if 'password' in request.form.keys() and not len(request.form['password']) == 0:\n team.password = bcrypt_sha256.encrypt(request.form.get('password'))\n team.website = website\n team.affiliation = affiliation\n team.country = country\n db.session.commit()\n db.session.close()\n return redirect(url_for('views.profile'))\n else:\n user = Teams.query.filter_by(id=session['id']).first()\n name = user.name\n email = user.email\n website = user.website\n affiliation = user.affiliation\n country = user.country\n prevent_name_change = utils.get_config('prevent_name_change')\n confirm_email = utils.get_config('verify_emails') and not user.verified\n return render_template('profile.html', name=name, email=email, website=website, affiliation=affiliation,\n country=country, prevent_name_change=prevent_name_change, confirm_email=confirm_email)\n else:\n return redirect(url_for('auth.login'))\n\n\n@views.route('/files', defaults={'path': ''})\n@views.route('/files/<path:path>')\ndef file_handler(path):\n f = Files.query.filter_by(location=path).first_or_404()\n if f.chal:\n if not utils.is_admin():\n if not utils.ctftime():\n if utils.view_after_ctf() and utils.ctf_started():\n pass\n else:\n abort(403)\n upload_folder = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'])\n return send_file(safe_join(upload_folder, f.location))\n\n\n@views.route('/themes/<theme>/static/<path:path>')\ndef themes_handler(theme, path):\n filename = safe_join(app.root_path, 'themes', theme, 'static', path)\n if os.path.isfile(filename):\n return send_file(filename)\n else:\n abort(404)\n", "path": "CTFd/views.py"}], "after_files": [{"content": "import os\nimport re\n\nfrom flask import current_app as app, render_template, request, redirect, abort, jsonify, url_for, session, Blueprint, Response, send_file\nfrom flask.helpers import safe_join\nfrom jinja2.exceptions import TemplateNotFound\nfrom passlib.hash import bcrypt_sha256\n\nfrom CTFd.models import db, Teams, Solves, Awards, Files, Pages\nfrom CTFd.utils import cache, markdown\nfrom CTFd import utils\n\nviews = Blueprint('views', __name__)\n\n\n@views.route('/setup', methods=['GET', 'POST'])\ndef setup():\n # with app.app_context():\n # admin = Teams.query.filter_by(admin=True).first()\n\n if not utils.is_setup():\n if not session.get('nonce'):\n session['nonce'] = utils.sha512(os.urandom(10))\n if request.method == 'POST':\n ctf_name = request.form['ctf_name']\n ctf_name = utils.set_config('ctf_name', ctf_name)\n\n # CSS\n css = utils.set_config('start', '')\n\n # Admin user\n name = request.form['name']\n email = request.form['email']\n password = request.form['password']\n admin = Teams(name, email, password)\n admin.admin = True\n admin.banned = True\n\n # Index page\n\n index = \"\"\"<div class=\"row\">\n <div class=\"col-md-6 offset-md-3\">\n <img class=\"w-100 mx-auto d-block\" style=\"max-width: 500px;padding: 50px;padding-top: 14vh;\" src=\"themes/core/static/img/logo.png\" />\n <h3 class=\"text-center\">\n <p>A cool CTF platform from <a href=\"https://ctfd.io\">ctfd.io</a></p>\n <p>Follow us on social media:</p>\n <a href=\"https://twitter.com/ctfdio\"><i class=\"fab fa-twitter fa-2x\" aria-hidden=\"true\"></i></a>&nbsp;\n <a href=\"https://facebook.com/ctfdio\"><i class=\"fab fa-facebook fa-2x\" aria-hidden=\"true\"></i></a>&nbsp;\n <a href=\"https://github.com/ctfd\"><i class=\"fab fa-github fa-2x\" aria-hidden=\"true\"></i></a>\n </h3>\n <br>\n <h4 class=\"text-center\">\n <a href=\"admin\">Click here</a> to login and setup your CTF\n </h4>\n </div>\n</div>\"\"\".format(request.script_root)\n\n page = Pages(title=None, route='index', html=index, draft=False)\n\n # max attempts per challenge\n max_tries = utils.set_config('max_tries', 0)\n\n # Start time\n start = utils.set_config('start', None)\n end = utils.set_config('end', None)\n freeze = utils.set_config('freeze', None)\n\n # Challenges cannot be viewed by unregistered users\n view_challenges_unregistered = utils.set_config('view_challenges_unregistered', None)\n\n # Allow/Disallow registration\n prevent_registration = utils.set_config('prevent_registration', None)\n\n # Verify emails\n verify_emails = utils.set_config('verify_emails', None)\n\n mail_server = utils.set_config('mail_server', None)\n mail_port = utils.set_config('mail_port', None)\n mail_tls = utils.set_config('mail_tls', None)\n mail_ssl = utils.set_config('mail_ssl', None)\n mail_username = utils.set_config('mail_username', None)\n mail_password = utils.set_config('mail_password', None)\n mail_useauth = utils.set_config('mail_useauth', None)\n\n setup = utils.set_config('setup', True)\n\n db.session.add(page)\n db.session.add(admin)\n db.session.commit()\n\n session['username'] = admin.name\n session['id'] = admin.id\n session['admin'] = admin.admin\n session['nonce'] = utils.sha512(os.urandom(10))\n\n db.session.close()\n app.setup = False\n with app.app_context():\n cache.clear()\n\n return redirect(url_for('views.static_html'))\n return render_template('setup.html', nonce=session.get('nonce'))\n return redirect(url_for('views.static_html'))\n\n\n# Custom CSS handler\n@views.route('/static/user.css')\ndef custom_css():\n return Response(utils.get_config('css'), mimetype='text/css')\n\n\n# Static HTML files\n@views.route(\"/\", defaults={'template': 'index'})\n@views.route(\"/<path:template>\")\ndef static_html(template):\n page = utils.get_page(template)\n if page is None:\n try:\n return render_template('%s.html' % template)\n except TemplateNotFound:\n abort(404)\n else:\n if page.auth_required and utils.authed() is False:\n return redirect(url_for('auth.login', next=request.path))\n\n return render_template('page.html', content=markdown(page.html))\n\n\n@views.route('/teams', defaults={'page': '1'})\n@views.route('/teams/<int:page>')\ndef teams(page):\n if utils.get_config('workshop_mode'):\n abort(404)\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n if utils.get_config('verify_emails'):\n count = Teams.query.filter_by(verified=True, banned=False).count()\n teams = Teams.query.filter_by(verified=True, banned=False).slice(page_start, page_end).all()\n else:\n count = Teams.query.filter_by(banned=False).count()\n teams = Teams.query.filter_by(banned=False).slice(page_start, page_end).all()\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n return render_template('teams.html', teams=teams, team_pages=pages, curr_page=page)\n\n\n@views.route('/team', methods=['GET'])\ndef private_team():\n if utils.authed():\n teamid = session['id']\n\n freeze = utils.get_config('freeze')\n user = Teams.query.filter_by(id=teamid).first_or_404()\n solves = Solves.query.filter_by(teamid=teamid)\n awards = Awards.query.filter_by(teamid=teamid)\n\n place = user.place()\n score = user.score()\n\n if freeze:\n freeze = utils.unix_time_to_utc(freeze)\n if teamid != session.get('id'):\n solves = solves.filter(Solves.date < freeze)\n awards = awards.filter(Awards.date < freeze)\n\n solves = solves.all()\n awards = awards.all()\n\n return render_template('team.html', solves=solves, awards=awards, team=user, score=score, place=place, score_frozen=utils.is_scoreboard_frozen())\n else:\n return redirect(url_for('auth.login'))\n\n\n@views.route('/team/<int:teamid>', methods=['GET', 'POST'])\ndef team(teamid):\n if utils.get_config('workshop_mode'):\n abort(404)\n\n if utils.get_config('view_scoreboard_if_utils.authed') and not utils.authed():\n return redirect(url_for('auth.login', next=request.path))\n errors = []\n freeze = utils.get_config('freeze')\n user = Teams.query.filter_by(id=teamid).first_or_404()\n solves = Solves.query.filter_by(teamid=teamid)\n awards = Awards.query.filter_by(teamid=teamid)\n\n place = user.place()\n score = user.score()\n\n if freeze:\n freeze = utils.unix_time_to_utc(freeze)\n if teamid != session.get('id'):\n solves = solves.filter(Solves.date < freeze)\n awards = awards.filter(Awards.date < freeze)\n\n solves = solves.all()\n awards = awards.all()\n\n db.session.close()\n\n if utils.hide_scores() and teamid != session.get('id'):\n errors.append('Scores are currently hidden')\n\n if errors:\n return render_template('team.html', team=user, errors=errors)\n\n if request.method == 'GET':\n return render_template('team.html', solves=solves, awards=awards, team=user, score=score, place=place, score_frozen=utils.is_scoreboard_frozen())\n elif request.method == 'POST':\n json = {'solves': []}\n for x in solves:\n json['solves'].append({'id': x.id, 'chal': x.chalid, 'team': x.teamid})\n return jsonify(json)\n\n\n@views.route('/profile', methods=['POST', 'GET'])\ndef profile():\n if utils.authed():\n if request.method == \"POST\":\n errors = []\n\n name = request.form.get('name').strip()\n email = request.form.get('email').strip()\n website = request.form.get('website').strip()\n affiliation = request.form.get('affiliation').strip()\n country = request.form.get('country').strip()\n\n user = Teams.query.filter_by(id=session['id']).first()\n\n if not utils.get_config('prevent_name_change'):\n names = Teams.query.filter_by(name=name).first()\n name_len = len(request.form['name']) == 0\n\n emails = Teams.query.filter_by(email=email).first()\n valid_email = utils.check_email_format(email)\n\n if utils.check_email_format(name) is True:\n errors.append('Team name cannot be an email address')\n\n if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \\\n (not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):\n errors.append(\"Your old password doesn't match what we have.\")\n if not valid_email:\n errors.append(\"That email doesn't look right\")\n if not utils.get_config('prevent_name_change') and names and name != session['username']:\n errors.append('That team name is already taken')\n if emails and emails.id != session['id']:\n errors.append('That email has already been used')\n if not utils.get_config('prevent_name_change') and name_len:\n errors.append('Pick a longer team name')\n if website.strip() and not utils.validate_url(website):\n errors.append(\"That doesn't look like a valid URL\")\n\n if len(errors) > 0:\n return render_template('profile.html', name=name, email=email, website=website,\n affiliation=affiliation, country=country, errors=errors)\n else:\n team = Teams.query.filter_by(id=session['id']).first()\n if team.name != name:\n if not utils.get_config('prevent_name_change'):\n team.name = name\n session['username'] = team.name\n if team.email != email.lower():\n team.email = email.lower()\n if utils.get_config('verify_emails'):\n team.verified = False\n\n if 'password' in request.form.keys() and not len(request.form['password']) == 0:\n team.password = bcrypt_sha256.encrypt(request.form.get('password'))\n team.website = website\n team.affiliation = affiliation\n team.country = country\n db.session.commit()\n db.session.close()\n return redirect(url_for('views.profile'))\n else:\n user = Teams.query.filter_by(id=session['id']).first()\n name = user.name\n email = user.email\n website = user.website\n affiliation = user.affiliation\n country = user.country\n prevent_name_change = utils.get_config('prevent_name_change')\n confirm_email = utils.get_config('verify_emails') and not user.verified\n return render_template('profile.html', name=name, email=email, website=website, affiliation=affiliation,\n country=country, prevent_name_change=prevent_name_change, confirm_email=confirm_email)\n else:\n return redirect(url_for('auth.login'))\n\n\n@views.route('/files', defaults={'path': ''})\n@views.route('/files/<path:path>')\ndef file_handler(path):\n f = Files.query.filter_by(location=path).first_or_404()\n if f.chal:\n if not utils.is_admin():\n if not utils.ctftime():\n if utils.view_after_ctf() and utils.ctf_started():\n pass\n else:\n abort(403)\n upload_folder = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'])\n return send_file(safe_join(upload_folder, f.location))\n\n\n@views.route('/themes/<theme>/static/<path:path>')\ndef themes_handler(theme, path):\n filename = safe_join(app.root_path, 'themes', theme, 'static', path)\n if os.path.isfile(filename):\n return send_file(filename)\n else:\n abort(404)\n", "path": "CTFd/views.py"}]}
3,959
330
gh_patches_debug_686
rasdani/github-patches
git_diff
projectmesa__mesa-398
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- error launching Flocker I've Anaconda with python 3.6 & Mesa 0.8.1 I launch Flocker's run.py and I get this error: ``` Flockers e$ python run.py Traceback (most recent call last): File "run.py", line 1, in <module> from flockers.server import server File "/Users/e/Dropbox/devlib/notebooks/mesa-master/examples/Flockers/flockers/server.py", line 20, in <module> server = ModularServer(BoidModel, [boid_canvas], "Boids", model_params) File "/Users/e/anaconda3/lib/python3.6/site-packages/mesa/visualization/ModularVisualization.py", line 287, in __init__ self.reset_model() File "/Users/e/anaconda3/lib/python3.6/site-packages/mesa/visualization/ModularVisualization.py", line 313, in reset_model self.model = self.model_cls(**model_params) TypeError: __init__() got an unexpected keyword argument 'N' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/Flockers/flockers/server.py` Content: ``` 1 from mesa.visualization.ModularVisualization import ModularServer 2 3 from .model import BoidModel 4 from .SimpleContinuousModule import SimpleCanvas 5 6 7 def boid_draw(agent): 8 return {"Shape": "circle", "r": 2, "Filled": "true", "Color": "Red"} 9 10 boid_canvas = SimpleCanvas(boid_draw, 500, 500) 11 model_params = { 12 "N": 100, 13 "width": 100, 14 "height": 100, 15 "speed": 5, 16 "vision": 10, 17 "separation": 2 18 } 19 20 server = ModularServer(BoidModel, [boid_canvas], "Boids", model_params) 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/Flockers/flockers/server.py b/examples/Flockers/flockers/server.py --- a/examples/Flockers/flockers/server.py +++ b/examples/Flockers/flockers/server.py @@ -9,7 +9,7 @@ boid_canvas = SimpleCanvas(boid_draw, 500, 500) model_params = { - "N": 100, + "population": 100, "width": 100, "height": 100, "speed": 5,
{"golden_diff": "diff --git a/examples/Flockers/flockers/server.py b/examples/Flockers/flockers/server.py\n--- a/examples/Flockers/flockers/server.py\n+++ b/examples/Flockers/flockers/server.py\n@@ -9,7 +9,7 @@\n \n boid_canvas = SimpleCanvas(boid_draw, 500, 500)\n model_params = {\n- \"N\": 100,\n+ \"population\": 100,\n \"width\": 100,\n \"height\": 100,\n \"speed\": 5,\n", "issue": "error launching Flocker\nI've Anaconda with python 3.6 & Mesa 0.8.1\r\n\r\nI launch Flocker's run.py and I get this error:\r\n```\r\nFlockers e$ python run.py\r\nTraceback (most recent call last):\r\n File \"run.py\", line 1, in <module>\r\n from flockers.server import server\r\n File \"/Users/e/Dropbox/devlib/notebooks/mesa-master/examples/Flockers/flockers/server.py\", line 20, in <module>\r\n server = ModularServer(BoidModel, [boid_canvas], \"Boids\", model_params)\r\n File \"/Users/e/anaconda3/lib/python3.6/site-packages/mesa/visualization/ModularVisualization.py\", line 287, in __init__\r\n self.reset_model()\r\n File \"/Users/e/anaconda3/lib/python3.6/site-packages/mesa/visualization/ModularVisualization.py\", line 313, in reset_model\r\n self.model = self.model_cls(**model_params)\r\nTypeError: __init__() got an unexpected keyword argument 'N'\r\n```\n", "before_files": [{"content": "from mesa.visualization.ModularVisualization import ModularServer\n\nfrom .model import BoidModel\nfrom .SimpleContinuousModule import SimpleCanvas\n\n\ndef boid_draw(agent):\n return {\"Shape\": \"circle\", \"r\": 2, \"Filled\": \"true\", \"Color\": \"Red\"}\n\nboid_canvas = SimpleCanvas(boid_draw, 500, 500)\nmodel_params = {\n \"N\": 100,\n \"width\": 100,\n \"height\": 100,\n \"speed\": 5,\n \"vision\": 10,\n \"separation\": 2\n}\n\nserver = ModularServer(BoidModel, [boid_canvas], \"Boids\", model_params)\n", "path": "examples/Flockers/flockers/server.py"}], "after_files": [{"content": "from mesa.visualization.ModularVisualization import ModularServer\n\nfrom .model import BoidModel\nfrom .SimpleContinuousModule import SimpleCanvas\n\n\ndef boid_draw(agent):\n return {\"Shape\": \"circle\", \"r\": 2, \"Filled\": \"true\", \"Color\": \"Red\"}\n\nboid_canvas = SimpleCanvas(boid_draw, 500, 500)\nmodel_params = {\n \"population\": 100,\n \"width\": 100,\n \"height\": 100,\n \"speed\": 5,\n \"vision\": 10,\n \"separation\": 2\n}\n\nserver = ModularServer(BoidModel, [boid_canvas], \"Boids\", model_params)\n", "path": "examples/Flockers/flockers/server.py"}]}
692
128
gh_patches_debug_39717
rasdani/github-patches
git_diff
kserve__kserve-156
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- KFServing should have a consistent way of supporting model download across inference server implementations /kind feature **Describe the solution you'd like** KFServing should expose a consistent way to download models across inference servers and clouds. The current implementation depends on the features of individual inference servers expose. E.g. see #137 **Anything else you would like to add:** Proposed solution design is documented here: https://docs.google.com/document/d/1xqBOkoQ6Vzc5gv4O5MgVVNE3qILbKuMkC-DN5zp5w28/edit?usp=sharing --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/kfserving/kfserving/storage.py` Content: ``` 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import logging 16 import tempfile 17 import os 18 import re 19 from minio import Minio 20 from google.cloud import storage 21 from google.auth import exceptions 22 23 _GCS_PREFIX = "gs://" 24 _S3_PREFIX = "s3://" 25 _LOCAL_PREFIX = "file://" 26 27 28 class Storage(object): # pylint: disable=too-few-public-methods 29 @staticmethod 30 def download(uri: str) -> str: 31 logging.info("Copying contents of %s to local", uri) 32 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri): 33 return Storage._download_local(uri) 34 35 temp_dir = tempfile.mkdtemp() 36 if uri.startswith(_GCS_PREFIX): 37 Storage._download_gcs(uri, temp_dir) 38 elif uri.startswith(_S3_PREFIX): 39 Storage._download_s3(uri, temp_dir) 40 else: 41 raise Exception("Cannot recognize storage type for " + uri + 42 "\n'%s', '%s', and '%s' are the current available storage type." % 43 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX)) 44 45 logging.info("Successfully copied %s to %s", uri, temp_dir) 46 return temp_dir 47 48 @staticmethod 49 def _download_s3(uri, temp_dir: str): 50 client = Storage._create_minio_client() 51 bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1) 52 bucket_name = bucket_args[0] 53 bucket_path = bucket_args[1] if len(bucket_args) > 1 else "" 54 objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True) 55 for obj in objects: 56 # Replace any prefix from the object key with temp_dir 57 subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/") 58 client.fget_object(bucket_name, obj.object_name, 59 os.path.join(temp_dir, subdir_object_key)) 60 61 @staticmethod 62 def _download_gcs(uri, temp_dir: str): 63 try: 64 storage_client = storage.Client() 65 except exceptions.DefaultCredentialsError: 66 storage_client = storage.Client.create_anonymous_client() 67 bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1) 68 bucket_name = bucket_args[0] 69 bucket_path = bucket_args[1] if len(bucket_args) > 1 else "" 70 bucket = storage_client.bucket(bucket_name) 71 blobs = bucket.list_blobs(prefix=bucket_path) 72 for blob in blobs: 73 # Replace any prefix from the object key with temp_dir 74 subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/") 75 # Create necessary subdirectory to store the object locally 76 if "/" in subdir_object_key: 77 local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0]) 78 if not os.path.isdir(local_object_dir): 79 os.makedirs(local_object_dir, exist_ok=True) 80 blob.download_to_filename(os.path.join(temp_dir, subdir_object_key)) 81 82 @staticmethod 83 def _download_local(uri): 84 local_path = uri.replace(_LOCAL_PREFIX, "", 1) 85 if not os.path.exists(local_path): 86 raise Exception("Local path %s does not exist." % (uri)) 87 return local_path 88 89 @staticmethod 90 def _create_minio_client(): 91 # Remove possible http scheme for Minio 92 url = re.compile(r"https?://") 93 minioClient = Minio(url.sub("", os.getenv("S3_ENDPOINT", "")), 94 access_key=os.getenv("AWS_ACCESS_KEY_ID", ""), 95 secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""), 96 secure=True) 97 return minioClient 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py --- a/python/kfserving/kfserving/storage.py +++ b/python/kfserving/kfserving/storage.py @@ -27,23 +27,25 @@ class Storage(object): # pylint: disable=too-few-public-methods @staticmethod - def download(uri: str) -> str: + def download(uri: str, out_dir: str = None) -> str: logging.info("Copying contents of %s to local", uri) if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri): return Storage._download_local(uri) - temp_dir = tempfile.mkdtemp() + if out_dir is None: + out_dir = tempfile.mkdtemp() + if uri.startswith(_GCS_PREFIX): - Storage._download_gcs(uri, temp_dir) + Storage._download_gcs(uri, out_dir) elif uri.startswith(_S3_PREFIX): - Storage._download_s3(uri, temp_dir) + Storage._download_s3(uri, out_dir) else: raise Exception("Cannot recognize storage type for " + uri + "\n'%s', '%s', and '%s' are the current available storage type." % (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX)) - logging.info("Successfully copied %s to %s", uri, temp_dir) - return temp_dir + logging.info("Successfully copied %s to %s", uri, out_dir) + return out_dir @staticmethod def _download_s3(uri, temp_dir: str): @@ -68,16 +70,23 @@ bucket_name = bucket_args[0] bucket_path = bucket_args[1] if len(bucket_args) > 1 else "" bucket = storage_client.bucket(bucket_name) - blobs = bucket.list_blobs(prefix=bucket_path) + prefix = bucket_path + if not prefix.endswith("/"): + prefix = prefix + "/" + blobs = bucket.list_blobs(prefix=prefix) for blob in blobs: # Replace any prefix from the object key with temp_dir subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/") + # Create necessary subdirectory to store the object locally if "/" in subdir_object_key: local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0]) if not os.path.isdir(local_object_dir): os.makedirs(local_object_dir, exist_ok=True) - blob.download_to_filename(os.path.join(temp_dir, subdir_object_key)) + if subdir_object_key.strip() != "": + dest_path = os.path.join(temp_dir, subdir_object_key) + logging.info("Downloading: %s", dest_path) + blob.download_to_filename(dest_path) @staticmethod def _download_local(uri):
{"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -27,23 +27,25 @@\n \n class Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n- def download(uri: str) -> str:\n+ def download(uri: str, out_dir: str = None) -> str:\n logging.info(\"Copying contents of %s to local\", uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n \n- temp_dir = tempfile.mkdtemp()\n+ if out_dir is None:\n+ out_dir = tempfile.mkdtemp()\n+\n if uri.startswith(_GCS_PREFIX):\n- Storage._download_gcs(uri, temp_dir)\n+ Storage._download_gcs(uri, out_dir)\n elif uri.startswith(_S3_PREFIX):\n- Storage._download_s3(uri, temp_dir)\n+ Storage._download_s3(uri, out_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n \n- logging.info(\"Successfully copied %s to %s\", uri, temp_dir)\n- return temp_dir\n+ logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n+ return out_dir\n \n @staticmethod\n def _download_s3(uri, temp_dir: str):\n@@ -68,16 +70,23 @@\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n- blobs = bucket.list_blobs(prefix=bucket_path)\n+ prefix = bucket_path\n+ if not prefix.endswith(\"/\"):\n+ prefix = prefix + \"/\"\n+ blobs = bucket.list_blobs(prefix=prefix)\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n+\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n- blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n+ if subdir_object_key.strip() != \"\":\n+ dest_path = os.path.join(temp_dir, subdir_object_key)\n+ logging.info(\"Downloading: %s\", dest_path)\n+ blob.download_to_filename(dest_path)\n \n @staticmethod\n def _download_local(uri):\n", "issue": "KFServing should have a consistent way of supporting model download across inference server implementations\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\nKFServing should expose a consistent way to download models across inference servers and clouds. The current implementation depends on the features of individual inference servers expose. E.g. see #137 \r\n\r\n**Anything else you would like to add:**\r\nProposed solution design is documented here: https://docs.google.com/document/d/1xqBOkoQ6Vzc5gv4O5MgVVNE3qILbKuMkC-DN5zp5w28/edit?usp=sharing\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tempfile\nimport os\nimport re\nfrom minio import Minio\nfrom google.cloud import storage\nfrom google.auth import exceptions\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n def download(uri: str) -> str:\n logging.info(\"Copying contents of %s to local\", uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n\n temp_dir = tempfile.mkdtemp()\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, temp_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, temp_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\", uri, temp_dir)\n return temp_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n client = Storage._create_minio_client()\n bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n for obj in objects:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n client.fget_object(bucket_name, obj.object_name,\n os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n blobs = bucket.list_blobs(prefix=bucket_path)\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_local(uri):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n\n @staticmethod\n def _create_minio_client():\n # Remove possible http scheme for Minio\n url = re.compile(r\"https?://\")\n minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n secure=True)\n return minioClient\n", "path": "python/kfserving/kfserving/storage.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tempfile\nimport os\nimport re\nfrom minio import Minio\nfrom google.cloud import storage\nfrom google.auth import exceptions\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n def download(uri: str, out_dir: str = None) -> str:\n logging.info(\"Copying contents of %s to local\", uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n\n if out_dir is None:\n out_dir = tempfile.mkdtemp()\n\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, out_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, out_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n return out_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n client = Storage._create_minio_client()\n bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n for obj in objects:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n client.fget_object(bucket_name, obj.object_name,\n os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n prefix = bucket_path\n if not prefix.endswith(\"/\"):\n prefix = prefix + \"/\"\n blobs = bucket.list_blobs(prefix=prefix)\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n if subdir_object_key.strip() != \"\":\n dest_path = os.path.join(temp_dir, subdir_object_key)\n logging.info(\"Downloading: %s\", dest_path)\n blob.download_to_filename(dest_path)\n\n @staticmethod\n def _download_local(uri):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n\n @staticmethod\n def _create_minio_client():\n # Remove possible http scheme for Minio\n url = re.compile(r\"https?://\")\n minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n secure=True)\n return minioClient\n", "path": "python/kfserving/kfserving/storage.py"}]}
1,496
635
gh_patches_debug_28046
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-23193
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bitchute --dateafter flag broken <!-- ###################################################################### WARNING! IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE ###################################################################### --> ## Checklist <!-- Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl: - First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.11.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED. - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser. - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape. - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates. - Read bugs section in FAQ: http://yt-dl.org/reporting - Finally, put x into all relevant boxes (like this [x]) --> - [x] I'm reporting a broken site support issue - [x] I've verified that I'm running youtube-dl version **2019.11.05** - [x] I've checked that all provided URLs are alive and playable in a browser - [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped - [x] I've searched the bugtracker for similar bug reports including closed ones - [x] I've read bugs section in FAQ ## Verbose log <!-- Provide the complete verbose output of youtube-dl that clearly demonstrates the problem. Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this: [debug] System config: [] [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 [debug] youtube-dl version 2019.11.05 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} <more lines> --> ``` [debug] System config: [] [debug] User config: [] [debug] Custom config: [] [debug] Command-line args: [u'https://www.bitchute.com/channel/Styxhexenham/', u'--dateafter', u'now-3days', u'-e', u'-v'] [debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8 [debug] youtube-dl version 2019.11.05 [debug] Python version 2.7.5 (CPython) - Linux-4.4.198-1.el7.elrepo.x86_64-x86_64-with-centos-7.7.1908-Core [debug] exe versions: ffmpeg 3.4.6, ffprobe 3.4.6 [debug] Proxy map: {} ``` ## Description <!-- Provide an explanation of your issue in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible. If work on your issue requires account credentials please provide them or explain how one can obtain them. --> Title says it all. The date feature works on youtube but not bitchute. Thanks for looking into it --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `youtube_dl/extractor/bitchute.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import itertools 5 import re 6 7 from .common import InfoExtractor 8 from ..utils import ( 9 orderedSet, 10 urlencode_postdata, 11 ) 12 13 14 class BitChuteIE(InfoExtractor): 15 _VALID_URL = r'https?://(?:www\.)?bitchute\.com/(?:video|embed|torrent/[^/]+)/(?P<id>[^/?#&]+)' 16 _TESTS = [{ 17 'url': 'https://www.bitchute.com/video/szoMrox2JEI/', 18 'md5': '66c4a70e6bfc40dcb6be3eb1d74939eb', 19 'info_dict': { 20 'id': 'szoMrox2JEI', 21 'ext': 'mp4', 22 'title': 'Fuck bitches get money', 23 'description': 'md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a', 24 'thumbnail': r're:^https?://.*\.jpg$', 25 'uploader': 'Victoria X Rave', 26 }, 27 }, { 28 'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/', 29 'only_matching': True, 30 }, { 31 'url': 'https://www.bitchute.com/torrent/Zee5BE49045h/szoMrox2JEI.webtorrent', 32 'only_matching': True, 33 }] 34 35 def _real_extract(self, url): 36 video_id = self._match_id(url) 37 38 webpage = self._download_webpage( 39 'https://www.bitchute.com/video/%s' % video_id, video_id, headers={ 40 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.57 Safari/537.36', 41 }) 42 43 title = self._html_search_regex( 44 (r'<[^>]+\bid=["\']video-title[^>]+>([^<]+)', r'<title>([^<]+)'), 45 webpage, 'title', default=None) or self._html_search_meta( 46 'description', webpage, 'title', 47 default=None) or self._og_search_description(webpage) 48 49 format_urls = [] 50 for mobj in re.finditer( 51 r'addWebSeed\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage): 52 format_urls.append(mobj.group('url')) 53 format_urls.extend(re.findall(r'as=(https?://[^&"\']+)', webpage)) 54 55 formats = [ 56 {'url': format_url} 57 for format_url in orderedSet(format_urls)] 58 59 if not formats: 60 formats = self._parse_html5_media_entries( 61 url, webpage, video_id)[0]['formats'] 62 63 self._check_formats(formats, video_id) 64 self._sort_formats(formats) 65 66 description = self._html_search_regex( 67 r'(?s)<div\b[^>]+\bclass=["\']full hidden[^>]+>(.+?)</div>', 68 webpage, 'description', fatal=False) 69 thumbnail = self._og_search_thumbnail( 70 webpage, default=None) or self._html_search_meta( 71 'twitter:image:src', webpage, 'thumbnail') 72 uploader = self._html_search_regex( 73 (r'(?s)<div class=["\']channel-banner.*?<p\b[^>]+\bclass=["\']name[^>]+>(.+?)</p>', 74 r'(?s)<p\b[^>]+\bclass=["\']video-author[^>]+>(.+?)</p>'), 75 webpage, 'uploader', fatal=False) 76 77 return { 78 'id': video_id, 79 'title': title, 80 'description': description, 81 'thumbnail': thumbnail, 82 'uploader': uploader, 83 'formats': formats, 84 } 85 86 87 class BitChuteChannelIE(InfoExtractor): 88 _VALID_URL = r'https?://(?:www\.)?bitchute\.com/channel/(?P<id>[^/?#&]+)' 89 _TEST = { 90 'url': 'https://www.bitchute.com/channel/victoriaxrave/', 91 'playlist_mincount': 185, 92 'info_dict': { 93 'id': 'victoriaxrave', 94 }, 95 } 96 97 _TOKEN = 'zyG6tQcGPE5swyAEFLqKUwMuMMuF6IO2DZ6ZDQjGfsL0e4dcTLwqkTTul05Jdve7' 98 99 def _entries(self, channel_id): 100 channel_url = 'https://www.bitchute.com/channel/%s/' % channel_id 101 offset = 0 102 for page_num in itertools.count(1): 103 data = self._download_json( 104 '%sextend/' % channel_url, channel_id, 105 'Downloading channel page %d' % page_num, 106 data=urlencode_postdata({ 107 'csrfmiddlewaretoken': self._TOKEN, 108 'name': '', 109 'offset': offset, 110 }), headers={ 111 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 112 'Referer': channel_url, 113 'X-Requested-With': 'XMLHttpRequest', 114 'Cookie': 'csrftoken=%s' % self._TOKEN, 115 }) 116 if data.get('success') is False: 117 break 118 html = data.get('html') 119 if not html: 120 break 121 video_ids = re.findall( 122 r'class=["\']channel-videos-image-container[^>]+>\s*<a\b[^>]+\bhref=["\']/video/([^"\'/]+)', 123 html) 124 if not video_ids: 125 break 126 offset += len(video_ids) 127 for video_id in video_ids: 128 yield self.url_result( 129 'https://www.bitchute.com/video/%s' % video_id, 130 ie=BitChuteIE.ie_key(), video_id=video_id) 131 132 def _real_extract(self, url): 133 channel_id = self._match_id(url) 134 return self.playlist_result( 135 self._entries(channel_id), playlist_id=channel_id) 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/youtube_dl/extractor/bitchute.py b/youtube_dl/extractor/bitchute.py --- a/youtube_dl/extractor/bitchute.py +++ b/youtube_dl/extractor/bitchute.py @@ -7,6 +7,7 @@ from .common import InfoExtractor from ..utils import ( orderedSet, + unified_strdate, urlencode_postdata, ) @@ -23,6 +24,7 @@ 'description': 'md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Victoria X Rave', + 'upload_date': '20170813', }, }, { 'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/', @@ -74,12 +76,17 @@ r'(?s)<p\b[^>]+\bclass=["\']video-author[^>]+>(.+?)</p>'), webpage, 'uploader', fatal=False) + upload_date = unified_strdate(self._search_regex( + r'class=["\']video-publish-date[^>]+>[^<]+ at \d+:\d+ UTC on (.+?)\.', + webpage, 'upload date', fatal=False)) + return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, + 'upload_date': upload_date, 'formats': formats, }
{"golden_diff": "diff --git a/youtube_dl/extractor/bitchute.py b/youtube_dl/extractor/bitchute.py\n--- a/youtube_dl/extractor/bitchute.py\n+++ b/youtube_dl/extractor/bitchute.py\n@@ -7,6 +7,7 @@\n from .common import InfoExtractor\n from ..utils import (\n orderedSet,\n+ unified_strdate,\n urlencode_postdata,\n )\n \n@@ -23,6 +24,7 @@\n 'description': 'md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'uploader': 'Victoria X Rave',\n+ 'upload_date': '20170813',\n },\n }, {\n 'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/',\n@@ -74,12 +76,17 @@\n r'(?s)<p\\b[^>]+\\bclass=[\"\\']video-author[^>]+>(.+?)</p>'),\n webpage, 'uploader', fatal=False)\n \n+ upload_date = unified_strdate(self._search_regex(\n+ r'class=[\"\\']video-publish-date[^>]+>[^<]+ at \\d+:\\d+ UTC on (.+?)\\.',\n+ webpage, 'upload date', fatal=False))\n+\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'thumbnail': thumbnail,\n 'uploader': uploader,\n+ 'upload_date': upload_date,\n 'formats': formats,\n }\n", "issue": "Bitchute --dateafter flag broken\n<!--\r\n\r\n######################################################################\r\n WARNING!\r\n IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE\r\n######################################################################\r\n\r\n-->\r\n\r\n\r\n## Checklist\r\n\r\n<!--\r\nCarefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:\r\n- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.11.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.\r\n- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.\r\n- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.\r\n- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.\r\n- Read bugs section in FAQ: http://yt-dl.org/reporting\r\n- Finally, put x into all relevant boxes (like this [x])\r\n-->\r\n\r\n- [x] I'm reporting a broken site support issue\r\n- [x] I've verified that I'm running youtube-dl version **2019.11.05**\r\n- [x] I've checked that all provided URLs are alive and playable in a browser\r\n- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped\r\n- [x] I've searched the bugtracker for similar bug reports including closed ones\r\n- [x] I've read bugs section in FAQ\r\n\r\n\r\n## Verbose log\r\n\r\n<!--\r\nProvide the complete verbose output of youtube-dl that clearly demonstrates the problem.\r\nAdd the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:\r\n [debug] System config: []\r\n [debug] User config: []\r\n [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']\r\n [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251\r\n [debug] youtube-dl version 2019.11.05\r\n [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2\r\n [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4\r\n [debug] Proxy map: {}\r\n <more lines>\r\n-->\r\n\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'https://www.bitchute.com/channel/Styxhexenham/', u'--dateafter', u'now-3days', u'-e', u'-v']\r\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2019.11.05\r\n[debug] Python version 2.7.5 (CPython) - Linux-4.4.198-1.el7.elrepo.x86_64-x86_64-with-centos-7.7.1908-Core\r\n[debug] exe versions: ffmpeg 3.4.6, ffprobe 3.4.6\r\n[debug] Proxy map: {}\r\n```\r\n\r\n\r\n## Description\r\n\r\n<!--\r\nProvide an explanation of your issue in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.\r\nIf work on your issue requires account credentials please provide them or explain how one can obtain them.\r\n-->\r\n\r\nTitle says it all. The date feature works on youtube but not bitchute. Thanks for looking into it\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport itertools\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n orderedSet,\n urlencode_postdata,\n)\n\n\nclass BitChuteIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?bitchute\\.com/(?:video|embed|torrent/[^/]+)/(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'https://www.bitchute.com/video/szoMrox2JEI/',\n 'md5': '66c4a70e6bfc40dcb6be3eb1d74939eb',\n 'info_dict': {\n 'id': 'szoMrox2JEI',\n 'ext': 'mp4',\n 'title': 'Fuck bitches get money',\n 'description': 'md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'uploader': 'Victoria X Rave',\n },\n }, {\n 'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/',\n 'only_matching': True,\n }, {\n 'url': 'https://www.bitchute.com/torrent/Zee5BE49045h/szoMrox2JEI.webtorrent',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n webpage = self._download_webpage(\n 'https://www.bitchute.com/video/%s' % video_id, video_id, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.57 Safari/537.36',\n })\n\n title = self._html_search_regex(\n (r'<[^>]+\\bid=[\"\\']video-title[^>]+>([^<]+)', r'<title>([^<]+)'),\n webpage, 'title', default=None) or self._html_search_meta(\n 'description', webpage, 'title',\n default=None) or self._og_search_description(webpage)\n\n format_urls = []\n for mobj in re.finditer(\n r'addWebSeed\\s*\\(\\s*([\"\\'])(?P<url>(?:(?!\\1).)+)\\1', webpage):\n format_urls.append(mobj.group('url'))\n format_urls.extend(re.findall(r'as=(https?://[^&\"\\']+)', webpage))\n\n formats = [\n {'url': format_url}\n for format_url in orderedSet(format_urls)]\n\n if not formats:\n formats = self._parse_html5_media_entries(\n url, webpage, video_id)[0]['formats']\n\n self._check_formats(formats, video_id)\n self._sort_formats(formats)\n\n description = self._html_search_regex(\n r'(?s)<div\\b[^>]+\\bclass=[\"\\']full hidden[^>]+>(.+?)</div>',\n webpage, 'description', fatal=False)\n thumbnail = self._og_search_thumbnail(\n webpage, default=None) or self._html_search_meta(\n 'twitter:image:src', webpage, 'thumbnail')\n uploader = self._html_search_regex(\n (r'(?s)<div class=[\"\\']channel-banner.*?<p\\b[^>]+\\bclass=[\"\\']name[^>]+>(.+?)</p>',\n r'(?s)<p\\b[^>]+\\bclass=[\"\\']video-author[^>]+>(.+?)</p>'),\n webpage, 'uploader', fatal=False)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'thumbnail': thumbnail,\n 'uploader': uploader,\n 'formats': formats,\n }\n\n\nclass BitChuteChannelIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?bitchute\\.com/channel/(?P<id>[^/?#&]+)'\n _TEST = {\n 'url': 'https://www.bitchute.com/channel/victoriaxrave/',\n 'playlist_mincount': 185,\n 'info_dict': {\n 'id': 'victoriaxrave',\n },\n }\n\n _TOKEN = 'zyG6tQcGPE5swyAEFLqKUwMuMMuF6IO2DZ6ZDQjGfsL0e4dcTLwqkTTul05Jdve7'\n\n def _entries(self, channel_id):\n channel_url = 'https://www.bitchute.com/channel/%s/' % channel_id\n offset = 0\n for page_num in itertools.count(1):\n data = self._download_json(\n '%sextend/' % channel_url, channel_id,\n 'Downloading channel page %d' % page_num,\n data=urlencode_postdata({\n 'csrfmiddlewaretoken': self._TOKEN,\n 'name': '',\n 'offset': offset,\n }), headers={\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Referer': channel_url,\n 'X-Requested-With': 'XMLHttpRequest',\n 'Cookie': 'csrftoken=%s' % self._TOKEN,\n })\n if data.get('success') is False:\n break\n html = data.get('html')\n if not html:\n break\n video_ids = re.findall(\n r'class=[\"\\']channel-videos-image-container[^>]+>\\s*<a\\b[^>]+\\bhref=[\"\\']/video/([^\"\\'/]+)',\n html)\n if not video_ids:\n break\n offset += len(video_ids)\n for video_id in video_ids:\n yield self.url_result(\n 'https://www.bitchute.com/video/%s' % video_id,\n ie=BitChuteIE.ie_key(), video_id=video_id)\n\n def _real_extract(self, url):\n channel_id = self._match_id(url)\n return self.playlist_result(\n self._entries(channel_id), playlist_id=channel_id)\n", "path": "youtube_dl/extractor/bitchute.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport itertools\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n orderedSet,\n unified_strdate,\n urlencode_postdata,\n)\n\n\nclass BitChuteIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?bitchute\\.com/(?:video|embed|torrent/[^/]+)/(?P<id>[^/?#&]+)'\n _TESTS = [{\n 'url': 'https://www.bitchute.com/video/szoMrox2JEI/',\n 'md5': '66c4a70e6bfc40dcb6be3eb1d74939eb',\n 'info_dict': {\n 'id': 'szoMrox2JEI',\n 'ext': 'mp4',\n 'title': 'Fuck bitches get money',\n 'description': 'md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'uploader': 'Victoria X Rave',\n 'upload_date': '20170813',\n },\n }, {\n 'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/',\n 'only_matching': True,\n }, {\n 'url': 'https://www.bitchute.com/torrent/Zee5BE49045h/szoMrox2JEI.webtorrent',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n webpage = self._download_webpage(\n 'https://www.bitchute.com/video/%s' % video_id, video_id, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.57 Safari/537.36',\n })\n\n title = self._html_search_regex(\n (r'<[^>]+\\bid=[\"\\']video-title[^>]+>([^<]+)', r'<title>([^<]+)'),\n webpage, 'title', default=None) or self._html_search_meta(\n 'description', webpage, 'title',\n default=None) or self._og_search_description(webpage)\n\n format_urls = []\n for mobj in re.finditer(\n r'addWebSeed\\s*\\(\\s*([\"\\'])(?P<url>(?:(?!\\1).)+)\\1', webpage):\n format_urls.append(mobj.group('url'))\n format_urls.extend(re.findall(r'as=(https?://[^&\"\\']+)', webpage))\n\n formats = [\n {'url': format_url}\n for format_url in orderedSet(format_urls)]\n\n if not formats:\n formats = self._parse_html5_media_entries(\n url, webpage, video_id)[0]['formats']\n\n self._check_formats(formats, video_id)\n self._sort_formats(formats)\n\n description = self._html_search_regex(\n r'(?s)<div\\b[^>]+\\bclass=[\"\\']full hidden[^>]+>(.+?)</div>',\n webpage, 'description', fatal=False)\n thumbnail = self._og_search_thumbnail(\n webpage, default=None) or self._html_search_meta(\n 'twitter:image:src', webpage, 'thumbnail')\n uploader = self._html_search_regex(\n (r'(?s)<div class=[\"\\']channel-banner.*?<p\\b[^>]+\\bclass=[\"\\']name[^>]+>(.+?)</p>',\n r'(?s)<p\\b[^>]+\\bclass=[\"\\']video-author[^>]+>(.+?)</p>'),\n webpage, 'uploader', fatal=False)\n\n upload_date = unified_strdate(self._search_regex(\n r'class=[\"\\']video-publish-date[^>]+>[^<]+ at \\d+:\\d+ UTC on (.+?)\\.',\n webpage, 'upload date', fatal=False))\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'thumbnail': thumbnail,\n 'uploader': uploader,\n 'upload_date': upload_date,\n 'formats': formats,\n }\n\n\nclass BitChuteChannelIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?bitchute\\.com/channel/(?P<id>[^/?#&]+)'\n _TEST = {\n 'url': 'https://www.bitchute.com/channel/victoriaxrave/',\n 'playlist_mincount': 185,\n 'info_dict': {\n 'id': 'victoriaxrave',\n },\n }\n\n _TOKEN = 'zyG6tQcGPE5swyAEFLqKUwMuMMuF6IO2DZ6ZDQjGfsL0e4dcTLwqkTTul05Jdve7'\n\n def _entries(self, channel_id):\n channel_url = 'https://www.bitchute.com/channel/%s/' % channel_id\n offset = 0\n for page_num in itertools.count(1):\n data = self._download_json(\n '%sextend/' % channel_url, channel_id,\n 'Downloading channel page %d' % page_num,\n data=urlencode_postdata({\n 'csrfmiddlewaretoken': self._TOKEN,\n 'name': '',\n 'offset': offset,\n }), headers={\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Referer': channel_url,\n 'X-Requested-With': 'XMLHttpRequest',\n 'Cookie': 'csrftoken=%s' % self._TOKEN,\n })\n if data.get('success') is False:\n break\n html = data.get('html')\n if not html:\n break\n video_ids = re.findall(\n r'class=[\"\\']channel-videos-image-container[^>]+>\\s*<a\\b[^>]+\\bhref=[\"\\']/video/([^\"\\'/]+)',\n html)\n if not video_ids:\n break\n offset += len(video_ids)\n for video_id in video_ids:\n yield self.url_result(\n 'https://www.bitchute.com/video/%s' % video_id,\n ie=BitChuteIE.ie_key(), video_id=video_id)\n\n def _real_extract(self, url):\n channel_id = self._match_id(url)\n return self.playlist_result(\n self._entries(channel_id), playlist_id=channel_id)\n", "path": "youtube_dl/extractor/bitchute.py"}]}
2,872
367
gh_patches_debug_11230
rasdani/github-patches
git_diff
spack__spack-12009
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Installation issue: py-jsonchema (No checksum provided for @2.6.0 requested by nrm) The nrm package specifically requests py-jsonschema@2.6.0. Attempting to install this package results in the following error: ==> Warning: There is no checksum on file to fetch py-jsonschema@2.6.0 safely. ==> Error: Will not fetch py-jsonschema@2.6.0 Add a checksum or use --no-checksum to skip this check. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/py-jsonschema/package.py` Content: ``` 1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class PyJsonschema(PythonPackage): 10 """Jsonschema: An(other) implementation of JSON Schema for Python.""" 11 12 homepage = "http://github.com/Julian/jsonschema" 13 url = "https://pypi.io/packages/source/j/jsonschema/jsonschema-2.5.1.tar.gz" 14 15 version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640') 16 17 depends_on('py-setuptools', type='build') 18 depends_on('py-vcversioner', type=('build', 'run')) 19 depends_on('py-functools32', when="^python@2.7.0:2.7.999", type=('build', 'run')) 20 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/py-jsonschema/package.py b/var/spack/repos/builtin/packages/py-jsonschema/package.py --- a/var/spack/repos/builtin/packages/py-jsonschema/package.py +++ b/var/spack/repos/builtin/packages/py-jsonschema/package.py @@ -10,8 +10,9 @@ """Jsonschema: An(other) implementation of JSON Schema for Python.""" homepage = "http://github.com/Julian/jsonschema" - url = "https://pypi.io/packages/source/j/jsonschema/jsonschema-2.5.1.tar.gz" + url = "https://pypi.io/packages/source/j/jsonschema/jsonschema-2.6.0.tar.gz" + version('2.6.0', sha256='6ff5f3180870836cae40f06fa10419f557208175f13ad7bc26caa77beb1f6e02') version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640') depends_on('py-setuptools', type='build')
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-jsonschema/package.py b/var/spack/repos/builtin/packages/py-jsonschema/package.py\n--- a/var/spack/repos/builtin/packages/py-jsonschema/package.py\n+++ b/var/spack/repos/builtin/packages/py-jsonschema/package.py\n@@ -10,8 +10,9 @@\n \"\"\"Jsonschema: An(other) implementation of JSON Schema for Python.\"\"\"\n \n homepage = \"http://github.com/Julian/jsonschema\"\n- url = \"https://pypi.io/packages/source/j/jsonschema/jsonschema-2.5.1.tar.gz\"\n+ url = \"https://pypi.io/packages/source/j/jsonschema/jsonschema-2.6.0.tar.gz\"\n \n+ version('2.6.0', sha256='6ff5f3180870836cae40f06fa10419f557208175f13ad7bc26caa77beb1f6e02')\n version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640')\n \n depends_on('py-setuptools', type='build')\n", "issue": "Installation issue: py-jsonchema (No checksum provided for @2.6.0 requested by nrm)\nThe nrm package specifically requests py-jsonschema@2.6.0. Attempting to install this package results in the following error:\r\n\r\n==> Warning: There is no checksum on file to fetch py-jsonschema@2.6.0 safely.\r\n==> Error: Will not fetch py-jsonschema@2.6.0\r\nAdd a checksum or use --no-checksum to skip this check.\r\n\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyJsonschema(PythonPackage):\n \"\"\"Jsonschema: An(other) implementation of JSON Schema for Python.\"\"\"\n\n homepage = \"http://github.com/Julian/jsonschema\"\n url = \"https://pypi.io/packages/source/j/jsonschema/jsonschema-2.5.1.tar.gz\"\n\n version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640')\n\n depends_on('py-setuptools', type='build')\n depends_on('py-vcversioner', type=('build', 'run'))\n depends_on('py-functools32', when=\"^python@2.7.0:2.7.999\", type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/py-jsonschema/package.py"}], "after_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyJsonschema(PythonPackage):\n \"\"\"Jsonschema: An(other) implementation of JSON Schema for Python.\"\"\"\n\n homepage = \"http://github.com/Julian/jsonschema\"\n url = \"https://pypi.io/packages/source/j/jsonschema/jsonschema-2.6.0.tar.gz\"\n\n version('2.6.0', sha256='6ff5f3180870836cae40f06fa10419f557208175f13ad7bc26caa77beb1f6e02')\n version('2.5.1', '374e848fdb69a3ce8b7e778b47c30640')\n\n depends_on('py-setuptools', type='build')\n depends_on('py-vcversioner', type=('build', 'run'))\n depends_on('py-functools32', when=\"^python@2.7.0:2.7.999\", type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/py-jsonschema/package.py"}]}
633
280
gh_patches_debug_25167
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2057
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect E1029 errors when literal YAML block style is used *cfn-lint version: 0.51.0 Embedded parameters are being flagged for not being wrapped in a `!Sub`, but they are in fact wrapped in a `!Sub`. Please provide as much information as possible: Sample template (extraneous lines have been removed): ```yaml Resources: SomeStackset: Type: AWS::CloudFormation::StackSet Properties: TemplateBody: | Resources: SomeRole: Type: AWS::IAM::Role Properties: Policies: - PolicyName: SomeName PolicyDocument: Version: 2012-10-17 Statement: - Effect: Allow Action: - iam:GetSomething Resource: - !Sub arn:aws:iam::${AWS::AccountId}:role/SomeRole* ``` The error is an [E1029](https://github.com/aws-cloudformation/cfn-lint/blob/main/src/cfnlint/rules/functions/SubNeeded.py), and the line number referenced is the line number where the YAML literal appears (here, the line that contains `TemplateBody: |`) This appears to be a new issue with [release 0.51.0](https://github.com/aws-cloudformation/cfn-lint/releases/tag/v0.51.0). The code in question above hasn't changed in my repo for a long time, and I don't get the error on previous releases. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/functions/SubNeeded.py` Content: ``` 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 from functools import reduce # pylint: disable=redefined-builtin 6 import re 7 import copy 8 import six 9 from cfnlint.rules import CloudFormationLintRule 10 from cfnlint.rules import RuleMatch 11 12 13 class SubNeeded(CloudFormationLintRule): 14 """Check if a substitution string exists without a substitution function""" 15 id = 'E1029' 16 shortdesc = 'Sub is required if a variable is used in a string' 17 description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.' 18 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html' 19 tags = ['functions', 'sub'] 20 21 def __init__(self): 22 """Init""" 23 super(SubNeeded, self).__init__() 24 self.config_definition = { 25 'custom_excludes': { 26 'default': '', 27 'type': 'string' 28 } 29 } 30 self.configure() 31 self.subParameterRegex = re.compile(r'(\$\{[A-Za-z0-9_:\.]+\})') 32 33 def _match_values(self, cfnelem, path): 34 """Recursively search for values matching the searchRegex""" 35 values = [] 36 if isinstance(cfnelem, dict): 37 for key in cfnelem: 38 pathprop = path[:] 39 pathprop.append(key) 40 values.extend(self._match_values(cfnelem[key], pathprop)) 41 elif isinstance(cfnelem, list): 42 for index, item in enumerate(cfnelem): 43 pathprop = path[:] 44 pathprop.append(index) 45 values.extend(self._match_values(item, pathprop)) 46 else: 47 # Leaf node 48 if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem): 49 for variable in re.findall(self.subParameterRegex, cfnelem): 50 values.append(path + [variable]) 51 52 return values 53 54 def match_values(self, cfn): 55 """ 56 Search for values in all parts of the templates that match the searchRegex 57 """ 58 results = [] 59 results.extend(self._match_values(cfn.template, [])) 60 # Globals are removed during a transform. They need to be checked manually 61 results.extend(self._match_values(cfn.template.get('Globals', {}), [])) 62 return results 63 64 def _api_exceptions(self, value): 65 """ Key value exceptions """ 66 parameter_search = re.compile(r'^\$\{stageVariables\..*\}$') 67 return re.match(parameter_search, value) 68 69 def _variable_custom_excluded(self, value): 70 """ User-defined exceptions for variables, anywhere in the file """ 71 custom_excludes = self.config['custom_excludes'] 72 if custom_excludes: 73 custom_search = re.compile(custom_excludes) 74 return re.match(custom_search, value) 75 return False 76 77 def match(self, cfn): 78 matches = [] 79 80 refs = cfn.get_valid_refs() 81 getatts = cfn.get_valid_getatts() 82 83 # Get a list of paths to every leaf node string containing at least one ${parameter} 84 parameter_string_paths = self.match_values(cfn) 85 # We want to search all of the paths to check if each one contains an 'Fn::Sub' 86 for parameter_string_path in parameter_string_paths: 87 # Get variable 88 var = parameter_string_path[-1] 89 90 # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub 91 # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html 92 93 if 'DefinitionString' in parameter_string_path: 94 modified_parameter_string_path = copy.copy(parameter_string_path) 95 index = parameter_string_path.index('DefinitionString') 96 modified_parameter_string_path[index] = 'DefinitionSubstitutions' 97 modified_parameter_string_path = modified_parameter_string_path[:index+1] 98 modified_parameter_string_path.append(var[2:-1]) 99 if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template): 100 continue 101 102 # Exclude variables that match custom exclude filters, if configured 103 # (for third-party tools that pre-process templates before uploading them to AWS) 104 if self._variable_custom_excluded(var): 105 continue 106 107 # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html) 108 if var.startswith('${!'): 109 continue 110 111 var_stripped = var[2:-1].strip() 112 113 # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly 114 if not 'Fn::Sub' in parameter_string_path: 115 if (var_stripped in refs or var_stripped in getatts) or 'DefinitionString' in parameter_string_path: 116 # Remove the last item (the variable) to prevent multiple errors on 1 line errors 117 path = parameter_string_path[:-1] 118 message = 'Found an embedded parameter "{}" outside of an "Fn::Sub" at {}'.format( 119 var, '/'.join(map(str, path))) 120 matches.append(RuleMatch(path, message)) 121 122 return matches 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py --- a/src/cfnlint/rules/functions/SubNeeded.py +++ b/src/cfnlint/rules/functions/SubNeeded.py @@ -18,6 +18,8 @@ source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html' tags = ['functions', 'sub'] + exceptions = ['TemplateBody'] + def __init__(self): """Init""" super(SubNeeded, self).__init__() @@ -111,7 +113,7 @@ var_stripped = var[2:-1].strip() # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly - if not 'Fn::Sub' in parameter_string_path: + if not 'Fn::Sub' in parameter_string_path and parameter_string_path[-2] not in self.exceptions: if (var_stripped in refs or var_stripped in getatts) or 'DefinitionString' in parameter_string_path: # Remove the last item (the variable) to prevent multiple errors on 1 line errors path = parameter_string_path[:-1]
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py\n--- a/src/cfnlint/rules/functions/SubNeeded.py\n+++ b/src/cfnlint/rules/functions/SubNeeded.py\n@@ -18,6 +18,8 @@\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n \n+ exceptions = ['TemplateBody']\n+\n def __init__(self):\n \"\"\"Init\"\"\"\n super(SubNeeded, self).__init__()\n@@ -111,7 +113,7 @@\n var_stripped = var[2:-1].strip()\n \n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n- if not 'Fn::Sub' in parameter_string_path:\n+ if not 'Fn::Sub' in parameter_string_path and parameter_string_path[-2] not in self.exceptions:\n if (var_stripped in refs or var_stripped in getatts) or 'DefinitionString' in parameter_string_path:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n", "issue": "Incorrect E1029 errors when literal YAML block style is used\n*cfn-lint version: 0.51.0\r\n\r\nEmbedded parameters are being flagged for not being wrapped in a `!Sub`, but they are in fact wrapped in a `!Sub`.\r\n\r\nPlease provide as much information as possible:\r\n\r\nSample template (extraneous lines have been removed):\r\n```yaml\r\nResources:\r\n SomeStackset:\r\n Type: AWS::CloudFormation::StackSet\r\n Properties:\r\n TemplateBody: |\r\n Resources:\r\n SomeRole:\r\n Type: AWS::IAM::Role\r\n Properties:\r\n Policies:\r\n -\r\n PolicyName: SomeName\r\n PolicyDocument:\r\n Version: 2012-10-17\r\n Statement:\r\n -\r\n Effect: Allow\r\n Action:\r\n - iam:GetSomething\r\n Resource:\r\n - !Sub arn:aws:iam::${AWS::AccountId}:role/SomeRole*\r\n```\r\n\r\nThe error is an [E1029](https://github.com/aws-cloudformation/cfn-lint/blob/main/src/cfnlint/rules/functions/SubNeeded.py), and the line number referenced is the line number where the YAML literal appears (here, the line that contains `TemplateBody: |`)\r\n\r\nThis appears to be a new issue with [release 0.51.0](https://github.com/aws-cloudformation/cfn-lint/releases/tag/v0.51.0). The code in question above hasn't changed in my repo for a long time, and I don't get the error on previous releases.\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom functools import reduce # pylint: disable=redefined-builtin\nimport re\nimport copy\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(SubNeeded, self).__init__()\n self.config_definition = {\n 'custom_excludes': {\n 'default': '',\n 'type': 'string'\n }\n }\n self.configure()\n self.subParameterRegex = re.compile(r'(\\$\\{[A-Za-z0-9_:\\.]+\\})')\n\n def _match_values(self, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):\n for variable in re.findall(self.subParameterRegex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(cfn.template, []))\n # Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def _variable_custom_excluded(self, value):\n \"\"\" User-defined exceptions for variables, anywhere in the file \"\"\"\n custom_excludes = self.config['custom_excludes']\n if custom_excludes:\n custom_search = re.compile(custom_excludes)\n return re.match(custom_search, value)\n return False\n\n def match(self, cfn):\n matches = []\n\n refs = cfn.get_valid_refs()\n getatts = cfn.get_valid_getatts()\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(cfn)\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n # Get variable\n var = parameter_string_path[-1]\n\n # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub\n # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html\n\n if 'DefinitionString' in parameter_string_path:\n modified_parameter_string_path = copy.copy(parameter_string_path)\n index = parameter_string_path.index('DefinitionString')\n modified_parameter_string_path[index] = 'DefinitionSubstitutions'\n modified_parameter_string_path = modified_parameter_string_path[:index+1]\n modified_parameter_string_path.append(var[2:-1])\n if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template):\n continue\n\n # Exclude variables that match custom exclude filters, if configured\n # (for third-party tools that pre-process templates before uploading them to AWS)\n if self._variable_custom_excluded(var):\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if var.startswith('${!'):\n continue\n\n var_stripped = var[2:-1].strip()\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not 'Fn::Sub' in parameter_string_path:\n if (var_stripped in refs or var_stripped in getatts) or 'DefinitionString' in parameter_string_path:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter \"{}\" outside of an \"Fn::Sub\" at {}'.format(\n var, '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom functools import reduce # pylint: disable=redefined-builtin\nimport re\nimport copy\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n exceptions = ['TemplateBody']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(SubNeeded, self).__init__()\n self.config_definition = {\n 'custom_excludes': {\n 'default': '',\n 'type': 'string'\n }\n }\n self.configure()\n self.subParameterRegex = re.compile(r'(\\$\\{[A-Za-z0-9_:\\.]+\\})')\n\n def _match_values(self, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):\n for variable in re.findall(self.subParameterRegex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(cfn.template, []))\n # Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def _variable_custom_excluded(self, value):\n \"\"\" User-defined exceptions for variables, anywhere in the file \"\"\"\n custom_excludes = self.config['custom_excludes']\n if custom_excludes:\n custom_search = re.compile(custom_excludes)\n return re.match(custom_search, value)\n return False\n\n def match(self, cfn):\n matches = []\n\n refs = cfn.get_valid_refs()\n getatts = cfn.get_valid_getatts()\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(cfn)\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n # Get variable\n var = parameter_string_path[-1]\n\n # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub\n # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html\n\n if 'DefinitionString' in parameter_string_path:\n modified_parameter_string_path = copy.copy(parameter_string_path)\n index = parameter_string_path.index('DefinitionString')\n modified_parameter_string_path[index] = 'DefinitionSubstitutions'\n modified_parameter_string_path = modified_parameter_string_path[:index+1]\n modified_parameter_string_path.append(var[2:-1])\n if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template):\n continue\n\n # Exclude variables that match custom exclude filters, if configured\n # (for third-party tools that pre-process templates before uploading them to AWS)\n if self._variable_custom_excluded(var):\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if var.startswith('${!'):\n continue\n\n var_stripped = var[2:-1].strip()\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not 'Fn::Sub' in parameter_string_path and parameter_string_path[-2] not in self.exceptions:\n if (var_stripped in refs or var_stripped in getatts) or 'DefinitionString' in parameter_string_path:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter \"{}\" outside of an \"Fn::Sub\" at {}'.format(\n var, '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}]}
1,992
277
gh_patches_debug_56729
rasdani/github-patches
git_diff
WordPress__openverse-api-723
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Swagger/ReDoc page raises an error ## Description <!-- Concisely describe the bug. Compare your experience with what you expected to happen. --> <!-- For example: "I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page." --> While deploying [v2.5.2](https://github.com/WordPress/openverse-api/releases/tag/v2.5.2) to staging, we noticed that the API documentation page failed to render and caused this error: ``` [2022-05-25 17:02:32,253 - django.request - 241][ERROR] Internal Server Error: /v1/ Traceback (most recent call last): File "/venv/lib/python3.10/site-packages/drf_yasg/openapi.py", line 110, in __getattr__ return self[make_swagger_name(item)] KeyError: 'name' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/venv/lib/python3.10/site-packages/django/core/handlers/exception.py", line 55, in inner response = get_response(request) File "/venv/lib/python3.10/site-packages/django/core/handlers/base.py", line 197, in _get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/venv/lib/python3.10/site-packages/sentry_sdk/integrations/django/views.py", line 67, in sentry_wrapped_callback return callback(request, *args, **kwargs) File "/venv/lib/python3.10/site-packages/drf_yasg/views.py", line 34, in _wrapped_view_func response = view_func(request, *args, **kwargs) File "/venv/lib/python3.10/site-packages/django/utils/decorators.py", line 133, in _wrapped_view response = view_func(request, *args, **kwargs) File "/venv/lib/python3.10/site-packages/django/views/decorators/vary.py", line 21, in inner_func response = func(*args, **kwargs) File "/venv/lib/python3.10/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view return view_func(*args, **kwargs) File "/venv/lib/python3.10/site-packages/django/views/generic/base.py", line 84, in view return self.dispatch(request, *args, **kwargs) File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 509, in dispatch response = self.handle_exception(exc) File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 469, in handle_exception self.raise_uncaught_exception(exc) File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception raise exc File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 506, in dispatch response = handler(request, *args, **kwargs) File "/venv/lib/python3.10/site-packages/drf_yasg/views.py", line 94, in get schema = generator.get_schema(request, self.public) File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 246, in get_schema paths, prefix = self.get_paths(endpoints, components, request, public) File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 404, in get_paths operation = self.get_operation(view, path, prefix, method, components, request) File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 446, in get_operation operation = view_inspector.get_operation(operation_keys) File "/api/catalog/custom_auto_schema.py", line 14, in get_operation query = self.get_query_parameters() File "/venv/lib/python3.10/site-packages/drf_yasg/inspectors/view.py", line 298, in get_query_parameters if len(set(param_list_to_odict(natural_parameters)) & set(param_list_to_odict(serializer_parameters))) != 0: File "/venv/lib/python3.10/site-packages/drf_yasg/utils.py", line 266, in param_list_to_odict result = OrderedDict(((param.name, param.in_), param) for param in parameters) File "/venv/lib/python3.10/site-packages/drf_yasg/utils.py", line 266, in <genexpr> result = OrderedDict(((param.name, param.in_), param) for param in parameters) File "/venv/lib/python3.10/site-packages/drf_yasg/openapi.py", line 113, in __getattr__ raise AttributeError("object of class " + type(self).__name__ + " has no attribute " + item) AttributeError: object of class Parameter has no attribute name ``` Here's the error the page presents: ``` Something went wrong... Error downloading http://localhost:8000/v1/?format=openapi HTTP ERROR 500 Stack trace s/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651 read/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:36080 s/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651 read/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:35658 ReDoc Version: 2.0.0-rc.40 Commit: 17b9873 ``` ## Reproduction <!-- Provide detailed steps to reproduce the bug. --> 1. `git checkout v2.5.2` 2. `just build` 3. `just recreate && just init` 4. Visit localhost:8000 and observe error ## Additional context <!-- Add any other context about the problem here; or delete the section entirely. --> Sentry issue: https://sentry.io/share/issue/83044216200d47538f3733a16df46adc/ ## Resolution <!-- Replace the [ ] with [x] to check the box. --> - [ ] 🙋 I would be interested in resolving this bug. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `api/catalog/custom_auto_schema.py` Content: ``` 1 from drf_yasg import openapi 2 from drf_yasg.inspectors import SwaggerAutoSchema 3 from drf_yasg.utils import filter_none, force_real_str 4 5 6 class CustomAutoSchema(SwaggerAutoSchema): 7 def get_operation(self, operation_keys=None): 8 operation_keys = operation_keys or self.operation_keys 9 10 consumes = self.get_consumes() 11 produces = self.get_produces() 12 13 body = self.get_request_body_parameters(consumes) 14 query = self.get_query_parameters() 15 parameters = body + query 16 parameters = filter_none(parameters) 17 parameters = self.add_manual_parameters(parameters) 18 19 operation_id = self.get_operation_id(operation_keys) 20 summary, description = self.get_summary_and_description() 21 security = self.get_security() 22 assert security is None or isinstance( 23 security, list 24 ), "security must be a list of security requirement objects" 25 deprecated = self.is_deprecated() 26 tags = self.get_tags(operation_keys) 27 28 responses = self.get_responses() 29 30 return openapi.Operation( 31 operation_id=operation_id, 32 description=force_real_str(description), 33 summary=force_real_str(summary), 34 responses=responses, 35 parameters=parameters, 36 consumes=consumes, 37 produces=produces, 38 tags=tags, 39 security=security, 40 deprecated=deprecated, 41 **{"x-code-samples": self.overrides.get("code_examples")} 42 ) 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/api/catalog/custom_auto_schema.py b/api/catalog/custom_auto_schema.py --- a/api/catalog/custom_auto_schema.py +++ b/api/catalog/custom_auto_schema.py @@ -4,6 +4,14 @@ class CustomAutoSchema(SwaggerAutoSchema): + def get_pagination_parameters(self): + """ + Since the pagination params are a part of the ``MediaSearchRequestSerializer``, + they need not be added again as pagination params. + """ + + return [] + def get_operation(self, operation_keys=None): operation_keys = operation_keys or self.operation_keys
{"golden_diff": "diff --git a/api/catalog/custom_auto_schema.py b/api/catalog/custom_auto_schema.py\n--- a/api/catalog/custom_auto_schema.py\n+++ b/api/catalog/custom_auto_schema.py\n@@ -4,6 +4,14 @@\n \n \n class CustomAutoSchema(SwaggerAutoSchema):\n+ def get_pagination_parameters(self):\n+ \"\"\"\n+ Since the pagination params are a part of the ``MediaSearchRequestSerializer``,\n+ they need not be added again as pagination params.\n+ \"\"\"\n+\n+ return []\n+\n def get_operation(self, operation_keys=None):\n operation_keys = operation_keys or self.operation_keys\n", "issue": "Swagger/ReDoc page raises an error\n## Description\n<!-- Concisely describe the bug. Compare your experience with what you expected to happen. -->\n<!-- For example: \"I clicked the 'submit' button and instead of seeing a thank you message, I saw a blank page.\" -->\nWhile deploying [v2.5.2](https://github.com/WordPress/openverse-api/releases/tag/v2.5.2) to staging, we noticed that the API documentation page failed to render and caused this error:\n\n```\n[2022-05-25 17:02:32,253 - django.request - 241][ERROR] Internal Server Error: /v1/\nTraceback (most recent call last):\n File \"/venv/lib/python3.10/site-packages/drf_yasg/openapi.py\", line 110, in __getattr__\n return self[make_swagger_name(item)]\nKeyError: 'name'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/venv/lib/python3.10/site-packages/django/core/handlers/exception.py\", line 55, in inner\n response = get_response(request)\n File \"/venv/lib/python3.10/site-packages/django/core/handlers/base.py\", line 197, in _get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n File \"/venv/lib/python3.10/site-packages/sentry_sdk/integrations/django/views.py\", line 67, in sentry_wrapped_callback\n return callback(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/views.py\", line 34, in _wrapped_view_func\n response = view_func(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/django/utils/decorators.py\", line 133, in _wrapped_view\n response = view_func(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/django/views/decorators/vary.py\", line 21, in inner_func\n response = func(*args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/django/views/decorators/csrf.py\", line 54, in wrapped_view\n return view_func(*args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/django/views/generic/base.py\", line 84, in view\n return self.dispatch(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/rest_framework/views.py\", line 509, in dispatch\n response = self.handle_exception(exc)\n File \"/venv/lib/python3.10/site-packages/rest_framework/views.py\", line 469, in handle_exception\n self.raise_uncaught_exception(exc)\n File \"/venv/lib/python3.10/site-packages/rest_framework/views.py\", line 480, in raise_uncaught_exception\n raise exc\n File \"/venv/lib/python3.10/site-packages/rest_framework/views.py\", line 506, in dispatch\n response = handler(request, *args, **kwargs)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/views.py\", line 94, in get\n schema = generator.get_schema(request, self.public)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/generators.py\", line 246, in get_schema\n paths, prefix = self.get_paths(endpoints, components, request, public)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/generators.py\", line 404, in get_paths\n operation = self.get_operation(view, path, prefix, method, components, request)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/generators.py\", line 446, in get_operation\n operation = view_inspector.get_operation(operation_keys)\n File \"/api/catalog/custom_auto_schema.py\", line 14, in get_operation\n query = self.get_query_parameters()\n File \"/venv/lib/python3.10/site-packages/drf_yasg/inspectors/view.py\", line 298, in get_query_parameters\n if len(set(param_list_to_odict(natural_parameters)) & set(param_list_to_odict(serializer_parameters))) != 0:\n File \"/venv/lib/python3.10/site-packages/drf_yasg/utils.py\", line 266, in param_list_to_odict\n result = OrderedDict(((param.name, param.in_), param) for param in parameters)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/utils.py\", line 266, in <genexpr>\n result = OrderedDict(((param.name, param.in_), param) for param in parameters)\n File \"/venv/lib/python3.10/site-packages/drf_yasg/openapi.py\", line 113, in __getattr__\n raise AttributeError(\"object of class \" + type(self).__name__ + \" has no attribute \" + item)\nAttributeError: object of class Parameter has no attribute name\n```\n\nHere's the error the page presents:\n```\nSomething went wrong...\nError downloading http://localhost:8000/v1/?format=openapi HTTP ERROR 500\n\nStack trace\n\ns/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651\nread/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:36080\n\n\ns/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651\nread/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:35658\n\nReDoc Version: 2.0.0-rc.40\nCommit: 17b9873\n```\n\n## Reproduction\n<!-- Provide detailed steps to reproduce the bug. -->\n1. `git checkout v2.5.2`\n2. `just build`\n3. `just recreate && just init`\n4. Visit localhost:8000 and observe error\n\n## Additional context\n<!-- Add any other context about the problem here; or delete the section entirely. -->\nSentry issue: https://sentry.io/share/issue/83044216200d47538f3733a16df46adc/\n\n## Resolution\n<!-- Replace the [ ] with [x] to check the box. -->\n- [ ] \ud83d\ude4b I would be interested in resolving this bug.\n\n", "before_files": [{"content": "from drf_yasg import openapi\nfrom drf_yasg.inspectors import SwaggerAutoSchema\nfrom drf_yasg.utils import filter_none, force_real_str\n\n\nclass CustomAutoSchema(SwaggerAutoSchema):\n def get_operation(self, operation_keys=None):\n operation_keys = operation_keys or self.operation_keys\n\n consumes = self.get_consumes()\n produces = self.get_produces()\n\n body = self.get_request_body_parameters(consumes)\n query = self.get_query_parameters()\n parameters = body + query\n parameters = filter_none(parameters)\n parameters = self.add_manual_parameters(parameters)\n\n operation_id = self.get_operation_id(operation_keys)\n summary, description = self.get_summary_and_description()\n security = self.get_security()\n assert security is None or isinstance(\n security, list\n ), \"security must be a list of security requirement objects\"\n deprecated = self.is_deprecated()\n tags = self.get_tags(operation_keys)\n\n responses = self.get_responses()\n\n return openapi.Operation(\n operation_id=operation_id,\n description=force_real_str(description),\n summary=force_real_str(summary),\n responses=responses,\n parameters=parameters,\n consumes=consumes,\n produces=produces,\n tags=tags,\n security=security,\n deprecated=deprecated,\n **{\"x-code-samples\": self.overrides.get(\"code_examples\")}\n )\n", "path": "api/catalog/custom_auto_schema.py"}], "after_files": [{"content": "from drf_yasg import openapi\nfrom drf_yasg.inspectors import SwaggerAutoSchema\nfrom drf_yasg.utils import filter_none, force_real_str\n\n\nclass CustomAutoSchema(SwaggerAutoSchema):\n def get_pagination_parameters(self):\n \"\"\"\n Since the pagination params are a part of the ``MediaSearchRequestSerializer``,\n they need not be added again as pagination params.\n \"\"\"\n\n return []\n\n def get_operation(self, operation_keys=None):\n operation_keys = operation_keys or self.operation_keys\n\n consumes = self.get_consumes()\n produces = self.get_produces()\n\n body = self.get_request_body_parameters(consumes)\n query = self.get_query_parameters()\n parameters = body + query\n parameters = filter_none(parameters)\n parameters = self.add_manual_parameters(parameters)\n\n operation_id = self.get_operation_id(operation_keys)\n summary, description = self.get_summary_and_description()\n security = self.get_security()\n assert security is None or isinstance(\n security, list\n ), \"security must be a list of security requirement objects\"\n deprecated = self.is_deprecated()\n tags = self.get_tags(operation_keys)\n\n responses = self.get_responses()\n\n return openapi.Operation(\n operation_id=operation_id,\n description=force_real_str(description),\n summary=force_real_str(summary),\n responses=responses,\n parameters=parameters,\n consumes=consumes,\n produces=produces,\n tags=tags,\n security=security,\n deprecated=deprecated,\n **{\"x-code-samples\": self.overrides.get(\"code_examples\")}\n )\n", "path": "api/catalog/custom_auto_schema.py"}]}
2,154
129
gh_patches_debug_17118
rasdani/github-patches
git_diff
GPflow__GPflow-303
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Positive transform fails large numbers The positive transform implemented in `GPflow.transforms.positive` is an alias for the `Log1pe`-transform. The name suggests that it can be used to represent arbitrary positive numbers, however, it is actually only possible to to represent numbers up a value of about 700 due to the result of `np.exp` growing too large: ```python In [31]: GPflow.transforms.positive.forward(700) Out[31]: 700.000001 In [32]: GPflow.transforms.positive.forward(800) /(...)/GPflow/transforms.py:129: RuntimeWarning: overflow encountered in exp return np.log(1. + np.exp(x)) + self._lower Out[32]: inf In [33]: GPflow.transforms.positive.backward(700) Out[33]: array([ 699.999999]) In [34]: GPflow.transforms.positive.backward(800) /(...)/GPflow/transforms.py:138: RuntimeWarning: overflow encountered in exp return np.log(np.exp(y - self._lower) - np.ones(1, np_float_type)) Out[34]: array([ inf]) ``` Since the positive transform degrades to an identity function for large values, it should be fairly easy to fix this. However, it is probably important to ensure that both `forward` and `tf_forward` behave the same. I guess a similar problem will exist for the `Logistic`-transform. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `GPflow/transforms.py` Content: ``` 1 # Copyright 2016 James Hensman, alexggmatthews 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 import numpy as np 17 import tensorflow as tf 18 from . import tf_wraps as tfw 19 from ._settings import settings 20 21 float_type = settings.dtypes.float_type 22 np_float_type = np.float32 if float_type is tf.float32 else np.float64 23 24 25 class Transform(object): 26 def forward(self, x): 27 """ 28 Map from the free-space to the variable space, using numpy 29 """ 30 raise NotImplementedError 31 32 def backward(self, y): 33 """ 34 Map from the variable-space to the free space, using numpy 35 """ 36 raise NotImplementedError 37 38 def tf_forward(self, x): 39 """ 40 Map from the free-space to the variable space, using tensorflow 41 """ 42 raise NotImplementedError 43 44 def tf_log_jacobian(self, x): 45 """ 46 Return the log Jacobian of the tf_forward mapping. 47 48 Note that we *could* do this using a tf manipulation of 49 self.tf_forward, but tensorflow may have difficulty: it doesn't have a 50 Jacaobian at time of writing. We do this in the tests to make sure the 51 implementation is correct. 52 """ 53 raise NotImplementedError 54 55 def free_state_size(self, variable_shape): 56 return np.prod(variable_shape) 57 58 def __str__(self): 59 """ 60 A short string describing the nature of the constraint 61 """ 62 raise NotImplementedError 63 64 def __getstate__(self): 65 return self.__dict__.copy() 66 67 def __setstate__(self, d): 68 self.__dict__ = d 69 70 71 class Identity(Transform): 72 def tf_forward(self, x): 73 return tf.identity(x) 74 75 def forward(self, x): 76 return x 77 78 def backward(self, y): 79 return y 80 81 def tf_log_jacobian(self, x): 82 return tf.zeros((1,), float_type) 83 84 def __str__(self): 85 return '(none)' 86 87 88 class Exp(Transform): 89 def __init__(self, lower=1e-6): 90 self._lower = lower 91 92 def tf_forward(self, x): 93 return tf.exp(x) + self._lower 94 95 def forward(self, x): 96 return np.exp(x) + self._lower 97 98 def backward(self, y): 99 return np.log(y - self._lower) 100 101 def tf_log_jacobian(self, x): 102 return tf.reduce_sum(x) 103 104 def __str__(self): 105 return '+ve' 106 107 108 class Log1pe(Transform): 109 """ 110 A transform of the form 111 112 y = \log ( 1 + \exp(x)) 113 114 x is a free variable, y is always positive. 115 116 This function is known as 'softplus' in tensorflow. 117 """ 118 119 def __init__(self, lower=1e-6): 120 """ 121 lower is a float that defines the minimum value that this transform can 122 take, default 1e-6. This helps stability during optimization, because 123 aggressive optimizers can take overly-long steps which lead to zero in 124 the transformed variable, causing an error. 125 """ 126 self._lower = lower 127 128 def forward(self, x): 129 return np.log(1. + np.exp(x)) + self._lower 130 131 def tf_forward(self, x): 132 return tf.nn.softplus(x) + self._lower 133 134 def tf_log_jacobian(self, x): 135 return -tf.reduce_sum(tf.log(1. + tf.exp(-x))) 136 137 def backward(self, y): 138 return np.log(np.exp(y - self._lower) - np.ones(1, np_float_type)) 139 140 def __str__(self): 141 return '+ve' 142 143 144 class Logistic(Transform): 145 def __init__(self, a=0., b=1.): 146 Transform.__init__(self) 147 assert b > a 148 self.a, self.b = float(a), float(b) 149 150 def tf_forward(self, x): 151 ex = tf.exp(-x) 152 return self.a + (self.b - self.a) / (1. + ex) 153 154 def forward(self, x): 155 ex = np.exp(-x) 156 return self.a + (self.b - self.a) / (1. + ex) 157 158 def backward(self, y): 159 return -np.log((self.b - self.a) / (y - self.a) - 1.) 160 161 def tf_log_jacobian(self, x): 162 return tf.reduce_sum(x - 2. * tf.log(tf.exp(x) + 1.) + np.log(self.b - self.a)) 163 164 def __str__(self): 165 return '[' + str(self.a) + ', ' + str(self.b) + ']' 166 167 168 class DiagMatrix(Transform): 169 """ 170 A transform to represent diagonal matrices. 171 172 The output of this transform is a N x dim x dim array of diagonal matrices. 173 The contructor argumnet dim specifies the size of the matrixes. 174 175 Additionally, to ensure that the matrices are positive definite, the 176 diagonal elements are pushed through a 'positive' transform, defaulting to 177 log1pe. 178 """ 179 def __init__(self, dim=1, positive_transform=Log1pe()): 180 self.dim = dim 181 self._lower = 1e-6 182 self._positive_transform = positive_transform 183 184 def forward(self, x): 185 # Create diagonal matrix 186 x = self._positive_transform.forward(x).reshape((-1, self.dim)) 187 m = np.zeros((x.shape[0], x.shape[1], x.shape[1])) 188 m[(np.s_[:],) + np.diag_indices(x.shape[1])] = x 189 return m 190 191 def backward(self, y): 192 # Return diagonals of matrices 193 return self._positive_transform.backward(y.reshape(-1, self.dim, self.dim).diagonal(0, 1, 2).flatten()) 194 195 def tf_forward(self, x): 196 return tf.matrix_diag(tf.reshape(self._positive_transform.tf_forward(x), (-1, self.dim))) 197 198 def tf_log_jacobian(self, x): 199 return tf.zeros((1,), float_type) + self._positive_transform.tf_log_jacobian(x) 200 201 def __str__(self): 202 return 'DiagMatrix' 203 204 def free_state_size(self, variable_shape): 205 return variable_shape[0] * variable_shape[1] 206 207 208 class LowerTriangular(Transform): 209 """ 210 A transform of the form 211 212 tri_mat = vec_to_tri(x) 213 214 x is a free variable, y is always a list of lower triangular matrices sized 215 (N x N x D). 216 """ 217 218 def __init__(self, num_matrices=1, squeeze=False): 219 """ 220 Create an instance of LowerTriangular transform. 221 Args: 222 num_matrices: Number of matrices to be stored. 223 squeeze: If num_matrices == 1, drop the redundant axis. 224 """ 225 self.num_matrices = num_matrices # We need to store this for reconstruction. 226 self.squeeze = squeeze 227 228 def _validate_vector_length(self, length): 229 """ 230 Check whether the vector length is consistent with being a triangular 231 matrix and with `self.num_matrices`. 232 Args: 233 length: Length of the free state vector. 234 235 Returns: Length of the vector with the lower triangular elements. 236 237 """ 238 L = length / self.num_matrices 239 if int(((L * 8) + 1) ** 0.5) ** 2.0 != (L * 8 + 1): 240 raise ValueError("The free state must be a triangle number.") 241 return L 242 243 def forward(self, x): 244 """ 245 Transforms from the free state to the variable. 246 Args: 247 x: Free state vector. Must have length of `self.num_matrices` * 248 triangular_number. 249 250 Returns: 251 Reconstructed variable. 252 """ 253 L = self._validate_vector_length(len(x)) 254 matsize = int((L * 8 + 1) ** 0.5 * 0.5 - 0.5) 255 xr = np.reshape(x, (self.num_matrices, -1)) 256 var = np.zeros((matsize, matsize, self.num_matrices), np_float_type) 257 for i in range(self.num_matrices): 258 indices = np.tril_indices(matsize, 0) 259 var[indices + (np.zeros(len(indices[0])).astype(int) + i,)] = xr[i, :] 260 return var.squeeze() if self.squeeze else var 261 262 def backward(self, y): 263 """ 264 Transforms from the variable to the free state. 265 Args: 266 y: Variable representation. 267 268 Returns: 269 Free state. 270 """ 271 N = int((y.size / self.num_matrices) ** 0.5) 272 y = np.reshape(y, (N, N, self.num_matrices)) 273 return y[np.tril_indices(len(y), 0)].T.flatten() 274 275 def tf_forward(self, x): 276 fwd = tf.transpose(tfw.vec_to_tri(tf.reshape(x, (self.num_matrices, -1))), [1, 2, 0]) 277 return tf.squeeze(fwd) if self.squeeze else fwd 278 279 def tf_log_jacobian(self, x): 280 return tf.zeros((1,), float_type) 281 282 def free_state_size(self, variable_shape): 283 matrix_batch = len(variable_shape) > 2 284 if ((not matrix_batch and self.num_matrices != 1) or 285 (matrix_batch and variable_shape[2] != self.num_matrices)): 286 raise ValueError("Number of matrices must be consistent with what was passed to the constructor.") 287 if variable_shape[0] != variable_shape[1]: 288 raise ValueError("Matrices passed must be square.") 289 N = variable_shape[0] 290 return int(0.5 * N * (N + 1)) * (variable_shape[2] if matrix_batch else 1) 291 292 def __str__(self): 293 return "LoTri->vec" 294 295 296 positive = Log1pe() 297 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/GPflow/transforms.py b/GPflow/transforms.py --- a/GPflow/transforms.py +++ b/GPflow/transforms.py @@ -126,7 +126,9 @@ self._lower = lower def forward(self, x): - return np.log(1. + np.exp(x)) + self._lower + result = np.log(1. + np.exp(x)) + self._lower + # do not transform large numbers, they overflow and the mapping is exactly identity. + return np.where(x > 35, x + self._lower, result) def tf_forward(self, x): return tf.nn.softplus(x) + self._lower @@ -135,7 +137,8 @@ return -tf.reduce_sum(tf.log(1. + tf.exp(-x))) def backward(self, y): - return np.log(np.exp(y - self._lower) - np.ones(1, np_float_type)) + result = np.log(np.exp(y - self._lower) - np.ones(1, np_float_type)) + return np.where(y > 35, y-self._lower, result) def __str__(self): return '+ve'
{"golden_diff": "diff --git a/GPflow/transforms.py b/GPflow/transforms.py\n--- a/GPflow/transforms.py\n+++ b/GPflow/transforms.py\n@@ -126,7 +126,9 @@\n self._lower = lower\n \n def forward(self, x):\n- return np.log(1. + np.exp(x)) + self._lower\n+ result = np.log(1. + np.exp(x)) + self._lower\n+ # do not transform large numbers, they overflow and the mapping is exactly identity.\n+ return np.where(x > 35, x + self._lower, result)\n \n def tf_forward(self, x):\n return tf.nn.softplus(x) + self._lower\n@@ -135,7 +137,8 @@\n return -tf.reduce_sum(tf.log(1. + tf.exp(-x)))\n \n def backward(self, y):\n- return np.log(np.exp(y - self._lower) - np.ones(1, np_float_type))\n+ result = np.log(np.exp(y - self._lower) - np.ones(1, np_float_type))\n+ return np.where(y > 35, y-self._lower, result)\n \n def __str__(self):\n return '+ve'\n", "issue": "Positive transform fails large numbers\nThe positive transform implemented in `GPflow.transforms.positive` is an alias for the `Log1pe`-transform. The name suggests that it can be used to represent arbitrary positive numbers, however, it is actually only possible to to represent numbers up a value of about 700 due to the result of `np.exp` growing too large:\r\n\r\n```python\r\nIn [31]: GPflow.transforms.positive.forward(700)\r\nOut[31]: 700.000001\r\n\r\nIn [32]: GPflow.transforms.positive.forward(800)\r\n/(...)/GPflow/transforms.py:129: RuntimeWarning: overflow encountered in exp\r\n return np.log(1. + np.exp(x)) + self._lower\r\nOut[32]: inf\r\n\r\nIn [33]: GPflow.transforms.positive.backward(700)\r\nOut[33]: array([ 699.999999])\r\n\r\nIn [34]: GPflow.transforms.positive.backward(800)\r\n/(...)/GPflow/transforms.py:138: RuntimeWarning: overflow encountered in exp\r\n return np.log(np.exp(y - self._lower) - np.ones(1, np_float_type))\r\nOut[34]: array([ inf])\r\n```\r\n\r\nSince the positive transform degrades to an identity function for large values, it should be fairly easy to fix this. However, it is probably important to ensure that both `forward` and `tf_forward` behave the same.\r\n\r\nI guess a similar problem will exist for the `Logistic`-transform.\n", "before_files": [{"content": "# Copyright 2016 James Hensman, alexggmatthews\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport numpy as np\nimport tensorflow as tf\nfrom . import tf_wraps as tfw\nfrom ._settings import settings\n\nfloat_type = settings.dtypes.float_type\nnp_float_type = np.float32 if float_type is tf.float32 else np.float64\n\n\nclass Transform(object):\n def forward(self, x):\n \"\"\"\n Map from the free-space to the variable space, using numpy\n \"\"\"\n raise NotImplementedError\n\n def backward(self, y):\n \"\"\"\n Map from the variable-space to the free space, using numpy\n \"\"\"\n raise NotImplementedError\n\n def tf_forward(self, x):\n \"\"\"\n Map from the free-space to the variable space, using tensorflow\n \"\"\"\n raise NotImplementedError\n\n def tf_log_jacobian(self, x):\n \"\"\"\n Return the log Jacobian of the tf_forward mapping.\n\n Note that we *could* do this using a tf manipulation of\n self.tf_forward, but tensorflow may have difficulty: it doesn't have a\n Jacaobian at time of writing. We do this in the tests to make sure the\n implementation is correct.\n \"\"\"\n raise NotImplementedError\n\n def free_state_size(self, variable_shape):\n return np.prod(variable_shape)\n\n def __str__(self):\n \"\"\"\n A short string describing the nature of the constraint\n \"\"\"\n raise NotImplementedError\n\n def __getstate__(self):\n return self.__dict__.copy()\n\n def __setstate__(self, d):\n self.__dict__ = d\n\n\nclass Identity(Transform):\n def tf_forward(self, x):\n return tf.identity(x)\n\n def forward(self, x):\n return x\n\n def backward(self, y):\n return y\n\n def tf_log_jacobian(self, x):\n return tf.zeros((1,), float_type)\n\n def __str__(self):\n return '(none)'\n\n\nclass Exp(Transform):\n def __init__(self, lower=1e-6):\n self._lower = lower\n\n def tf_forward(self, x):\n return tf.exp(x) + self._lower\n\n def forward(self, x):\n return np.exp(x) + self._lower\n\n def backward(self, y):\n return np.log(y - self._lower)\n\n def tf_log_jacobian(self, x):\n return tf.reduce_sum(x)\n\n def __str__(self):\n return '+ve'\n\n\nclass Log1pe(Transform):\n \"\"\"\n A transform of the form\n\n y = \\log ( 1 + \\exp(x))\n\n x is a free variable, y is always positive.\n\n This function is known as 'softplus' in tensorflow.\n \"\"\"\n\n def __init__(self, lower=1e-6):\n \"\"\"\n lower is a float that defines the minimum value that this transform can\n take, default 1e-6. This helps stability during optimization, because\n aggressive optimizers can take overly-long steps which lead to zero in\n the transformed variable, causing an error.\n \"\"\"\n self._lower = lower\n\n def forward(self, x):\n return np.log(1. + np.exp(x)) + self._lower\n\n def tf_forward(self, x):\n return tf.nn.softplus(x) + self._lower\n\n def tf_log_jacobian(self, x):\n return -tf.reduce_sum(tf.log(1. + tf.exp(-x)))\n\n def backward(self, y):\n return np.log(np.exp(y - self._lower) - np.ones(1, np_float_type))\n\n def __str__(self):\n return '+ve'\n\n\nclass Logistic(Transform):\n def __init__(self, a=0., b=1.):\n Transform.__init__(self)\n assert b > a\n self.a, self.b = float(a), float(b)\n\n def tf_forward(self, x):\n ex = tf.exp(-x)\n return self.a + (self.b - self.a) / (1. + ex)\n\n def forward(self, x):\n ex = np.exp(-x)\n return self.a + (self.b - self.a) / (1. + ex)\n\n def backward(self, y):\n return -np.log((self.b - self.a) / (y - self.a) - 1.)\n\n def tf_log_jacobian(self, x):\n return tf.reduce_sum(x - 2. * tf.log(tf.exp(x) + 1.) + np.log(self.b - self.a))\n\n def __str__(self):\n return '[' + str(self.a) + ', ' + str(self.b) + ']'\n\n\nclass DiagMatrix(Transform):\n \"\"\"\n A transform to represent diagonal matrices.\n\n The output of this transform is a N x dim x dim array of diagonal matrices.\n The contructor argumnet dim specifies the size of the matrixes.\n\n Additionally, to ensure that the matrices are positive definite, the\n diagonal elements are pushed through a 'positive' transform, defaulting to\n log1pe.\n \"\"\"\n def __init__(self, dim=1, positive_transform=Log1pe()):\n self.dim = dim\n self._lower = 1e-6\n self._positive_transform = positive_transform\n\n def forward(self, x):\n # Create diagonal matrix\n x = self._positive_transform.forward(x).reshape((-1, self.dim))\n m = np.zeros((x.shape[0], x.shape[1], x.shape[1]))\n m[(np.s_[:],) + np.diag_indices(x.shape[1])] = x\n return m\n\n def backward(self, y):\n # Return diagonals of matrices\n return self._positive_transform.backward(y.reshape(-1, self.dim, self.dim).diagonal(0, 1, 2).flatten())\n\n def tf_forward(self, x):\n return tf.matrix_diag(tf.reshape(self._positive_transform.tf_forward(x), (-1, self.dim)))\n\n def tf_log_jacobian(self, x):\n return tf.zeros((1,), float_type) + self._positive_transform.tf_log_jacobian(x)\n\n def __str__(self):\n return 'DiagMatrix'\n\n def free_state_size(self, variable_shape):\n return variable_shape[0] * variable_shape[1]\n\n\nclass LowerTriangular(Transform):\n \"\"\"\n A transform of the form\n\n tri_mat = vec_to_tri(x)\n\n x is a free variable, y is always a list of lower triangular matrices sized\n (N x N x D).\n \"\"\"\n\n def __init__(self, num_matrices=1, squeeze=False):\n \"\"\"\n Create an instance of LowerTriangular transform.\n Args:\n num_matrices: Number of matrices to be stored.\n squeeze: If num_matrices == 1, drop the redundant axis.\n \"\"\"\n self.num_matrices = num_matrices # We need to store this for reconstruction.\n self.squeeze = squeeze\n\n def _validate_vector_length(self, length):\n \"\"\"\n Check whether the vector length is consistent with being a triangular\n matrix and with `self.num_matrices`.\n Args:\n length: Length of the free state vector.\n\n Returns: Length of the vector with the lower triangular elements.\n\n \"\"\"\n L = length / self.num_matrices\n if int(((L * 8) + 1) ** 0.5) ** 2.0 != (L * 8 + 1):\n raise ValueError(\"The free state must be a triangle number.\")\n return L\n\n def forward(self, x):\n \"\"\"\n Transforms from the free state to the variable.\n Args:\n x: Free state vector. Must have length of `self.num_matrices` *\n triangular_number.\n\n Returns:\n Reconstructed variable.\n \"\"\"\n L = self._validate_vector_length(len(x))\n matsize = int((L * 8 + 1) ** 0.5 * 0.5 - 0.5)\n xr = np.reshape(x, (self.num_matrices, -1))\n var = np.zeros((matsize, matsize, self.num_matrices), np_float_type)\n for i in range(self.num_matrices):\n indices = np.tril_indices(matsize, 0)\n var[indices + (np.zeros(len(indices[0])).astype(int) + i,)] = xr[i, :]\n return var.squeeze() if self.squeeze else var\n\n def backward(self, y):\n \"\"\"\n Transforms from the variable to the free state.\n Args:\n y: Variable representation.\n\n Returns:\n Free state.\n \"\"\"\n N = int((y.size / self.num_matrices) ** 0.5)\n y = np.reshape(y, (N, N, self.num_matrices))\n return y[np.tril_indices(len(y), 0)].T.flatten()\n\n def tf_forward(self, x):\n fwd = tf.transpose(tfw.vec_to_tri(tf.reshape(x, (self.num_matrices, -1))), [1, 2, 0])\n return tf.squeeze(fwd) if self.squeeze else fwd\n\n def tf_log_jacobian(self, x):\n return tf.zeros((1,), float_type)\n\n def free_state_size(self, variable_shape):\n matrix_batch = len(variable_shape) > 2\n if ((not matrix_batch and self.num_matrices != 1) or\n (matrix_batch and variable_shape[2] != self.num_matrices)):\n raise ValueError(\"Number of matrices must be consistent with what was passed to the constructor.\")\n if variable_shape[0] != variable_shape[1]:\n raise ValueError(\"Matrices passed must be square.\")\n N = variable_shape[0]\n return int(0.5 * N * (N + 1)) * (variable_shape[2] if matrix_batch else 1)\n\n def __str__(self):\n return \"LoTri->vec\"\n\n\npositive = Log1pe()\n", "path": "GPflow/transforms.py"}], "after_files": [{"content": "# Copyright 2016 James Hensman, alexggmatthews\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport numpy as np\nimport tensorflow as tf\nfrom . import tf_wraps as tfw\nfrom ._settings import settings\n\nfloat_type = settings.dtypes.float_type\nnp_float_type = np.float32 if float_type is tf.float32 else np.float64\n\n\nclass Transform(object):\n def forward(self, x):\n \"\"\"\n Map from the free-space to the variable space, using numpy\n \"\"\"\n raise NotImplementedError\n\n def backward(self, y):\n \"\"\"\n Map from the variable-space to the free space, using numpy\n \"\"\"\n raise NotImplementedError\n\n def tf_forward(self, x):\n \"\"\"\n Map from the free-space to the variable space, using tensorflow\n \"\"\"\n raise NotImplementedError\n\n def tf_log_jacobian(self, x):\n \"\"\"\n Return the log Jacobian of the tf_forward mapping.\n\n Note that we *could* do this using a tf manipulation of\n self.tf_forward, but tensorflow may have difficulty: it doesn't have a\n Jacaobian at time of writing. We do this in the tests to make sure the\n implementation is correct.\n \"\"\"\n raise NotImplementedError\n\n def free_state_size(self, variable_shape):\n return np.prod(variable_shape)\n\n def __str__(self):\n \"\"\"\n A short string describing the nature of the constraint\n \"\"\"\n raise NotImplementedError\n\n def __getstate__(self):\n return self.__dict__.copy()\n\n def __setstate__(self, d):\n self.__dict__ = d\n\n\nclass Identity(Transform):\n def tf_forward(self, x):\n return tf.identity(x)\n\n def forward(self, x):\n return x\n\n def backward(self, y):\n return y\n\n def tf_log_jacobian(self, x):\n return tf.zeros((1,), float_type)\n\n def __str__(self):\n return '(none)'\n\n\nclass Exp(Transform):\n def __init__(self, lower=1e-6):\n self._lower = lower\n\n def tf_forward(self, x):\n return tf.exp(x) + self._lower\n\n def forward(self, x):\n return np.exp(x) + self._lower\n\n def backward(self, y):\n return np.log(y - self._lower)\n\n def tf_log_jacobian(self, x):\n return tf.reduce_sum(x)\n\n def __str__(self):\n return '+ve'\n\n\nclass Log1pe(Transform):\n \"\"\"\n A transform of the form\n\n y = \\log ( 1 + \\exp(x))\n\n x is a free variable, y is always positive.\n\n This function is known as 'softplus' in tensorflow.\n \"\"\"\n\n def __init__(self, lower=1e-6):\n \"\"\"\n lower is a float that defines the minimum value that this transform can\n take, default 1e-6. This helps stability during optimization, because\n aggressive optimizers can take overly-long steps which lead to zero in\n the transformed variable, causing an error.\n \"\"\"\n self._lower = lower\n\n def forward(self, x):\n result = np.log(1. + np.exp(x)) + self._lower\n # do not transform large numbers, they overflow and the mapping is exactly identity.\n return np.where(x > 35, x + self._lower, result)\n\n def tf_forward(self, x):\n return tf.nn.softplus(x) + self._lower\n\n def tf_log_jacobian(self, x):\n return -tf.reduce_sum(tf.log(1. + tf.exp(-x)))\n\n def backward(self, y):\n result = np.log(np.exp(y - self._lower) - np.ones(1, np_float_type))\n return np.where(y > 35, y-self._lower, result)\n\n def __str__(self):\n return '+ve'\n\n\nclass Logistic(Transform):\n def __init__(self, a=0., b=1.):\n Transform.__init__(self)\n assert b > a\n self.a, self.b = float(a), float(b)\n\n def tf_forward(self, x):\n ex = tf.exp(-x)\n return self.a + (self.b - self.a) / (1. + ex)\n\n def forward(self, x):\n ex = np.exp(-x)\n return self.a + (self.b - self.a) / (1. + ex)\n\n def backward(self, y):\n return -np.log((self.b - self.a) / (y - self.a) - 1.)\n\n def tf_log_jacobian(self, x):\n return tf.reduce_sum(x - 2. * tf.log(tf.exp(x) + 1.) + np.log(self.b - self.a))\n\n def __str__(self):\n return '[' + str(self.a) + ', ' + str(self.b) + ']'\n\n\nclass DiagMatrix(Transform):\n \"\"\"\n A transform to represent diagonal matrices.\n\n The output of this transform is a N x dim x dim array of diagonal matrices.\n The contructor argumnet dim specifies the size of the matrixes.\n\n Additionally, to ensure that the matrices are positive definite, the\n diagonal elements are pushed through a 'positive' transform, defaulting to\n log1pe.\n \"\"\"\n def __init__(self, dim=1, positive_transform=Log1pe()):\n self.dim = dim\n self._lower = 1e-6\n self._positive_transform = positive_transform\n\n def forward(self, x):\n # Create diagonal matrix\n x = self._positive_transform.forward(x).reshape((-1, self.dim))\n m = np.zeros((x.shape[0], x.shape[1], x.shape[1]))\n m[(np.s_[:],) + np.diag_indices(x.shape[1])] = x\n return m\n\n def backward(self, y):\n # Return diagonals of matrices\n return self._positive_transform.backward(y.reshape(-1, self.dim, self.dim).diagonal(0, 1, 2).flatten())\n\n def tf_forward(self, x):\n return tf.matrix_diag(tf.reshape(self._positive_transform.tf_forward(x), (-1, self.dim)))\n\n def tf_log_jacobian(self, x):\n return tf.zeros((1,), float_type) + self._positive_transform.tf_log_jacobian(x)\n\n def __str__(self):\n return 'DiagMatrix'\n\n def free_state_size(self, variable_shape):\n return variable_shape[0] * variable_shape[1]\n\n\nclass LowerTriangular(Transform):\n \"\"\"\n A transform of the form\n\n tri_mat = vec_to_tri(x)\n\n x is a free variable, y is always a list of lower triangular matrices sized\n (N x N x D).\n \"\"\"\n\n def __init__(self, num_matrices=1, squeeze=False):\n \"\"\"\n Create an instance of LowerTriangular transform.\n Args:\n num_matrices: Number of matrices to be stored.\n squeeze: If num_matrices == 1, drop the redundant axis.\n \"\"\"\n self.num_matrices = num_matrices # We need to store this for reconstruction.\n self.squeeze = squeeze\n\n def _validate_vector_length(self, length):\n \"\"\"\n Check whether the vector length is consistent with being a triangular\n matrix and with `self.num_matrices`.\n Args:\n length: Length of the free state vector.\n\n Returns: Length of the vector with the lower triangular elements.\n\n \"\"\"\n L = length / self.num_matrices\n if int(((L * 8) + 1) ** 0.5) ** 2.0 != (L * 8 + 1):\n raise ValueError(\"The free state must be a triangle number.\")\n return L\n\n def forward(self, x):\n \"\"\"\n Transforms from the free state to the variable.\n Args:\n x: Free state vector. Must have length of `self.num_matrices` *\n triangular_number.\n\n Returns:\n Reconstructed variable.\n \"\"\"\n L = self._validate_vector_length(len(x))\n matsize = int((L * 8 + 1) ** 0.5 * 0.5 - 0.5)\n xr = np.reshape(x, (self.num_matrices, -1))\n var = np.zeros((matsize, matsize, self.num_matrices), np_float_type)\n for i in range(self.num_matrices):\n indices = np.tril_indices(matsize, 0)\n var[indices + (np.zeros(len(indices[0])).astype(int) + i,)] = xr[i, :]\n return var.squeeze() if self.squeeze else var\n\n def backward(self, y):\n \"\"\"\n Transforms from the variable to the free state.\n Args:\n y: Variable representation.\n\n Returns:\n Free state.\n \"\"\"\n N = int((y.size / self.num_matrices) ** 0.5)\n y = np.reshape(y, (N, N, self.num_matrices))\n return y[np.tril_indices(len(y), 0)].T.flatten()\n\n def tf_forward(self, x):\n fwd = tf.transpose(tfw.vec_to_tri(tf.reshape(x, (self.num_matrices, -1))), [1, 2, 0])\n return tf.squeeze(fwd) if self.squeeze else fwd\n\n def tf_log_jacobian(self, x):\n return tf.zeros((1,), float_type)\n\n def free_state_size(self, variable_shape):\n matrix_batch = len(variable_shape) > 2\n if ((not matrix_batch and self.num_matrices != 1) or\n (matrix_batch and variable_shape[2] != self.num_matrices)):\n raise ValueError(\"Number of matrices must be consistent with what was passed to the constructor.\")\n if variable_shape[0] != variable_shape[1]:\n raise ValueError(\"Matrices passed must be square.\")\n N = variable_shape[0]\n return int(0.5 * N * (N + 1)) * (variable_shape[2] if matrix_batch else 1)\n\n def __str__(self):\n return \"LoTri->vec\"\n\n\npositive = Log1pe()\n", "path": "GPflow/transforms.py"}]}
3,664
277
gh_patches_debug_3867
rasdani/github-patches
git_diff
googleapis__google-cloud-python-5693
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PubSub: add ack_id property accessor to subscriber Message class Version: google-cloud-pubsub 0.35.4 This is a feature request to add a property accessor for ack_id in `google.cloud.pubsub_v1.subscriber.message.Message`. Currently the `_ack_id` field is 'private', and only gets used via the `ack()` method (plus other related methods). We find it useful to log the ack ID so that we can manually ack the ID using the Google Cloud SDK (`gcloud` command line interface). This helps us get out of situations where the message is stuck in a (virtually) infinite retry loop because of some error. We can just access the 'private' field, but it doesn't seem very nice to have to do that. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pubsub/google/cloud/pubsub_v1/subscriber/message.py` Content: ``` 1 # Copyright 2017, Google LLC All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 17 import datetime 18 import json 19 import math 20 import time 21 22 from google.api_core import datetime_helpers 23 from google.cloud.pubsub_v1.subscriber._protocol import requests 24 25 26 _MESSAGE_REPR = """\ 27 Message {{ 28 data: {!r} 29 attributes: {} 30 }}""" 31 32 33 def _indent(lines, prefix=' '): 34 """Indent some text. 35 36 Note that this is present as ``textwrap.indent``, but not in Python 2. 37 38 Args: 39 lines (str): The newline delimited string to be indented. 40 prefix (Optional[str]): The prefix to indent each line with. Default 41 to two spaces. 42 43 Returns: 44 str: The newly indented content. 45 """ 46 indented = [] 47 for line in lines.split('\n'): 48 indented.append(prefix + line) 49 return '\n'.join(indented) 50 51 52 class Message(object): 53 """A representation of a single Pub/Sub message. 54 55 The common way to interact with 56 :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive 57 them in callbacks on subscriptions; most users should never have a need 58 to instantiate them by hand. (The exception to this is if you are 59 implementing a custom subclass to 60 :class:`~.pubsub_v1.subscriber._consumer.Consumer`.) 61 62 Attributes: 63 message_id (str): The message ID. In general, you should not need 64 to use this directly. 65 data (bytes): The data in the message. Note that this will be a 66 :class:`bytes`, not a text string. 67 attributes (.ScalarMapContainer): The attributes sent along with the 68 message. See :attr:`attributes` for more information on this type. 69 publish_time (datetime): The time that this message was originally 70 published. 71 """ 72 73 def __init__(self, message, ack_id, request_queue): 74 """Construct the Message. 75 76 .. note:: 77 78 This class should not be constructed directly; it is the 79 responsibility of :class:`BasePolicy` subclasses to do so. 80 81 Args: 82 message (~.pubsub_v1.types.PubsubMessage): The message received 83 from Pub/Sub. 84 ack_id (str): The ack_id received from Pub/Sub. 85 request_queue (queue.Queue): A queue provided by the policy that 86 can accept requests; the policy is responsible for handling 87 those requests. 88 """ 89 self._message = message 90 self._ack_id = ack_id 91 self._request_queue = request_queue 92 self.message_id = message.message_id 93 94 # The instantiation time is the time that this message 95 # was received. Tracking this provides us a way to be smart about 96 # the default lease deadline. 97 self._received_timestamp = time.time() 98 99 # The policy should lease this message, telling PubSub that it has 100 # it until it is acked or otherwise dropped. 101 self.lease() 102 103 def __repr__(self): 104 # Get an abbreviated version of the data. 105 abbv_data = self._message.data 106 if len(abbv_data) > 50: 107 abbv_data = abbv_data[:50] + b'...' 108 109 pretty_attrs = json.dumps( 110 dict(self.attributes), 111 indent=2, 112 separators=(',', ': '), 113 sort_keys=True, 114 ) 115 pretty_attrs = _indent(pretty_attrs) 116 # We don't actually want the first line indented. 117 pretty_attrs = pretty_attrs.lstrip() 118 return _MESSAGE_REPR.format(abbv_data, pretty_attrs) 119 120 @property 121 def attributes(self): 122 """Return the attributes of the underlying Pub/Sub Message. 123 124 .. warning:: 125 126 A ``ScalarMapContainer`` behaves slightly differently than a 127 ``dict``. For a Pub / Sub message this is a ``string->string`` map. 128 When trying to access a value via ``map['key']``, if the key is 129 not in the map, then the default value for the string type will 130 be returned, which is an empty string. It may be more intuitive 131 to just cast the map to a ``dict`` or to one use ``map.get``. 132 133 Returns: 134 .ScalarMapContainer: The message's attributes. This is a 135 ``dict``-like object provided by ``google.protobuf``. 136 """ 137 return self._message.attributes 138 139 @property 140 def data(self): 141 """Return the data for the underlying Pub/Sub Message. 142 143 Returns: 144 bytes: The message data. This is always a bytestring; if you 145 want a text string, call :meth:`bytes.decode`. 146 """ 147 return self._message.data 148 149 @property 150 def publish_time(self): 151 """Return the time that the message was originally published. 152 153 Returns: 154 datetime: The date and time that the message was published. 155 """ 156 timestamp = self._message.publish_time 157 delta = datetime.timedelta( 158 seconds=timestamp.seconds, 159 microseconds=timestamp.nanos // 1000) 160 return datetime_helpers._UTC_EPOCH + delta 161 162 @property 163 def size(self): 164 """Return the size of the underlying message, in bytes.""" 165 return self._message.ByteSize() 166 167 def ack(self): 168 """Acknowledge the given message. 169 170 Acknowledging a message in Pub/Sub means that you are done 171 with it, and it will not be delivered to this subscription again. 172 You should avoid acknowledging messages until you have 173 *finished* processing them, so that in the event of a failure, 174 you receive the message again. 175 176 .. warning:: 177 Acks in Pub/Sub are best effort. You should always 178 ensure that your processing code is idempotent, as you may 179 receive any given message more than once. 180 """ 181 time_to_ack = math.ceil(time.time() - self._received_timestamp) 182 self._request_queue.put( 183 requests.AckRequest( 184 ack_id=self._ack_id, 185 byte_size=self.size, 186 time_to_ack=time_to_ack 187 ) 188 ) 189 190 def drop(self): 191 """Release the message from lease management. 192 193 This informs the policy to no longer hold on to the lease for this 194 message. Pub/Sub will re-deliver the message if it is not acknowledged 195 before the existing lease expires. 196 197 .. warning:: 198 For most use cases, the only reason to drop a message from 199 lease management is on :meth:`ack` or :meth:`nack`; these methods 200 both call this one. You probably do not want to call this method 201 directly. 202 """ 203 self._request_queue.put( 204 requests.DropRequest( 205 ack_id=self._ack_id, 206 byte_size=self.size 207 ) 208 ) 209 210 def lease(self): 211 """Inform the policy to lease this message continually. 212 213 .. note:: 214 This method is called by the constructor, and you should never 215 need to call it manually. 216 """ 217 self._request_queue.put( 218 requests.LeaseRequest( 219 ack_id=self._ack_id, 220 byte_size=self.size 221 ) 222 ) 223 224 def modify_ack_deadline(self, seconds): 225 """Resets the deadline for acknowledgement. 226 227 New deadline will be the given value of seconds from now. 228 229 The default implementation handles this for you; you should not need 230 to manually deal with setting ack deadlines. The exception case is 231 if you are implementing your own custom subclass of 232 :class:`~.pubsub_v1.subcriber._consumer.Consumer`. 233 234 Args: 235 seconds (int): The number of seconds to set the lease deadline 236 to. This should be between 0 and 600. Due to network latency, 237 values below 10 are advised against. 238 """ 239 self._request_queue.put( 240 requests.ModAckRequest( 241 ack_id=self._ack_id, 242 seconds=seconds 243 ) 244 ) 245 246 def nack(self): 247 """Decline to acknowldge the given message. 248 249 This will cause the message to be re-delivered to the subscription. 250 """ 251 self._request_queue.put( 252 requests.NackRequest( 253 ack_id=self._ack_id, 254 byte_size=self.size 255 ) 256 ) 257 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -164,6 +164,11 @@ """Return the size of the underlying message, in bytes.""" return self._message.ByteSize() + @property + def ack_id(self): + """str: the ID used to ack the message.""" + return self._ack_id + def ack(self): """Acknowledge the given message.
{"golden_diff": "diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py\n--- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py\n+++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py\n@@ -164,6 +164,11 @@\n \"\"\"Return the size of the underlying message, in bytes.\"\"\"\n return self._message.ByteSize()\n \n+ @property\n+ def ack_id(self):\n+ \"\"\"str: the ID used to ack the message.\"\"\"\n+ return self._ack_id\n+\n def ack(self):\n \"\"\"Acknowledge the given message.\n", "issue": "PubSub: add ack_id property accessor to subscriber Message class\nVersion: google-cloud-pubsub 0.35.4\r\n\r\nThis is a feature request to add a property accessor for ack_id in `google.cloud.pubsub_v1.subscriber.message.Message`.\r\n\r\nCurrently the `_ack_id` field is 'private', and only gets used via the `ack()` method (plus other related methods).\r\n\r\nWe find it useful to log the ack ID so that we can manually ack the ID using the Google Cloud SDK (`gcloud` command line interface). This helps us get out of situations where the message is stuck in a (virtually) infinite retry loop because of some error.\r\n\r\nWe can just access the 'private' field, but it doesn't seem very nice to have to do that.\n", "before_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport datetime\nimport json\nimport math\nimport time\n\nfrom google.api_core import datetime_helpers\nfrom google.cloud.pubsub_v1.subscriber._protocol import requests\n\n\n_MESSAGE_REPR = \"\"\"\\\nMessage {{\n data: {!r}\n attributes: {}\n}}\"\"\"\n\n\ndef _indent(lines, prefix=' '):\n \"\"\"Indent some text.\n\n Note that this is present as ``textwrap.indent``, but not in Python 2.\n\n Args:\n lines (str): The newline delimited string to be indented.\n prefix (Optional[str]): The prefix to indent each line with. Default\n to two spaces.\n\n Returns:\n str: The newly indented content.\n \"\"\"\n indented = []\n for line in lines.split('\\n'):\n indented.append(prefix + line)\n return '\\n'.join(indented)\n\n\nclass Message(object):\n \"\"\"A representation of a single Pub/Sub message.\n\n The common way to interact with\n :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive\n them in callbacks on subscriptions; most users should never have a need\n to instantiate them by hand. (The exception to this is if you are\n implementing a custom subclass to\n :class:`~.pubsub_v1.subscriber._consumer.Consumer`.)\n\n Attributes:\n message_id (str): The message ID. In general, you should not need\n to use this directly.\n data (bytes): The data in the message. Note that this will be a\n :class:`bytes`, not a text string.\n attributes (.ScalarMapContainer): The attributes sent along with the\n message. See :attr:`attributes` for more information on this type.\n publish_time (datetime): The time that this message was originally\n published.\n \"\"\"\n\n def __init__(self, message, ack_id, request_queue):\n \"\"\"Construct the Message.\n\n .. note::\n\n This class should not be constructed directly; it is the\n responsibility of :class:`BasePolicy` subclasses to do so.\n\n Args:\n message (~.pubsub_v1.types.PubsubMessage): The message received\n from Pub/Sub.\n ack_id (str): The ack_id received from Pub/Sub.\n request_queue (queue.Queue): A queue provided by the policy that\n can accept requests; the policy is responsible for handling\n those requests.\n \"\"\"\n self._message = message\n self._ack_id = ack_id\n self._request_queue = request_queue\n self.message_id = message.message_id\n\n # The instantiation time is the time that this message\n # was received. Tracking this provides us a way to be smart about\n # the default lease deadline.\n self._received_timestamp = time.time()\n\n # The policy should lease this message, telling PubSub that it has\n # it until it is acked or otherwise dropped.\n self.lease()\n\n def __repr__(self):\n # Get an abbreviated version of the data.\n abbv_data = self._message.data\n if len(abbv_data) > 50:\n abbv_data = abbv_data[:50] + b'...'\n\n pretty_attrs = json.dumps(\n dict(self.attributes),\n indent=2,\n separators=(',', ': '),\n sort_keys=True,\n )\n pretty_attrs = _indent(pretty_attrs)\n # We don't actually want the first line indented.\n pretty_attrs = pretty_attrs.lstrip()\n return _MESSAGE_REPR.format(abbv_data, pretty_attrs)\n\n @property\n def attributes(self):\n \"\"\"Return the attributes of the underlying Pub/Sub Message.\n\n .. warning::\n\n A ``ScalarMapContainer`` behaves slightly differently than a\n ``dict``. For a Pub / Sub message this is a ``string->string`` map.\n When trying to access a value via ``map['key']``, if the key is\n not in the map, then the default value for the string type will\n be returned, which is an empty string. It may be more intuitive\n to just cast the map to a ``dict`` or to one use ``map.get``.\n\n Returns:\n .ScalarMapContainer: The message's attributes. This is a\n ``dict``-like object provided by ``google.protobuf``.\n \"\"\"\n return self._message.attributes\n\n @property\n def data(self):\n \"\"\"Return the data for the underlying Pub/Sub Message.\n\n Returns:\n bytes: The message data. This is always a bytestring; if you\n want a text string, call :meth:`bytes.decode`.\n \"\"\"\n return self._message.data\n\n @property\n def publish_time(self):\n \"\"\"Return the time that the message was originally published.\n\n Returns:\n datetime: The date and time that the message was published.\n \"\"\"\n timestamp = self._message.publish_time\n delta = datetime.timedelta(\n seconds=timestamp.seconds,\n microseconds=timestamp.nanos // 1000)\n return datetime_helpers._UTC_EPOCH + delta\n\n @property\n def size(self):\n \"\"\"Return the size of the underlying message, in bytes.\"\"\"\n return self._message.ByteSize()\n\n def ack(self):\n \"\"\"Acknowledge the given message.\n\n Acknowledging a message in Pub/Sub means that you are done\n with it, and it will not be delivered to this subscription again.\n You should avoid acknowledging messages until you have\n *finished* processing them, so that in the event of a failure,\n you receive the message again.\n\n .. warning::\n Acks in Pub/Sub are best effort. You should always\n ensure that your processing code is idempotent, as you may\n receive any given message more than once.\n \"\"\"\n time_to_ack = math.ceil(time.time() - self._received_timestamp)\n self._request_queue.put(\n requests.AckRequest(\n ack_id=self._ack_id,\n byte_size=self.size,\n time_to_ack=time_to_ack\n )\n )\n\n def drop(self):\n \"\"\"Release the message from lease management.\n\n This informs the policy to no longer hold on to the lease for this\n message. Pub/Sub will re-deliver the message if it is not acknowledged\n before the existing lease expires.\n\n .. warning::\n For most use cases, the only reason to drop a message from\n lease management is on :meth:`ack` or :meth:`nack`; these methods\n both call this one. You probably do not want to call this method\n directly.\n \"\"\"\n self._request_queue.put(\n requests.DropRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n\n def lease(self):\n \"\"\"Inform the policy to lease this message continually.\n\n .. note::\n This method is called by the constructor, and you should never\n need to call it manually.\n \"\"\"\n self._request_queue.put(\n requests.LeaseRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n\n def modify_ack_deadline(self, seconds):\n \"\"\"Resets the deadline for acknowledgement.\n\n New deadline will be the given value of seconds from now.\n\n The default implementation handles this for you; you should not need\n to manually deal with setting ack deadlines. The exception case is\n if you are implementing your own custom subclass of\n :class:`~.pubsub_v1.subcriber._consumer.Consumer`.\n\n Args:\n seconds (int): The number of seconds to set the lease deadline\n to. This should be between 0 and 600. Due to network latency,\n values below 10 are advised against.\n \"\"\"\n self._request_queue.put(\n requests.ModAckRequest(\n ack_id=self._ack_id,\n seconds=seconds\n )\n )\n\n def nack(self):\n \"\"\"Decline to acknowldge the given message.\n\n This will cause the message to be re-delivered to the subscription.\n \"\"\"\n self._request_queue.put(\n requests.NackRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/message.py"}], "after_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport datetime\nimport json\nimport math\nimport time\n\nfrom google.api_core import datetime_helpers\nfrom google.cloud.pubsub_v1.subscriber._protocol import requests\n\n\n_MESSAGE_REPR = \"\"\"\\\nMessage {{\n data: {!r}\n attributes: {}\n}}\"\"\"\n\n\ndef _indent(lines, prefix=' '):\n \"\"\"Indent some text.\n\n Note that this is present as ``textwrap.indent``, but not in Python 2.\n\n Args:\n lines (str): The newline delimited string to be indented.\n prefix (Optional[str]): The prefix to indent each line with. Default\n to two spaces.\n\n Returns:\n str: The newly indented content.\n \"\"\"\n indented = []\n for line in lines.split('\\n'):\n indented.append(prefix + line)\n return '\\n'.join(indented)\n\n\nclass Message(object):\n \"\"\"A representation of a single Pub/Sub message.\n\n The common way to interact with\n :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive\n them in callbacks on subscriptions; most users should never have a need\n to instantiate them by hand. (The exception to this is if you are\n implementing a custom subclass to\n :class:`~.pubsub_v1.subscriber._consumer.Consumer`.)\n\n Attributes:\n message_id (str): The message ID. In general, you should not need\n to use this directly.\n data (bytes): The data in the message. Note that this will be a\n :class:`bytes`, not a text string.\n attributes (.ScalarMapContainer): The attributes sent along with the\n message. See :attr:`attributes` for more information on this type.\n publish_time (datetime): The time that this message was originally\n published.\n \"\"\"\n\n def __init__(self, message, ack_id, request_queue):\n \"\"\"Construct the Message.\n\n .. note::\n\n This class should not be constructed directly; it is the\n responsibility of :class:`BasePolicy` subclasses to do so.\n\n Args:\n message (~.pubsub_v1.types.PubsubMessage): The message received\n from Pub/Sub.\n ack_id (str): The ack_id received from Pub/Sub.\n request_queue (queue.Queue): A queue provided by the policy that\n can accept requests; the policy is responsible for handling\n those requests.\n \"\"\"\n self._message = message\n self._ack_id = ack_id\n self._request_queue = request_queue\n self.message_id = message.message_id\n\n # The instantiation time is the time that this message\n # was received. Tracking this provides us a way to be smart about\n # the default lease deadline.\n self._received_timestamp = time.time()\n\n # The policy should lease this message, telling PubSub that it has\n # it until it is acked or otherwise dropped.\n self.lease()\n\n def __repr__(self):\n # Get an abbreviated version of the data.\n abbv_data = self._message.data\n if len(abbv_data) > 50:\n abbv_data = abbv_data[:50] + b'...'\n\n pretty_attrs = json.dumps(\n dict(self.attributes),\n indent=2,\n separators=(',', ': '),\n sort_keys=True,\n )\n pretty_attrs = _indent(pretty_attrs)\n # We don't actually want the first line indented.\n pretty_attrs = pretty_attrs.lstrip()\n return _MESSAGE_REPR.format(abbv_data, pretty_attrs)\n\n @property\n def attributes(self):\n \"\"\"Return the attributes of the underlying Pub/Sub Message.\n\n .. warning::\n\n A ``ScalarMapContainer`` behaves slightly differently than a\n ``dict``. For a Pub / Sub message this is a ``string->string`` map.\n When trying to access a value via ``map['key']``, if the key is\n not in the map, then the default value for the string type will\n be returned, which is an empty string. It may be more intuitive\n to just cast the map to a ``dict`` or to one use ``map.get``.\n\n Returns:\n .ScalarMapContainer: The message's attributes. This is a\n ``dict``-like object provided by ``google.protobuf``.\n \"\"\"\n return self._message.attributes\n\n @property\n def data(self):\n \"\"\"Return the data for the underlying Pub/Sub Message.\n\n Returns:\n bytes: The message data. This is always a bytestring; if you\n want a text string, call :meth:`bytes.decode`.\n \"\"\"\n return self._message.data\n\n @property\n def publish_time(self):\n \"\"\"Return the time that the message was originally published.\n\n Returns:\n datetime: The date and time that the message was published.\n \"\"\"\n timestamp = self._message.publish_time\n delta = datetime.timedelta(\n seconds=timestamp.seconds,\n microseconds=timestamp.nanos // 1000)\n return datetime_helpers._UTC_EPOCH + delta\n\n @property\n def size(self):\n \"\"\"Return the size of the underlying message, in bytes.\"\"\"\n return self._message.ByteSize()\n\n @property\n def ack_id(self):\n \"\"\"str: the ID used to ack the message.\"\"\"\n return self._ack_id\n\n def ack(self):\n \"\"\"Acknowledge the given message.\n\n Acknowledging a message in Pub/Sub means that you are done\n with it, and it will not be delivered to this subscription again.\n You should avoid acknowledging messages until you have\n *finished* processing them, so that in the event of a failure,\n you receive the message again.\n\n .. warning::\n Acks in Pub/Sub are best effort. You should always\n ensure that your processing code is idempotent, as you may\n receive any given message more than once.\n \"\"\"\n time_to_ack = math.ceil(time.time() - self._received_timestamp)\n self._request_queue.put(\n requests.AckRequest(\n ack_id=self._ack_id,\n byte_size=self.size,\n time_to_ack=time_to_ack\n )\n )\n\n def drop(self):\n \"\"\"Release the message from lease management.\n\n This informs the policy to no longer hold on to the lease for this\n message. Pub/Sub will re-deliver the message if it is not acknowledged\n before the existing lease expires.\n\n .. warning::\n For most use cases, the only reason to drop a message from\n lease management is on :meth:`ack` or :meth:`nack`; these methods\n both call this one. You probably do not want to call this method\n directly.\n \"\"\"\n self._request_queue.put(\n requests.DropRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n\n def lease(self):\n \"\"\"Inform the policy to lease this message continually.\n\n .. note::\n This method is called by the constructor, and you should never\n need to call it manually.\n \"\"\"\n self._request_queue.put(\n requests.LeaseRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n\n def modify_ack_deadline(self, seconds):\n \"\"\"Resets the deadline for acknowledgement.\n\n New deadline will be the given value of seconds from now.\n\n The default implementation handles this for you; you should not need\n to manually deal with setting ack deadlines. The exception case is\n if you are implementing your own custom subclass of\n :class:`~.pubsub_v1.subcriber._consumer.Consumer`.\n\n Args:\n seconds (int): The number of seconds to set the lease deadline\n to. This should be between 0 and 600. Due to network latency,\n values below 10 are advised against.\n \"\"\"\n self._request_queue.put(\n requests.ModAckRequest(\n ack_id=self._ack_id,\n seconds=seconds\n )\n )\n\n def nack(self):\n \"\"\"Decline to acknowldge the given message.\n\n This will cause the message to be re-delivered to the subscription.\n \"\"\"\n self._request_queue.put(\n requests.NackRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/message.py"}]}
2,998
144
gh_patches_debug_18554
rasdani/github-patches
git_diff
praw-dev__praw-846
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- UnicodeEncodeError is raised if reddit returns localized error message ## Issue Description Context: [[PRAW] UnicodeEncodeError when submitting non-unicode text : redditdev](https://www.reddit.com/r/redditdev/comments/6xf600/praw_unicodeencodeerror_when_submitting/) Reddit may return localized error messages depends on the user's preference settings. Since localized error messages may contain non-ascii characters (and underlying requests library converts the errror message to unicode type), running this code in Python2 may raise UnicodeEncodeError: https://github.com/praw-dev/praw/blob/efbe90f8c01a8afcda1fa09a59d1d89ed0da0f6b/praw/exceptions.py#L25 Here is an example of the localized message: ``` File "/usr/local/lib/python2.7/site-packages/praw/exceptions.py", line 25, in __init__ error_str = '{}: \'{}\''.format(error_type, message) UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-10: ordinal not in range(128) Uncaught exception. Entering post mortem debugging Running 'cont' or 'step' will restart the program > /usr/local/lib/python2.7/site-packages/praw/exceptions.py(25)__init__() -> error_str = '{}: \'{}\''.format(error_type, message) (Pdb) p error_type u'RATELIMIT' (Pdb) print message 実行回数が多すぎます。9分経ってから再試行してください。 ``` I think this issue is only affect to Python2 users because Python3's str type is unicode string. ## System Information PRAW Version: 5.0.0 Python Version: Python 2.7.13 Operating System: OS X El Capitan 10.11.6 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `praw/exceptions.py` Content: ``` 1 """PRAW exception classes. 2 3 Includes two main exceptions: :class:`.APIException` for when something goes 4 wrong on the server side, and :class:`.ClientException` when something goes 5 wrong on the client side. Both of these classes extend :class:`.PRAWException`. 6 7 """ 8 9 10 class PRAWException(Exception): 11 """The base PRAW Exception that all other exception classes extend.""" 12 13 14 class APIException(PRAWException): 15 """Indicate exception that involve responses from Reddit's API.""" 16 17 def __init__(self, error_type, message, field): 18 """Initialize an instance of APIException. 19 20 :param error_type: The error type set on Reddit's end. 21 :param message: The associated message for the error. 22 :param field: The input field associated with the error if available. 23 24 """ 25 error_str = '{}: \'{}\''.format(error_type, message) 26 if field: 27 error_str += ' on field \'{}\''.format(field) 28 super(APIException, self).__init__(error_str) 29 self.error_type = error_type 30 self.message = message 31 self.field = field 32 33 34 class ClientException(PRAWException): 35 """Indicate exceptions that don't involve interaction with Reddit's API.""" 36 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/praw/exceptions.py b/praw/exceptions.py --- a/praw/exceptions.py +++ b/praw/exceptions.py @@ -21,10 +21,17 @@ :param message: The associated message for the error. :param field: The input field associated with the error if available. + .. note: Calling `str()` on the instance returns `unicode_escape`d + ASCII string because the message may be localized and may contain + UNICODE characters. If you want a non-escaped message, access + the `message` atribute on the instance. + """ - error_str = '{}: \'{}\''.format(error_type, message) + error_str = u'{}: \'{}\''.format(error_type, message) if field: - error_str += ' on field \'{}\''.format(field) + error_str += u' on field \'{}\''.format(field) + error_str = error_str.encode('unicode_escape').decode('ascii') + super(APIException, self).__init__(error_str) self.error_type = error_type self.message = message
{"golden_diff": "diff --git a/praw/exceptions.py b/praw/exceptions.py\n--- a/praw/exceptions.py\n+++ b/praw/exceptions.py\n@@ -21,10 +21,17 @@\n :param message: The associated message for the error.\n :param field: The input field associated with the error if available.\n \n+ .. note: Calling `str()` on the instance returns `unicode_escape`d\n+ ASCII string because the message may be localized and may contain\n+ UNICODE characters. If you want a non-escaped message, access\n+ the `message` atribute on the instance.\n+\n \"\"\"\n- error_str = '{}: \\'{}\\''.format(error_type, message)\n+ error_str = u'{}: \\'{}\\''.format(error_type, message)\n if field:\n- error_str += ' on field \\'{}\\''.format(field)\n+ error_str += u' on field \\'{}\\''.format(field)\n+ error_str = error_str.encode('unicode_escape').decode('ascii')\n+\n super(APIException, self).__init__(error_str)\n self.error_type = error_type\n self.message = message\n", "issue": "UnicodeEncodeError is raised if reddit returns localized error message\n## Issue Description\r\n\r\nContext: [[PRAW] UnicodeEncodeError when submitting non-unicode text : redditdev](https://www.reddit.com/r/redditdev/comments/6xf600/praw_unicodeencodeerror_when_submitting/)\r\n\r\nReddit may return localized error messages depends on the user's preference settings. Since\r\nlocalized error messages may contain non-ascii characters (and underlying requests library\r\nconverts the errror message to unicode type), running this code in Python2 may raise UnicodeEncodeError:\r\n\r\nhttps://github.com/praw-dev/praw/blob/efbe90f8c01a8afcda1fa09a59d1d89ed0da0f6b/praw/exceptions.py#L25\r\n\r\nHere is an example of the localized message:\r\n\r\n```\r\n File \"/usr/local/lib/python2.7/site-packages/praw/exceptions.py\", line 25, in __init__\r\n error_str = '{}: \\'{}\\''.format(error_type, message)\r\nUnicodeEncodeError: 'ascii' codec can't encode characters in position 0-10: ordinal not in range(128)\r\nUncaught exception. Entering post mortem debugging\r\nRunning 'cont' or 'step' will restart the program\r\n> /usr/local/lib/python2.7/site-packages/praw/exceptions.py(25)__init__()\r\n-> error_str = '{}: \\'{}\\''.format(error_type, message)\r\n(Pdb) p error_type\r\nu'RATELIMIT'\r\n(Pdb) print message\r\n\u5b9f\u884c\u56de\u6570\u304c\u591a\u3059\u304e\u307e\u3059\u30029\u5206\u7d4c\u3063\u3066\u304b\u3089\u518d\u8a66\u884c\u3057\u3066\u304f\u3060\u3055\u3044\u3002\r\n```\r\n\r\nI think this issue is only affect to Python2 users because Python3's str type is unicode string. \r\n\r\n## System Information\r\n\r\n PRAW Version: 5.0.0\r\n Python Version: Python 2.7.13\r\n Operating System: OS X El Capitan 10.11.6\n", "before_files": [{"content": "\"\"\"PRAW exception classes.\n\nIncludes two main exceptions: :class:`.APIException` for when something goes\nwrong on the server side, and :class:`.ClientException` when something goes\nwrong on the client side. Both of these classes extend :class:`.PRAWException`.\n\n\"\"\"\n\n\nclass PRAWException(Exception):\n \"\"\"The base PRAW Exception that all other exception classes extend.\"\"\"\n\n\nclass APIException(PRAWException):\n \"\"\"Indicate exception that involve responses from Reddit's API.\"\"\"\n\n def __init__(self, error_type, message, field):\n \"\"\"Initialize an instance of APIException.\n\n :param error_type: The error type set on Reddit's end.\n :param message: The associated message for the error.\n :param field: The input field associated with the error if available.\n\n \"\"\"\n error_str = '{}: \\'{}\\''.format(error_type, message)\n if field:\n error_str += ' on field \\'{}\\''.format(field)\n super(APIException, self).__init__(error_str)\n self.error_type = error_type\n self.message = message\n self.field = field\n\n\nclass ClientException(PRAWException):\n \"\"\"Indicate exceptions that don't involve interaction with Reddit's API.\"\"\"\n", "path": "praw/exceptions.py"}], "after_files": [{"content": "\"\"\"PRAW exception classes.\n\nIncludes two main exceptions: :class:`.APIException` for when something goes\nwrong on the server side, and :class:`.ClientException` when something goes\nwrong on the client side. Both of these classes extend :class:`.PRAWException`.\n\n\"\"\"\n\n\nclass PRAWException(Exception):\n \"\"\"The base PRAW Exception that all other exception classes extend.\"\"\"\n\n\nclass APIException(PRAWException):\n \"\"\"Indicate exception that involve responses from Reddit's API.\"\"\"\n\n def __init__(self, error_type, message, field):\n \"\"\"Initialize an instance of APIException.\n\n :param error_type: The error type set on Reddit's end.\n :param message: The associated message for the error.\n :param field: The input field associated with the error if available.\n\n .. note: Calling `str()` on the instance returns `unicode_escape`d\n ASCII string because the message may be localized and may contain\n UNICODE characters. If you want a non-escaped message, access\n the `message` atribute on the instance.\n\n \"\"\"\n error_str = u'{}: \\'{}\\''.format(error_type, message)\n if field:\n error_str += u' on field \\'{}\\''.format(field)\n error_str = error_str.encode('unicode_escape').decode('ascii')\n\n super(APIException, self).__init__(error_str)\n self.error_type = error_type\n self.message = message\n self.field = field\n\n\nclass ClientException(PRAWException):\n \"\"\"Indicate exceptions that don't involve interaction with Reddit's API.\"\"\"\n", "path": "praw/exceptions.py"}]}
1,011
252
gh_patches_debug_1683
rasdani/github-patches
git_diff
RedHatInsights__insights-core-2085
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dmesg combiner always succeeds The [Dmesg combiner has only optional dependencies](https://github.com/RedHatInsights/insights-core/blob/master/insights/combiners/dmesg.py#L51), which means it always succeeds. This is an anti-pattern. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `insights/combiners/dmesg.py` Content: ``` 1 """ 2 Dmesg 3 ===== 4 5 Combiner for Dmesg information. It uses the results of the following parsers (if they are present): 6 :class:`insights.parsers.dmesg.DmesgLineList`, 7 :class:`insights.parsers.dmesg_log.DmesgLog` 8 9 Typical output of the ``/var/log/dmesg`` file is:: 10 11 [ 0.000000] Initializing cgroup subsys cpu 12 [ 0.000000] Linux version 3.10.0-862.el7.x86_64 (mockbuild@x86-034.build.eng.bos.redhat.com) \ 13 (gcc version 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC) ) #1 SMP Wed Mar 21 18:14:51 EDT 2018 14 [ 2.090905] SELinux: Completing initialization. 15 [ 2.090907] SELinux: Setting up existing superblocks. 16 [ 2.099684] systemd[1]: Successfully loaded SELinux policy in 82.788ms. 17 [ 2.117410] ip_tables: (C) 2000-2006 Netfilter Core Team 18 [ 2.117429] systemd[1]: Inserted module 'ip_tables' 19 [ 2.376551] systemd-journald[441]: Received request to flush runtime journal from PID 1 20 [ 2.716874] cryptd: max_cpu_qlen set to 100 21 [ 2.804152] AES CTR mode by8 optimization enabled 22 23 Typical output of the ``dmesg`` command is:: 24 25 [ 2.939498] [TTM] Initializing pool allocator 26 [ 2.939502] [TTM] Initializing DMA pool allocator 27 [ 2.940800] [drm] fb mappable at 0xFC000000 28 [ 2.940947] fbcon: cirrusdrmfb (fb0) is primary device 29 [ 2.957375] Console: switching to colour frame buffer device 128x48 30 [ 2.959322] cirrus 0000:00:02.0: fb0: cirrusdrmfb frame buffer device 31 [ 2.959334] [drm] Initialized cirrus 1.0.0 20110418 for 0000:00:02.0 on minor 0 32 [ 3.062459] XFS (vda1): Ending clean mount 33 [ 5.048484] ip6_tables: (C) 2000-2006 Netfilter Core Team 34 [ 5.102434] Ebtables v2.0 registered 35 36 37 Examples: 38 >>> dmesg.dmesg_cmd_available 39 True 40 >>> dmesg.dmesg_log_available 41 True 42 >>> dmesg.dmesg_log_wrapped 43 False 44 """ 45 46 from insights.core.plugins import combiner 47 from insights.parsers.dmesg import DmesgLineList 48 from insights.parsers.dmesg_log import DmesgLog 49 50 51 @combiner(optional=[DmesgLineList, DmesgLog]) 52 class Dmesg(object): 53 """ 54 Combiner for ``dmesg`` command and ``/var/log/dmesg`` file. 55 """ 56 57 def __init__(self, dmesg_cmd, dmesg_log): 58 if dmesg_cmd is not None: 59 self.dmesg_cmd_available = True 60 self.dmesg_cmd = dmesg_cmd 61 self.dmesg_cmd_wrapped = True if 'Linux version' not in dmesg_cmd else False 62 else: 63 self.dmesg_cmd_available = False 64 65 if dmesg_log is not None: 66 self.dmesg_log_available = True 67 self.dmesg_log = dmesg_log 68 self.dmesg_log_wrapped = True if 'Linux version' not in dmesg_log else False 69 else: 70 self.dmesg_log_available = False 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/insights/combiners/dmesg.py b/insights/combiners/dmesg.py --- a/insights/combiners/dmesg.py +++ b/insights/combiners/dmesg.py @@ -48,7 +48,7 @@ from insights.parsers.dmesg_log import DmesgLog -@combiner(optional=[DmesgLineList, DmesgLog]) +@combiner([DmesgLineList, DmesgLog]) class Dmesg(object): """ Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.
{"golden_diff": "diff --git a/insights/combiners/dmesg.py b/insights/combiners/dmesg.py\n--- a/insights/combiners/dmesg.py\n+++ b/insights/combiners/dmesg.py\n@@ -48,7 +48,7 @@\n from insights.parsers.dmesg_log import DmesgLog\n \n \n-@combiner(optional=[DmesgLineList, DmesgLog])\n+@combiner([DmesgLineList, DmesgLog])\n class Dmesg(object):\n \"\"\"\n Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.\n", "issue": "Dmesg combiner always succeeds\nThe [Dmesg combiner has only optional dependencies](https://github.com/RedHatInsights/insights-core/blob/master/insights/combiners/dmesg.py#L51), which means it always succeeds. This is an anti-pattern.\n", "before_files": [{"content": "\"\"\"\nDmesg\n=====\n\nCombiner for Dmesg information. It uses the results of the following parsers (if they are present):\n:class:`insights.parsers.dmesg.DmesgLineList`,\n:class:`insights.parsers.dmesg_log.DmesgLog`\n\nTypical output of the ``/var/log/dmesg`` file is::\n\n[ 0.000000] Initializing cgroup subsys cpu\n[ 0.000000] Linux version 3.10.0-862.el7.x86_64 (mockbuild@x86-034.build.eng.bos.redhat.com) \\\n(gcc version 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC) ) #1 SMP Wed Mar 21 18:14:51 EDT 2018\n[ 2.090905] SELinux: Completing initialization.\n[ 2.090907] SELinux: Setting up existing superblocks.\n[ 2.099684] systemd[1]: Successfully loaded SELinux policy in 82.788ms.\n[ 2.117410] ip_tables: (C) 2000-2006 Netfilter Core Team\n[ 2.117429] systemd[1]: Inserted module 'ip_tables'\n[ 2.376551] systemd-journald[441]: Received request to flush runtime journal from PID 1\n[ 2.716874] cryptd: max_cpu_qlen set to 100\n[ 2.804152] AES CTR mode by8 optimization enabled\n\nTypical output of the ``dmesg`` command is::\n\n[ 2.939498] [TTM] Initializing pool allocator\n[ 2.939502] [TTM] Initializing DMA pool allocator\n[ 2.940800] [drm] fb mappable at 0xFC000000\n[ 2.940947] fbcon: cirrusdrmfb (fb0) is primary device\n[ 2.957375] Console: switching to colour frame buffer device 128x48\n[ 2.959322] cirrus 0000:00:02.0: fb0: cirrusdrmfb frame buffer device\n[ 2.959334] [drm] Initialized cirrus 1.0.0 20110418 for 0000:00:02.0 on minor 0\n[ 3.062459] XFS (vda1): Ending clean mount\n[ 5.048484] ip6_tables: (C) 2000-2006 Netfilter Core Team\n[ 5.102434] Ebtables v2.0 registered\n\n\nExamples:\n >>> dmesg.dmesg_cmd_available\n True\n >>> dmesg.dmesg_log_available\n True\n >>> dmesg.dmesg_log_wrapped\n False\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.dmesg import DmesgLineList\nfrom insights.parsers.dmesg_log import DmesgLog\n\n\n@combiner(optional=[DmesgLineList, DmesgLog])\nclass Dmesg(object):\n \"\"\"\n Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.\n \"\"\"\n\n def __init__(self, dmesg_cmd, dmesg_log):\n if dmesg_cmd is not None:\n self.dmesg_cmd_available = True\n self.dmesg_cmd = dmesg_cmd\n self.dmesg_cmd_wrapped = True if 'Linux version' not in dmesg_cmd else False\n else:\n self.dmesg_cmd_available = False\n\n if dmesg_log is not None:\n self.dmesg_log_available = True\n self.dmesg_log = dmesg_log\n self.dmesg_log_wrapped = True if 'Linux version' not in dmesg_log else False\n else:\n self.dmesg_log_available = False\n", "path": "insights/combiners/dmesg.py"}], "after_files": [{"content": "\"\"\"\nDmesg\n=====\n\nCombiner for Dmesg information. It uses the results of the following parsers (if they are present):\n:class:`insights.parsers.dmesg.DmesgLineList`,\n:class:`insights.parsers.dmesg_log.DmesgLog`\n\nTypical output of the ``/var/log/dmesg`` file is::\n\n[ 0.000000] Initializing cgroup subsys cpu\n[ 0.000000] Linux version 3.10.0-862.el7.x86_64 (mockbuild@x86-034.build.eng.bos.redhat.com) \\\n(gcc version 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC) ) #1 SMP Wed Mar 21 18:14:51 EDT 2018\n[ 2.090905] SELinux: Completing initialization.\n[ 2.090907] SELinux: Setting up existing superblocks.\n[ 2.099684] systemd[1]: Successfully loaded SELinux policy in 82.788ms.\n[ 2.117410] ip_tables: (C) 2000-2006 Netfilter Core Team\n[ 2.117429] systemd[1]: Inserted module 'ip_tables'\n[ 2.376551] systemd-journald[441]: Received request to flush runtime journal from PID 1\n[ 2.716874] cryptd: max_cpu_qlen set to 100\n[ 2.804152] AES CTR mode by8 optimization enabled\n\nTypical output of the ``dmesg`` command is::\n\n[ 2.939498] [TTM] Initializing pool allocator\n[ 2.939502] [TTM] Initializing DMA pool allocator\n[ 2.940800] [drm] fb mappable at 0xFC000000\n[ 2.940947] fbcon: cirrusdrmfb (fb0) is primary device\n[ 2.957375] Console: switching to colour frame buffer device 128x48\n[ 2.959322] cirrus 0000:00:02.0: fb0: cirrusdrmfb frame buffer device\n[ 2.959334] [drm] Initialized cirrus 1.0.0 20110418 for 0000:00:02.0 on minor 0\n[ 3.062459] XFS (vda1): Ending clean mount\n[ 5.048484] ip6_tables: (C) 2000-2006 Netfilter Core Team\n[ 5.102434] Ebtables v2.0 registered\n\n\nExamples:\n >>> dmesg.dmesg_cmd_available\n True\n >>> dmesg.dmesg_log_available\n True\n >>> dmesg.dmesg_log_wrapped\n False\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.dmesg import DmesgLineList\nfrom insights.parsers.dmesg_log import DmesgLog\n\n\n@combiner([DmesgLineList, DmesgLog])\nclass Dmesg(object):\n \"\"\"\n Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.\n \"\"\"\n\n def __init__(self, dmesg_cmd, dmesg_log):\n if dmesg_cmd is not None:\n self.dmesg_cmd_available = True\n self.dmesg_cmd = dmesg_cmd\n self.dmesg_cmd_wrapped = True if 'Linux version' not in dmesg_cmd else False\n else:\n self.dmesg_cmd_available = False\n\n if dmesg_log is not None:\n self.dmesg_log_available = True\n self.dmesg_log = dmesg_log\n self.dmesg_log_wrapped = True if 'Linux version' not in dmesg_log else False\n else:\n self.dmesg_log_available = False\n", "path": "insights/combiners/dmesg.py"}]}
1,474
143
gh_patches_debug_31247
rasdani/github-patches
git_diff
PaddlePaddle__models-202
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Rename self.local_data to self._local_data in class DataGenerator. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `deep_speech_2/data_utils/data.py` Content: ``` 1 """Contains data generator for orgnaizing various audio data preprocessing 2 pipeline and offering data reader interface of PaddlePaddle requirements. 3 """ 4 from __future__ import absolute_import 5 from __future__ import division 6 from __future__ import print_function 7 8 import random 9 import tarfile 10 import multiprocessing 11 import numpy as np 12 import paddle.v2 as paddle 13 from threading import local 14 from data_utils import utils 15 from data_utils.augmentor.augmentation import AugmentationPipeline 16 from data_utils.featurizer.speech_featurizer import SpeechFeaturizer 17 from data_utils.speech import SpeechSegment 18 from data_utils.normalizer import FeatureNormalizer 19 20 21 class DataGenerator(object): 22 """ 23 DataGenerator provides basic audio data preprocessing pipeline, and offers 24 data reader interfaces of PaddlePaddle requirements. 25 26 :param vocab_filepath: Vocabulary filepath for indexing tokenized 27 transcripts. 28 :type vocab_filepath: basestring 29 :param mean_std_filepath: File containing the pre-computed mean and stddev. 30 :type mean_std_filepath: None|basestring 31 :param augmentation_config: Augmentation configuration in json string. 32 Details see AugmentationPipeline.__doc__. 33 :type augmentation_config: str 34 :param max_duration: Audio with duration (in seconds) greater than 35 this will be discarded. 36 :type max_duration: float 37 :param min_duration: Audio with duration (in seconds) smaller than 38 this will be discarded. 39 :type min_duration: float 40 :param stride_ms: Striding size (in milliseconds) for generating frames. 41 :type stride_ms: float 42 :param window_ms: Window size (in milliseconds) for generating frames. 43 :type window_ms: float 44 :param max_freq: Used when specgram_type is 'linear', only FFT bins 45 corresponding to frequencies between [0, max_freq] are 46 returned. 47 :types max_freq: None|float 48 :param specgram_type: Specgram feature type. Options: 'linear'. 49 :type specgram_type: str 50 :param use_dB_normalization: Whether to normalize the audio to -20 dB 51 before extracting the features. 52 :type use_dB_normalization: bool 53 :param num_threads: Number of CPU threads for processing data. 54 :type num_threads: int 55 :param random_seed: Random seed. 56 :type random_seed: int 57 """ 58 59 def __init__(self, 60 vocab_filepath, 61 mean_std_filepath, 62 augmentation_config='{}', 63 max_duration=float('inf'), 64 min_duration=0.0, 65 stride_ms=10.0, 66 window_ms=20.0, 67 max_freq=None, 68 specgram_type='linear', 69 use_dB_normalization=True, 70 num_threads=multiprocessing.cpu_count() // 2, 71 random_seed=0): 72 self._max_duration = max_duration 73 self._min_duration = min_duration 74 self._normalizer = FeatureNormalizer(mean_std_filepath) 75 self._augmentation_pipeline = AugmentationPipeline( 76 augmentation_config=augmentation_config, random_seed=random_seed) 77 self._speech_featurizer = SpeechFeaturizer( 78 vocab_filepath=vocab_filepath, 79 specgram_type=specgram_type, 80 stride_ms=stride_ms, 81 window_ms=window_ms, 82 max_freq=max_freq, 83 use_dB_normalization=use_dB_normalization) 84 self._num_threads = num_threads 85 self._rng = random.Random(random_seed) 86 self._epoch = 0 87 # for caching tar files info 88 self.local_data = local() 89 self.local_data.tar2info = {} 90 self.local_data.tar2object = {} 91 92 def process_utterance(self, filename, transcript): 93 """Load, augment, featurize and normalize for speech data. 94 95 :param filename: Audio filepath 96 :type filename: basestring | file 97 :param transcript: Transcription text. 98 :type transcript: basestring 99 :return: Tuple of audio feature tensor and list of token ids for 100 transcription. 101 :rtype: tuple of (2darray, list) 102 """ 103 speech_segment = SpeechSegment.from_file(filename, transcript) 104 self._augmentation_pipeline.transform_audio(speech_segment) 105 specgram, text_ids = self._speech_featurizer.featurize(speech_segment) 106 specgram = self._normalizer.apply(specgram) 107 return specgram, text_ids 108 109 def batch_reader_creator(self, 110 manifest_path, 111 batch_size, 112 min_batch_size=1, 113 padding_to=-1, 114 flatten=False, 115 sortagrad=False, 116 shuffle_method="batch_shuffle"): 117 """ 118 Batch data reader creator for audio data. Return a callable generator 119 function to produce batches of data. 120 121 Audio features within one batch will be padded with zeros to have the 122 same shape, or a user-defined shape. 123 124 :param manifest_path: Filepath of manifest for audio files. 125 :type manifest_path: basestring 126 :param batch_size: Number of instances in a batch. 127 :type batch_size: int 128 :param min_batch_size: Any batch with batch size smaller than this will 129 be discarded. (To be deprecated in the future.) 130 :type min_batch_size: int 131 :param padding_to: If set -1, the maximun shape in the batch 132 will be used as the target shape for padding. 133 Otherwise, `padding_to` will be the target shape. 134 :type padding_to: int 135 :param flatten: If set True, audio features will be flatten to 1darray. 136 :type flatten: bool 137 :param sortagrad: If set True, sort the instances by audio duration 138 in the first epoch for speed up training. 139 :type sortagrad: bool 140 :param shuffle_method: Shuffle method. Options: 141 '' or None: no shuffle. 142 'instance_shuffle': instance-wise shuffle. 143 'batch_shuffle': similarly-sized instances are 144 put into batches, and then 145 batch-wise shuffle the batches. 146 For more details, please see 147 ``_batch_shuffle.__doc__``. 148 'batch_shuffle_clipped': 'batch_shuffle' with 149 head shift and tail 150 clipping. For more 151 details, please see 152 ``_batch_shuffle``. 153 If sortagrad is True, shuffle is disabled 154 for the first epoch. 155 :type shuffle_method: None|str 156 :return: Batch reader function, producing batches of data when called. 157 :rtype: callable 158 """ 159 160 def batch_reader(): 161 # read manifest 162 manifest = utils.read_manifest( 163 manifest_path=manifest_path, 164 max_duration=self._max_duration, 165 min_duration=self._min_duration) 166 # sort (by duration) or batch-wise shuffle the manifest 167 if self._epoch == 0 and sortagrad: 168 manifest.sort(key=lambda x: x["duration"]) 169 else: 170 if shuffle_method == "batch_shuffle": 171 manifest = self._batch_shuffle( 172 manifest, batch_size, clipped=False) 173 elif shuffle_method == "batch_shuffle_clipped": 174 manifest = self._batch_shuffle( 175 manifest, batch_size, clipped=True) 176 elif shuffle_method == "instance_shuffle": 177 self._rng.shuffle(manifest) 178 elif shuffle_method == None: 179 pass 180 else: 181 raise ValueError("Unknown shuffle method %s." % 182 shuffle_method) 183 # prepare batches 184 instance_reader = self._instance_reader_creator(manifest) 185 batch = [] 186 for instance in instance_reader(): 187 batch.append(instance) 188 if len(batch) == batch_size: 189 yield self._padding_batch(batch, padding_to, flatten) 190 batch = [] 191 if len(batch) >= min_batch_size: 192 yield self._padding_batch(batch, padding_to, flatten) 193 self._epoch += 1 194 195 return batch_reader 196 197 @property 198 def feeding(self): 199 """Returns data reader's feeding dict. 200 201 :return: Data feeding dict. 202 :rtype: dict 203 """ 204 return {"audio_spectrogram": 0, "transcript_text": 1} 205 206 @property 207 def vocab_size(self): 208 """Return the vocabulary size. 209 210 :return: Vocabulary size. 211 :rtype: int 212 """ 213 return self._speech_featurizer.vocab_size 214 215 @property 216 def vocab_list(self): 217 """Return the vocabulary in list. 218 219 :return: Vocabulary in list. 220 :rtype: list 221 """ 222 return self._speech_featurizer.vocab_list 223 224 def _parse_tar(self, file): 225 """Parse a tar file to get a tarfile object 226 and a map containing tarinfoes 227 """ 228 result = {} 229 f = tarfile.open(file) 230 for tarinfo in f.getmembers(): 231 result[tarinfo.name] = tarinfo 232 return f, result 233 234 def _get_file_object(self, file): 235 """Get file object by file path. 236 237 If file startwith tar, it will return a tar file object 238 and cached tar file info for next reading request. 239 It will return file directly, if the type of file is not str. 240 """ 241 if file.startswith('tar:'): 242 tarpath, filename = file.split(':', 1)[1].split('#', 1) 243 if 'tar2info' not in self.local_data.__dict__: 244 self.local_data.tar2info = {} 245 if 'tar2object' not in self.local_data.__dict__: 246 self.local_data.tar2object = {} 247 if tarpath not in self.local_data.tar2info: 248 object, infoes = self._parse_tar(tarpath) 249 self.local_data.tar2info[tarpath] = infoes 250 self.local_data.tar2object[tarpath] = object 251 return self.local_data.tar2object[tarpath].extractfile( 252 self.local_data.tar2info[tarpath][filename]) 253 else: 254 return open(file, 'r') 255 256 def _instance_reader_creator(self, manifest): 257 """ 258 Instance reader creator. Create a callable function to produce 259 instances of data. 260 261 Instance: a tuple of ndarray of audio spectrogram and a list of 262 token indices for transcript. 263 """ 264 265 def reader(): 266 for instance in manifest: 267 yield instance 268 269 def mapper(instance): 270 return self.process_utterance( 271 self._get_file_object(instance["audio_filepath"]), 272 instance["text"]) 273 274 return paddle.reader.xmap_readers( 275 mapper, reader, self._num_threads, 1024, order=True) 276 277 def _padding_batch(self, batch, padding_to=-1, flatten=False): 278 """ 279 Padding audio features with zeros to make them have the same shape (or 280 a user-defined shape) within one bach. 281 282 If ``padding_to`` is -1, the maximun shape in the batch will be used 283 as the target shape for padding. Otherwise, `padding_to` will be the 284 target shape (only refers to the second axis). 285 286 If `flatten` is True, features will be flatten to 1darray. 287 """ 288 new_batch = [] 289 # get target shape 290 max_length = max([audio.shape[1] for audio, text in batch]) 291 if padding_to != -1: 292 if padding_to < max_length: 293 raise ValueError("If padding_to is not -1, it should be larger " 294 "than any instance's shape in the batch") 295 max_length = padding_to 296 # padding 297 for audio, text in batch: 298 padded_audio = np.zeros([audio.shape[0], max_length]) 299 padded_audio[:, :audio.shape[1]] = audio 300 if flatten: 301 padded_audio = padded_audio.flatten() 302 new_batch.append((padded_audio, text)) 303 return new_batch 304 305 def _batch_shuffle(self, manifest, batch_size, clipped=False): 306 """Put similarly-sized instances into minibatches for better efficiency 307 and make a batch-wise shuffle. 308 309 1. Sort the audio clips by duration. 310 2. Generate a random number `k`, k in [0, batch_size). 311 3. Randomly shift `k` instances in order to create different batches 312 for different epochs. Create minibatches. 313 4. Shuffle the minibatches. 314 315 :param manifest: Manifest contents. List of dict. 316 :type manifest: list 317 :param batch_size: Batch size. This size is also used for generate 318 a random number for batch shuffle. 319 :type batch_size: int 320 :param clipped: Whether to clip the heading (small shift) and trailing 321 (incomplete batch) instances. 322 :type clipped: bool 323 :return: Batch shuffled mainifest. 324 :rtype: list 325 """ 326 manifest.sort(key=lambda x: x["duration"]) 327 shift_len = self._rng.randint(0, batch_size - 1) 328 batch_manifest = zip(*[iter(manifest[shift_len:])] * batch_size) 329 self._rng.shuffle(batch_manifest) 330 batch_manifest = list(sum(batch_manifest, ())) 331 if not clipped: 332 res_len = len(manifest) - shift_len - len(batch_manifest) 333 batch_manifest.extend(manifest[-res_len:]) 334 batch_manifest.extend(manifest[0:shift_len]) 335 return batch_manifest 336 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/deep_speech_2/data_utils/data.py b/deep_speech_2/data_utils/data.py --- a/deep_speech_2/data_utils/data.py +++ b/deep_speech_2/data_utils/data.py @@ -85,9 +85,9 @@ self._rng = random.Random(random_seed) self._epoch = 0 # for caching tar files info - self.local_data = local() - self.local_data.tar2info = {} - self.local_data.tar2object = {} + self._local_data = local() + self._local_data.tar2info = {} + self._local_data.tar2object = {} def process_utterance(self, filename, transcript): """Load, augment, featurize and normalize for speech data. @@ -240,16 +240,16 @@ """ if file.startswith('tar:'): tarpath, filename = file.split(':', 1)[1].split('#', 1) - if 'tar2info' not in self.local_data.__dict__: - self.local_data.tar2info = {} - if 'tar2object' not in self.local_data.__dict__: - self.local_data.tar2object = {} - if tarpath not in self.local_data.tar2info: + if 'tar2info' not in self._local_data.__dict__: + self._local_data.tar2info = {} + if 'tar2object' not in self._local_data.__dict__: + self._local_data.tar2object = {} + if tarpath not in self._local_data.tar2info: object, infoes = self._parse_tar(tarpath) - self.local_data.tar2info[tarpath] = infoes - self.local_data.tar2object[tarpath] = object - return self.local_data.tar2object[tarpath].extractfile( - self.local_data.tar2info[tarpath][filename]) + self._local_data.tar2info[tarpath] = infoes + self._local_data.tar2object[tarpath] = object + return self._local_data.tar2object[tarpath].extractfile( + self._local_data.tar2info[tarpath][filename]) else: return open(file, 'r')
{"golden_diff": "diff --git a/deep_speech_2/data_utils/data.py b/deep_speech_2/data_utils/data.py\n--- a/deep_speech_2/data_utils/data.py\n+++ b/deep_speech_2/data_utils/data.py\n@@ -85,9 +85,9 @@\n self._rng = random.Random(random_seed)\n self._epoch = 0\n # for caching tar files info\n- self.local_data = local()\n- self.local_data.tar2info = {}\n- self.local_data.tar2object = {}\n+ self._local_data = local()\n+ self._local_data.tar2info = {}\n+ self._local_data.tar2object = {}\n \n def process_utterance(self, filename, transcript):\n \"\"\"Load, augment, featurize and normalize for speech data.\n@@ -240,16 +240,16 @@\n \"\"\"\n if file.startswith('tar:'):\n tarpath, filename = file.split(':', 1)[1].split('#', 1)\n- if 'tar2info' not in self.local_data.__dict__:\n- self.local_data.tar2info = {}\n- if 'tar2object' not in self.local_data.__dict__:\n- self.local_data.tar2object = {}\n- if tarpath not in self.local_data.tar2info:\n+ if 'tar2info' not in self._local_data.__dict__:\n+ self._local_data.tar2info = {}\n+ if 'tar2object' not in self._local_data.__dict__:\n+ self._local_data.tar2object = {}\n+ if tarpath not in self._local_data.tar2info:\n object, infoes = self._parse_tar(tarpath)\n- self.local_data.tar2info[tarpath] = infoes\n- self.local_data.tar2object[tarpath] = object\n- return self.local_data.tar2object[tarpath].extractfile(\n- self.local_data.tar2info[tarpath][filename])\n+ self._local_data.tar2info[tarpath] = infoes\n+ self._local_data.tar2object[tarpath] = object\n+ return self._local_data.tar2object[tarpath].extractfile(\n+ self._local_data.tar2info[tarpath][filename])\n else:\n return open(file, 'r')\n", "issue": "Rename self.local_data to self._local_data in class DataGenerator.\n\n", "before_files": [{"content": "\"\"\"Contains data generator for orgnaizing various audio data preprocessing\npipeline and offering data reader interface of PaddlePaddle requirements.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport tarfile\nimport multiprocessing\nimport numpy as np\nimport paddle.v2 as paddle\nfrom threading import local\nfrom data_utils import utils\nfrom data_utils.augmentor.augmentation import AugmentationPipeline\nfrom data_utils.featurizer.speech_featurizer import SpeechFeaturizer\nfrom data_utils.speech import SpeechSegment\nfrom data_utils.normalizer import FeatureNormalizer\n\n\nclass DataGenerator(object):\n \"\"\"\n DataGenerator provides basic audio data preprocessing pipeline, and offers\n data reader interfaces of PaddlePaddle requirements.\n\n :param vocab_filepath: Vocabulary filepath for indexing tokenized\n transcripts.\n :type vocab_filepath: basestring\n :param mean_std_filepath: File containing the pre-computed mean and stddev.\n :type mean_std_filepath: None|basestring\n :param augmentation_config: Augmentation configuration in json string.\n Details see AugmentationPipeline.__doc__.\n :type augmentation_config: str\n :param max_duration: Audio with duration (in seconds) greater than\n this will be discarded.\n :type max_duration: float\n :param min_duration: Audio with duration (in seconds) smaller than\n this will be discarded.\n :type min_duration: float\n :param stride_ms: Striding size (in milliseconds) for generating frames.\n :type stride_ms: float\n :param window_ms: Window size (in milliseconds) for generating frames.\n :type window_ms: float\n :param max_freq: Used when specgram_type is 'linear', only FFT bins\n corresponding to frequencies between [0, max_freq] are\n returned.\n :types max_freq: None|float\n :param specgram_type: Specgram feature type. Options: 'linear'.\n :type specgram_type: str\n :param use_dB_normalization: Whether to normalize the audio to -20 dB\n before extracting the features.\n :type use_dB_normalization: bool\n :param num_threads: Number of CPU threads for processing data.\n :type num_threads: int\n :param random_seed: Random seed.\n :type random_seed: int\n \"\"\"\n\n def __init__(self,\n vocab_filepath,\n mean_std_filepath,\n augmentation_config='{}',\n max_duration=float('inf'),\n min_duration=0.0,\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None,\n specgram_type='linear',\n use_dB_normalization=True,\n num_threads=multiprocessing.cpu_count() // 2,\n random_seed=0):\n self._max_duration = max_duration\n self._min_duration = min_duration\n self._normalizer = FeatureNormalizer(mean_std_filepath)\n self._augmentation_pipeline = AugmentationPipeline(\n augmentation_config=augmentation_config, random_seed=random_seed)\n self._speech_featurizer = SpeechFeaturizer(\n vocab_filepath=vocab_filepath,\n specgram_type=specgram_type,\n stride_ms=stride_ms,\n window_ms=window_ms,\n max_freq=max_freq,\n use_dB_normalization=use_dB_normalization)\n self._num_threads = num_threads\n self._rng = random.Random(random_seed)\n self._epoch = 0\n # for caching tar files info\n self.local_data = local()\n self.local_data.tar2info = {}\n self.local_data.tar2object = {}\n\n def process_utterance(self, filename, transcript):\n \"\"\"Load, augment, featurize and normalize for speech data.\n\n :param filename: Audio filepath\n :type filename: basestring | file\n :param transcript: Transcription text.\n :type transcript: basestring\n :return: Tuple of audio feature tensor and list of token ids for\n transcription.\n :rtype: tuple of (2darray, list)\n \"\"\"\n speech_segment = SpeechSegment.from_file(filename, transcript)\n self._augmentation_pipeline.transform_audio(speech_segment)\n specgram, text_ids = self._speech_featurizer.featurize(speech_segment)\n specgram = self._normalizer.apply(specgram)\n return specgram, text_ids\n\n def batch_reader_creator(self,\n manifest_path,\n batch_size,\n min_batch_size=1,\n padding_to=-1,\n flatten=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"\n Batch data reader creator for audio data. Return a callable generator\n function to produce batches of data.\n\n Audio features within one batch will be padded with zeros to have the\n same shape, or a user-defined shape.\n\n :param manifest_path: Filepath of manifest for audio files.\n :type manifest_path: basestring\n :param batch_size: Number of instances in a batch.\n :type batch_size: int\n :param min_batch_size: Any batch with batch size smaller than this will\n be discarded. (To be deprecated in the future.)\n :type min_batch_size: int\n :param padding_to: If set -1, the maximun shape in the batch\n will be used as the target shape for padding.\n Otherwise, `padding_to` will be the target shape.\n :type padding_to: int\n :param flatten: If set True, audio features will be flatten to 1darray.\n :type flatten: bool\n :param sortagrad: If set True, sort the instances by audio duration\n in the first epoch for speed up training.\n :type sortagrad: bool\n :param shuffle_method: Shuffle method. Options:\n '' or None: no shuffle.\n 'instance_shuffle': instance-wise shuffle.\n 'batch_shuffle': similarly-sized instances are\n put into batches, and then\n batch-wise shuffle the batches.\n For more details, please see\n ``_batch_shuffle.__doc__``.\n 'batch_shuffle_clipped': 'batch_shuffle' with\n head shift and tail\n clipping. For more\n details, please see\n ``_batch_shuffle``.\n If sortagrad is True, shuffle is disabled\n for the first epoch.\n :type shuffle_method: None|str\n :return: Batch reader function, producing batches of data when called.\n :rtype: callable\n \"\"\"\n\n def batch_reader():\n # read manifest\n manifest = utils.read_manifest(\n manifest_path=manifest_path,\n max_duration=self._max_duration,\n min_duration=self._min_duration)\n # sort (by duration) or batch-wise shuffle the manifest\n if self._epoch == 0 and sortagrad:\n manifest.sort(key=lambda x: x[\"duration\"])\n else:\n if shuffle_method == \"batch_shuffle\":\n manifest = self._batch_shuffle(\n manifest, batch_size, clipped=False)\n elif shuffle_method == \"batch_shuffle_clipped\":\n manifest = self._batch_shuffle(\n manifest, batch_size, clipped=True)\n elif shuffle_method == \"instance_shuffle\":\n self._rng.shuffle(manifest)\n elif shuffle_method == None:\n pass\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n shuffle_method)\n # prepare batches\n instance_reader = self._instance_reader_creator(manifest)\n batch = []\n for instance in instance_reader():\n batch.append(instance)\n if len(batch) == batch_size:\n yield self._padding_batch(batch, padding_to, flatten)\n batch = []\n if len(batch) >= min_batch_size:\n yield self._padding_batch(batch, padding_to, flatten)\n self._epoch += 1\n\n return batch_reader\n\n @property\n def feeding(self):\n \"\"\"Returns data reader's feeding dict.\n\n :return: Data feeding dict.\n :rtype: dict\n \"\"\"\n return {\"audio_spectrogram\": 0, \"transcript_text\": 1}\n\n @property\n def vocab_size(self):\n \"\"\"Return the vocabulary size.\n\n :return: Vocabulary size.\n :rtype: int\n \"\"\"\n return self._speech_featurizer.vocab_size\n\n @property\n def vocab_list(self):\n \"\"\"Return the vocabulary in list.\n\n :return: Vocabulary in list.\n :rtype: list\n \"\"\"\n return self._speech_featurizer.vocab_list\n\n def _parse_tar(self, file):\n \"\"\"Parse a tar file to get a tarfile object\n and a map containing tarinfoes\n \"\"\"\n result = {}\n f = tarfile.open(file)\n for tarinfo in f.getmembers():\n result[tarinfo.name] = tarinfo\n return f, result\n\n def _get_file_object(self, file):\n \"\"\"Get file object by file path.\n\n If file startwith tar, it will return a tar file object\n and cached tar file info for next reading request.\n It will return file directly, if the type of file is not str.\n \"\"\"\n if file.startswith('tar:'):\n tarpath, filename = file.split(':', 1)[1].split('#', 1)\n if 'tar2info' not in self.local_data.__dict__:\n self.local_data.tar2info = {}\n if 'tar2object' not in self.local_data.__dict__:\n self.local_data.tar2object = {}\n if tarpath not in self.local_data.tar2info:\n object, infoes = self._parse_tar(tarpath)\n self.local_data.tar2info[tarpath] = infoes\n self.local_data.tar2object[tarpath] = object\n return self.local_data.tar2object[tarpath].extractfile(\n self.local_data.tar2info[tarpath][filename])\n else:\n return open(file, 'r')\n\n def _instance_reader_creator(self, manifest):\n \"\"\"\n Instance reader creator. Create a callable function to produce\n instances of data.\n\n Instance: a tuple of ndarray of audio spectrogram and a list of\n token indices for transcript.\n \"\"\"\n\n def reader():\n for instance in manifest:\n yield instance\n\n def mapper(instance):\n return self.process_utterance(\n self._get_file_object(instance[\"audio_filepath\"]),\n instance[\"text\"])\n\n return paddle.reader.xmap_readers(\n mapper, reader, self._num_threads, 1024, order=True)\n\n def _padding_batch(self, batch, padding_to=-1, flatten=False):\n \"\"\"\n Padding audio features with zeros to make them have the same shape (or\n a user-defined shape) within one bach.\n\n If ``padding_to`` is -1, the maximun shape in the batch will be used\n as the target shape for padding. Otherwise, `padding_to` will be the\n target shape (only refers to the second axis).\n\n If `flatten` is True, features will be flatten to 1darray.\n \"\"\"\n new_batch = []\n # get target shape\n max_length = max([audio.shape[1] for audio, text in batch])\n if padding_to != -1:\n if padding_to < max_length:\n raise ValueError(\"If padding_to is not -1, it should be larger \"\n \"than any instance's shape in the batch\")\n max_length = padding_to\n # padding\n for audio, text in batch:\n padded_audio = np.zeros([audio.shape[0], max_length])\n padded_audio[:, :audio.shape[1]] = audio\n if flatten:\n padded_audio = padded_audio.flatten()\n new_batch.append((padded_audio, text))\n return new_batch\n\n def _batch_shuffle(self, manifest, batch_size, clipped=False):\n \"\"\"Put similarly-sized instances into minibatches for better efficiency\n and make a batch-wise shuffle.\n\n 1. Sort the audio clips by duration.\n 2. Generate a random number `k`, k in [0, batch_size).\n 3. Randomly shift `k` instances in order to create different batches\n for different epochs. Create minibatches.\n 4. Shuffle the minibatches.\n\n :param manifest: Manifest contents. List of dict.\n :type manifest: list\n :param batch_size: Batch size. This size is also used for generate\n a random number for batch shuffle.\n :type batch_size: int\n :param clipped: Whether to clip the heading (small shift) and trailing\n (incomplete batch) instances.\n :type clipped: bool\n :return: Batch shuffled mainifest.\n :rtype: list\n \"\"\"\n manifest.sort(key=lambda x: x[\"duration\"])\n shift_len = self._rng.randint(0, batch_size - 1)\n batch_manifest = zip(*[iter(manifest[shift_len:])] * batch_size)\n self._rng.shuffle(batch_manifest)\n batch_manifest = list(sum(batch_manifest, ()))\n if not clipped:\n res_len = len(manifest) - shift_len - len(batch_manifest)\n batch_manifest.extend(manifest[-res_len:])\n batch_manifest.extend(manifest[0:shift_len])\n return batch_manifest\n", "path": "deep_speech_2/data_utils/data.py"}], "after_files": [{"content": "\"\"\"Contains data generator for orgnaizing various audio data preprocessing\npipeline and offering data reader interface of PaddlePaddle requirements.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport tarfile\nimport multiprocessing\nimport numpy as np\nimport paddle.v2 as paddle\nfrom threading import local\nfrom data_utils import utils\nfrom data_utils.augmentor.augmentation import AugmentationPipeline\nfrom data_utils.featurizer.speech_featurizer import SpeechFeaturizer\nfrom data_utils.speech import SpeechSegment\nfrom data_utils.normalizer import FeatureNormalizer\n\n\nclass DataGenerator(object):\n \"\"\"\n DataGenerator provides basic audio data preprocessing pipeline, and offers\n data reader interfaces of PaddlePaddle requirements.\n\n :param vocab_filepath: Vocabulary filepath for indexing tokenized\n transcripts.\n :type vocab_filepath: basestring\n :param mean_std_filepath: File containing the pre-computed mean and stddev.\n :type mean_std_filepath: None|basestring\n :param augmentation_config: Augmentation configuration in json string.\n Details see AugmentationPipeline.__doc__.\n :type augmentation_config: str\n :param max_duration: Audio with duration (in seconds) greater than\n this will be discarded.\n :type max_duration: float\n :param min_duration: Audio with duration (in seconds) smaller than\n this will be discarded.\n :type min_duration: float\n :param stride_ms: Striding size (in milliseconds) for generating frames.\n :type stride_ms: float\n :param window_ms: Window size (in milliseconds) for generating frames.\n :type window_ms: float\n :param max_freq: Used when specgram_type is 'linear', only FFT bins\n corresponding to frequencies between [0, max_freq] are\n returned.\n :types max_freq: None|float\n :param specgram_type: Specgram feature type. Options: 'linear'.\n :type specgram_type: str\n :param use_dB_normalization: Whether to normalize the audio to -20 dB\n before extracting the features.\n :type use_dB_normalization: bool\n :param num_threads: Number of CPU threads for processing data.\n :type num_threads: int\n :param random_seed: Random seed.\n :type random_seed: int\n \"\"\"\n\n def __init__(self,\n vocab_filepath,\n mean_std_filepath,\n augmentation_config='{}',\n max_duration=float('inf'),\n min_duration=0.0,\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None,\n specgram_type='linear',\n use_dB_normalization=True,\n num_threads=multiprocessing.cpu_count() // 2,\n random_seed=0):\n self._max_duration = max_duration\n self._min_duration = min_duration\n self._normalizer = FeatureNormalizer(mean_std_filepath)\n self._augmentation_pipeline = AugmentationPipeline(\n augmentation_config=augmentation_config, random_seed=random_seed)\n self._speech_featurizer = SpeechFeaturizer(\n vocab_filepath=vocab_filepath,\n specgram_type=specgram_type,\n stride_ms=stride_ms,\n window_ms=window_ms,\n max_freq=max_freq,\n use_dB_normalization=use_dB_normalization)\n self._num_threads = num_threads\n self._rng = random.Random(random_seed)\n self._epoch = 0\n # for caching tar files info\n self._local_data = local()\n self._local_data.tar2info = {}\n self._local_data.tar2object = {}\n\n def process_utterance(self, filename, transcript):\n \"\"\"Load, augment, featurize and normalize for speech data.\n\n :param filename: Audio filepath\n :type filename: basestring | file\n :param transcript: Transcription text.\n :type transcript: basestring\n :return: Tuple of audio feature tensor and list of token ids for\n transcription.\n :rtype: tuple of (2darray, list)\n \"\"\"\n speech_segment = SpeechSegment.from_file(filename, transcript)\n self._augmentation_pipeline.transform_audio(speech_segment)\n specgram, text_ids = self._speech_featurizer.featurize(speech_segment)\n specgram = self._normalizer.apply(specgram)\n return specgram, text_ids\n\n def batch_reader_creator(self,\n manifest_path,\n batch_size,\n min_batch_size=1,\n padding_to=-1,\n flatten=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"\n Batch data reader creator for audio data. Return a callable generator\n function to produce batches of data.\n\n Audio features within one batch will be padded with zeros to have the\n same shape, or a user-defined shape.\n\n :param manifest_path: Filepath of manifest for audio files.\n :type manifest_path: basestring\n :param batch_size: Number of instances in a batch.\n :type batch_size: int\n :param min_batch_size: Any batch with batch size smaller than this will\n be discarded. (To be deprecated in the future.)\n :type min_batch_size: int\n :param padding_to: If set -1, the maximun shape in the batch\n will be used as the target shape for padding.\n Otherwise, `padding_to` will be the target shape.\n :type padding_to: int\n :param flatten: If set True, audio features will be flatten to 1darray.\n :type flatten: bool\n :param sortagrad: If set True, sort the instances by audio duration\n in the first epoch for speed up training.\n :type sortagrad: bool\n :param shuffle_method: Shuffle method. Options:\n '' or None: no shuffle.\n 'instance_shuffle': instance-wise shuffle.\n 'batch_shuffle': similarly-sized instances are\n put into batches, and then\n batch-wise shuffle the batches.\n For more details, please see\n ``_batch_shuffle.__doc__``.\n 'batch_shuffle_clipped': 'batch_shuffle' with\n head shift and tail\n clipping. For more\n details, please see\n ``_batch_shuffle``.\n If sortagrad is True, shuffle is disabled\n for the first epoch.\n :type shuffle_method: None|str\n :return: Batch reader function, producing batches of data when called.\n :rtype: callable\n \"\"\"\n\n def batch_reader():\n # read manifest\n manifest = utils.read_manifest(\n manifest_path=manifest_path,\n max_duration=self._max_duration,\n min_duration=self._min_duration)\n # sort (by duration) or batch-wise shuffle the manifest\n if self._epoch == 0 and sortagrad:\n manifest.sort(key=lambda x: x[\"duration\"])\n else:\n if shuffle_method == \"batch_shuffle\":\n manifest = self._batch_shuffle(\n manifest, batch_size, clipped=False)\n elif shuffle_method == \"batch_shuffle_clipped\":\n manifest = self._batch_shuffle(\n manifest, batch_size, clipped=True)\n elif shuffle_method == \"instance_shuffle\":\n self._rng.shuffle(manifest)\n elif shuffle_method == None:\n pass\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n shuffle_method)\n # prepare batches\n instance_reader = self._instance_reader_creator(manifest)\n batch = []\n for instance in instance_reader():\n batch.append(instance)\n if len(batch) == batch_size:\n yield self._padding_batch(batch, padding_to, flatten)\n batch = []\n if len(batch) >= min_batch_size:\n yield self._padding_batch(batch, padding_to, flatten)\n self._epoch += 1\n\n return batch_reader\n\n @property\n def feeding(self):\n \"\"\"Returns data reader's feeding dict.\n\n :return: Data feeding dict.\n :rtype: dict\n \"\"\"\n return {\"audio_spectrogram\": 0, \"transcript_text\": 1}\n\n @property\n def vocab_size(self):\n \"\"\"Return the vocabulary size.\n\n :return: Vocabulary size.\n :rtype: int\n \"\"\"\n return self._speech_featurizer.vocab_size\n\n @property\n def vocab_list(self):\n \"\"\"Return the vocabulary in list.\n\n :return: Vocabulary in list.\n :rtype: list\n \"\"\"\n return self._speech_featurizer.vocab_list\n\n def _parse_tar(self, file):\n \"\"\"Parse a tar file to get a tarfile object\n and a map containing tarinfoes\n \"\"\"\n result = {}\n f = tarfile.open(file)\n for tarinfo in f.getmembers():\n result[tarinfo.name] = tarinfo\n return f, result\n\n def _get_file_object(self, file):\n \"\"\"Get file object by file path.\n\n If file startwith tar, it will return a tar file object\n and cached tar file info for next reading request.\n It will return file directly, if the type of file is not str.\n \"\"\"\n if file.startswith('tar:'):\n tarpath, filename = file.split(':', 1)[1].split('#', 1)\n if 'tar2info' not in self._local_data.__dict__:\n self._local_data.tar2info = {}\n if 'tar2object' not in self._local_data.__dict__:\n self._local_data.tar2object = {}\n if tarpath not in self._local_data.tar2info:\n object, infoes = self._parse_tar(tarpath)\n self._local_data.tar2info[tarpath] = infoes\n self._local_data.tar2object[tarpath] = object\n return self._local_data.tar2object[tarpath].extractfile(\n self._local_data.tar2info[tarpath][filename])\n else:\n return open(file, 'r')\n\n def _instance_reader_creator(self, manifest):\n \"\"\"\n Instance reader creator. Create a callable function to produce\n instances of data.\n\n Instance: a tuple of ndarray of audio spectrogram and a list of\n token indices for transcript.\n \"\"\"\n\n def reader():\n for instance in manifest:\n yield instance\n\n def mapper(instance):\n return self.process_utterance(\n self._get_file_object(instance[\"audio_filepath\"]),\n instance[\"text\"])\n\n return paddle.reader.xmap_readers(\n mapper, reader, self._num_threads, 1024, order=True)\n\n def _padding_batch(self, batch, padding_to=-1, flatten=False):\n \"\"\"\n Padding audio features with zeros to make them have the same shape (or\n a user-defined shape) within one bach.\n\n If ``padding_to`` is -1, the maximun shape in the batch will be used\n as the target shape for padding. Otherwise, `padding_to` will be the\n target shape (only refers to the second axis).\n\n If `flatten` is True, features will be flatten to 1darray.\n \"\"\"\n new_batch = []\n # get target shape\n max_length = max([audio.shape[1] for audio, text in batch])\n if padding_to != -1:\n if padding_to < max_length:\n raise ValueError(\"If padding_to is not -1, it should be larger \"\n \"than any instance's shape in the batch\")\n max_length = padding_to\n # padding\n for audio, text in batch:\n padded_audio = np.zeros([audio.shape[0], max_length])\n padded_audio[:, :audio.shape[1]] = audio\n if flatten:\n padded_audio = padded_audio.flatten()\n new_batch.append((padded_audio, text))\n return new_batch\n\n def _batch_shuffle(self, manifest, batch_size, clipped=False):\n \"\"\"Put similarly-sized instances into minibatches for better efficiency\n and make a batch-wise shuffle.\n\n 1. Sort the audio clips by duration.\n 2. Generate a random number `k`, k in [0, batch_size).\n 3. Randomly shift `k` instances in order to create different batches\n for different epochs. Create minibatches.\n 4. Shuffle the minibatches.\n\n :param manifest: Manifest contents. List of dict.\n :type manifest: list\n :param batch_size: Batch size. This size is also used for generate\n a random number for batch shuffle.\n :type batch_size: int\n :param clipped: Whether to clip the heading (small shift) and trailing\n (incomplete batch) instances.\n :type clipped: bool\n :return: Batch shuffled mainifest.\n :rtype: list\n \"\"\"\n manifest.sort(key=lambda x: x[\"duration\"])\n shift_len = self._rng.randint(0, batch_size - 1)\n batch_manifest = zip(*[iter(manifest[shift_len:])] * batch_size)\n self._rng.shuffle(batch_manifest)\n batch_manifest = list(sum(batch_manifest, ()))\n if not clipped:\n res_len = len(manifest) - shift_len - len(batch_manifest)\n batch_manifest.extend(manifest[-res_len:])\n batch_manifest.extend(manifest[0:shift_len])\n return batch_manifest\n", "path": "deep_speech_2/data_utils/data.py"}]}
4,026
515
gh_patches_debug_31108
rasdani/github-patches
git_diff
python-poetry__poetry-5053
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update prompt environment variable when opening shell <!-- Hi there! Thank you for wanting to make Poetry better. Before you submit this; let's make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill these first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. - [x] I have searched the [documentation](https://poetry.eustace.io/docs/) and believe that my question is not covered. ## Feature Request <!-- Now feel free to write your idea for improvement. Thanks again 🙌 ❤️ --> When running `poetry shell` you have no idea your in the virtualenv or not. Please add the virtualenv's name to the $PROMPT or $PS1 variable. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/poetry/utils/shell.py` Content: ``` 1 import os 2 import signal 3 import sys 4 5 from pathlib import Path 6 from typing import TYPE_CHECKING 7 from typing import Any 8 from typing import Optional 9 10 import pexpect 11 12 from cleo.terminal import Terminal 13 from shellingham import ShellDetectionFailure 14 from shellingham import detect_shell 15 16 from poetry.utils._compat import WINDOWS 17 18 19 if TYPE_CHECKING: 20 from poetry.utils.env import VirtualEnv 21 22 23 class Shell: 24 """ 25 Represents the current shell. 26 """ 27 28 _shell = None 29 30 def __init__(self, name: str, path: str) -> None: 31 self._name = name 32 self._path = path 33 34 @property 35 def name(self) -> str: 36 return self._name 37 38 @property 39 def path(self) -> str: 40 return self._path 41 42 @classmethod 43 def get(cls) -> "Shell": 44 """ 45 Retrieve the current shell. 46 """ 47 if cls._shell is not None: 48 return cls._shell 49 50 try: 51 name, path = detect_shell(os.getpid()) 52 except (RuntimeError, ShellDetectionFailure): 53 shell = None 54 55 if os.name == "posix": 56 shell = os.environ.get("SHELL") 57 elif os.name == "nt": 58 shell = os.environ.get("COMSPEC") 59 60 if not shell: 61 raise RuntimeError("Unable to detect the current shell.") 62 63 name, path = Path(shell).stem, shell 64 65 cls._shell = cls(name, path) 66 67 return cls._shell 68 69 def activate(self, env: "VirtualEnv") -> Optional[int]: 70 if WINDOWS: 71 return env.execute(self.path) 72 73 import shlex 74 75 terminal = Terminal() 76 with env.temp_environ(): 77 c = pexpect.spawn( 78 self._path, ["-i"], dimensions=(terminal.height, terminal.width) 79 ) 80 81 if self._name == "zsh": 82 c.setecho(False) 83 84 activate_script = self._get_activate_script() 85 bin_dir = "Scripts" if WINDOWS else "bin" 86 activate_path = env.path / bin_dir / activate_script 87 c.sendline(f"{self._get_source_command()} {shlex.quote(str(activate_path))}") 88 89 def resize(sig: Any, data: Any) -> None: 90 terminal = Terminal() 91 c.setwinsize(terminal.height, terminal.width) 92 93 signal.signal(signal.SIGWINCH, resize) 94 95 # Interact with the new shell. 96 c.interact(escape_character=None) 97 c.close() 98 99 sys.exit(c.exitstatus) 100 101 def _get_activate_script(self) -> str: 102 if self._name == "fish": 103 suffix = ".fish" 104 elif self._name in ("csh", "tcsh"): 105 suffix = ".csh" 106 else: 107 suffix = "" 108 109 return "activate" + suffix 110 111 def _get_source_command(self) -> str: 112 if self._name in ("fish", "csh", "tcsh"): 113 return "source" 114 return "." 115 116 def __repr__(self) -> str: 117 return f'{self.__class__.__name__}("{self._name}", "{self._path}")' 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/poetry/utils/shell.py b/src/poetry/utils/shell.py --- a/src/poetry/utils/shell.py +++ b/src/poetry/utils/shell.py @@ -1,5 +1,6 @@ import os import signal +import subprocess import sys from pathlib import Path @@ -67,8 +68,19 @@ return cls._shell def activate(self, env: "VirtualEnv") -> Optional[int]: + activate_script = self._get_activate_script() + bin_dir = "Scripts" if WINDOWS else "bin" + activate_path = env.path / bin_dir / activate_script + if WINDOWS: - return env.execute(self.path) + if self._name in ("powershell", "pwsh"): + args = ["-NoExit", "-File", str(activate_path)] + else: + # /K will execute the bat file and + # keep the cmd process from terminating + args = ["/K", str(activate_path)] + completed_proc = subprocess.run([self.path, *args]) + return completed_proc.returncode import shlex @@ -81,9 +93,6 @@ if self._name == "zsh": c.setecho(False) - activate_script = self._get_activate_script() - bin_dir = "Scripts" if WINDOWS else "bin" - activate_path = env.path / bin_dir / activate_script c.sendline(f"{self._get_source_command()} {shlex.quote(str(activate_path))}") def resize(sig: Any, data: Any) -> None: @@ -103,6 +112,10 @@ suffix = ".fish" elif self._name in ("csh", "tcsh"): suffix = ".csh" + elif self._name in ("powershell", "pwsh"): + suffix = ".ps1" + elif self._name == "cmd": + suffix = ".bat" else: suffix = ""
{"golden_diff": "diff --git a/src/poetry/utils/shell.py b/src/poetry/utils/shell.py\n--- a/src/poetry/utils/shell.py\n+++ b/src/poetry/utils/shell.py\n@@ -1,5 +1,6 @@\n import os\n import signal\n+import subprocess\n import sys\n \n from pathlib import Path\n@@ -67,8 +68,19 @@\n return cls._shell\n \n def activate(self, env: \"VirtualEnv\") -> Optional[int]:\n+ activate_script = self._get_activate_script()\n+ bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n+ activate_path = env.path / bin_dir / activate_script\n+\n if WINDOWS:\n- return env.execute(self.path)\n+ if self._name in (\"powershell\", \"pwsh\"):\n+ args = [\"-NoExit\", \"-File\", str(activate_path)]\n+ else:\n+ # /K will execute the bat file and\n+ # keep the cmd process from terminating\n+ args = [\"/K\", str(activate_path)]\n+ completed_proc = subprocess.run([self.path, *args])\n+ return completed_proc.returncode\n \n import shlex\n \n@@ -81,9 +93,6 @@\n if self._name == \"zsh\":\n c.setecho(False)\n \n- activate_script = self._get_activate_script()\n- bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n- activate_path = env.path / bin_dir / activate_script\n c.sendline(f\"{self._get_source_command()} {shlex.quote(str(activate_path))}\")\n \n def resize(sig: Any, data: Any) -> None:\n@@ -103,6 +112,10 @@\n suffix = \".fish\"\n elif self._name in (\"csh\", \"tcsh\"):\n suffix = \".csh\"\n+ elif self._name in (\"powershell\", \"pwsh\"):\n+ suffix = \".ps1\"\n+ elif self._name == \"cmd\":\n+ suffix = \".bat\"\n else:\n suffix = \"\"\n", "issue": "Update prompt environment variable when opening shell\n<!--\r\n Hi there! Thank you for wanting to make Poetry better.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] I have searched the [documentation](https://poetry.eustace.io/docs/) and believe that my question is not covered.\r\n\r\n## Feature Request\r\n<!-- Now feel free to write your idea for improvement. Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\nWhen running `poetry shell` you have no idea your in the virtualenv or not. Please add the virtualenv's name to the $PROMPT or $PS1 variable.\n", "before_files": [{"content": "import os\nimport signal\nimport sys\n\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\nfrom typing import Any\nfrom typing import Optional\n\nimport pexpect\n\nfrom cleo.terminal import Terminal\nfrom shellingham import ShellDetectionFailure\nfrom shellingham import detect_shell\n\nfrom poetry.utils._compat import WINDOWS\n\n\nif TYPE_CHECKING:\n from poetry.utils.env import VirtualEnv\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name: str, path: str) -> None:\n self._name = name\n self._path = path\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def path(self) -> str:\n return self._path\n\n @classmethod\n def get(cls) -> \"Shell\":\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n shell = None\n\n if os.name == \"posix\":\n shell = os.environ.get(\"SHELL\")\n elif os.name == \"nt\":\n shell = os.environ.get(\"COMSPEC\")\n\n if not shell:\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n name, path = Path(shell).stem, shell\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def activate(self, env: \"VirtualEnv\") -> Optional[int]:\n if WINDOWS:\n return env.execute(self.path)\n\n import shlex\n\n terminal = Terminal()\n with env.temp_environ():\n c = pexpect.spawn(\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n\n if self._name == \"zsh\":\n c.setecho(False)\n\n activate_script = self._get_activate_script()\n bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n activate_path = env.path / bin_dir / activate_script\n c.sendline(f\"{self._get_source_command()} {shlex.quote(str(activate_path))}\")\n\n def resize(sig: Any, data: Any) -> None:\n terminal = Terminal()\n c.setwinsize(terminal.height, terminal.width)\n\n signal.signal(signal.SIGWINCH, resize)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n\n sys.exit(c.exitstatus)\n\n def _get_activate_script(self) -> str:\n if self._name == \"fish\":\n suffix = \".fish\"\n elif self._name in (\"csh\", \"tcsh\"):\n suffix = \".csh\"\n else:\n suffix = \"\"\n\n return \"activate\" + suffix\n\n def _get_source_command(self) -> str:\n if self._name in (\"fish\", \"csh\", \"tcsh\"):\n return \"source\"\n return \".\"\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(\"{self._name}\", \"{self._path}\")'\n", "path": "src/poetry/utils/shell.py"}], "after_files": [{"content": "import os\nimport signal\nimport subprocess\nimport sys\n\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\nfrom typing import Any\nfrom typing import Optional\n\nimport pexpect\n\nfrom cleo.terminal import Terminal\nfrom shellingham import ShellDetectionFailure\nfrom shellingham import detect_shell\n\nfrom poetry.utils._compat import WINDOWS\n\n\nif TYPE_CHECKING:\n from poetry.utils.env import VirtualEnv\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name: str, path: str) -> None:\n self._name = name\n self._path = path\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def path(self) -> str:\n return self._path\n\n @classmethod\n def get(cls) -> \"Shell\":\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n shell = None\n\n if os.name == \"posix\":\n shell = os.environ.get(\"SHELL\")\n elif os.name == \"nt\":\n shell = os.environ.get(\"COMSPEC\")\n\n if not shell:\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n name, path = Path(shell).stem, shell\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def activate(self, env: \"VirtualEnv\") -> Optional[int]:\n activate_script = self._get_activate_script()\n bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n activate_path = env.path / bin_dir / activate_script\n\n if WINDOWS:\n if self._name in (\"powershell\", \"pwsh\"):\n args = [\"-NoExit\", \"-File\", str(activate_path)]\n else:\n # /K will execute the bat file and\n # keep the cmd process from terminating\n args = [\"/K\", str(activate_path)]\n completed_proc = subprocess.run([self.path, *args])\n return completed_proc.returncode\n\n import shlex\n\n terminal = Terminal()\n with env.temp_environ():\n c = pexpect.spawn(\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n\n if self._name == \"zsh\":\n c.setecho(False)\n\n c.sendline(f\"{self._get_source_command()} {shlex.quote(str(activate_path))}\")\n\n def resize(sig: Any, data: Any) -> None:\n terminal = Terminal()\n c.setwinsize(terminal.height, terminal.width)\n\n signal.signal(signal.SIGWINCH, resize)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n\n sys.exit(c.exitstatus)\n\n def _get_activate_script(self) -> str:\n if self._name == \"fish\":\n suffix = \".fish\"\n elif self._name in (\"csh\", \"tcsh\"):\n suffix = \".csh\"\n elif self._name in (\"powershell\", \"pwsh\"):\n suffix = \".ps1\"\n elif self._name == \"cmd\":\n suffix = \".bat\"\n else:\n suffix = \"\"\n\n return \"activate\" + suffix\n\n def _get_source_command(self) -> str:\n if self._name in (\"fish\", \"csh\", \"tcsh\"):\n return \"source\"\n return \".\"\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(\"{self._name}\", \"{self._path}\")'\n", "path": "src/poetry/utils/shell.py"}]}
1,404
450
gh_patches_debug_40515
rasdani/github-patches
git_diff
inventree__InvenTree-6144
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [FR] Support creating reading initial superuser account password from file ### Please verify that this feature request has NOT been suggested before. - [X] I checked and didn't find a similar feature request ### Problem statement I am running the docker-compose project on [NixOS](https://nixos.org/) through [Arion](https://docs.hercules-ci.com/arion/). I am managing all of my secrets/passwords through [agenix](https://github.com/ryantm/agenix), which writes files containing the decrypted passwords to a specific path on the system (with appropriate permissions). Due to the way Nix works, it is not safe to read the password file directly into the Arion config (Arion will generate a docker-compose.yaml file which will be readable to all users). Instead, the recommended way for applications to ingest secrets is for them to be read at runtime. ### Suggested solution I would like an option like `INVENTREE_ADMIN_PASSWORD_FILE` to be added to `add_user_on_startup()` which will read the contents of a file containing the password (mapped as a volume in the generated docker-compose.yaml) ### Describe alternatives you've considered I could of course just forgo this feature and set up the superuser account manually, but part of the appeal of NixOS is the ability to bootstrap and configure the entire system with just a few commands ### Examples of other systems From the [agenix readme](https://github.com/ryantm/agenix/blob/91220a701d0bf8055547620baa765b36d885db7a/README.md), shows the suggested way to access the secret (in this case defining linux user accounts) ``` { users.users.user1 = { isNormalUser = true; passwordFile = config.age.secrets.secret1.path; }; } ``` Instead of directly accessing a file, some NixOS modules instead take a generic shell command which prints the password to stdout, which gets run at the programs runtime: ``` programs.msmtp = { enable = true; accounts.default = { auth = true; tls = true; tls_starttls = true; tls_trust_file = "/etc/ssl/certs/ca-bundle.crt"; tls_certcheck = true; host = "smtp.office365.com"; port = 587; from = "asdf@outlook.com"; user = "asdf@outlook.com"; passwordeval = "cat ${config.age.secrets.alert-outlook.path}"; }; }; ``` ### Do you want to develop this? - [X] I want to develop this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `InvenTree/InvenTree/apps.py` Content: ``` 1 """AppConfig for inventree app.""" 2 3 import logging 4 from importlib import import_module 5 from pathlib import Path 6 7 from django.apps import AppConfig, apps 8 from django.conf import settings 9 from django.contrib.auth import get_user_model 10 from django.core.exceptions import AppRegistryNotReady 11 from django.db import transaction 12 from django.db.utils import IntegrityError, OperationalError 13 14 import InvenTree.conversion 15 import InvenTree.tasks 16 from InvenTree.config import get_setting 17 from InvenTree.ready import (canAppAccessDatabase, isInMainThread, 18 isInTestMode, isPluginRegistryLoaded) 19 20 logger = logging.getLogger("inventree") 21 22 23 class InvenTreeConfig(AppConfig): 24 """AppConfig for inventree app.""" 25 name = 'InvenTree' 26 27 def ready(self): 28 """Run system wide setup init steps. 29 30 Like: 31 - Checking if migrations should be run 32 - Cleaning up tasks 33 - Starting regular tasks 34 - Updating exchange rates 35 - Collecting notification methods 36 - Collecting state transition methods 37 - Adding users set in the current environment 38 """ 39 # skip loading if plugin registry is not loaded or we run in a background thread 40 if not isPluginRegistryLoaded() or not isInMainThread(): 41 return 42 43 if canAppAccessDatabase() or settings.TESTING_ENV: 44 45 self.remove_obsolete_tasks() 46 47 self.collect_tasks() 48 self.start_background_tasks() 49 50 if not isInTestMode(): # pragma: no cover 51 self.update_exchange_rates() 52 # Let the background worker check for migrations 53 InvenTree.tasks.offload_task(InvenTree.tasks.check_for_migrations) 54 55 self.collect_notification_methods() 56 self.collect_state_transition_methods() 57 58 # Ensure the unit registry is loaded 59 InvenTree.conversion.get_unit_registry() 60 61 if canAppAccessDatabase() or settings.TESTING_ENV: 62 self.add_user_on_startup() 63 64 def remove_obsolete_tasks(self): 65 """Delete any obsolete scheduled tasks in the database.""" 66 obsolete = [ 67 'InvenTree.tasks.delete_expired_sessions', 68 'stock.tasks.delete_old_stock_items', 69 ] 70 71 try: 72 from django_q.models import Schedule 73 except AppRegistryNotReady: # pragma: no cover 74 return 75 76 # Remove any existing obsolete tasks 77 try: 78 Schedule.objects.filter(func__in=obsolete).delete() 79 except Exception: 80 logger.exception("Failed to remove obsolete tasks - database not ready") 81 82 def start_background_tasks(self): 83 """Start all background tests for InvenTree.""" 84 logger.info("Starting background tasks...") 85 86 from django_q.models import Schedule 87 88 # List of existing scheduled tasks (in the database) 89 existing_tasks = {} 90 91 for existing_task in Schedule.objects.all(): 92 existing_tasks[existing_task.func] = existing_task 93 94 tasks_to_create = [] 95 tasks_to_update = [] 96 97 # List of collected tasks found with the @scheduled_task decorator 98 tasks = InvenTree.tasks.tasks.task_list 99 100 for task in tasks: 101 102 ref_name = f'{task.func.__module__}.{task.func.__name__}' 103 104 if ref_name in existing_tasks.keys(): 105 # This task already exists - update the details if required 106 existing_task = existing_tasks[ref_name] 107 108 if existing_task.schedule_type != task.interval or existing_task.minutes != task.minutes: 109 110 existing_task.schedule_type = task.interval 111 existing_task.minutes = task.minutes 112 tasks_to_update.append(existing_task) 113 114 else: 115 # This task does *not* already exist - create it 116 tasks_to_create.append( 117 Schedule( 118 name=ref_name, 119 func=ref_name, 120 schedule_type=task.interval, 121 minutes=task.minutes, 122 ) 123 ) 124 125 if len(tasks_to_create) > 0: 126 Schedule.objects.bulk_create(tasks_to_create) 127 logger.info("Created %s new scheduled tasks", len(tasks_to_create)) 128 129 if len(tasks_to_update) > 0: 130 Schedule.objects.bulk_update(tasks_to_update, ['schedule_type', 'minutes']) 131 logger.info("Updated %s existing scheduled tasks", len(tasks_to_update)) 132 133 # Put at least one task onto the background worker stack, 134 # which will be processed as soon as the worker comes online 135 InvenTree.tasks.offload_task( 136 InvenTree.tasks.heartbeat, 137 force_async=True, 138 ) 139 140 logger.info("Started %s scheduled background tasks...", len(tasks)) 141 142 def collect_tasks(self): 143 """Collect all background tasks.""" 144 for app_name, app in apps.app_configs.items(): 145 if app_name == 'InvenTree': 146 continue 147 148 if Path(app.path).joinpath('tasks.py').exists(): 149 try: 150 import_module(f'{app.module.__package__}.tasks') 151 except Exception as e: # pragma: no cover 152 logger.exception("Error loading tasks for %s: %s", app_name, e) 153 154 def update_exchange_rates(self): # pragma: no cover 155 """Update exchange rates each time the server is started. 156 157 Only runs *if*: 158 a) Have not been updated recently (one day or less) 159 b) The base exchange rate has been altered 160 """ 161 try: 162 from djmoney.contrib.exchange.models import ExchangeBackend 163 164 from common.settings import currency_code_default 165 from InvenTree.tasks import update_exchange_rates 166 except AppRegistryNotReady: # pragma: no cover 167 pass 168 169 base_currency = currency_code_default() 170 171 update = False 172 173 try: 174 backend = ExchangeBackend.objects.filter(name='InvenTreeExchange') 175 176 if backend.exists(): 177 backend = backend.first() 178 179 last_update = backend.last_update 180 181 if last_update is None: 182 # Never been updated 183 logger.info("Exchange backend has never been updated") 184 update = True 185 186 # Backend currency has changed? 187 if base_currency != backend.base_currency: 188 logger.info("Base currency changed from %s to %s", backend.base_currency, base_currency) 189 update = True 190 191 except (ExchangeBackend.DoesNotExist): 192 logger.info("Exchange backend not found - updating") 193 update = True 194 195 except Exception: 196 # Some other error - potentially the tables are not ready yet 197 return 198 199 if update: 200 try: 201 update_exchange_rates() 202 except OperationalError: 203 logger.warning("Could not update exchange rates - database not ready") 204 except Exception as e: 205 logger.exception("Error updating exchange rates: %s (%s)", e, type(e)) 206 207 def add_user_on_startup(self): 208 """Add a user on startup.""" 209 # stop if checks were already created 210 if hasattr(settings, 'USER_ADDED') and settings.USER_ADDED: 211 return 212 213 # get values 214 add_user = get_setting('INVENTREE_ADMIN_USER', 'admin_user') 215 add_email = get_setting('INVENTREE_ADMIN_EMAIL', 'admin_email') 216 add_password = get_setting('INVENTREE_ADMIN_PASSWORD', 'admin_password') 217 218 # check if all values are present 219 set_variables = 0 220 221 for tested_var in [add_user, add_email, add_password]: 222 if tested_var: 223 set_variables += 1 224 225 # no variable set -> do not try anything 226 if set_variables == 0: 227 settings.USER_ADDED = True 228 return 229 230 # not all needed variables set 231 if set_variables < 3: 232 logger.warning('Not all required settings for adding a user on startup are present:\nINVENTREE_ADMIN_USER, INVENTREE_ADMIN_EMAIL, INVENTREE_ADMIN_PASSWORD') 233 settings.USER_ADDED = True 234 return 235 236 # good to go -> create user 237 user = get_user_model() 238 try: 239 with transaction.atomic(): 240 if user.objects.filter(username=add_user).exists(): 241 logger.info("User %s already exists - skipping creation", add_user) 242 else: 243 new_user = user.objects.create_superuser(add_user, add_email, add_password) 244 logger.info('User %s was created!', str(new_user)) 245 except IntegrityError: 246 logger.warning('The user "%s" could not be created', add_user) 247 248 # do not try again 249 settings.USER_ADDED = True 250 251 def collect_notification_methods(self): 252 """Collect all notification methods.""" 253 from common.notifications import storage 254 255 storage.collect() 256 257 def collect_state_transition_methods(self): 258 """Collect all state transition methods.""" 259 from generic.states import storage 260 261 storage.collect() 262 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/InvenTree/InvenTree/apps.py b/InvenTree/InvenTree/apps.py --- a/InvenTree/InvenTree/apps.py +++ b/InvenTree/InvenTree/apps.py @@ -60,6 +60,7 @@ if canAppAccessDatabase() or settings.TESTING_ENV: self.add_user_on_startup() + self.add_user_from_file() def remove_obsolete_tasks(self): """Delete any obsolete scheduled tasks in the database.""" @@ -214,6 +215,7 @@ add_user = get_setting('INVENTREE_ADMIN_USER', 'admin_user') add_email = get_setting('INVENTREE_ADMIN_EMAIL', 'admin_email') add_password = get_setting('INVENTREE_ADMIN_PASSWORD', 'admin_password') + add_password_file = get_setting("INVENTREE_ADMIN_PASSWORD_FILE", "admin_password_file", None) # check if all values are present set_variables = 0 @@ -229,11 +231,21 @@ # not all needed variables set if set_variables < 3: - logger.warning('Not all required settings for adding a user on startup are present:\nINVENTREE_ADMIN_USER, INVENTREE_ADMIN_EMAIL, INVENTREE_ADMIN_PASSWORD') settings.USER_ADDED = True + + # if a password file is present, do not warn - will be handled later + if add_password_file: + return + logger.warning('Not all required settings for adding a user on startup are present:\nINVENTREE_ADMIN_USER, INVENTREE_ADMIN_EMAIL, INVENTREE_ADMIN_PASSWORD') return # good to go -> create user + self._create_admin_user(add_user, add_email, add_password) + + # do not try again + settings.USER_ADDED = True + + def _create_admin_user(self, add_user, add_email, add_password): user = get_user_model() try: with transaction.atomic(): @@ -245,8 +257,34 @@ except IntegrityError: logger.warning('The user "%s" could not be created', add_user) + def add_user_from_file(self): + """Add the superuser from a file.""" + # stop if checks were already created + if hasattr(settings, "USER_ADDED_FILE") and settings.USER_ADDED_FILE: + return + + # get values + add_password_file = get_setting( + "INVENTREE_ADMIN_PASSWORD_FILE", "admin_password_file", None + ) + + # no variable set -> do not try anything + if not add_password_file: + settings.USER_ADDED_FILE = True + return + + # check if file exists + add_password_file = Path(str(add_password_file)) + if not add_password_file.exists(): + logger.warning('The file "%s" does not exist', add_password_file) + settings.USER_ADDED_FILE = True + return + + # good to go -> create user + self._create_admin_user(get_setting('INVENTREE_ADMIN_USER', 'admin_user', 'admin'), get_setting('INVENTREE_ADMIN_EMAIL', 'admin_email', ''), add_password_file.read_text(encoding="utf-8")) + # do not try again - settings.USER_ADDED = True + settings.USER_ADDED_FILE = True def collect_notification_methods(self): """Collect all notification methods."""
{"golden_diff": "diff --git a/InvenTree/InvenTree/apps.py b/InvenTree/InvenTree/apps.py\n--- a/InvenTree/InvenTree/apps.py\n+++ b/InvenTree/InvenTree/apps.py\n@@ -60,6 +60,7 @@\n \n if canAppAccessDatabase() or settings.TESTING_ENV:\n self.add_user_on_startup()\n+ self.add_user_from_file()\n \n def remove_obsolete_tasks(self):\n \"\"\"Delete any obsolete scheduled tasks in the database.\"\"\"\n@@ -214,6 +215,7 @@\n add_user = get_setting('INVENTREE_ADMIN_USER', 'admin_user')\n add_email = get_setting('INVENTREE_ADMIN_EMAIL', 'admin_email')\n add_password = get_setting('INVENTREE_ADMIN_PASSWORD', 'admin_password')\n+ add_password_file = get_setting(\"INVENTREE_ADMIN_PASSWORD_FILE\", \"admin_password_file\", None)\n \n # check if all values are present\n set_variables = 0\n@@ -229,11 +231,21 @@\n \n # not all needed variables set\n if set_variables < 3:\n- logger.warning('Not all required settings for adding a user on startup are present:\\nINVENTREE_ADMIN_USER, INVENTREE_ADMIN_EMAIL, INVENTREE_ADMIN_PASSWORD')\n settings.USER_ADDED = True\n+\n+ # if a password file is present, do not warn - will be handled later\n+ if add_password_file:\n+ return\n+ logger.warning('Not all required settings for adding a user on startup are present:\\nINVENTREE_ADMIN_USER, INVENTREE_ADMIN_EMAIL, INVENTREE_ADMIN_PASSWORD')\n return\n \n # good to go -> create user\n+ self._create_admin_user(add_user, add_email, add_password)\n+\n+ # do not try again\n+ settings.USER_ADDED = True\n+\n+ def _create_admin_user(self, add_user, add_email, add_password):\n user = get_user_model()\n try:\n with transaction.atomic():\n@@ -245,8 +257,34 @@\n except IntegrityError:\n logger.warning('The user \"%s\" could not be created', add_user)\n \n+ def add_user_from_file(self):\n+ \"\"\"Add the superuser from a file.\"\"\"\n+ # stop if checks were already created\n+ if hasattr(settings, \"USER_ADDED_FILE\") and settings.USER_ADDED_FILE:\n+ return\n+\n+ # get values\n+ add_password_file = get_setting(\n+ \"INVENTREE_ADMIN_PASSWORD_FILE\", \"admin_password_file\", None\n+ )\n+\n+ # no variable set -> do not try anything\n+ if not add_password_file:\n+ settings.USER_ADDED_FILE = True\n+ return\n+\n+ # check if file exists\n+ add_password_file = Path(str(add_password_file))\n+ if not add_password_file.exists():\n+ logger.warning('The file \"%s\" does not exist', add_password_file)\n+ settings.USER_ADDED_FILE = True\n+ return\n+\n+ # good to go -> create user\n+ self._create_admin_user(get_setting('INVENTREE_ADMIN_USER', 'admin_user', 'admin'), get_setting('INVENTREE_ADMIN_EMAIL', 'admin_email', ''), add_password_file.read_text(encoding=\"utf-8\"))\n+\n # do not try again\n- settings.USER_ADDED = True\n+ settings.USER_ADDED_FILE = True\n \n def collect_notification_methods(self):\n \"\"\"Collect all notification methods.\"\"\"\n", "issue": "[FR] Support creating reading initial superuser account password from file\n### Please verify that this feature request has NOT been suggested before.\n\n- [X] I checked and didn't find a similar feature request\n\n### Problem statement\n\nI am running the docker-compose project on [NixOS](https://nixos.org/) through [Arion](https://docs.hercules-ci.com/arion/).\r\n\r\nI am managing all of my secrets/passwords through [agenix](https://github.com/ryantm/agenix), which writes files containing the decrypted passwords to a specific path on the system (with appropriate permissions).\r\nDue to the way Nix works, it is not safe to read the password file directly into the Arion config (Arion will generate a docker-compose.yaml file which will be readable to all users).\r\n\r\nInstead, the recommended way for applications to ingest secrets is for them to be read at runtime.\r\n\n\n### Suggested solution\n\nI would like an option like `INVENTREE_ADMIN_PASSWORD_FILE` to be added to `add_user_on_startup()` which will read the contents of a file containing the password (mapped as a volume in the generated docker-compose.yaml)\r\n\n\n### Describe alternatives you've considered\n\nI could of course just forgo this feature and set up the superuser account manually, but part of the appeal of NixOS is the ability to bootstrap and configure the entire system with just a few commands\n\n### Examples of other systems\n\nFrom the [agenix readme](https://github.com/ryantm/agenix/blob/91220a701d0bf8055547620baa765b36d885db7a/README.md), shows the suggested way to access the secret (in this case defining linux user accounts)\r\n```\r\n{\r\n users.users.user1 = {\r\n isNormalUser = true;\r\n passwordFile = config.age.secrets.secret1.path;\r\n };\r\n}\r\n```\r\n\r\nInstead of directly accessing a file, some NixOS modules instead take a generic shell command which prints the password to stdout, which gets run at the programs runtime:\r\n```\r\n programs.msmtp = {\r\n enable = true;\r\n accounts.default = {\r\n auth = true;\r\n tls = true;\r\n tls_starttls = true;\r\n tls_trust_file = \"/etc/ssl/certs/ca-bundle.crt\";\r\n tls_certcheck = true;\r\n host = \"smtp.office365.com\";\r\n port = 587;\r\n from = \"asdf@outlook.com\";\r\n user = \"asdf@outlook.com\";\r\n passwordeval = \"cat ${config.age.secrets.alert-outlook.path}\";\r\n };\r\n };\r\n```\n\n### Do you want to develop this?\n\n- [X] I want to develop this.\n", "before_files": [{"content": "\"\"\"AppConfig for inventree app.\"\"\"\n\nimport logging\nfrom importlib import import_module\nfrom pathlib import Path\n\nfrom django.apps import AppConfig, apps\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.db import transaction\nfrom django.db.utils import IntegrityError, OperationalError\n\nimport InvenTree.conversion\nimport InvenTree.tasks\nfrom InvenTree.config import get_setting\nfrom InvenTree.ready import (canAppAccessDatabase, isInMainThread,\n isInTestMode, isPluginRegistryLoaded)\n\nlogger = logging.getLogger(\"inventree\")\n\n\nclass InvenTreeConfig(AppConfig):\n \"\"\"AppConfig for inventree app.\"\"\"\n name = 'InvenTree'\n\n def ready(self):\n \"\"\"Run system wide setup init steps.\n\n Like:\n - Checking if migrations should be run\n - Cleaning up tasks\n - Starting regular tasks\n - Updating exchange rates\n - Collecting notification methods\n - Collecting state transition methods\n - Adding users set in the current environment\n \"\"\"\n # skip loading if plugin registry is not loaded or we run in a background thread\n if not isPluginRegistryLoaded() or not isInMainThread():\n return\n\n if canAppAccessDatabase() or settings.TESTING_ENV:\n\n self.remove_obsolete_tasks()\n\n self.collect_tasks()\n self.start_background_tasks()\n\n if not isInTestMode(): # pragma: no cover\n self.update_exchange_rates()\n # Let the background worker check for migrations\n InvenTree.tasks.offload_task(InvenTree.tasks.check_for_migrations)\n\n self.collect_notification_methods()\n self.collect_state_transition_methods()\n\n # Ensure the unit registry is loaded\n InvenTree.conversion.get_unit_registry()\n\n if canAppAccessDatabase() or settings.TESTING_ENV:\n self.add_user_on_startup()\n\n def remove_obsolete_tasks(self):\n \"\"\"Delete any obsolete scheduled tasks in the database.\"\"\"\n obsolete = [\n 'InvenTree.tasks.delete_expired_sessions',\n 'stock.tasks.delete_old_stock_items',\n ]\n\n try:\n from django_q.models import Schedule\n except AppRegistryNotReady: # pragma: no cover\n return\n\n # Remove any existing obsolete tasks\n try:\n Schedule.objects.filter(func__in=obsolete).delete()\n except Exception:\n logger.exception(\"Failed to remove obsolete tasks - database not ready\")\n\n def start_background_tasks(self):\n \"\"\"Start all background tests for InvenTree.\"\"\"\n logger.info(\"Starting background tasks...\")\n\n from django_q.models import Schedule\n\n # List of existing scheduled tasks (in the database)\n existing_tasks = {}\n\n for existing_task in Schedule.objects.all():\n existing_tasks[existing_task.func] = existing_task\n\n tasks_to_create = []\n tasks_to_update = []\n\n # List of collected tasks found with the @scheduled_task decorator\n tasks = InvenTree.tasks.tasks.task_list\n\n for task in tasks:\n\n ref_name = f'{task.func.__module__}.{task.func.__name__}'\n\n if ref_name in existing_tasks.keys():\n # This task already exists - update the details if required\n existing_task = existing_tasks[ref_name]\n\n if existing_task.schedule_type != task.interval or existing_task.minutes != task.minutes:\n\n existing_task.schedule_type = task.interval\n existing_task.minutes = task.minutes\n tasks_to_update.append(existing_task)\n\n else:\n # This task does *not* already exist - create it\n tasks_to_create.append(\n Schedule(\n name=ref_name,\n func=ref_name,\n schedule_type=task.interval,\n minutes=task.minutes,\n )\n )\n\n if len(tasks_to_create) > 0:\n Schedule.objects.bulk_create(tasks_to_create)\n logger.info(\"Created %s new scheduled tasks\", len(tasks_to_create))\n\n if len(tasks_to_update) > 0:\n Schedule.objects.bulk_update(tasks_to_update, ['schedule_type', 'minutes'])\n logger.info(\"Updated %s existing scheduled tasks\", len(tasks_to_update))\n\n # Put at least one task onto the background worker stack,\n # which will be processed as soon as the worker comes online\n InvenTree.tasks.offload_task(\n InvenTree.tasks.heartbeat,\n force_async=True,\n )\n\n logger.info(\"Started %s scheduled background tasks...\", len(tasks))\n\n def collect_tasks(self):\n \"\"\"Collect all background tasks.\"\"\"\n for app_name, app in apps.app_configs.items():\n if app_name == 'InvenTree':\n continue\n\n if Path(app.path).joinpath('tasks.py').exists():\n try:\n import_module(f'{app.module.__package__}.tasks')\n except Exception as e: # pragma: no cover\n logger.exception(\"Error loading tasks for %s: %s\", app_name, e)\n\n def update_exchange_rates(self): # pragma: no cover\n \"\"\"Update exchange rates each time the server is started.\n\n Only runs *if*:\n a) Have not been updated recently (one day or less)\n b) The base exchange rate has been altered\n \"\"\"\n try:\n from djmoney.contrib.exchange.models import ExchangeBackend\n\n from common.settings import currency_code_default\n from InvenTree.tasks import update_exchange_rates\n except AppRegistryNotReady: # pragma: no cover\n pass\n\n base_currency = currency_code_default()\n\n update = False\n\n try:\n backend = ExchangeBackend.objects.filter(name='InvenTreeExchange')\n\n if backend.exists():\n backend = backend.first()\n\n last_update = backend.last_update\n\n if last_update is None:\n # Never been updated\n logger.info(\"Exchange backend has never been updated\")\n update = True\n\n # Backend currency has changed?\n if base_currency != backend.base_currency:\n logger.info(\"Base currency changed from %s to %s\", backend.base_currency, base_currency)\n update = True\n\n except (ExchangeBackend.DoesNotExist):\n logger.info(\"Exchange backend not found - updating\")\n update = True\n\n except Exception:\n # Some other error - potentially the tables are not ready yet\n return\n\n if update:\n try:\n update_exchange_rates()\n except OperationalError:\n logger.warning(\"Could not update exchange rates - database not ready\")\n except Exception as e:\n logger.exception(\"Error updating exchange rates: %s (%s)\", e, type(e))\n\n def add_user_on_startup(self):\n \"\"\"Add a user on startup.\"\"\"\n # stop if checks were already created\n if hasattr(settings, 'USER_ADDED') and settings.USER_ADDED:\n return\n\n # get values\n add_user = get_setting('INVENTREE_ADMIN_USER', 'admin_user')\n add_email = get_setting('INVENTREE_ADMIN_EMAIL', 'admin_email')\n add_password = get_setting('INVENTREE_ADMIN_PASSWORD', 'admin_password')\n\n # check if all values are present\n set_variables = 0\n\n for tested_var in [add_user, add_email, add_password]:\n if tested_var:\n set_variables += 1\n\n # no variable set -> do not try anything\n if set_variables == 0:\n settings.USER_ADDED = True\n return\n\n # not all needed variables set\n if set_variables < 3:\n logger.warning('Not all required settings for adding a user on startup are present:\\nINVENTREE_ADMIN_USER, INVENTREE_ADMIN_EMAIL, INVENTREE_ADMIN_PASSWORD')\n settings.USER_ADDED = True\n return\n\n # good to go -> create user\n user = get_user_model()\n try:\n with transaction.atomic():\n if user.objects.filter(username=add_user).exists():\n logger.info(\"User %s already exists - skipping creation\", add_user)\n else:\n new_user = user.objects.create_superuser(add_user, add_email, add_password)\n logger.info('User %s was created!', str(new_user))\n except IntegrityError:\n logger.warning('The user \"%s\" could not be created', add_user)\n\n # do not try again\n settings.USER_ADDED = True\n\n def collect_notification_methods(self):\n \"\"\"Collect all notification methods.\"\"\"\n from common.notifications import storage\n\n storage.collect()\n\n def collect_state_transition_methods(self):\n \"\"\"Collect all state transition methods.\"\"\"\n from generic.states import storage\n\n storage.collect()\n", "path": "InvenTree/InvenTree/apps.py"}], "after_files": [{"content": "\"\"\"AppConfig for inventree app.\"\"\"\n\nimport logging\nfrom importlib import import_module\nfrom pathlib import Path\n\nfrom django.apps import AppConfig, apps\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.db import transaction\nfrom django.db.utils import IntegrityError, OperationalError\n\nimport InvenTree.conversion\nimport InvenTree.tasks\nfrom InvenTree.config import get_setting\nfrom InvenTree.ready import (canAppAccessDatabase, isInMainThread,\n isInTestMode, isPluginRegistryLoaded)\n\nlogger = logging.getLogger(\"inventree\")\n\n\nclass InvenTreeConfig(AppConfig):\n \"\"\"AppConfig for inventree app.\"\"\"\n name = 'InvenTree'\n\n def ready(self):\n \"\"\"Run system wide setup init steps.\n\n Like:\n - Checking if migrations should be run\n - Cleaning up tasks\n - Starting regular tasks\n - Updating exchange rates\n - Collecting notification methods\n - Collecting state transition methods\n - Adding users set in the current environment\n \"\"\"\n # skip loading if plugin registry is not loaded or we run in a background thread\n if not isPluginRegistryLoaded() or not isInMainThread():\n return\n\n if canAppAccessDatabase() or settings.TESTING_ENV:\n\n self.remove_obsolete_tasks()\n\n self.collect_tasks()\n self.start_background_tasks()\n\n if not isInTestMode(): # pragma: no cover\n self.update_exchange_rates()\n # Let the background worker check for migrations\n InvenTree.tasks.offload_task(InvenTree.tasks.check_for_migrations)\n\n self.collect_notification_methods()\n self.collect_state_transition_methods()\n\n # Ensure the unit registry is loaded\n InvenTree.conversion.get_unit_registry()\n\n if canAppAccessDatabase() or settings.TESTING_ENV:\n self.add_user_on_startup()\n self.add_user_from_file()\n\n def remove_obsolete_tasks(self):\n \"\"\"Delete any obsolete scheduled tasks in the database.\"\"\"\n obsolete = [\n 'InvenTree.tasks.delete_expired_sessions',\n 'stock.tasks.delete_old_stock_items',\n ]\n\n try:\n from django_q.models import Schedule\n except AppRegistryNotReady: # pragma: no cover\n return\n\n # Remove any existing obsolete tasks\n try:\n Schedule.objects.filter(func__in=obsolete).delete()\n except Exception:\n logger.exception(\"Failed to remove obsolete tasks - database not ready\")\n\n def start_background_tasks(self):\n \"\"\"Start all background tests for InvenTree.\"\"\"\n logger.info(\"Starting background tasks...\")\n\n from django_q.models import Schedule\n\n # List of existing scheduled tasks (in the database)\n existing_tasks = {}\n\n for existing_task in Schedule.objects.all():\n existing_tasks[existing_task.func] = existing_task\n\n tasks_to_create = []\n tasks_to_update = []\n\n # List of collected tasks found with the @scheduled_task decorator\n tasks = InvenTree.tasks.tasks.task_list\n\n for task in tasks:\n\n ref_name = f'{task.func.__module__}.{task.func.__name__}'\n\n if ref_name in existing_tasks.keys():\n # This task already exists - update the details if required\n existing_task = existing_tasks[ref_name]\n\n if existing_task.schedule_type != task.interval or existing_task.minutes != task.minutes:\n\n existing_task.schedule_type = task.interval\n existing_task.minutes = task.minutes\n tasks_to_update.append(existing_task)\n\n else:\n # This task does *not* already exist - create it\n tasks_to_create.append(\n Schedule(\n name=ref_name,\n func=ref_name,\n schedule_type=task.interval,\n minutes=task.minutes,\n )\n )\n\n if len(tasks_to_create) > 0:\n Schedule.objects.bulk_create(tasks_to_create)\n logger.info(\"Created %s new scheduled tasks\", len(tasks_to_create))\n\n if len(tasks_to_update) > 0:\n Schedule.objects.bulk_update(tasks_to_update, ['schedule_type', 'minutes'])\n logger.info(\"Updated %s existing scheduled tasks\", len(tasks_to_update))\n\n # Put at least one task onto the background worker stack,\n # which will be processed as soon as the worker comes online\n InvenTree.tasks.offload_task(\n InvenTree.tasks.heartbeat,\n force_async=True,\n )\n\n logger.info(\"Started %s scheduled background tasks...\", len(tasks))\n\n def collect_tasks(self):\n \"\"\"Collect all background tasks.\"\"\"\n for app_name, app in apps.app_configs.items():\n if app_name == 'InvenTree':\n continue\n\n if Path(app.path).joinpath('tasks.py').exists():\n try:\n import_module(f'{app.module.__package__}.tasks')\n except Exception as e: # pragma: no cover\n logger.exception(\"Error loading tasks for %s: %s\", app_name, e)\n\n def update_exchange_rates(self): # pragma: no cover\n \"\"\"Update exchange rates each time the server is started.\n\n Only runs *if*:\n a) Have not been updated recently (one day or less)\n b) The base exchange rate has been altered\n \"\"\"\n try:\n from djmoney.contrib.exchange.models import ExchangeBackend\n\n from common.settings import currency_code_default\n from InvenTree.tasks import update_exchange_rates\n except AppRegistryNotReady: # pragma: no cover\n pass\n\n base_currency = currency_code_default()\n\n update = False\n\n try:\n backend = ExchangeBackend.objects.filter(name='InvenTreeExchange')\n\n if backend.exists():\n backend = backend.first()\n\n last_update = backend.last_update\n\n if last_update is None:\n # Never been updated\n logger.info(\"Exchange backend has never been updated\")\n update = True\n\n # Backend currency has changed?\n if base_currency != backend.base_currency:\n logger.info(\"Base currency changed from %s to %s\", backend.base_currency, base_currency)\n update = True\n\n except (ExchangeBackend.DoesNotExist):\n logger.info(\"Exchange backend not found - updating\")\n update = True\n\n except Exception:\n # Some other error - potentially the tables are not ready yet\n return\n\n if update:\n try:\n update_exchange_rates()\n except OperationalError:\n logger.warning(\"Could not update exchange rates - database not ready\")\n except Exception as e:\n logger.exception(\"Error updating exchange rates: %s (%s)\", e, type(e))\n\n def add_user_on_startup(self):\n \"\"\"Add a user on startup.\"\"\"\n # stop if checks were already created\n if hasattr(settings, 'USER_ADDED') and settings.USER_ADDED:\n return\n\n # get values\n add_user = get_setting('INVENTREE_ADMIN_USER', 'admin_user')\n add_email = get_setting('INVENTREE_ADMIN_EMAIL', 'admin_email')\n add_password = get_setting('INVENTREE_ADMIN_PASSWORD', 'admin_password')\n add_password_file = get_setting(\"INVENTREE_ADMIN_PASSWORD_FILE\", \"admin_password_file\", None)\n\n # check if all values are present\n set_variables = 0\n\n for tested_var in [add_user, add_email, add_password]:\n if tested_var:\n set_variables += 1\n\n # no variable set -> do not try anything\n if set_variables == 0:\n settings.USER_ADDED = True\n return\n\n # not all needed variables set\n if set_variables < 3:\n settings.USER_ADDED = True\n\n # if a password file is present, do not warn - will be handled later\n if add_password_file:\n return\n logger.warning('Not all required settings for adding a user on startup are present:\\nINVENTREE_ADMIN_USER, INVENTREE_ADMIN_EMAIL, INVENTREE_ADMIN_PASSWORD')\n return\n\n # good to go -> create user\n self._create_admin_user(add_user, add_email, add_password)\n\n # do not try again\n settings.USER_ADDED = True\n\n def _create_admin_user(self, add_user, add_email, add_password):\n user = get_user_model()\n try:\n with transaction.atomic():\n if user.objects.filter(username=add_user).exists():\n logger.info(\"User %s already exists - skipping creation\", add_user)\n else:\n new_user = user.objects.create_superuser(add_user, add_email, add_password)\n logger.info('User %s was created!', str(new_user))\n except IntegrityError:\n logger.warning('The user \"%s\" could not be created', add_user)\n\n def add_user_from_file(self):\n \"\"\"Add the superuser from a file.\"\"\"\n # stop if checks were already created\n if hasattr(settings, \"USER_ADDED_FILE\") and settings.USER_ADDED_FILE:\n return\n\n # get values\n add_password_file = get_setting(\n \"INVENTREE_ADMIN_PASSWORD_FILE\", \"admin_password_file\", None\n )\n\n # no variable set -> do not try anything\n if not add_password_file:\n settings.USER_ADDED_FILE = True\n return\n\n # check if file exists\n add_password_file = Path(str(add_password_file))\n if not add_password_file.exists():\n logger.warning('The file \"%s\" does not exist', add_password_file)\n settings.USER_ADDED_FILE = True\n return\n\n # good to go -> create user\n self._create_admin_user(get_setting('INVENTREE_ADMIN_USER', 'admin_user', 'admin'), get_setting('INVENTREE_ADMIN_EMAIL', 'admin_email', ''), add_password_file.read_text(encoding=\"utf-8\"))\n\n # do not try again\n settings.USER_ADDED_FILE = True\n\n def collect_notification_methods(self):\n \"\"\"Collect all notification methods.\"\"\"\n from common.notifications import storage\n\n storage.collect()\n\n def collect_state_transition_methods(self):\n \"\"\"Collect all state transition methods.\"\"\"\n from generic.states import storage\n\n storage.collect()\n", "path": "InvenTree/InvenTree/apps.py"}]}
3,339
768
gh_patches_debug_27038
rasdani/github-patches
git_diff
open-mmlab__mmocr-74
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Webcam demo script is not working properly **Checklist** 1. I have searched related issues but cannot get the expected help: Yes 2. The bug has not been fixed in the latest version: Yes **Describe the bug** The current model_inference function expects to receive a model and a path to an image as inputs but the webcam demo scripts tries to call it with a model and a numpy array (the return value from cv2.VideoCapture.read()). This raises an assertion error due to type mismatch (np.ndarray vs str) **Reproduction** 1. What command or script did you run? ```none python demo/webcam_demo.py ``` 2. Did you make any modifications on the code or config? Did you understand what you have modified? No. 3. What dataset did you use? **Environment** 1. Please run `python mmocr/utils/collect_env.py` to collect necessary environment information and paste it here. sys.platform: linux Python: 3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) [GCC 9.3.0] CUDA available: True GPU 0: GeForce GTX 1050 Ti CUDA_HOME: /usr/local/cuda NVCC: Build cuda_11.1.TC455_06.29190527_0 GCC: gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0 PyTorch: 1.5.0 PyTorch compiling details: PyTorch built with: - GCC 7.3 - C++ Version: 201402 - Intel(R) Math Kernel Library Version 2020.0.4 Product Build 20200917 for Intel(R) 64 architecture applications - Intel(R) MKL-DNN v0.21.1 (Git Hash 7d2fd500bc78936d1d648ca713b901012f470dbc) - OpenMP 201511 (a.k.a. OpenMP 4.5) - NNPACK is enabled - CPU capability usage: AVX2 - CUDA Runtime 10.1 - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_37,code=compute_37 - CuDNN 7.6.3 - Magma 2.5.2 - Build settings: BLAS=MKL, BUILD_TYPE=Release, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -fopenmp -DNDEBUG -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DUSE_INTERNAL_THREADPOOL_IMPL -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, USE_CUDA=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_STATIC_DISPATCH=OFF, TorchVision: 0.6.0a0+82fd1c8 OpenCV: 4.5.1 MMCV: 1.2.7 MMCV Compiler: GCC 9.3 MMCV CUDA Compiler: not available MMOCR: 0.1.0+344cc9a 2. You may add addition that may be helpful for locating the problem, such as - How you installed PyTorch: conda **Error traceback** ```none Use load_from_local loader Press "Esc", "q" or "Q" to exit. Traceback (most recent call last): File "demo/webcam_demo.py", line 52, in <module> main() File "demo/webcam_demo.py", line 41, in main result = model_inference(model, img) File "/home/sbugallo/Projects/mmocr/mmocr/apis/inference.py", line 18, in model_inference assert isinstance(img, str) AssertionError ``` **Bug fix** The inference method should accept the following types as input image(s) (str/ndarray or list[str/ndarray] or tuple[str/ndarray]) like in MMDetection --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmocr/apis/inference.py` Content: ``` 1 import torch 2 from mmcv.ops import RoIPool 3 from mmcv.parallel import collate, scatter 4 5 from mmdet.datasets.pipelines import Compose 6 7 8 def model_inference(model, img): 9 """Inference image(s) with the detector. 10 11 Args: 12 model (nn.Module): The loaded detector. 13 imgs (str): Image files. 14 15 Returns: 16 result (dict): Detection results. 17 """ 18 assert isinstance(img, str) 19 20 cfg = model.cfg 21 device = next(model.parameters()).device # model device 22 data = dict(img_info=dict(filename=img), img_prefix=None) 23 # build the data pipeline 24 test_pipeline = Compose(cfg.data.test.pipeline) 25 data = test_pipeline(data) 26 data = collate([data], samples_per_gpu=1) 27 28 # process img_metas 29 if isinstance(data['img_metas'], list): 30 data['img_metas'] = data['img_metas'][0].data 31 else: 32 data['img_metas'] = data['img_metas'].data[0] 33 34 if next(model.parameters()).is_cuda: 35 # scatter to specified GPU 36 data = scatter(data, [device])[0] 37 else: 38 for m in model.modules(): 39 assert not isinstance( 40 m, RoIPool 41 ), 'CPU inference with RoIPool is not supported currently.' 42 43 # forward the model 44 with torch.no_grad(): 45 result = model(return_loss=False, rescale=True, **data)[0] 46 return result 47 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmocr/apis/inference.py b/mmocr/apis/inference.py --- a/mmocr/apis/inference.py +++ b/mmocr/apis/inference.py @@ -1,7 +1,9 @@ +import numpy as np import torch from mmcv.ops import RoIPool from mmcv.parallel import collate, scatter +from mmdet.datasets import replace_ImageToTensor from mmdet.datasets.pipelines import Compose @@ -10,18 +12,33 @@ Args: model (nn.Module): The loaded detector. - imgs (str): Image files. + imgs (str/ndarray): Image files. Returns: result (dict): Detection results. """ - assert isinstance(img, str) + + assert isinstance(img, (str, np.ndarray)) cfg = model.cfg device = next(model.parameters()).device # model device - data = dict(img_info=dict(filename=img), img_prefix=None) - # build the data pipeline + + if isinstance(img, np.ndarray): + cfg = cfg.copy() + # set loading pipeline type + cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' + + cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) + + if isinstance(img, np.ndarray): + # directly add img + data = dict(img=img) + else: + # add information into dict + data = dict(img_info=dict(filename=img), img_prefix=None) + + # build the data pipeline data = test_pipeline(data) data = collate([data], samples_per_gpu=1)
{"golden_diff": "diff --git a/mmocr/apis/inference.py b/mmocr/apis/inference.py\n--- a/mmocr/apis/inference.py\n+++ b/mmocr/apis/inference.py\n@@ -1,7 +1,9 @@\n+import numpy as np\n import torch\n from mmcv.ops import RoIPool\n from mmcv.parallel import collate, scatter\n \n+from mmdet.datasets import replace_ImageToTensor\n from mmdet.datasets.pipelines import Compose\n \n \n@@ -10,18 +12,33 @@\n \n Args:\n model (nn.Module): The loaded detector.\n- imgs (str): Image files.\n+ imgs (str/ndarray): Image files.\n \n Returns:\n result (dict): Detection results.\n \"\"\"\n- assert isinstance(img, str)\n+\n+ assert isinstance(img, (str, np.ndarray))\n \n cfg = model.cfg\n device = next(model.parameters()).device # model device\n- data = dict(img_info=dict(filename=img), img_prefix=None)\n- # build the data pipeline\n+\n+ if isinstance(img, np.ndarray):\n+ cfg = cfg.copy()\n+ # set loading pipeline type\n+ cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'\n+\n+ cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)\n test_pipeline = Compose(cfg.data.test.pipeline)\n+\n+ if isinstance(img, np.ndarray):\n+ # directly add img\n+ data = dict(img=img)\n+ else:\n+ # add information into dict\n+ data = dict(img_info=dict(filename=img), img_prefix=None)\n+\n+ # build the data pipeline\n data = test_pipeline(data)\n data = collate([data], samples_per_gpu=1)\n", "issue": "Webcam demo script is not working properly\n**Checklist**\r\n\r\n1. I have searched related issues but cannot get the expected help: Yes\r\n2. The bug has not been fixed in the latest version: Yes\r\n\r\n**Describe the bug**\r\n\r\nThe current model_inference function expects to receive a model and a path to an image as inputs but the webcam demo scripts tries to call it with a model and a numpy array (the return value from cv2.VideoCapture.read()). \r\n\r\nThis raises an assertion error due to type mismatch (np.ndarray vs str)\r\n\r\n**Reproduction**\r\n\r\n1. What command or script did you run?\r\n\r\n```none\r\npython demo/webcam_demo.py\r\n```\r\n\r\n2. Did you make any modifications on the code or config? Did you understand what you have modified?\r\n\r\nNo.\r\n\r\n3. What dataset did you use?\r\n\r\n**Environment**\r\n\r\n1. Please run `python mmocr/utils/collect_env.py` to collect necessary environment information and paste it here.\r\n\r\nsys.platform: linux\r\nPython: 3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) [GCC 9.3.0]\r\nCUDA available: True\r\nGPU 0: GeForce GTX 1050 Ti\r\nCUDA_HOME: /usr/local/cuda\r\nNVCC: Build cuda_11.1.TC455_06.29190527_0\r\nGCC: gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\r\nPyTorch: 1.5.0\r\nPyTorch compiling details: PyTorch built with:\r\n - GCC 7.3\r\n - C++ Version: 201402\r\n - Intel(R) Math Kernel Library Version 2020.0.4 Product Build 20200917 for Intel(R) 64 architecture applications\r\n - Intel(R) MKL-DNN v0.21.1 (Git Hash 7d2fd500bc78936d1d648ca713b901012f470dbc)\r\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\r\n - NNPACK is enabled\r\n - CPU capability usage: AVX2\r\n - CUDA Runtime 10.1\r\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_37,code=compute_37\r\n - CuDNN 7.6.3\r\n - Magma 2.5.2\r\n - Build settings: BLAS=MKL, BUILD_TYPE=Release, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -fopenmp -DNDEBUG -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DUSE_INTERNAL_THREADPOOL_IMPL -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, USE_CUDA=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_STATIC_DISPATCH=OFF, \r\n\r\nTorchVision: 0.6.0a0+82fd1c8\r\nOpenCV: 4.5.1\r\nMMCV: 1.2.7\r\nMMCV Compiler: GCC 9.3\r\nMMCV CUDA Compiler: not available\r\nMMOCR: 0.1.0+344cc9a\r\n\r\n2. You may add addition that may be helpful for locating the problem, such as\r\n - How you installed PyTorch: conda\r\n\r\n**Error traceback**\r\n\r\n```none\r\nUse load_from_local loader\r\nPress \"Esc\", \"q\" or \"Q\" to exit.\r\nTraceback (most recent call last):\r\n File \"demo/webcam_demo.py\", line 52, in <module>\r\n main()\r\n File \"demo/webcam_demo.py\", line 41, in main\r\n result = model_inference(model, img)\r\n File \"/home/sbugallo/Projects/mmocr/mmocr/apis/inference.py\", line 18, in model_inference\r\n assert isinstance(img, str)\r\nAssertionError\r\n```\r\n\r\n**Bug fix**\r\n\r\nThe inference method should accept the following types as input image(s) (str/ndarray or list[str/ndarray] or tuple[str/ndarray]) like in MMDetection\n", "before_files": [{"content": "import torch\nfrom mmcv.ops import RoIPool\nfrom mmcv.parallel import collate, scatter\n\nfrom mmdet.datasets.pipelines import Compose\n\n\ndef model_inference(model, img):\n \"\"\"Inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n imgs (str): Image files.\n\n Returns:\n result (dict): Detection results.\n \"\"\"\n assert isinstance(img, str)\n\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n data = dict(img_info=dict(filename=img), img_prefix=None)\n # build the data pipeline\n test_pipeline = Compose(cfg.data.test.pipeline)\n data = test_pipeline(data)\n data = collate([data], samples_per_gpu=1)\n\n # process img_metas\n if isinstance(data['img_metas'], list):\n data['img_metas'] = data['img_metas'][0].data\n else:\n data['img_metas'] = data['img_metas'].data[0]\n\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device])[0]\n else:\n for m in model.modules():\n assert not isinstance(\n m, RoIPool\n ), 'CPU inference with RoIPool is not supported currently.'\n\n # forward the model\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)[0]\n return result\n", "path": "mmocr/apis/inference.py"}], "after_files": [{"content": "import numpy as np\nimport torch\nfrom mmcv.ops import RoIPool\nfrom mmcv.parallel import collate, scatter\n\nfrom mmdet.datasets import replace_ImageToTensor\nfrom mmdet.datasets.pipelines import Compose\n\n\ndef model_inference(model, img):\n \"\"\"Inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n imgs (str/ndarray): Image files.\n\n Returns:\n result (dict): Detection results.\n \"\"\"\n\n assert isinstance(img, (str, np.ndarray))\n\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n\n if isinstance(img, np.ndarray):\n cfg = cfg.copy()\n # set loading pipeline type\n cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'\n\n cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)\n test_pipeline = Compose(cfg.data.test.pipeline)\n\n if isinstance(img, np.ndarray):\n # directly add img\n data = dict(img=img)\n else:\n # add information into dict\n data = dict(img_info=dict(filename=img), img_prefix=None)\n\n # build the data pipeline\n data = test_pipeline(data)\n data = collate([data], samples_per_gpu=1)\n\n # process img_metas\n if isinstance(data['img_metas'], list):\n data['img_metas'] = data['img_metas'][0].data\n else:\n data['img_metas'] = data['img_metas'].data[0]\n\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device])[0]\n else:\n for m in model.modules():\n assert not isinstance(\n m, RoIPool\n ), 'CPU inference with RoIPool is not supported currently.'\n\n # forward the model\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)[0]\n return result\n", "path": "mmocr/apis/inference.py"}]}
1,975
378
gh_patches_debug_61040
rasdani/github-patches
git_diff
google-research__text-to-text-transfer-transformer-480
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Running hf_model.py I am trying to run your models with [`hf_model`](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/models/hf_model.py). The current blocker issue is that the code is using `num_parallel_calls` in [in multiple places](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/models/hf_model.py#L128), however, this function seems to be [deprecated](https://github.com/google-research/text-to-text-transfer-transformer/blob/838157d433995473e96b773c9c761b6aadf01e37/t5/data/preprocessors.py#L2651). Wondering if there is a replacement for this function I can use as a quick fix. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `t5/version.py` Content: ``` 1 # Copyright 2020 The T5 Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # Lint as: python3 16 r"""Separate file for storing the current version of T5. 17 18 Stored in a separate file so that setup.py can reference the version without 19 pulling in all the dependencies in __init__.py. 20 """ 21 __version__ = '0.7.0' 22 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/t5/version.py b/t5/version.py --- a/t5/version.py +++ b/t5/version.py @@ -18,4 +18,4 @@ Stored in a separate file so that setup.py can reference the version without pulling in all the dependencies in __init__.py. """ -__version__ = '0.7.0' +__version__ = '0.7.1'
{"golden_diff": "diff --git a/t5/version.py b/t5/version.py\n--- a/t5/version.py\n+++ b/t5/version.py\n@@ -18,4 +18,4 @@\n Stored in a separate file so that setup.py can reference the version without\n pulling in all the dependencies in __init__.py.\n \"\"\"\n-__version__ = '0.7.0'\n+__version__ = '0.7.1'\n", "issue": "Running hf_model.py \nI am trying to run your models with [`hf_model`](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/models/hf_model.py). The current blocker issue is that the code is using `num_parallel_calls` in [in multiple places](https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/models/hf_model.py#L128), however, this function seems to be [deprecated](https://github.com/google-research/text-to-text-transfer-transformer/blob/838157d433995473e96b773c9c761b6aadf01e37/t5/data/preprocessors.py#L2651).\r\n\r\nWondering if there is a replacement for this function I can use as a quick fix. \n", "before_files": [{"content": "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Separate file for storing the current version of T5.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n__version__ = '0.7.0'\n", "path": "t5/version.py"}], "after_files": [{"content": "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Separate file for storing the current version of T5.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n__version__ = '0.7.1'\n", "path": "t5/version.py"}]}
673
91
gh_patches_debug_44814
rasdani/github-patches
git_diff
tiangolo__fastapi-437
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OpenAPI: HTTP_422 response does not use custom media_type **Describe the bug** FastAPI automatically adds an HTTP_422 response to all paths in the OpenAPI specification that have parameters or request body. This response does not use the media_type of response_class if any custom defined. Furthermore, it overwrites any error object format with the default one. **To Reproduce** Create a path with parameters and add custom response_class to decorator. Add custom exception handlers that reformat the default error responses as per your liking. Then observe generated openapi.json ```python from fastapi import FastAPI, HTTPException from fastapi.exceptions import RequestValidationError from starlette import status from starlette.responses import JSONResponse from . import schemas app = FastAPI() class JsonApiResponse(JSONResponse): media_type = 'application/vnd+json.api' @app.exception_handler(HTTPException) async def http_exception_handler(request, exc: HTTPException) -> JsonApiResponse: headers = getattr(exc, "headers", None) content = schemas.ErrorResponse(errors=[dict(title="Bad request", detail=exc.detail, status=exc.status_code)]).dict() status_code = exc.status_code if headers: return JsonApiResponse(content=content, status_code=status_code, headers=headers) else: return JsonApiResponse(content=content, status_code=status_code) @app.exception_handler(RequestValidationError) async def request_validation_exception_handler(request, exc: RequestValidationError) -> JsonApiResponse: http422 = status.HTTP_422_UNPROCESSABLE_ENTITY return JsonApiResponse( content=schemas.ErrorResponse(errors=[ dict(title=err['type'], detail=err['msg'], source='/'.join(err['loc']), status=http422) for err in exc.errors() ]).dict(), status_code=http422, ) @app.post('/customers', status_code=status.HTTP_201_CREATED, response_model=schemas.CustomerDetailsResponse, response_class=JsonApiResponse, ) def customer_create(data: schemas.Customer = Body(..., media_type='application/vnd+json.api', embed=True)): created_customer = {**data.dict(), **{'id': '1'}} return {'data': created_customer} ``` The openapi.json will include the unwanted 422 response with the FastAPI default error object definitions: ```yaml # ... '422': description: Validation Error content: application/json: schema: "$ref": "#/components/schemas/HTTPValidationError" ``` **Expected behavior** At least, the media_type of the response_class should be respected. But the best would be if the 422 would not be added to the specification unless requested via the path decorator. Or if the 422 definitions of mine were respected. ```python @app.post('/customers', status_code=status.HTTP_201_CREATED, response_model=schemas.CustomerDetailsResponse, response_class=JsonApiResponse, responses={ 422: { 'model': schemas.ErrorResponse }, }) data: schemas.Customer = Body(..., media_type='application/vnd+json.api', embed=True)): pass ``` **Environment:** - OS: masOS 10.14.6 - Python: 3.6.5 - FastAPI: 0.35.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fastapi/openapi/utils.py` Content: ``` 1 import http.client 2 from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, cast 3 4 from fastapi import routing 5 from fastapi.dependencies.models import Dependant 6 from fastapi.dependencies.utils import get_flat_dependant 7 from fastapi.encoders import jsonable_encoder 8 from fastapi.openapi.constants import METHODS_WITH_BODY, REF_PREFIX 9 from fastapi.openapi.models import OpenAPI 10 from fastapi.params import Body, Param 11 from fastapi.utils import ( 12 generate_operation_id_for_path, 13 get_flat_models_from_routes, 14 get_model_definitions, 15 ) 16 from pydantic.fields import Field 17 from pydantic.schema import field_schema, get_model_name_map 18 from pydantic.utils import lenient_issubclass 19 from starlette.responses import JSONResponse 20 from starlette.routing import BaseRoute 21 from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY 22 23 validation_error_definition = { 24 "title": "ValidationError", 25 "type": "object", 26 "properties": { 27 "loc": {"title": "Location", "type": "array", "items": {"type": "string"}}, 28 "msg": {"title": "Message", "type": "string"}, 29 "type": {"title": "Error Type", "type": "string"}, 30 }, 31 "required": ["loc", "msg", "type"], 32 } 33 34 validation_error_response_definition = { 35 "title": "HTTPValidationError", 36 "type": "object", 37 "properties": { 38 "detail": { 39 "title": "Detail", 40 "type": "array", 41 "items": {"$ref": REF_PREFIX + "ValidationError"}, 42 } 43 }, 44 } 45 46 47 def get_openapi_params(dependant: Dependant) -> List[Field]: 48 flat_dependant = get_flat_dependant(dependant) 49 return ( 50 flat_dependant.path_params 51 + flat_dependant.query_params 52 + flat_dependant.header_params 53 + flat_dependant.cookie_params 54 ) 55 56 57 def get_openapi_security_definitions(flat_dependant: Dependant) -> Tuple[Dict, List]: 58 security_definitions = {} 59 operation_security = [] 60 for security_requirement in flat_dependant.security_requirements: 61 security_definition = jsonable_encoder( 62 security_requirement.security_scheme.model, 63 by_alias=True, 64 include_none=False, 65 ) 66 security_name = security_requirement.security_scheme.scheme_name 67 security_definitions[security_name] = security_definition 68 operation_security.append({security_name: security_requirement.scopes}) 69 return security_definitions, operation_security 70 71 72 def get_openapi_operation_parameters( 73 all_route_params: Sequence[Field] 74 ) -> Tuple[Dict[str, Dict], List[Dict[str, Any]]]: 75 definitions: Dict[str, Dict] = {} 76 parameters = [] 77 for param in all_route_params: 78 schema = param.schema 79 schema = cast(Param, schema) 80 if "ValidationError" not in definitions: 81 definitions["ValidationError"] = validation_error_definition 82 definitions["HTTPValidationError"] = validation_error_response_definition 83 parameter = { 84 "name": param.alias, 85 "in": schema.in_.value, 86 "required": param.required, 87 "schema": field_schema(param, model_name_map={})[0], 88 } 89 if schema.description: 90 parameter["description"] = schema.description 91 if schema.deprecated: 92 parameter["deprecated"] = schema.deprecated 93 parameters.append(parameter) 94 return definitions, parameters 95 96 97 def get_openapi_operation_request_body( 98 *, body_field: Optional[Field], model_name_map: Dict[Type, str] 99 ) -> Optional[Dict]: 100 if not body_field: 101 return None 102 assert isinstance(body_field, Field) 103 body_schema, _, _ = field_schema( 104 body_field, model_name_map=model_name_map, ref_prefix=REF_PREFIX 105 ) 106 body_field.schema = cast(Body, body_field.schema) 107 request_media_type = body_field.schema.media_type 108 required = body_field.required 109 request_body_oai: Dict[str, Any] = {} 110 if required: 111 request_body_oai["required"] = required 112 request_body_oai["content"] = {request_media_type: {"schema": body_schema}} 113 return request_body_oai 114 115 116 def generate_operation_id(*, route: routing.APIRoute, method: str) -> str: 117 if route.operation_id: 118 return route.operation_id 119 path: str = route.path_format 120 return generate_operation_id_for_path(name=route.name, path=path, method=method) 121 122 123 def generate_operation_summary(*, route: routing.APIRoute, method: str) -> str: 124 if route.summary: 125 return route.summary 126 return route.name.replace("_", " ").title() 127 128 129 def get_openapi_operation_metadata(*, route: routing.APIRoute, method: str) -> Dict: 130 operation: Dict[str, Any] = {} 131 if route.tags: 132 operation["tags"] = route.tags 133 operation["summary"] = generate_operation_summary(route=route, method=method) 134 if route.description: 135 operation["description"] = route.description 136 operation["operationId"] = generate_operation_id(route=route, method=method) 137 if route.deprecated: 138 operation["deprecated"] = route.deprecated 139 return operation 140 141 142 def get_openapi_path( 143 *, route: routing.APIRoute, model_name_map: Dict[Type, str] 144 ) -> Tuple[Dict, Dict, Dict]: 145 path = {} 146 security_schemes: Dict[str, Any] = {} 147 definitions: Dict[str, Any] = {} 148 assert route.methods is not None, "Methods must be a list" 149 if route.include_in_schema: 150 for method in route.methods: 151 operation = get_openapi_operation_metadata(route=route, method=method) 152 parameters: List[Dict] = [] 153 flat_dependant = get_flat_dependant(route.dependant) 154 security_definitions, operation_security = get_openapi_security_definitions( 155 flat_dependant=flat_dependant 156 ) 157 if operation_security: 158 operation.setdefault("security", []).extend(operation_security) 159 if security_definitions: 160 security_schemes.update(security_definitions) 161 all_route_params = get_openapi_params(route.dependant) 162 validation_definitions, operation_parameters = get_openapi_operation_parameters( 163 all_route_params=all_route_params 164 ) 165 definitions.update(validation_definitions) 166 parameters.extend(operation_parameters) 167 if parameters: 168 operation["parameters"] = parameters 169 if method in METHODS_WITH_BODY: 170 request_body_oai = get_openapi_operation_request_body( 171 body_field=route.body_field, model_name_map=model_name_map 172 ) 173 if request_body_oai: 174 operation["requestBody"] = request_body_oai 175 if "ValidationError" not in definitions: 176 definitions["ValidationError"] = validation_error_definition 177 definitions[ 178 "HTTPValidationError" 179 ] = validation_error_response_definition 180 if route.responses: 181 for (additional_status_code, response) in route.responses.items(): 182 assert isinstance( 183 response, dict 184 ), "An additional response must be a dict" 185 field = route.response_fields.get(additional_status_code) 186 if field: 187 response_schema, _, _ = field_schema( 188 field, model_name_map=model_name_map, ref_prefix=REF_PREFIX 189 ) 190 response.setdefault("content", {}).setdefault( 191 "application/json", {} 192 )["schema"] = response_schema 193 status_text = http.client.responses.get(int(additional_status_code)) 194 response.setdefault( 195 "description", status_text or "Additional Response" 196 ) 197 operation.setdefault("responses", {})[ 198 str(additional_status_code) 199 ] = response 200 status_code = str(route.status_code) 201 response_schema = {"type": "string"} 202 if lenient_issubclass(route.response_class, JSONResponse): 203 if route.response_field: 204 response_schema, _, _ = field_schema( 205 route.response_field, 206 model_name_map=model_name_map, 207 ref_prefix=REF_PREFIX, 208 ) 209 else: 210 response_schema = {} 211 operation.setdefault("responses", {}).setdefault(status_code, {})[ 212 "description" 213 ] = route.response_description 214 operation.setdefault("responses", {}).setdefault( 215 status_code, {} 216 ).setdefault("content", {}).setdefault(route.response_class.media_type, {})[ 217 "schema" 218 ] = response_schema 219 if all_route_params or route.body_field: 220 operation["responses"][str(HTTP_422_UNPROCESSABLE_ENTITY)] = { 221 "description": "Validation Error", 222 "content": { 223 "application/json": { 224 "schema": {"$ref": REF_PREFIX + "HTTPValidationError"} 225 } 226 }, 227 } 228 path[method.lower()] = operation 229 return path, security_schemes, definitions 230 231 232 def get_openapi( 233 *, 234 title: str, 235 version: str, 236 openapi_version: str = "3.0.2", 237 description: str = None, 238 routes: Sequence[BaseRoute], 239 openapi_prefix: str = "" 240 ) -> Dict: 241 info = {"title": title, "version": version} 242 if description: 243 info["description"] = description 244 output = {"openapi": openapi_version, "info": info} 245 components: Dict[str, Dict] = {} 246 paths: Dict[str, Dict] = {} 247 flat_models = get_flat_models_from_routes(routes) 248 model_name_map = get_model_name_map(flat_models) 249 definitions = get_model_definitions( 250 flat_models=flat_models, model_name_map=model_name_map 251 ) 252 for route in routes: 253 if isinstance(route, routing.APIRoute): 254 result = get_openapi_path(route=route, model_name_map=model_name_map) 255 if result: 256 path, security_schemes, path_definitions = result 257 if path: 258 paths.setdefault(openapi_prefix + route.path_format, {}).update( 259 path 260 ) 261 if security_schemes: 262 components.setdefault("securitySchemes", {}).update( 263 security_schemes 264 ) 265 if path_definitions: 266 definitions.update(path_definitions) 267 if definitions: 268 components.setdefault("schemas", {}).update(definitions) 269 if components: 270 output["components"] = components 271 output["paths"] = paths 272 return jsonable_encoder(OpenAPI(**output), by_alias=True, include_none=False) 273 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fastapi/openapi/utils.py b/fastapi/openapi/utils.py --- a/fastapi/openapi/utils.py +++ b/fastapi/openapi/utils.py @@ -71,15 +71,11 @@ def get_openapi_operation_parameters( all_route_params: Sequence[Field] -) -> Tuple[Dict[str, Dict], List[Dict[str, Any]]]: - definitions: Dict[str, Dict] = {} +) -> List[Dict[str, Any]]: parameters = [] for param in all_route_params: schema = param.schema schema = cast(Param, schema) - if "ValidationError" not in definitions: - definitions["ValidationError"] = validation_error_definition - definitions["HTTPValidationError"] = validation_error_response_definition parameter = { "name": param.alias, "in": schema.in_.value, @@ -91,7 +87,7 @@ if schema.deprecated: parameter["deprecated"] = schema.deprecated parameters.append(parameter) - return definitions, parameters + return parameters def get_openapi_operation_request_body( @@ -159,10 +155,7 @@ if security_definitions: security_schemes.update(security_definitions) all_route_params = get_openapi_params(route.dependant) - validation_definitions, operation_parameters = get_openapi_operation_parameters( - all_route_params=all_route_params - ) - definitions.update(validation_definitions) + operation_parameters = get_openapi_operation_parameters(all_route_params) parameters.extend(operation_parameters) if parameters: operation["parameters"] = parameters @@ -172,11 +165,6 @@ ) if request_body_oai: operation["requestBody"] = request_body_oai - if "ValidationError" not in definitions: - definitions["ValidationError"] = validation_error_definition - definitions[ - "HTTPValidationError" - ] = validation_error_response_definition if route.responses: for (additional_status_code, response) in route.responses.items(): assert isinstance( @@ -188,7 +176,7 @@ field, model_name_map=model_name_map, ref_prefix=REF_PREFIX ) response.setdefault("content", {}).setdefault( - "application/json", {} + route.response_class.media_type, {} )["schema"] = response_schema status_text = http.client.responses.get(int(additional_status_code)) response.setdefault( @@ -216,8 +204,15 @@ ).setdefault("content", {}).setdefault(route.response_class.media_type, {})[ "schema" ] = response_schema - if all_route_params or route.body_field: - operation["responses"][str(HTTP_422_UNPROCESSABLE_ENTITY)] = { + + http422 = str(HTTP_422_UNPROCESSABLE_ENTITY) + if (all_route_params or route.body_field) and not any( + [ + status in operation["responses"] + for status in [http422, "4xx", "default"] + ] + ): + operation["responses"][http422] = { "description": "Validation Error", "content": { "application/json": { @@ -225,6 +220,13 @@ } }, } + if "ValidationError" not in definitions: + definitions.update( + { + "ValidationError": validation_error_definition, + "HTTPValidationError": validation_error_response_definition, + } + ) path[method.lower()] = operation return path, security_schemes, definitions
{"golden_diff": "diff --git a/fastapi/openapi/utils.py b/fastapi/openapi/utils.py\n--- a/fastapi/openapi/utils.py\n+++ b/fastapi/openapi/utils.py\n@@ -71,15 +71,11 @@\n \n def get_openapi_operation_parameters(\n all_route_params: Sequence[Field]\n-) -> Tuple[Dict[str, Dict], List[Dict[str, Any]]]:\n- definitions: Dict[str, Dict] = {}\n+) -> List[Dict[str, Any]]:\n parameters = []\n for param in all_route_params:\n schema = param.schema\n schema = cast(Param, schema)\n- if \"ValidationError\" not in definitions:\n- definitions[\"ValidationError\"] = validation_error_definition\n- definitions[\"HTTPValidationError\"] = validation_error_response_definition\n parameter = {\n \"name\": param.alias,\n \"in\": schema.in_.value,\n@@ -91,7 +87,7 @@\n if schema.deprecated:\n parameter[\"deprecated\"] = schema.deprecated\n parameters.append(parameter)\n- return definitions, parameters\n+ return parameters\n \n \n def get_openapi_operation_request_body(\n@@ -159,10 +155,7 @@\n if security_definitions:\n security_schemes.update(security_definitions)\n all_route_params = get_openapi_params(route.dependant)\n- validation_definitions, operation_parameters = get_openapi_operation_parameters(\n- all_route_params=all_route_params\n- )\n- definitions.update(validation_definitions)\n+ operation_parameters = get_openapi_operation_parameters(all_route_params)\n parameters.extend(operation_parameters)\n if parameters:\n operation[\"parameters\"] = parameters\n@@ -172,11 +165,6 @@\n )\n if request_body_oai:\n operation[\"requestBody\"] = request_body_oai\n- if \"ValidationError\" not in definitions:\n- definitions[\"ValidationError\"] = validation_error_definition\n- definitions[\n- \"HTTPValidationError\"\n- ] = validation_error_response_definition\n if route.responses:\n for (additional_status_code, response) in route.responses.items():\n assert isinstance(\n@@ -188,7 +176,7 @@\n field, model_name_map=model_name_map, ref_prefix=REF_PREFIX\n )\n response.setdefault(\"content\", {}).setdefault(\n- \"application/json\", {}\n+ route.response_class.media_type, {}\n )[\"schema\"] = response_schema\n status_text = http.client.responses.get(int(additional_status_code))\n response.setdefault(\n@@ -216,8 +204,15 @@\n ).setdefault(\"content\", {}).setdefault(route.response_class.media_type, {})[\n \"schema\"\n ] = response_schema\n- if all_route_params or route.body_field:\n- operation[\"responses\"][str(HTTP_422_UNPROCESSABLE_ENTITY)] = {\n+\n+ http422 = str(HTTP_422_UNPROCESSABLE_ENTITY)\n+ if (all_route_params or route.body_field) and not any(\n+ [\n+ status in operation[\"responses\"]\n+ for status in [http422, \"4xx\", \"default\"]\n+ ]\n+ ):\n+ operation[\"responses\"][http422] = {\n \"description\": \"Validation Error\",\n \"content\": {\n \"application/json\": {\n@@ -225,6 +220,13 @@\n }\n },\n }\n+ if \"ValidationError\" not in definitions:\n+ definitions.update(\n+ {\n+ \"ValidationError\": validation_error_definition,\n+ \"HTTPValidationError\": validation_error_response_definition,\n+ }\n+ )\n path[method.lower()] = operation\n return path, security_schemes, definitions\n", "issue": "OpenAPI: HTTP_422 response does not use custom media_type\n**Describe the bug**\r\nFastAPI automatically adds an HTTP_422 response to all paths in the OpenAPI specification that have parameters or request body. This response does not use the media_type of response_class if any custom defined. Furthermore, it overwrites any error object format with the default one.\r\n\r\n**To Reproduce**\r\nCreate a path with parameters and add custom response_class to decorator. Add custom exception handlers that reformat the default error responses as per your liking. Then observe generated openapi.json\r\n\r\n```python\r\nfrom fastapi import FastAPI, HTTPException\r\nfrom fastapi.exceptions import RequestValidationError\r\nfrom starlette import status\r\nfrom starlette.responses import JSONResponse\r\nfrom . import schemas\r\n\r\napp = FastAPI()\r\n\r\nclass JsonApiResponse(JSONResponse):\r\n media_type = 'application/vnd+json.api'\r\n\r\n@app.exception_handler(HTTPException)\r\nasync def http_exception_handler(request, exc: HTTPException) -> JsonApiResponse:\r\n headers = getattr(exc, \"headers\", None)\r\n content = schemas.ErrorResponse(errors=[dict(title=\"Bad request\", detail=exc.detail, status=exc.status_code)]).dict()\r\n status_code = exc.status_code\r\n if headers:\r\n return JsonApiResponse(content=content, status_code=status_code, headers=headers)\r\n else:\r\n return JsonApiResponse(content=content, status_code=status_code)\r\n\r\n@app.exception_handler(RequestValidationError)\r\nasync def request_validation_exception_handler(request, exc: RequestValidationError) -> JsonApiResponse:\r\n http422 = status.HTTP_422_UNPROCESSABLE_ENTITY\r\n return JsonApiResponse(\r\n content=schemas.ErrorResponse(errors=[\r\n dict(title=err['type'], detail=err['msg'], source='/'.join(err['loc']), status=http422)\r\n for err in exc.errors()\r\n ]).dict(),\r\n status_code=http422,\r\n )\r\n\r\n@app.post('/customers',\r\n status_code=status.HTTP_201_CREATED,\r\n response_model=schemas.CustomerDetailsResponse,\r\n response_class=JsonApiResponse,\r\n )\r\ndef customer_create(data: schemas.Customer = Body(..., media_type='application/vnd+json.api', embed=True)):\r\n created_customer = {**data.dict(), **{'id': '1'}}\r\n return {'data': created_customer}\r\n``` \r\n\r\nThe openapi.json will include the unwanted 422 response with the FastAPI default error object definitions:\r\n\r\n```yaml\r\n # ...\r\n '422':\r\n description: Validation Error\r\n content:\r\n application/json:\r\n schema:\r\n \"$ref\": \"#/components/schemas/HTTPValidationError\"\r\n```\r\n\r\n**Expected behavior**\r\nAt least, the media_type of the response_class should be respected. But the best would be if the 422 would not be added to the specification unless requested via the path decorator. Or if the 422 definitions of mine were respected.\r\n\r\n```python\r\n@app.post('/customers',\r\n status_code=status.HTTP_201_CREATED,\r\n response_model=schemas.CustomerDetailsResponse,\r\n response_class=JsonApiResponse,\r\n responses={\r\n 422: {\r\n 'model': schemas.ErrorResponse\r\n },\r\n })\r\ndata: schemas.Customer = Body(..., media_type='application/vnd+json.api', embed=True)):\r\n pass\r\n```\r\n\r\n**Environment:**\r\n - OS: masOS 10.14.6\r\n - Python: 3.6.5\r\n - FastAPI: 0.35.0\n", "before_files": [{"content": "import http.client\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Type, cast\n\nfrom fastapi import routing\nfrom fastapi.dependencies.models import Dependant\nfrom fastapi.dependencies.utils import get_flat_dependant\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.openapi.constants import METHODS_WITH_BODY, REF_PREFIX\nfrom fastapi.openapi.models import OpenAPI\nfrom fastapi.params import Body, Param\nfrom fastapi.utils import (\n generate_operation_id_for_path,\n get_flat_models_from_routes,\n get_model_definitions,\n)\nfrom pydantic.fields import Field\nfrom pydantic.schema import field_schema, get_model_name_map\nfrom pydantic.utils import lenient_issubclass\nfrom starlette.responses import JSONResponse\nfrom starlette.routing import BaseRoute\nfrom starlette.status import HTTP_422_UNPROCESSABLE_ENTITY\n\nvalidation_error_definition = {\n \"title\": \"ValidationError\",\n \"type\": \"object\",\n \"properties\": {\n \"loc\": {\"title\": \"Location\", \"type\": \"array\", \"items\": {\"type\": \"string\"}},\n \"msg\": {\"title\": \"Message\", \"type\": \"string\"},\n \"type\": {\"title\": \"Error Type\", \"type\": \"string\"},\n },\n \"required\": [\"loc\", \"msg\", \"type\"],\n}\n\nvalidation_error_response_definition = {\n \"title\": \"HTTPValidationError\",\n \"type\": \"object\",\n \"properties\": {\n \"detail\": {\n \"title\": \"Detail\",\n \"type\": \"array\",\n \"items\": {\"$ref\": REF_PREFIX + \"ValidationError\"},\n }\n },\n}\n\n\ndef get_openapi_params(dependant: Dependant) -> List[Field]:\n flat_dependant = get_flat_dependant(dependant)\n return (\n flat_dependant.path_params\n + flat_dependant.query_params\n + flat_dependant.header_params\n + flat_dependant.cookie_params\n )\n\n\ndef get_openapi_security_definitions(flat_dependant: Dependant) -> Tuple[Dict, List]:\n security_definitions = {}\n operation_security = []\n for security_requirement in flat_dependant.security_requirements:\n security_definition = jsonable_encoder(\n security_requirement.security_scheme.model,\n by_alias=True,\n include_none=False,\n )\n security_name = security_requirement.security_scheme.scheme_name\n security_definitions[security_name] = security_definition\n operation_security.append({security_name: security_requirement.scopes})\n return security_definitions, operation_security\n\n\ndef get_openapi_operation_parameters(\n all_route_params: Sequence[Field]\n) -> Tuple[Dict[str, Dict], List[Dict[str, Any]]]:\n definitions: Dict[str, Dict] = {}\n parameters = []\n for param in all_route_params:\n schema = param.schema\n schema = cast(Param, schema)\n if \"ValidationError\" not in definitions:\n definitions[\"ValidationError\"] = validation_error_definition\n definitions[\"HTTPValidationError\"] = validation_error_response_definition\n parameter = {\n \"name\": param.alias,\n \"in\": schema.in_.value,\n \"required\": param.required,\n \"schema\": field_schema(param, model_name_map={})[0],\n }\n if schema.description:\n parameter[\"description\"] = schema.description\n if schema.deprecated:\n parameter[\"deprecated\"] = schema.deprecated\n parameters.append(parameter)\n return definitions, parameters\n\n\ndef get_openapi_operation_request_body(\n *, body_field: Optional[Field], model_name_map: Dict[Type, str]\n) -> Optional[Dict]:\n if not body_field:\n return None\n assert isinstance(body_field, Field)\n body_schema, _, _ = field_schema(\n body_field, model_name_map=model_name_map, ref_prefix=REF_PREFIX\n )\n body_field.schema = cast(Body, body_field.schema)\n request_media_type = body_field.schema.media_type\n required = body_field.required\n request_body_oai: Dict[str, Any] = {}\n if required:\n request_body_oai[\"required\"] = required\n request_body_oai[\"content\"] = {request_media_type: {\"schema\": body_schema}}\n return request_body_oai\n\n\ndef generate_operation_id(*, route: routing.APIRoute, method: str) -> str:\n if route.operation_id:\n return route.operation_id\n path: str = route.path_format\n return generate_operation_id_for_path(name=route.name, path=path, method=method)\n\n\ndef generate_operation_summary(*, route: routing.APIRoute, method: str) -> str:\n if route.summary:\n return route.summary\n return route.name.replace(\"_\", \" \").title()\n\n\ndef get_openapi_operation_metadata(*, route: routing.APIRoute, method: str) -> Dict:\n operation: Dict[str, Any] = {}\n if route.tags:\n operation[\"tags\"] = route.tags\n operation[\"summary\"] = generate_operation_summary(route=route, method=method)\n if route.description:\n operation[\"description\"] = route.description\n operation[\"operationId\"] = generate_operation_id(route=route, method=method)\n if route.deprecated:\n operation[\"deprecated\"] = route.deprecated\n return operation\n\n\ndef get_openapi_path(\n *, route: routing.APIRoute, model_name_map: Dict[Type, str]\n) -> Tuple[Dict, Dict, Dict]:\n path = {}\n security_schemes: Dict[str, Any] = {}\n definitions: Dict[str, Any] = {}\n assert route.methods is not None, \"Methods must be a list\"\n if route.include_in_schema:\n for method in route.methods:\n operation = get_openapi_operation_metadata(route=route, method=method)\n parameters: List[Dict] = []\n flat_dependant = get_flat_dependant(route.dependant)\n security_definitions, operation_security = get_openapi_security_definitions(\n flat_dependant=flat_dependant\n )\n if operation_security:\n operation.setdefault(\"security\", []).extend(operation_security)\n if security_definitions:\n security_schemes.update(security_definitions)\n all_route_params = get_openapi_params(route.dependant)\n validation_definitions, operation_parameters = get_openapi_operation_parameters(\n all_route_params=all_route_params\n )\n definitions.update(validation_definitions)\n parameters.extend(operation_parameters)\n if parameters:\n operation[\"parameters\"] = parameters\n if method in METHODS_WITH_BODY:\n request_body_oai = get_openapi_operation_request_body(\n body_field=route.body_field, model_name_map=model_name_map\n )\n if request_body_oai:\n operation[\"requestBody\"] = request_body_oai\n if \"ValidationError\" not in definitions:\n definitions[\"ValidationError\"] = validation_error_definition\n definitions[\n \"HTTPValidationError\"\n ] = validation_error_response_definition\n if route.responses:\n for (additional_status_code, response) in route.responses.items():\n assert isinstance(\n response, dict\n ), \"An additional response must be a dict\"\n field = route.response_fields.get(additional_status_code)\n if field:\n response_schema, _, _ = field_schema(\n field, model_name_map=model_name_map, ref_prefix=REF_PREFIX\n )\n response.setdefault(\"content\", {}).setdefault(\n \"application/json\", {}\n )[\"schema\"] = response_schema\n status_text = http.client.responses.get(int(additional_status_code))\n response.setdefault(\n \"description\", status_text or \"Additional Response\"\n )\n operation.setdefault(\"responses\", {})[\n str(additional_status_code)\n ] = response\n status_code = str(route.status_code)\n response_schema = {\"type\": \"string\"}\n if lenient_issubclass(route.response_class, JSONResponse):\n if route.response_field:\n response_schema, _, _ = field_schema(\n route.response_field,\n model_name_map=model_name_map,\n ref_prefix=REF_PREFIX,\n )\n else:\n response_schema = {}\n operation.setdefault(\"responses\", {}).setdefault(status_code, {})[\n \"description\"\n ] = route.response_description\n operation.setdefault(\"responses\", {}).setdefault(\n status_code, {}\n ).setdefault(\"content\", {}).setdefault(route.response_class.media_type, {})[\n \"schema\"\n ] = response_schema\n if all_route_params or route.body_field:\n operation[\"responses\"][str(HTTP_422_UNPROCESSABLE_ENTITY)] = {\n \"description\": \"Validation Error\",\n \"content\": {\n \"application/json\": {\n \"schema\": {\"$ref\": REF_PREFIX + \"HTTPValidationError\"}\n }\n },\n }\n path[method.lower()] = operation\n return path, security_schemes, definitions\n\n\ndef get_openapi(\n *,\n title: str,\n version: str,\n openapi_version: str = \"3.0.2\",\n description: str = None,\n routes: Sequence[BaseRoute],\n openapi_prefix: str = \"\"\n) -> Dict:\n info = {\"title\": title, \"version\": version}\n if description:\n info[\"description\"] = description\n output = {\"openapi\": openapi_version, \"info\": info}\n components: Dict[str, Dict] = {}\n paths: Dict[str, Dict] = {}\n flat_models = get_flat_models_from_routes(routes)\n model_name_map = get_model_name_map(flat_models)\n definitions = get_model_definitions(\n flat_models=flat_models, model_name_map=model_name_map\n )\n for route in routes:\n if isinstance(route, routing.APIRoute):\n result = get_openapi_path(route=route, model_name_map=model_name_map)\n if result:\n path, security_schemes, path_definitions = result\n if path:\n paths.setdefault(openapi_prefix + route.path_format, {}).update(\n path\n )\n if security_schemes:\n components.setdefault(\"securitySchemes\", {}).update(\n security_schemes\n )\n if path_definitions:\n definitions.update(path_definitions)\n if definitions:\n components.setdefault(\"schemas\", {}).update(definitions)\n if components:\n output[\"components\"] = components\n output[\"paths\"] = paths\n return jsonable_encoder(OpenAPI(**output), by_alias=True, include_none=False)\n", "path": "fastapi/openapi/utils.py"}], "after_files": [{"content": "import http.client\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Type, cast\n\nfrom fastapi import routing\nfrom fastapi.dependencies.models import Dependant\nfrom fastapi.dependencies.utils import get_flat_dependant\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.openapi.constants import METHODS_WITH_BODY, REF_PREFIX\nfrom fastapi.openapi.models import OpenAPI\nfrom fastapi.params import Body, Param\nfrom fastapi.utils import (\n generate_operation_id_for_path,\n get_flat_models_from_routes,\n get_model_definitions,\n)\nfrom pydantic.fields import Field\nfrom pydantic.schema import field_schema, get_model_name_map\nfrom pydantic.utils import lenient_issubclass\nfrom starlette.responses import JSONResponse\nfrom starlette.routing import BaseRoute\nfrom starlette.status import HTTP_422_UNPROCESSABLE_ENTITY\n\nvalidation_error_definition = {\n \"title\": \"ValidationError\",\n \"type\": \"object\",\n \"properties\": {\n \"loc\": {\"title\": \"Location\", \"type\": \"array\", \"items\": {\"type\": \"string\"}},\n \"msg\": {\"title\": \"Message\", \"type\": \"string\"},\n \"type\": {\"title\": \"Error Type\", \"type\": \"string\"},\n },\n \"required\": [\"loc\", \"msg\", \"type\"],\n}\n\nvalidation_error_response_definition = {\n \"title\": \"HTTPValidationError\",\n \"type\": \"object\",\n \"properties\": {\n \"detail\": {\n \"title\": \"Detail\",\n \"type\": \"array\",\n \"items\": {\"$ref\": REF_PREFIX + \"ValidationError\"},\n }\n },\n}\n\n\ndef get_openapi_params(dependant: Dependant) -> List[Field]:\n flat_dependant = get_flat_dependant(dependant)\n return (\n flat_dependant.path_params\n + flat_dependant.query_params\n + flat_dependant.header_params\n + flat_dependant.cookie_params\n )\n\n\ndef get_openapi_security_definitions(flat_dependant: Dependant) -> Tuple[Dict, List]:\n security_definitions = {}\n operation_security = []\n for security_requirement in flat_dependant.security_requirements:\n security_definition = jsonable_encoder(\n security_requirement.security_scheme.model,\n by_alias=True,\n include_none=False,\n )\n security_name = security_requirement.security_scheme.scheme_name\n security_definitions[security_name] = security_definition\n operation_security.append({security_name: security_requirement.scopes})\n return security_definitions, operation_security\n\n\ndef get_openapi_operation_parameters(\n all_route_params: Sequence[Field]\n) -> List[Dict[str, Any]]:\n parameters = []\n for param in all_route_params:\n schema = param.schema\n schema = cast(Param, schema)\n parameter = {\n \"name\": param.alias,\n \"in\": schema.in_.value,\n \"required\": param.required,\n \"schema\": field_schema(param, model_name_map={})[0],\n }\n if schema.description:\n parameter[\"description\"] = schema.description\n if schema.deprecated:\n parameter[\"deprecated\"] = schema.deprecated\n parameters.append(parameter)\n return parameters\n\n\ndef get_openapi_operation_request_body(\n *, body_field: Optional[Field], model_name_map: Dict[Type, str]\n) -> Optional[Dict]:\n if not body_field:\n return None\n assert isinstance(body_field, Field)\n body_schema, _, _ = field_schema(\n body_field, model_name_map=model_name_map, ref_prefix=REF_PREFIX\n )\n body_field.schema = cast(Body, body_field.schema)\n request_media_type = body_field.schema.media_type\n required = body_field.required\n request_body_oai: Dict[str, Any] = {}\n if required:\n request_body_oai[\"required\"] = required\n request_body_oai[\"content\"] = {request_media_type: {\"schema\": body_schema}}\n return request_body_oai\n\n\ndef generate_operation_id(*, route: routing.APIRoute, method: str) -> str:\n if route.operation_id:\n return route.operation_id\n path: str = route.path_format\n return generate_operation_id_for_path(name=route.name, path=path, method=method)\n\n\ndef generate_operation_summary(*, route: routing.APIRoute, method: str) -> str:\n if route.summary:\n return route.summary\n return route.name.replace(\"_\", \" \").title()\n\n\ndef get_openapi_operation_metadata(*, route: routing.APIRoute, method: str) -> Dict:\n operation: Dict[str, Any] = {}\n if route.tags:\n operation[\"tags\"] = route.tags\n operation[\"summary\"] = generate_operation_summary(route=route, method=method)\n if route.description:\n operation[\"description\"] = route.description\n operation[\"operationId\"] = generate_operation_id(route=route, method=method)\n if route.deprecated:\n operation[\"deprecated\"] = route.deprecated\n return operation\n\n\ndef get_openapi_path(\n *, route: routing.APIRoute, model_name_map: Dict[Type, str]\n) -> Tuple[Dict, Dict, Dict]:\n path = {}\n security_schemes: Dict[str, Any] = {}\n definitions: Dict[str, Any] = {}\n assert route.methods is not None, \"Methods must be a list\"\n if route.include_in_schema:\n for method in route.methods:\n operation = get_openapi_operation_metadata(route=route, method=method)\n parameters: List[Dict] = []\n flat_dependant = get_flat_dependant(route.dependant)\n security_definitions, operation_security = get_openapi_security_definitions(\n flat_dependant=flat_dependant\n )\n if operation_security:\n operation.setdefault(\"security\", []).extend(operation_security)\n if security_definitions:\n security_schemes.update(security_definitions)\n all_route_params = get_openapi_params(route.dependant)\n operation_parameters = get_openapi_operation_parameters(all_route_params)\n parameters.extend(operation_parameters)\n if parameters:\n operation[\"parameters\"] = parameters\n if method in METHODS_WITH_BODY:\n request_body_oai = get_openapi_operation_request_body(\n body_field=route.body_field, model_name_map=model_name_map\n )\n if request_body_oai:\n operation[\"requestBody\"] = request_body_oai\n if route.responses:\n for (additional_status_code, response) in route.responses.items():\n assert isinstance(\n response, dict\n ), \"An additional response must be a dict\"\n field = route.response_fields.get(additional_status_code)\n if field:\n response_schema, _, _ = field_schema(\n field, model_name_map=model_name_map, ref_prefix=REF_PREFIX\n )\n response.setdefault(\"content\", {}).setdefault(\n route.response_class.media_type, {}\n )[\"schema\"] = response_schema\n status_text = http.client.responses.get(int(additional_status_code))\n response.setdefault(\n \"description\", status_text or \"Additional Response\"\n )\n operation.setdefault(\"responses\", {})[\n str(additional_status_code)\n ] = response\n status_code = str(route.status_code)\n response_schema = {\"type\": \"string\"}\n if lenient_issubclass(route.response_class, JSONResponse):\n if route.response_field:\n response_schema, _, _ = field_schema(\n route.response_field,\n model_name_map=model_name_map,\n ref_prefix=REF_PREFIX,\n )\n else:\n response_schema = {}\n operation.setdefault(\"responses\", {}).setdefault(status_code, {})[\n \"description\"\n ] = route.response_description\n operation.setdefault(\"responses\", {}).setdefault(\n status_code, {}\n ).setdefault(\"content\", {}).setdefault(route.response_class.media_type, {})[\n \"schema\"\n ] = response_schema\n\n http422 = str(HTTP_422_UNPROCESSABLE_ENTITY)\n if (all_route_params or route.body_field) and not any(\n [\n status in operation[\"responses\"]\n for status in [http422, \"4xx\", \"default\"]\n ]\n ):\n operation[\"responses\"][http422] = {\n \"description\": \"Validation Error\",\n \"content\": {\n \"application/json\": {\n \"schema\": {\"$ref\": REF_PREFIX + \"HTTPValidationError\"}\n }\n },\n }\n if \"ValidationError\" not in definitions:\n definitions.update(\n {\n \"ValidationError\": validation_error_definition,\n \"HTTPValidationError\": validation_error_response_definition,\n }\n )\n path[method.lower()] = operation\n return path, security_schemes, definitions\n\n\ndef get_openapi(\n *,\n title: str,\n version: str,\n openapi_version: str = \"3.0.2\",\n description: str = None,\n routes: Sequence[BaseRoute],\n openapi_prefix: str = \"\"\n) -> Dict:\n info = {\"title\": title, \"version\": version}\n if description:\n info[\"description\"] = description\n output = {\"openapi\": openapi_version, \"info\": info}\n components: Dict[str, Dict] = {}\n paths: Dict[str, Dict] = {}\n flat_models = get_flat_models_from_routes(routes)\n model_name_map = get_model_name_map(flat_models)\n definitions = get_model_definitions(\n flat_models=flat_models, model_name_map=model_name_map\n )\n for route in routes:\n if isinstance(route, routing.APIRoute):\n result = get_openapi_path(route=route, model_name_map=model_name_map)\n if result:\n path, security_schemes, path_definitions = result\n if path:\n paths.setdefault(openapi_prefix + route.path_format, {}).update(\n path\n )\n if security_schemes:\n components.setdefault(\"securitySchemes\", {}).update(\n security_schemes\n )\n if path_definitions:\n definitions.update(path_definitions)\n if definitions:\n components.setdefault(\"schemas\", {}).update(definitions)\n if components:\n output[\"components\"] = components\n output[\"paths\"] = paths\n return jsonable_encoder(OpenAPI(**output), by_alias=True, include_none=False)\n", "path": "fastapi/openapi/utils.py"}]}
3,854
789
gh_patches_debug_21897
rasdani/github-patches
git_diff
weecology__retriever-1267
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fetch method should return an ordered dict (not a dict) Hello, I noticed that `fetch` method returns a dict of dataframes. To preserve order of tables (which may have a meaning) it should return an ordered dict. [Datapackage](https://frictionlessdata.io/specs/data-package/) ressources are stored in a list so it's ordered. Kind regards --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `retriever/engines/sqlite.py` Content: ``` 1 import os 2 import pandas as pd 3 from builtins import range 4 5 from retriever.lib.defaults import DATA_DIR 6 from retriever.lib.models import Engine, no_cleanup 7 8 9 class engine(Engine): 10 """Engine instance for SQLite.""" 11 12 name = "SQLite" 13 abbreviation = "sqlite" 14 datatypes = { 15 "auto": ("INTEGER", "AUTOINCREMENT"), 16 "int": "INTEGER", 17 "bigint": "INTEGER", 18 "double": "REAL", 19 "decimal": "REAL", 20 "char": "TEXT", 21 "bool": "INTEGER", 22 } 23 placeholder = "?" 24 insert_limit = 1000 25 required_opts = [("file", 26 "Enter the filename of your SQLite database", 27 "sqlite.db"), 28 ("table_name", 29 "Format of table name", 30 "{db}_{table}"), 31 ("data_dir", 32 "Install directory", 33 DATA_DIR), 34 ] 35 36 def create_db(self): 37 """Don't create database for SQLite 38 39 SQLite doesn't create databases. Each database is a file and needs a separate 40 connection. This overloads`create_db` to do nothing in this case. 41 """ 42 return None 43 44 def fetch_tables(self, dataset, table_names): 45 """Return sqlite dataset as list of pandas dataframe.""" 46 connection = self.get_connection() 47 data = {table[len(dataset) + 1:]: pd.read_sql_query("SELECT * " 48 "FROM {};".format(table), 49 connection) 50 for table in table_names} 51 return data 52 53 def get_bulk_insert_statement(self): 54 """Get insert statement for bulk inserts 55 56 This places ?'s instead of the actual values so that executemany() can 57 operate as designed 58 """ 59 columns = self.table.get_insert_columns() 60 column_count = len(self.table.get_insert_columns(False)) 61 insert_stmt = "INSERT INTO " + self.table_name() 62 insert_stmt += " (" + columns + ")" 63 insert_stmt += " VALUES (" 64 for _ in range(0, column_count): 65 insert_stmt += "?, " 66 insert_stmt = insert_stmt.rstrip(", ") + ")" 67 return insert_stmt 68 69 def insert_data_from_file(self, filename): 70 """Perform a high speed bulk insert 71 72 Checks to see if a given file can be bulk inserted, and if so loads 73 it in chunks and inserts those chunks into the database using 74 executemany. 75 """ 76 chunk_size = 1000000 77 self.get_cursor() 78 79 # Determine if the dataset includes cross-tab data 80 crosstab = len([True for c in self.table.columns if c[1][0][:3] == "ct-"]) != 0 81 82 if (([self.table.cleanup.function, self.table.header_rows] == [no_cleanup, 1]) 83 and not self.table.fixed_width 84 and not crosstab 85 and (not hasattr(self.table, "do_not_bulk_insert") or not self.table.do_not_bulk_insert)): 86 filename = os.path.abspath(filename) 87 try: 88 bulk_insert_statement = self.get_bulk_insert_statement() 89 line_endings = set(['\n', '\r', '\r\n']) 90 with open(filename, 'r') as data_file: 91 data_chunk = data_file.readlines(chunk_size) 92 data_chunk = [line.rstrip('\r\n') for line in data_chunk if line not in line_endings] 93 del data_chunk[:self.table.header_rows] 94 while data_chunk: 95 data_chunk_split = [row.split(self.table.delimiter) 96 for row in data_chunk] 97 self.cursor.executemany(bulk_insert_statement, data_chunk_split) 98 data_chunk = data_file.readlines(chunk_size) 99 self.connection.commit() 100 except: 101 self.connection.rollback() 102 return Engine.insert_data_from_file(self, filename) 103 else: 104 return Engine.insert_data_from_file(self, filename) 105 106 def get_connection(self): 107 """Get db connection.""" 108 import sqlite3 as dbapi 109 110 self.get_input() 111 file = self.opts["file"] 112 db_file = self.opts["data_dir"] 113 full_path = os.path.join(db_file, file) 114 115 return dbapi.connect(os.path.normpath(full_path)) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/retriever/engines/sqlite.py b/retriever/engines/sqlite.py --- a/retriever/engines/sqlite.py +++ b/retriever/engines/sqlite.py @@ -1,6 +1,7 @@ import os import pandas as pd from builtins import range +from collections import OrderedDict from retriever.lib.defaults import DATA_DIR from retriever.lib.models import Engine, no_cleanup @@ -44,10 +45,12 @@ def fetch_tables(self, dataset, table_names): """Return sqlite dataset as list of pandas dataframe.""" connection = self.get_connection() - data = {table[len(dataset) + 1:]: pd.read_sql_query("SELECT * " - "FROM {};".format(table), - connection) - for table in table_names} + sql_query = "SELECT * FROM {};" + data = OrderedDict({ + table[len(dataset) + 1:] + :pd.read_sql_query(sql_query.format(table), connection) + for table in table_names + }) return data def get_bulk_insert_statement(self):
{"golden_diff": "diff --git a/retriever/engines/sqlite.py b/retriever/engines/sqlite.py\n--- a/retriever/engines/sqlite.py\n+++ b/retriever/engines/sqlite.py\n@@ -1,6 +1,7 @@\n import os\n import pandas as pd\n from builtins import range\n+from collections import OrderedDict\n \n from retriever.lib.defaults import DATA_DIR\n from retriever.lib.models import Engine, no_cleanup\n@@ -44,10 +45,12 @@\n def fetch_tables(self, dataset, table_names):\n \"\"\"Return sqlite dataset as list of pandas dataframe.\"\"\"\n connection = self.get_connection()\n- data = {table[len(dataset) + 1:]: pd.read_sql_query(\"SELECT * \"\n- \"FROM {};\".format(table),\n- connection)\n- for table in table_names}\n+ sql_query = \"SELECT * FROM {};\"\n+ data = OrderedDict({\n+ table[len(dataset) + 1:]\n+ :pd.read_sql_query(sql_query.format(table), connection)\n+ for table in table_names\n+ })\n return data\n \n def get_bulk_insert_statement(self):\n", "issue": "fetch method should return an ordered dict (not a dict)\nHello,\r\n\r\nI noticed that `fetch` method returns a dict of dataframes.\r\nTo preserve order of tables (which may have a meaning) it should return an ordered dict.\r\n[Datapackage](https://frictionlessdata.io/specs/data-package/) ressources are stored in a list so it's ordered.\r\n\r\nKind regards\n", "before_files": [{"content": "import os\nimport pandas as pd\nfrom builtins import range\n\nfrom retriever.lib.defaults import DATA_DIR\nfrom retriever.lib.models import Engine, no_cleanup\n\n\nclass engine(Engine):\n \"\"\"Engine instance for SQLite.\"\"\"\n\n name = \"SQLite\"\n abbreviation = \"sqlite\"\n datatypes = {\n \"auto\": (\"INTEGER\", \"AUTOINCREMENT\"),\n \"int\": \"INTEGER\",\n \"bigint\": \"INTEGER\",\n \"double\": \"REAL\",\n \"decimal\": \"REAL\",\n \"char\": \"TEXT\",\n \"bool\": \"INTEGER\",\n }\n placeholder = \"?\"\n insert_limit = 1000\n required_opts = [(\"file\",\n \"Enter the filename of your SQLite database\",\n \"sqlite.db\"),\n (\"table_name\",\n \"Format of table name\",\n \"{db}_{table}\"),\n (\"data_dir\",\n \"Install directory\",\n DATA_DIR),\n ]\n\n def create_db(self):\n \"\"\"Don't create database for SQLite\n\n SQLite doesn't create databases. Each database is a file and needs a separate\n connection. This overloads`create_db` to do nothing in this case.\n \"\"\"\n return None\n\n def fetch_tables(self, dataset, table_names):\n \"\"\"Return sqlite dataset as list of pandas dataframe.\"\"\"\n connection = self.get_connection()\n data = {table[len(dataset) + 1:]: pd.read_sql_query(\"SELECT * \"\n \"FROM {};\".format(table),\n connection)\n for table in table_names}\n return data\n\n def get_bulk_insert_statement(self):\n \"\"\"Get insert statement for bulk inserts\n\n This places ?'s instead of the actual values so that executemany() can\n operate as designed\n \"\"\"\n columns = self.table.get_insert_columns()\n column_count = len(self.table.get_insert_columns(False))\n insert_stmt = \"INSERT INTO \" + self.table_name()\n insert_stmt += \" (\" + columns + \")\"\n insert_stmt += \" VALUES (\"\n for _ in range(0, column_count):\n insert_stmt += \"?, \"\n insert_stmt = insert_stmt.rstrip(\", \") + \")\"\n return insert_stmt\n\n def insert_data_from_file(self, filename):\n \"\"\"Perform a high speed bulk insert\n\n Checks to see if a given file can be bulk inserted, and if so loads\n it in chunks and inserts those chunks into the database using\n executemany.\n \"\"\"\n chunk_size = 1000000\n self.get_cursor()\n\n # Determine if the dataset includes cross-tab data\n crosstab = len([True for c in self.table.columns if c[1][0][:3] == \"ct-\"]) != 0\n\n if (([self.table.cleanup.function, self.table.header_rows] == [no_cleanup, 1])\n and not self.table.fixed_width\n and not crosstab\n and (not hasattr(self.table, \"do_not_bulk_insert\") or not self.table.do_not_bulk_insert)):\n filename = os.path.abspath(filename)\n try:\n bulk_insert_statement = self.get_bulk_insert_statement()\n line_endings = set(['\\n', '\\r', '\\r\\n'])\n with open(filename, 'r') as data_file:\n data_chunk = data_file.readlines(chunk_size)\n data_chunk = [line.rstrip('\\r\\n') for line in data_chunk if line not in line_endings]\n del data_chunk[:self.table.header_rows]\n while data_chunk:\n data_chunk_split = [row.split(self.table.delimiter)\n for row in data_chunk]\n self.cursor.executemany(bulk_insert_statement, data_chunk_split)\n data_chunk = data_file.readlines(chunk_size)\n self.connection.commit()\n except:\n self.connection.rollback()\n return Engine.insert_data_from_file(self, filename)\n else:\n return Engine.insert_data_from_file(self, filename)\n\n def get_connection(self):\n \"\"\"Get db connection.\"\"\"\n import sqlite3 as dbapi\n\n self.get_input()\n file = self.opts[\"file\"]\n db_file = self.opts[\"data_dir\"]\n full_path = os.path.join(db_file, file)\n\n return dbapi.connect(os.path.normpath(full_path))\n", "path": "retriever/engines/sqlite.py"}], "after_files": [{"content": "import os\nimport pandas as pd\nfrom builtins import range\nfrom collections import OrderedDict\n\nfrom retriever.lib.defaults import DATA_DIR\nfrom retriever.lib.models import Engine, no_cleanup\n\n\nclass engine(Engine):\n \"\"\"Engine instance for SQLite.\"\"\"\n\n name = \"SQLite\"\n abbreviation = \"sqlite\"\n datatypes = {\n \"auto\": (\"INTEGER\", \"AUTOINCREMENT\"),\n \"int\": \"INTEGER\",\n \"bigint\": \"INTEGER\",\n \"double\": \"REAL\",\n \"decimal\": \"REAL\",\n \"char\": \"TEXT\",\n \"bool\": \"INTEGER\",\n }\n placeholder = \"?\"\n insert_limit = 1000\n required_opts = [(\"file\",\n \"Enter the filename of your SQLite database\",\n \"sqlite.db\"),\n (\"table_name\",\n \"Format of table name\",\n \"{db}_{table}\"),\n (\"data_dir\",\n \"Install directory\",\n DATA_DIR),\n ]\n\n def create_db(self):\n \"\"\"Don't create database for SQLite\n\n SQLite doesn't create databases. Each database is a file and needs a separate\n connection. This overloads`create_db` to do nothing in this case.\n \"\"\"\n return None\n\n def fetch_tables(self, dataset, table_names):\n \"\"\"Return sqlite dataset as list of pandas dataframe.\"\"\"\n connection = self.get_connection()\n sql_query = \"SELECT * FROM {};\"\n data = OrderedDict({\n table[len(dataset) + 1:]\n :pd.read_sql_query(sql_query.format(table), connection)\n for table in table_names\n })\n return data\n\n def get_bulk_insert_statement(self):\n \"\"\"Get insert statement for bulk inserts\n\n This places ?'s instead of the actual values so that executemany() can\n operate as designed\n \"\"\"\n columns = self.table.get_insert_columns()\n column_count = len(self.table.get_insert_columns(False))\n insert_stmt = \"INSERT INTO \" + self.table_name()\n insert_stmt += \" (\" + columns + \")\"\n insert_stmt += \" VALUES (\"\n for _ in range(0, column_count):\n insert_stmt += \"?, \"\n insert_stmt = insert_stmt.rstrip(\", \") + \")\"\n return insert_stmt\n\n def insert_data_from_file(self, filename):\n \"\"\"Perform a high speed bulk insert\n\n Checks to see if a given file can be bulk inserted, and if so loads\n it in chunks and inserts those chunks into the database using\n executemany.\n \"\"\"\n chunk_size = 1000000\n self.get_cursor()\n\n # Determine if the dataset includes cross-tab data\n crosstab = len([True for c in self.table.columns if c[1][0][:3] == \"ct-\"]) != 0\n\n if (([self.table.cleanup.function, self.table.header_rows] == [no_cleanup, 1])\n and not self.table.fixed_width\n and not crosstab\n and (not hasattr(self.table, \"do_not_bulk_insert\") or not self.table.do_not_bulk_insert)):\n filename = os.path.abspath(filename)\n try:\n bulk_insert_statement = self.get_bulk_insert_statement()\n line_endings = set(['\\n', '\\r', '\\r\\n'])\n with open(filename, 'r') as data_file:\n data_chunk = data_file.readlines(chunk_size)\n data_chunk = [line.rstrip('\\r\\n') for line in data_chunk if line not in line_endings]\n del data_chunk[:self.table.header_rows]\n while data_chunk:\n data_chunk_split = [row.split(self.table.delimiter)\n for row in data_chunk]\n self.cursor.executemany(bulk_insert_statement, data_chunk_split)\n data_chunk = data_file.readlines(chunk_size)\n self.connection.commit()\n except:\n self.connection.rollback()\n return Engine.insert_data_from_file(self, filename)\n else:\n return Engine.insert_data_from_file(self, filename)\n\n def get_connection(self):\n \"\"\"Get db connection.\"\"\"\n import sqlite3 as dbapi\n\n self.get_input()\n file = self.opts[\"file\"]\n db_file = self.opts[\"data_dir\"]\n full_path = os.path.join(db_file, file)\n\n return dbapi.connect(os.path.normpath(full_path))\n", "path": "retriever/engines/sqlite.py"}]}
1,473
248
gh_patches_debug_1120
rasdani/github-patches
git_diff
conan-io__conan-6333
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [bug] New warning in python 3.8 makes some tests fail (line buffering isn't supported in binary mode) <!-- Please don't forget to update the issue title. Include all applicable information to help us reproduce your problem. To help us debug your issue please explain: --> ### Environment Details (include every applicable attribute) * Operating System+version: Arch Linux * Compiler+version: - * Conan version: develop * Python version: python 3.8 ### Steps to reproduce (Include if Applicable) Run some unit tests, for example: ``` nosetests conans.test.functional.settings ``` ### Logs (Executed commands with output) (Include/Attach if Applicable) ``` $ nosetests conans.test.functional.settings ............F............F.F............. ====================================================================== FAIL: test_only_cppstd (conan.conans.test.functional.settings.cppstd.compiler_cppstd_test.UseCompilerCppStdSettingTests) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/siu/src/extern/conan/conans/test/functional/settings/cppstd/compiler_cppstd_test.py", line 140, in test_only_cppstd self.t.run("info . -s cppstd=14") File "/usr/lib/python3.8/contextlib.py", line 120, in __exit__ next(self.gen) File "/home/siu/src/extern/conan/conans/test/utils/deprecation.py", line 13, in catch_deprecation_warning test_suite.assertEqual(len(w), n) AssertionError: 2 != 1 ====================================================================== FAIL: gcc_8_std_20_test (conan.conans.test.functional.settings.cppstd_test.StdCppTest) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/siu/src/extern/conan/conans/test/functional/settings/cppstd_test.py", line 47, in gcc_8_std_20_test client.run('create . user/testing -s compiler="gcc" ' File "/usr/lib/python3.8/contextlib.py", line 120, in __exit__ next(self.gen) File "/home/siu/src/extern/conan/conans/test/utils/deprecation.py", line 13, in catch_deprecation_warning test_suite.assertEqual(len(w), n) AssertionError: 2 != 1 ====================================================================== FAIL: use_wrong_setting_for_compiler_test (conan.conans.test.functional.settings.cppstd_test.StdCppTest) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/siu/src/extern/conan/conans/test/functional/settings/cppstd_test.py", line 23, in use_wrong_setting_for_compiler_test client.run('create . user/testing -s compiler="gcc" ' File "/usr/lib/python3.8/contextlib.py", line 120, in __exit__ next(self.gen) File "/home/siu/src/extern/conan/conans/test/utils/deprecation.py", line 13, in catch_deprecation_warning test_suite.assertEqual(len(w), n) AssertionError: 2 != 1 ---------------------------------------------------------------------- Ran 41 tests in 4.690s FAILED (failures=3) ``` <!-- Your log content should be related to the bug description, it can be: - Conan command output - Server output (Artifactory, conan_server) --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/client/conf/detect.py` Content: ``` 1 import os 2 import platform 3 import re 4 from subprocess import PIPE, Popen, STDOUT 5 6 from conans.client.output import Color 7 from conans.client.tools import detected_os, OSInfo 8 from conans.client.tools.win import latest_visual_studio_version_installed 9 from conans.model.version import Version 10 11 12 def _execute(command): 13 proc = Popen(command, shell=True, bufsize=1, stdout=PIPE, stderr=STDOUT) 14 15 output_buffer = [] 16 while True: 17 line = proc.stdout.readline() 18 if not line: 19 break 20 # output.write(line) 21 output_buffer.append(str(line)) 22 23 proc.communicate() 24 return proc.returncode, "".join(output_buffer) 25 26 27 def _gcc_compiler(output, compiler_exe="gcc"): 28 29 try: 30 if platform.system() == "Darwin": 31 # In Mac OS X check if gcc is a fronted using apple-clang 32 _, out = _execute("%s --version" % compiler_exe) 33 out = out.lower() 34 if "clang" in out: 35 return None 36 37 ret, out = _execute('%s -dumpversion' % compiler_exe) 38 if ret != 0: 39 return None 40 compiler = "gcc" 41 installed_version = re.search("([0-9](\.[0-9])?)", out).group() 42 # Since GCC 7.1, -dumpversion return the major version number 43 # only ("7"). We must use -dumpfullversion to get the full version 44 # number ("7.1.1"). 45 if installed_version: 46 output.success("Found %s %s" % (compiler, installed_version)) 47 return compiler, installed_version 48 except Exception: 49 return None 50 51 52 def _clang_compiler(output, compiler_exe="clang"): 53 try: 54 ret, out = _execute('%s --version' % compiler_exe) 55 if ret != 0: 56 return None 57 if "Apple" in out: 58 compiler = "apple-clang" 59 elif "clang version" in out: 60 compiler = "clang" 61 installed_version = re.search("([0-9]+\.[0-9])", out).group() 62 if installed_version: 63 output.success("Found %s %s" % (compiler, installed_version)) 64 return compiler, installed_version 65 except Exception: 66 return None 67 68 69 def _sun_cc_compiler(output, compiler_exe="cc"): 70 try: 71 _, out = _execute('%s -V' % compiler_exe) 72 compiler = "sun-cc" 73 installed_version = re.search("([0-9]+\.[0-9]+)", out).group() 74 if installed_version: 75 output.success("Found %s %s" % (compiler, installed_version)) 76 return compiler, installed_version 77 except Exception: 78 return None 79 80 81 def _get_default_compiler(output): 82 cc = os.environ.get("CC", "") 83 cxx = os.environ.get("CXX", "") 84 if cc or cxx: # Env defined, use them 85 output.info("CC and CXX: %s, %s " % (cc or "None", cxx or "None")) 86 command = cc or cxx 87 if "gcc" in command: 88 gcc = _gcc_compiler(output, command) 89 if platform.system() == "Darwin" and gcc is None: 90 output.error( 91 "%s detected as a frontend using apple-clang. Compiler not supported" % command 92 ) 93 return gcc 94 if "clang" in command.lower(): 95 return _clang_compiler(output, command) 96 if platform.system() == "SunOS" and command.lower() == "cc": 97 return _sun_cc_compiler(output, command) 98 # I am not able to find its version 99 output.error("Not able to automatically detect '%s' version" % command) 100 return None 101 102 if detected_os() == "Windows": 103 version = latest_visual_studio_version_installed(output) 104 vs = ('Visual Studio', version) if version else None 105 gcc = _gcc_compiler(output) 106 clang = _clang_compiler(output) 107 if platform.system() == "SunOS": 108 sun_cc = _sun_cc_compiler(output) 109 110 if detected_os() == "Windows": 111 return vs or gcc or clang 112 elif platform.system() == "Darwin": 113 return clang or gcc 114 elif platform.system() == "SunOS": 115 return sun_cc or gcc or clang 116 else: 117 return gcc or clang 118 119 120 def _get_profile_compiler_version(compiler, version, output): 121 major = version.split(".")[0] 122 if compiler == "clang" and int(major) >= 8: 123 output.info("clang>=8, using the major as version") 124 return major 125 elif compiler == "gcc" and int(major) >= 5: 126 output.info("gcc>=5, using the major as version") 127 return major 128 return version 129 130 131 def _detect_compiler_version(result, output, profile_path): 132 try: 133 compiler, version = _get_default_compiler(output) 134 except Exception: 135 compiler, version = None, None 136 if not compiler or not version: 137 output.error("Unable to find a working compiler") 138 else: 139 result.append(("compiler", compiler)) 140 result.append(("compiler.version", 141 _get_profile_compiler_version(compiler, version, output))) 142 if compiler == "apple-clang": 143 result.append(("compiler.libcxx", "libc++")) 144 elif compiler == "gcc": 145 result.append(("compiler.libcxx", "libstdc++")) 146 if Version(version) >= Version("5.1"): 147 profile_name = os.path.basename(profile_path) 148 msg = """ 149 Conan detected a GCC version > 5 but has adjusted the 'compiler.libcxx' setting to 150 'libstdc++' for backwards compatibility. 151 Your compiler is likely using the new CXX11 ABI by default (libstdc++11). 152 153 If you want Conan to use the new ABI for the {profile} profile, run: 154 155 $ conan profile update settings.compiler.libcxx=libstdc++11 {profile} 156 157 Or edit '{profile_path}' and set compiler.libcxx=libstdc++11 158 """.format(profile=profile_name, profile_path=profile_path) 159 output.writeln("\n************************* WARNING: GCC OLD ABI COMPATIBILITY " 160 "***********************\n %s\n************************************" 161 "************************************************\n\n\n" % msg, 162 Color.BRIGHT_RED) 163 elif compiler == "cc": 164 if platform.system() == "SunOS": 165 result.append(("compiler.libstdcxx", "libstdcxx4")) 166 elif compiler == "clang": 167 if platform.system() == "FreeBSD": 168 result.append(("compiler.libcxx", "libc++")) 169 else: 170 result.append(("compiler.libcxx", "libstdc++")) 171 elif compiler == "sun-cc": 172 result.append(("compiler.libcxx", "libCstd")) 173 174 175 def _detect_os_arch(result, output): 176 architectures = {'i386': 'x86', 177 'i686': 'x86', 178 'i86pc': 'x86', 179 'amd64': 'x86_64', 180 'aarch64': 'armv8', 181 'sun4v': 'sparc'} 182 the_os = detected_os() 183 result.append(("os", the_os)) 184 result.append(("os_build", the_os)) 185 186 platform_machine = platform.machine().lower() 187 if platform_machine: 188 arch = architectures.get(platform_machine, platform_machine) 189 if arch.startswith('arm'): 190 for a in ("armv6", "armv7hf", "armv7", "armv8"): 191 if arch.startswith(a): 192 arch = a 193 break 194 else: 195 output.error("Your ARM '%s' architecture is probably not defined in settings.yml\n" 196 "Please check your conan.conf and settings.yml files" % arch) 197 elif OSInfo().is_aix: 198 arch = OSInfo.get_aix_architecture() or arch 199 200 result.append(("arch", arch)) 201 result.append(("arch_build", arch)) 202 203 204 def detect_defaults_settings(output, profile_path): 205 """ try to deduce current machine values without any constraints at all 206 :param output: Conan Output instance 207 :param profile_path: Conan profile file path 208 :return: A list with default settings 209 """ 210 result = [] 211 _detect_os_arch(result, output) 212 _detect_compiler_version(result, output, profile_path) 213 result.append(("build_type", "Release")) 214 215 return result 216 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/client/conf/detect.py b/conans/client/conf/detect.py --- a/conans/client/conf/detect.py +++ b/conans/client/conf/detect.py @@ -10,7 +10,8 @@ def _execute(command): - proc = Popen(command, shell=True, bufsize=1, stdout=PIPE, stderr=STDOUT) + proc = Popen(command, shell=True, bufsize=1, universal_newlines=True, stdout=PIPE, + stderr=STDOUT) output_buffer = [] while True:
{"golden_diff": "diff --git a/conans/client/conf/detect.py b/conans/client/conf/detect.py\n--- a/conans/client/conf/detect.py\n+++ b/conans/client/conf/detect.py\n@@ -10,7 +10,8 @@\n \n \n def _execute(command):\n- proc = Popen(command, shell=True, bufsize=1, stdout=PIPE, stderr=STDOUT)\n+ proc = Popen(command, shell=True, bufsize=1, universal_newlines=True, stdout=PIPE,\n+ stderr=STDOUT)\n \n output_buffer = []\n while True:\n", "issue": "[bug] New warning in python 3.8 makes some tests fail (line buffering isn't supported in binary mode)\n<!--\r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n\r\n To help us debug your issue please explain:\r\n-->\r\n\r\n### Environment Details (include every applicable attribute)\r\n * Operating System+version: Arch Linux\r\n * Compiler+version: -\r\n * Conan version: develop\r\n * Python version: python 3.8\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nRun some unit tests, for example:\r\n\r\n```\r\nnosetests conans.test.functional.settings\r\n```\r\n\r\n### Logs (Executed commands with output) (Include/Attach if Applicable)\r\n\r\n```\r\n$ nosetests conans.test.functional.settings\r\n............F............F.F.............\r\n======================================================================\r\nFAIL: test_only_cppstd (conan.conans.test.functional.settings.cppstd.compiler_cppstd_test.UseCompilerCppStdSettingTests)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/siu/src/extern/conan/conans/test/functional/settings/cppstd/compiler_cppstd_test.py\", line 140, in test_only_cppstd\r\n self.t.run(\"info . -s cppstd=14\")\r\n File \"/usr/lib/python3.8/contextlib.py\", line 120, in __exit__\r\n next(self.gen)\r\n File \"/home/siu/src/extern/conan/conans/test/utils/deprecation.py\", line 13, in catch_deprecation_warning\r\n test_suite.assertEqual(len(w), n)\r\nAssertionError: 2 != 1\r\n\r\n======================================================================\r\nFAIL: gcc_8_std_20_test (conan.conans.test.functional.settings.cppstd_test.StdCppTest)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/siu/src/extern/conan/conans/test/functional/settings/cppstd_test.py\", line 47, in gcc_8_std_20_test\r\n client.run('create . user/testing -s compiler=\"gcc\" '\r\n File \"/usr/lib/python3.8/contextlib.py\", line 120, in __exit__\r\n next(self.gen)\r\n File \"/home/siu/src/extern/conan/conans/test/utils/deprecation.py\", line 13, in catch_deprecation_warning\r\n test_suite.assertEqual(len(w), n)\r\nAssertionError: 2 != 1\r\n\r\n======================================================================\r\nFAIL: use_wrong_setting_for_compiler_test (conan.conans.test.functional.settings.cppstd_test.StdCppTest)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/siu/src/extern/conan/conans/test/functional/settings/cppstd_test.py\", line 23, in use_wrong_setting_for_compiler_test\r\n client.run('create . user/testing -s compiler=\"gcc\" '\r\n File \"/usr/lib/python3.8/contextlib.py\", line 120, in __exit__\r\n next(self.gen)\r\n File \"/home/siu/src/extern/conan/conans/test/utils/deprecation.py\", line 13, in catch_deprecation_warning\r\n test_suite.assertEqual(len(w), n)\r\nAssertionError: 2 != 1\r\n\r\n----------------------------------------------------------------------\r\nRan 41 tests in 4.690s\r\n\r\nFAILED (failures=3)\r\n```\r\n\r\n<!--\r\n Your log content should be related to the bug description, it can be:\r\n - Conan command output\r\n - Server output (Artifactory, conan_server)\r\n-->\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport re\nfrom subprocess import PIPE, Popen, STDOUT\n\nfrom conans.client.output import Color\nfrom conans.client.tools import detected_os, OSInfo\nfrom conans.client.tools.win import latest_visual_studio_version_installed\nfrom conans.model.version import Version\n\n\ndef _execute(command):\n proc = Popen(command, shell=True, bufsize=1, stdout=PIPE, stderr=STDOUT)\n\n output_buffer = []\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n # output.write(line)\n output_buffer.append(str(line))\n\n proc.communicate()\n return proc.returncode, \"\".join(output_buffer)\n\n\ndef _gcc_compiler(output, compiler_exe=\"gcc\"):\n\n try:\n if platform.system() == \"Darwin\":\n # In Mac OS X check if gcc is a fronted using apple-clang\n _, out = _execute(\"%s --version\" % compiler_exe)\n out = out.lower()\n if \"clang\" in out:\n return None\n\n ret, out = _execute('%s -dumpversion' % compiler_exe)\n if ret != 0:\n return None\n compiler = \"gcc\"\n installed_version = re.search(\"([0-9](\\.[0-9])?)\", out).group()\n # Since GCC 7.1, -dumpversion return the major version number\n # only (\"7\"). We must use -dumpfullversion to get the full version\n # number (\"7.1.1\").\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except Exception:\n return None\n\n\ndef _clang_compiler(output, compiler_exe=\"clang\"):\n try:\n ret, out = _execute('%s --version' % compiler_exe)\n if ret != 0:\n return None\n if \"Apple\" in out:\n compiler = \"apple-clang\"\n elif \"clang version\" in out:\n compiler = \"clang\"\n installed_version = re.search(\"([0-9]+\\.[0-9])\", out).group()\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except Exception:\n return None\n\n\ndef _sun_cc_compiler(output, compiler_exe=\"cc\"):\n try:\n _, out = _execute('%s -V' % compiler_exe)\n compiler = \"sun-cc\"\n installed_version = re.search(\"([0-9]+\\.[0-9]+)\", out).group()\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except Exception:\n return None\n\n\ndef _get_default_compiler(output):\n cc = os.environ.get(\"CC\", \"\")\n cxx = os.environ.get(\"CXX\", \"\")\n if cc or cxx: # Env defined, use them\n output.info(\"CC and CXX: %s, %s \" % (cc or \"None\", cxx or \"None\"))\n command = cc or cxx\n if \"gcc\" in command:\n gcc = _gcc_compiler(output, command)\n if platform.system() == \"Darwin\" and gcc is None:\n output.error(\n \"%s detected as a frontend using apple-clang. Compiler not supported\" % command\n )\n return gcc\n if \"clang\" in command.lower():\n return _clang_compiler(output, command)\n if platform.system() == \"SunOS\" and command.lower() == \"cc\":\n return _sun_cc_compiler(output, command)\n # I am not able to find its version\n output.error(\"Not able to automatically detect '%s' version\" % command)\n return None\n\n if detected_os() == \"Windows\":\n version = latest_visual_studio_version_installed(output)\n vs = ('Visual Studio', version) if version else None\n gcc = _gcc_compiler(output)\n clang = _clang_compiler(output)\n if platform.system() == \"SunOS\":\n sun_cc = _sun_cc_compiler(output)\n\n if detected_os() == \"Windows\":\n return vs or gcc or clang\n elif platform.system() == \"Darwin\":\n return clang or gcc\n elif platform.system() == \"SunOS\":\n return sun_cc or gcc or clang\n else:\n return gcc or clang\n\n\ndef _get_profile_compiler_version(compiler, version, output):\n major = version.split(\".\")[0]\n if compiler == \"clang\" and int(major) >= 8:\n output.info(\"clang>=8, using the major as version\")\n return major\n elif compiler == \"gcc\" and int(major) >= 5:\n output.info(\"gcc>=5, using the major as version\")\n return major\n return version\n\n\ndef _detect_compiler_version(result, output, profile_path):\n try:\n compiler, version = _get_default_compiler(output)\n except Exception:\n compiler, version = None, None\n if not compiler or not version:\n output.error(\"Unable to find a working compiler\")\n else:\n result.append((\"compiler\", compiler))\n result.append((\"compiler.version\",\n _get_profile_compiler_version(compiler, version, output)))\n if compiler == \"apple-clang\":\n result.append((\"compiler.libcxx\", \"libc++\"))\n elif compiler == \"gcc\":\n result.append((\"compiler.libcxx\", \"libstdc++\"))\n if Version(version) >= Version(\"5.1\"):\n profile_name = os.path.basename(profile_path)\n msg = \"\"\"\nConan detected a GCC version > 5 but has adjusted the 'compiler.libcxx' setting to\n'libstdc++' for backwards compatibility.\nYour compiler is likely using the new CXX11 ABI by default (libstdc++11).\n\nIf you want Conan to use the new ABI for the {profile} profile, run:\n\n $ conan profile update settings.compiler.libcxx=libstdc++11 {profile}\n\nOr edit '{profile_path}' and set compiler.libcxx=libstdc++11\n\"\"\".format(profile=profile_name, profile_path=profile_path)\n output.writeln(\"\\n************************* WARNING: GCC OLD ABI COMPATIBILITY \"\n \"***********************\\n %s\\n************************************\"\n \"************************************************\\n\\n\\n\" % msg,\n Color.BRIGHT_RED)\n elif compiler == \"cc\":\n if platform.system() == \"SunOS\":\n result.append((\"compiler.libstdcxx\", \"libstdcxx4\"))\n elif compiler == \"clang\":\n if platform.system() == \"FreeBSD\":\n result.append((\"compiler.libcxx\", \"libc++\"))\n else:\n result.append((\"compiler.libcxx\", \"libstdc++\"))\n elif compiler == \"sun-cc\":\n result.append((\"compiler.libcxx\", \"libCstd\"))\n\n\ndef _detect_os_arch(result, output):\n architectures = {'i386': 'x86',\n 'i686': 'x86',\n 'i86pc': 'x86',\n 'amd64': 'x86_64',\n 'aarch64': 'armv8',\n 'sun4v': 'sparc'}\n the_os = detected_os()\n result.append((\"os\", the_os))\n result.append((\"os_build\", the_os))\n\n platform_machine = platform.machine().lower()\n if platform_machine:\n arch = architectures.get(platform_machine, platform_machine)\n if arch.startswith('arm'):\n for a in (\"armv6\", \"armv7hf\", \"armv7\", \"armv8\"):\n if arch.startswith(a):\n arch = a\n break\n else:\n output.error(\"Your ARM '%s' architecture is probably not defined in settings.yml\\n\"\n \"Please check your conan.conf and settings.yml files\" % arch)\n elif OSInfo().is_aix:\n arch = OSInfo.get_aix_architecture() or arch\n\n result.append((\"arch\", arch))\n result.append((\"arch_build\", arch))\n\n\ndef detect_defaults_settings(output, profile_path):\n \"\"\" try to deduce current machine values without any constraints at all\n :param output: Conan Output instance\n :param profile_path: Conan profile file path\n :return: A list with default settings\n \"\"\"\n result = []\n _detect_os_arch(result, output)\n _detect_compiler_version(result, output, profile_path)\n result.append((\"build_type\", \"Release\"))\n\n return result\n", "path": "conans/client/conf/detect.py"}], "after_files": [{"content": "import os\nimport platform\nimport re\nfrom subprocess import PIPE, Popen, STDOUT\n\nfrom conans.client.output import Color\nfrom conans.client.tools import detected_os, OSInfo\nfrom conans.client.tools.win import latest_visual_studio_version_installed\nfrom conans.model.version import Version\n\n\ndef _execute(command):\n proc = Popen(command, shell=True, bufsize=1, universal_newlines=True, stdout=PIPE,\n stderr=STDOUT)\n\n output_buffer = []\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n # output.write(line)\n output_buffer.append(str(line))\n\n proc.communicate()\n return proc.returncode, \"\".join(output_buffer)\n\n\ndef _gcc_compiler(output, compiler_exe=\"gcc\"):\n\n try:\n if platform.system() == \"Darwin\":\n # In Mac OS X check if gcc is a fronted using apple-clang\n _, out = _execute(\"%s --version\" % compiler_exe)\n out = out.lower()\n if \"clang\" in out:\n return None\n\n ret, out = _execute('%s -dumpversion' % compiler_exe)\n if ret != 0:\n return None\n compiler = \"gcc\"\n installed_version = re.search(\"([0-9](\\.[0-9])?)\", out).group()\n # Since GCC 7.1, -dumpversion return the major version number\n # only (\"7\"). We must use -dumpfullversion to get the full version\n # number (\"7.1.1\").\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except Exception:\n return None\n\n\ndef _clang_compiler(output, compiler_exe=\"clang\"):\n try:\n ret, out = _execute('%s --version' % compiler_exe)\n if ret != 0:\n return None\n if \"Apple\" in out:\n compiler = \"apple-clang\"\n elif \"clang version\" in out:\n compiler = \"clang\"\n installed_version = re.search(\"([0-9]+\\.[0-9])\", out).group()\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except Exception:\n return None\n\n\ndef _sun_cc_compiler(output, compiler_exe=\"cc\"):\n try:\n _, out = _execute('%s -V' % compiler_exe)\n compiler = \"sun-cc\"\n installed_version = re.search(\"([0-9]+\\.[0-9]+)\", out).group()\n if installed_version:\n output.success(\"Found %s %s\" % (compiler, installed_version))\n return compiler, installed_version\n except Exception:\n return None\n\n\ndef _get_default_compiler(output):\n cc = os.environ.get(\"CC\", \"\")\n cxx = os.environ.get(\"CXX\", \"\")\n if cc or cxx: # Env defined, use them\n output.info(\"CC and CXX: %s, %s \" % (cc or \"None\", cxx or \"None\"))\n command = cc or cxx\n if \"gcc\" in command:\n gcc = _gcc_compiler(output, command)\n if platform.system() == \"Darwin\" and gcc is None:\n output.error(\n \"%s detected as a frontend using apple-clang. Compiler not supported\" % command\n )\n return gcc\n if \"clang\" in command.lower():\n return _clang_compiler(output, command)\n if platform.system() == \"SunOS\" and command.lower() == \"cc\":\n return _sun_cc_compiler(output, command)\n # I am not able to find its version\n output.error(\"Not able to automatically detect '%s' version\" % command)\n return None\n\n if detected_os() == \"Windows\":\n version = latest_visual_studio_version_installed(output)\n vs = ('Visual Studio', version) if version else None\n gcc = _gcc_compiler(output)\n clang = _clang_compiler(output)\n if platform.system() == \"SunOS\":\n sun_cc = _sun_cc_compiler(output)\n\n if detected_os() == \"Windows\":\n return vs or gcc or clang\n elif platform.system() == \"Darwin\":\n return clang or gcc\n elif platform.system() == \"SunOS\":\n return sun_cc or gcc or clang\n else:\n return gcc or clang\n\n\ndef _get_profile_compiler_version(compiler, version, output):\n major = version.split(\".\")[0]\n if compiler == \"clang\" and int(major) >= 8:\n output.info(\"clang>=8, using the major as version\")\n return major\n elif compiler == \"gcc\" and int(major) >= 5:\n output.info(\"gcc>=5, using the major as version\")\n return major\n return version\n\n\ndef _detect_compiler_version(result, output, profile_path):\n try:\n compiler, version = _get_default_compiler(output)\n except Exception:\n compiler, version = None, None\n if not compiler or not version:\n output.error(\"Unable to find a working compiler\")\n else:\n result.append((\"compiler\", compiler))\n result.append((\"compiler.version\",\n _get_profile_compiler_version(compiler, version, output)))\n if compiler == \"apple-clang\":\n result.append((\"compiler.libcxx\", \"libc++\"))\n elif compiler == \"gcc\":\n result.append((\"compiler.libcxx\", \"libstdc++\"))\n if Version(version) >= Version(\"5.1\"):\n profile_name = os.path.basename(profile_path)\n msg = \"\"\"\nConan detected a GCC version > 5 but has adjusted the 'compiler.libcxx' setting to\n'libstdc++' for backwards compatibility.\nYour compiler is likely using the new CXX11 ABI by default (libstdc++11).\n\nIf you want Conan to use the new ABI for the {profile} profile, run:\n\n $ conan profile update settings.compiler.libcxx=libstdc++11 {profile}\n\nOr edit '{profile_path}' and set compiler.libcxx=libstdc++11\n\"\"\".format(profile=profile_name, profile_path=profile_path)\n output.writeln(\"\\n************************* WARNING: GCC OLD ABI COMPATIBILITY \"\n \"***********************\\n %s\\n************************************\"\n \"************************************************\\n\\n\\n\" % msg,\n Color.BRIGHT_RED)\n elif compiler == \"cc\":\n if platform.system() == \"SunOS\":\n result.append((\"compiler.libstdcxx\", \"libstdcxx4\"))\n elif compiler == \"clang\":\n if platform.system() == \"FreeBSD\":\n result.append((\"compiler.libcxx\", \"libc++\"))\n else:\n result.append((\"compiler.libcxx\", \"libstdc++\"))\n elif compiler == \"sun-cc\":\n result.append((\"compiler.libcxx\", \"libCstd\"))\n\n\ndef _detect_os_arch(result, output):\n architectures = {'i386': 'x86',\n 'i686': 'x86',\n 'i86pc': 'x86',\n 'amd64': 'x86_64',\n 'aarch64': 'armv8',\n 'sun4v': 'sparc'}\n the_os = detected_os()\n result.append((\"os\", the_os))\n result.append((\"os_build\", the_os))\n\n platform_machine = platform.machine().lower()\n if platform_machine:\n arch = architectures.get(platform_machine, platform_machine)\n if arch.startswith('arm'):\n for a in (\"armv6\", \"armv7hf\", \"armv7\", \"armv8\"):\n if arch.startswith(a):\n arch = a\n break\n else:\n output.error(\"Your ARM '%s' architecture is probably not defined in settings.yml\\n\"\n \"Please check your conan.conf and settings.yml files\" % arch)\n elif OSInfo().is_aix:\n arch = OSInfo.get_aix_architecture() or arch\n\n result.append((\"arch\", arch))\n result.append((\"arch_build\", arch))\n\n\ndef detect_defaults_settings(output, profile_path):\n \"\"\" try to deduce current machine values without any constraints at all\n :param output: Conan Output instance\n :param profile_path: Conan profile file path\n :return: A list with default settings\n \"\"\"\n result = []\n _detect_os_arch(result, output)\n _detect_compiler_version(result, output, profile_path)\n result.append((\"build_type\", \"Release\"))\n\n return result\n", "path": "conans/client/conf/detect.py"}]}
3,379
121
gh_patches_debug_8515
rasdani/github-patches
git_diff
Gallopsled__pwntools-1892
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- phd throws an exception when output pipe fails, as with e.g. `head` We should silence the BrokenPipeError exception when `pwn phd` output closes. ``` $ phd < /dev/random | head -n 1 00000000 43 18 3f 38 0e 45 9c 5d d9 b8 ed 44 7c 64 ee e3 │C·?8│·E·]│···D│|d··│ Exception ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> BrokenPipeError: [Errno 32] Broken pipe ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwnlib/commandline/phd.py` Content: ``` 1 #!/usr/bin/env python2 2 from __future__ import absolute_import 3 from __future__ import division 4 5 import argparse 6 import os 7 import sys 8 import io 9 10 import pwnlib.args 11 pwnlib.args.free_form = False 12 13 from pwn import * 14 from pwnlib.commandline import common 15 16 parser = common.parser_commands.add_parser( 17 'phd', 18 help = 'Pretty hex dump', 19 description = 'Pretty hex dump' 20 ) 21 22 parser.add_argument( 23 'file', 24 metavar='file', 25 nargs='?', 26 help='File to hexdump. Reads from stdin if missing.', 27 type=argparse.FileType('rb'), 28 default=getattr(sys.stdin, 'buffer', sys.stdin) 29 ) 30 31 parser.add_argument( 32 "-w", "--width", 33 help="Number of bytes per line.", 34 default='16', 35 ) 36 37 parser.add_argument( 38 "-l", "--highlight", 39 help="Byte to highlight.", 40 nargs="*", 41 ) 42 43 parser.add_argument( 44 "-s", "--skip", 45 help="Skip this many initial bytes.", 46 default='0', 47 ) 48 49 parser.add_argument( 50 "-c", "--count", 51 help="Only show this many bytes.", 52 default='-1', 53 ) 54 55 parser.add_argument( 56 "-o", "--offset", 57 help="Addresses in left hand column starts at this address.", 58 default='0', 59 ) 60 61 parser.add_argument( 62 "--color", 63 nargs='?', 64 help="Colorize the output. When 'auto' output is colorized exactly when stdout is a TTY. Default is 'auto'.", 65 choices = ('always', 'never', 'auto'), 66 default='auto', 67 ) 68 69 def asint(s): 70 if s.startswith('0x'): 71 return int(s, 16) 72 elif s.startswith('0'): 73 return int(s, 8) 74 else: 75 return int(s, 10) 76 77 def main(args): 78 infile = args.file 79 width = asint(args.width) 80 skip = asint(args.skip) 81 count = asint(args.count) 82 offset = asint(args.offset) 83 84 # if `--color` has no argument it is `None` 85 color = args.color or 'always' 86 text.when = color 87 88 if skip: 89 try: 90 infile.seek(skip, os.SEEK_CUR) 91 except IOError: 92 infile.read(skip) 93 94 if count != -1: 95 infile = io.BytesIO(infile.read(count)) 96 97 hl = [] 98 if args.highlight: 99 for hs in args.highlight: 100 for h in hs.split(','): 101 hl.append(asint(h)) 102 103 try: 104 for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip): 105 print(line) 106 except (KeyboardInterrupt, IOError): 107 pass 108 109 if __name__ == '__main__': 110 pwnlib.commandline.common.main(__file__) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwnlib/commandline/phd.py b/pwnlib/commandline/phd.py --- a/pwnlib/commandline/phd.py +++ b/pwnlib/commandline/phd.py @@ -4,6 +4,7 @@ import argparse import os +import signal import sys import io @@ -100,6 +101,8 @@ for h in hs.split(','): hl.append(asint(h)) + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + try: for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip): print(line)
{"golden_diff": "diff --git a/pwnlib/commandline/phd.py b/pwnlib/commandline/phd.py\n--- a/pwnlib/commandline/phd.py\n+++ b/pwnlib/commandline/phd.py\n@@ -4,6 +4,7 @@\n \n import argparse\n import os\n+import signal\n import sys\n import io\n \n@@ -100,6 +101,8 @@\n for h in hs.split(','):\n hl.append(asint(h))\n \n+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)\n+\n try:\n for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip):\n print(line)\n", "issue": "phd throws an exception when output pipe fails, as with e.g. `head`\nWe should silence the BrokenPipeError exception when `pwn phd` output closes.\r\n\r\n```\r\n$ phd < /dev/random | head -n 1\r\n00000000 43 18 3f 38 0e 45 9c 5d d9 b8 ed 44 7c 64 ee e3 \u2502C\u00b7?8\u2502\u00b7E\u00b7]\u2502\u00b7\u00b7\u00b7D\u2502|d\u00b7\u00b7\u2502\r\nException ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>\r\nBrokenPipeError: [Errno 32] Broken pipe\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport os\nimport sys\nimport io\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nparser = common.parser_commands.add_parser(\n 'phd',\n help = 'Pretty hex dump',\n description = 'Pretty hex dump'\n)\n\nparser.add_argument(\n 'file',\n metavar='file',\n nargs='?',\n help='File to hexdump. Reads from stdin if missing.',\n type=argparse.FileType('rb'),\n default=getattr(sys.stdin, 'buffer', sys.stdin)\n)\n\nparser.add_argument(\n \"-w\", \"--width\",\n help=\"Number of bytes per line.\",\n default='16',\n)\n\nparser.add_argument(\n \"-l\", \"--highlight\",\n help=\"Byte to highlight.\",\n nargs=\"*\",\n)\n\nparser.add_argument(\n \"-s\", \"--skip\",\n help=\"Skip this many initial bytes.\",\n default='0',\n)\n\nparser.add_argument(\n \"-c\", \"--count\",\n help=\"Only show this many bytes.\",\n default='-1',\n)\n\nparser.add_argument(\n \"-o\", \"--offset\",\n help=\"Addresses in left hand column starts at this address.\",\n default='0',\n)\n\nparser.add_argument(\n \"--color\",\n nargs='?',\n help=\"Colorize the output. When 'auto' output is colorized exactly when stdout is a TTY. Default is 'auto'.\",\n choices = ('always', 'never', 'auto'),\n default='auto',\n)\n\ndef asint(s):\n if s.startswith('0x'):\n return int(s, 16)\n elif s.startswith('0'):\n return int(s, 8)\n else:\n return int(s, 10)\n\ndef main(args):\n infile = args.file\n width = asint(args.width)\n skip = asint(args.skip)\n count = asint(args.count)\n offset = asint(args.offset)\n\n # if `--color` has no argument it is `None`\n color = args.color or 'always'\n text.when = color\n\n if skip:\n try:\n infile.seek(skip, os.SEEK_CUR)\n except IOError:\n infile.read(skip)\n\n if count != -1:\n infile = io.BytesIO(infile.read(count))\n\n hl = []\n if args.highlight:\n for hs in args.highlight:\n for h in hs.split(','):\n hl.append(asint(h))\n\n try:\n for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip):\n print(line)\n except (KeyboardInterrupt, IOError):\n pass\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/phd.py"}], "after_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport os\nimport signal\nimport sys\nimport io\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nparser = common.parser_commands.add_parser(\n 'phd',\n help = 'Pretty hex dump',\n description = 'Pretty hex dump'\n)\n\nparser.add_argument(\n 'file',\n metavar='file',\n nargs='?',\n help='File to hexdump. Reads from stdin if missing.',\n type=argparse.FileType('rb'),\n default=getattr(sys.stdin, 'buffer', sys.stdin)\n)\n\nparser.add_argument(\n \"-w\", \"--width\",\n help=\"Number of bytes per line.\",\n default='16',\n)\n\nparser.add_argument(\n \"-l\", \"--highlight\",\n help=\"Byte to highlight.\",\n nargs=\"*\",\n)\n\nparser.add_argument(\n \"-s\", \"--skip\",\n help=\"Skip this many initial bytes.\",\n default='0',\n)\n\nparser.add_argument(\n \"-c\", \"--count\",\n help=\"Only show this many bytes.\",\n default='-1',\n)\n\nparser.add_argument(\n \"-o\", \"--offset\",\n help=\"Addresses in left hand column starts at this address.\",\n default='0',\n)\n\nparser.add_argument(\n \"--color\",\n nargs='?',\n help=\"Colorize the output. When 'auto' output is colorized exactly when stdout is a TTY. Default is 'auto'.\",\n choices = ('always', 'never', 'auto'),\n default='auto',\n)\n\ndef asint(s):\n if s.startswith('0x'):\n return int(s, 16)\n elif s.startswith('0'):\n return int(s, 8)\n else:\n return int(s, 10)\n\ndef main(args):\n infile = args.file\n width = asint(args.width)\n skip = asint(args.skip)\n count = asint(args.count)\n offset = asint(args.offset)\n\n # if `--color` has no argument it is `None`\n color = args.color or 'always'\n text.when = color\n\n if skip:\n try:\n infile.seek(skip, os.SEEK_CUR)\n except IOError:\n infile.read(skip)\n\n if count != -1:\n infile = io.BytesIO(infile.read(count))\n\n hl = []\n if args.highlight:\n for hs in args.highlight:\n for h in hs.split(','):\n hl.append(asint(h))\n\n signal.signal(signal.SIGPIPE, signal.SIG_DFL)\n\n try:\n for line in hexdump_iter(infile, width, highlight = hl, begin = offset + skip):\n print(line)\n except (KeyboardInterrupt, IOError):\n pass\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/phd.py"}]}
1,275
143
gh_patches_debug_16435
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3324
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider tapetro is broken During the global build at 2021-10-06-14-42-44, spider **tapetro** failed with **0 features** and **1 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/logs/tapetro.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/output/tapetro.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/output/tapetro.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/tapetro.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 4 from locations.items import GeojsonPointItem 5 from xlrd import open_workbook 6 7 BRANDS = { 8 'T': 'TravelCenters of America', 9 'P': 'Petro', 10 'TE': 'TA Express' 11 } 12 13 14 class TAPetroSpider(scrapy.Spider): 15 name = 'tapetro' 16 item_attributes = {'brand': "TravelCenters of America", 17 'brand_wikidata': "Q7835892"} 18 allowed_domains = ['www.ta-petro.com'] 19 start_urls = ( 20 'http://www.ta-petro.com/assets/ce/Documents/Master-Location-List.xls', 21 ) 22 23 def parse(self, response): 24 workbook = open_workbook(file_contents=response.body) 25 sheet = workbook.sheets()[0] # Sheet1 26 27 # read header 28 nrow = 0 29 columns = [] 30 for ncol in range(sheet.ncols): 31 columns.append((ncol, sheet.cell(nrow, ncol).value)) 32 33 for nrow in range(1, sheet.nrows): 34 store = {} 35 for ncol, column in columns: 36 value = sheet.cell(nrow, ncol).value 37 store[column] = value 38 39 if not (store.get("LATITUDE") and store.get("LONGITUDE")): 40 continue 41 42 ref = '%s-%s-%s' % ( 43 store['SITE ID#'], store['BRAND'], store['LOCATION_ID']) 44 yield GeojsonPointItem( 45 ref=ref, 46 lat=float(store['LATITUDE']), 47 lon=float(store['LONGITUDE']), 48 name=store['LOCATION'], 49 addr_full=store['ADDRESS'], 50 city=store['CITY'], 51 state=store['STATE'], 52 postcode=store['ZIPCODE'], 53 phone=store['PHONE'], 54 brand=BRANDS.get(store['BRAND'], BRANDS['T']), 55 extras={ 56 'amenity:fuel': True, 57 'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y', 58 'fuel:diesel': True, 59 'fuel:HGV_diesel': True, 60 'fuel:lng': int(store['LNG(Liquified Natural Gas)/Lanes'] or 0) > 0, 61 'fuel:propane': store['PROPANE'] == 'Y', 62 'hgv': True 63 } 64 ) 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/tapetro.py b/locations/spiders/tapetro.py --- a/locations/spiders/tapetro.py +++ b/locations/spiders/tapetro.py @@ -57,7 +57,7 @@ 'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y', 'fuel:diesel': True, 'fuel:HGV_diesel': True, - 'fuel:lng': int(store['LNG(Liquified Natural Gas)/Lanes'] or 0) > 0, + 'fuel:lng': store['LNG(Liquified Natural Gas)'] == 'Y', 'fuel:propane': store['PROPANE'] == 'Y', 'hgv': True }
{"golden_diff": "diff --git a/locations/spiders/tapetro.py b/locations/spiders/tapetro.py\n--- a/locations/spiders/tapetro.py\n+++ b/locations/spiders/tapetro.py\n@@ -57,7 +57,7 @@\n 'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y',\n 'fuel:diesel': True,\n 'fuel:HGV_diesel': True,\n- 'fuel:lng': int(store['LNG(Liquified Natural Gas)/Lanes'] or 0) > 0,\n+ 'fuel:lng': store['LNG(Liquified Natural Gas)'] == 'Y',\n 'fuel:propane': store['PROPANE'] == 'Y',\n 'hgv': True\n }\n", "issue": "Spider tapetro is broken\nDuring the global build at 2021-10-06-14-42-44, spider **tapetro** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/logs/tapetro.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/output/tapetro.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-06-14-42-44/output/tapetro.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom xlrd import open_workbook\n\nBRANDS = {\n 'T': 'TravelCenters of America',\n 'P': 'Petro',\n 'TE': 'TA Express'\n}\n\n\nclass TAPetroSpider(scrapy.Spider):\n name = 'tapetro'\n item_attributes = {'brand': \"TravelCenters of America\",\n 'brand_wikidata': \"Q7835892\"}\n allowed_domains = ['www.ta-petro.com']\n start_urls = (\n 'http://www.ta-petro.com/assets/ce/Documents/Master-Location-List.xls',\n )\n\n def parse(self, response):\n workbook = open_workbook(file_contents=response.body)\n sheet = workbook.sheets()[0] # Sheet1\n\n # read header\n nrow = 0\n columns = []\n for ncol in range(sheet.ncols):\n columns.append((ncol, sheet.cell(nrow, ncol).value))\n\n for nrow in range(1, sheet.nrows):\n store = {}\n for ncol, column in columns:\n value = sheet.cell(nrow, ncol).value\n store[column] = value\n\n if not (store.get(\"LATITUDE\") and store.get(\"LONGITUDE\")):\n continue\n\n ref = '%s-%s-%s' % (\n store['SITE ID#'], store['BRAND'], store['LOCATION_ID'])\n yield GeojsonPointItem(\n ref=ref,\n lat=float(store['LATITUDE']),\n lon=float(store['LONGITUDE']),\n name=store['LOCATION'],\n addr_full=store['ADDRESS'],\n city=store['CITY'],\n state=store['STATE'],\n postcode=store['ZIPCODE'],\n phone=store['PHONE'],\n brand=BRANDS.get(store['BRAND'], BRANDS['T']),\n extras={\n 'amenity:fuel': True,\n 'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y',\n 'fuel:diesel': True,\n 'fuel:HGV_diesel': True,\n 'fuel:lng': int(store['LNG(Liquified Natural Gas)/Lanes'] or 0) > 0,\n 'fuel:propane': store['PROPANE'] == 'Y',\n 'hgv': True\n }\n )\n", "path": "locations/spiders/tapetro.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom xlrd import open_workbook\n\nBRANDS = {\n 'T': 'TravelCenters of America',\n 'P': 'Petro',\n 'TE': 'TA Express'\n}\n\n\nclass TAPetroSpider(scrapy.Spider):\n name = 'tapetro'\n item_attributes = {'brand': \"TravelCenters of America\",\n 'brand_wikidata': \"Q7835892\"}\n allowed_domains = ['www.ta-petro.com']\n start_urls = (\n 'http://www.ta-petro.com/assets/ce/Documents/Master-Location-List.xls',\n )\n\n def parse(self, response):\n workbook = open_workbook(file_contents=response.body)\n sheet = workbook.sheets()[0] # Sheet1\n\n # read header\n nrow = 0\n columns = []\n for ncol in range(sheet.ncols):\n columns.append((ncol, sheet.cell(nrow, ncol).value))\n\n for nrow in range(1, sheet.nrows):\n store = {}\n for ncol, column in columns:\n value = sheet.cell(nrow, ncol).value\n store[column] = value\n\n if not (store.get(\"LATITUDE\") and store.get(\"LONGITUDE\")):\n continue\n\n ref = '%s-%s-%s' % (\n store['SITE ID#'], store['BRAND'], store['LOCATION_ID'])\n yield GeojsonPointItem(\n ref=ref,\n lat=float(store['LATITUDE']),\n lon=float(store['LONGITUDE']),\n name=store['LOCATION'],\n addr_full=store['ADDRESS'],\n city=store['CITY'],\n state=store['STATE'],\n postcode=store['ZIPCODE'],\n phone=store['PHONE'],\n brand=BRANDS.get(store['BRAND'], BRANDS['T']),\n extras={\n 'amenity:fuel': True,\n 'fuel:diesel:class2': store['WINTERIZED DIESEL NOV-MAR(any temp)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 10 degrees or below)'] == 'Y' or store['WINTERIZED DIESEL NOV-MAR (when temps are 30 degrees or below)'] == 'y',\n 'fuel:diesel': True,\n 'fuel:HGV_diesel': True,\n 'fuel:lng': store['LNG(Liquified Natural Gas)'] == 'Y',\n 'fuel:propane': store['PROPANE'] == 'Y',\n 'hgv': True\n }\n )\n", "path": "locations/spiders/tapetro.py"}]}
1,150
233
gh_patches_debug_1692
rasdani/github-patches
git_diff
iterative__dvc-562
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dvc repro --force does not work It looks like the flag was just ignored. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/project.py` Content: ``` 1 import os 2 import stat 3 import networkx as nx 4 5 import dvc.cloud.base as cloud 6 7 from dvc.logger import Logger 8 from dvc.exceptions import DvcException 9 from dvc.stage import Stage, Output 10 from dvc.config import Config 11 from dvc.state import State 12 from dvc.lock import Lock 13 from dvc.scm import SCM 14 from dvc.cache import Cache 15 from dvc.cloud.data_cloud import DataCloud 16 from dvc.system import System 17 18 19 class StageNotFoundError(DvcException): 20 def __init__(self, path): 21 msg = 'Stage file {} does not exist'.format(path) 22 super(StageNotFoundError, self).__init__(msg) 23 24 25 class ReproductionError(DvcException): 26 def __init__(self, dvc_file_name, ex): 27 msg = 'Failed to reproduce \'{}\''.format(dvc_file_name) 28 super(ReproductionError, self).__init__(msg, cause=ex) 29 30 31 class Project(object): 32 DVC_DIR = '.dvc' 33 34 def __init__(self, root_dir): 35 self.root_dir = os.path.abspath(os.path.realpath(root_dir)) 36 self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR) 37 38 self.scm = SCM(self.root_dir) 39 self.lock = Lock(self.dvc_dir) 40 self.cache = Cache(self.dvc_dir) 41 self.state = State(self.root_dir, self.dvc_dir) 42 self.config = Config(self.dvc_dir) 43 self.logger = Logger(self.config._config) 44 self.cloud = DataCloud(self.cache, self.config._config) 45 46 @staticmethod 47 def init(root_dir=os.curdir): 48 """ 49 Initiate dvc project in directory. 50 51 Args: 52 root_dir: Path to project's root directory. 53 54 Returns: 55 Project instance. 56 57 Raises: 58 KeyError: Raises an exception. 59 """ 60 root_dir = os.path.abspath(root_dir) 61 dvc_dir = os.path.join(root_dir, Project.DVC_DIR) 62 os.mkdir(dvc_dir) 63 64 config = Config.init(dvc_dir) 65 cache = Cache.init(dvc_dir) 66 state = State.init(root_dir, dvc_dir) 67 lock = Lock(dvc_dir) 68 69 scm = SCM(root_dir) 70 scm.ignore_list([cache.cache_dir, 71 state.state_file, 72 lock.lock_file]) 73 74 ignore_file = os.path.join(dvc_dir, scm.ignore_file()) 75 scm.add([config.config_file, ignore_file]) 76 77 return Project(root_dir) 78 79 def to_dvc_path(self, path): 80 return os.path.relpath(path, self.root_dir) 81 82 def add(self, fname): 83 out = os.path.basename(fname) 84 stage_fname = out + Stage.STAGE_FILE_SUFFIX 85 cwd = os.path.dirname(os.path.abspath(fname)) 86 stage = Stage.loads(project=self, 87 cmd=None, 88 deps=[], 89 outs=[out], 90 fname=stage_fname, 91 cwd=cwd) 92 93 stage.save() 94 stage.dump() 95 return stage 96 97 def remove(self, target): 98 if not Stage.is_stage_file(target): 99 raise StageNotFoundError(target) 100 101 stage = Stage.load(self, target) 102 for out in stage.outs: 103 out.remove() 104 105 return stage 106 107 def run(self, 108 cmd=None, 109 deps=[], 110 outs=[], 111 outs_no_cache=[], 112 fname=Stage.STAGE_FILE, 113 cwd=os.curdir, 114 no_exec=False): 115 stage = Stage.loads(project=self, 116 fname=fname, 117 cmd=cmd, 118 cwd=cwd, 119 outs=outs, 120 outs_no_cache=outs_no_cache, 121 deps=deps) 122 if not no_exec: 123 stage.run() 124 stage.dump() 125 return stage 126 127 def _reproduce_stage(self, stages, node, force): 128 if not stages[node].changed(): 129 return [] 130 131 stages[node].reproduce(force=force) 132 stages[node].dump() 133 return [stages[node]] 134 135 def reproduce(self, target, recursive=True, force=False): 136 stages = nx.get_node_attributes(self.graph(), 'stage') 137 node = os.path.relpath(os.path.abspath(target), self.root_dir) 138 if node not in stages: 139 raise StageNotFoundError(target) 140 141 if recursive: 142 return self._reproduce_stages(stages, node, force) 143 144 return self._reproduce_stage(stages, node, force) 145 146 def _reproduce_stages(self, stages, node, force): 147 result = [] 148 for n in nx.dfs_postorder_nodes(self.graph(), node): 149 try: 150 result += self._reproduce_stage(stages, n, force) 151 except Exception as ex: 152 raise ReproductionError(stages[n].relpath, ex) 153 return result 154 155 def _remove_untracked_hardlinks(self): 156 untracked = self.scm.untracked_files() 157 cache = dict((System.inode(c), c) for c in self.cache.all()) 158 for file in untracked: 159 inode = System.inode(file) 160 if inode not in cache.keys(): 161 continue 162 163 Logger.info(u'Remove \'{}\''.format(file)) 164 os.remove(file) 165 166 dir = os.path.dirname(file) 167 if len(dir) != 0 and not os.listdir(dir): 168 Logger.info(u'Remove empty directory \'{}\''.format(dir)) 169 os.removedirs(dir) 170 171 def checkout(self): 172 self._remove_untracked_hardlinks() 173 for stage in self.stages(): 174 stage.checkout() 175 176 def _used_cache(self, target=None): 177 cache_set = set() 178 179 if target: 180 stages = [Stage.load(self, target)] 181 else: 182 stages = self.stages() 183 184 for stage in stages: 185 for out in stage.outs: 186 if not out.use_cache: 187 continue 188 cache_set |= set([out.cache]) 189 if out.is_dir_cache(out.cache) and os.path.isfile(out.cache): 190 dir_cache = out.dir_cache() 191 cache_set |= set(dir_cache.values()) 192 193 return list(cache_set) 194 195 def gc(self): 196 clist = self._used_cache() 197 for cache in self.cache.all(): 198 if cache in clist: 199 continue 200 os.unlink(cache) 201 self.logger.info(u'\'{}\' was removed'.format(self.to_dvc_path(cache))) 202 203 def push(self, target=None, jobs=1): 204 return self.cloud.push(self._used_cache(target), jobs) 205 206 def fetch(self, target=None, jobs=1): 207 return self.cloud.pull(self._used_cache(target), jobs) 208 209 def pull(self, target=None, jobs=1): 210 ret = self.fetch(target, jobs) 211 self.checkout() 212 return ret 213 214 def _local_status(self, target=None): 215 status = {} 216 217 if target: 218 stages = [Stage.load(self, target)] 219 else: 220 stages = self.stages() 221 222 for stage in self.stages(): 223 status.update(stage.status()) 224 225 return status 226 227 def _cloud_status(self, target=None, jobs=1): 228 status = {} 229 for target, ret in self.cloud.status(self._used_cache(target), jobs): 230 if ret == cloud.STATUS_UNKNOWN or ret == cloud.STATUS_OK: 231 continue 232 233 prefix_map = { 234 cloud.STATUS_DELETED: 'deleted', 235 cloud.STATUS_MODIFIED: 'modified', 236 cloud.STATUS_NEW: 'new', 237 } 238 239 path = os.path.relpath(target, self.cache.cache_dir) 240 241 status[path] = prefix_map[ret] 242 243 return status 244 245 def status(self, target=None, jobs=1, cloud=False): 246 if cloud: 247 return self._cloud_status(target, jobs) 248 return self._local_status(target) 249 250 def graph(self): 251 G = nx.DiGraph() 252 253 for stage in self.stages(): 254 node = os.path.relpath(stage.path, self.root_dir) 255 G.add_node(node, stage=stage) 256 for dep in stage.deps: 257 dep_stage = dep.stage() 258 if not dep_stage: 259 continue 260 dep_node = os.path.relpath(dep_stage.path, self.root_dir) 261 G.add_node(dep_node, stage=dep_stage) 262 G.add_edge(node, dep_node) 263 264 return G 265 266 def stages(self): 267 stages = [] 268 for root, dirs, files in os.walk(self.root_dir): 269 for fname in files: 270 path = os.path.join(root, fname) 271 if not Stage.is_stage_file(path): 272 continue 273 stages.append(Stage.load(self, path)) 274 return stages 275 276 def outs(self): 277 outs = [] 278 for stage in self.stages(): 279 outs += stage.outs 280 return outs 281 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/project.py b/dvc/project.py --- a/dvc/project.py +++ b/dvc/project.py @@ -125,7 +125,7 @@ return stage def _reproduce_stage(self, stages, node, force): - if not stages[node].changed(): + if not stages[node].changed() and not force: return [] stages[node].reproduce(force=force)
{"golden_diff": "diff --git a/dvc/project.py b/dvc/project.py\n--- a/dvc/project.py\n+++ b/dvc/project.py\n@@ -125,7 +125,7 @@\n return stage\n \n def _reproduce_stage(self, stages, node, force):\n- if not stages[node].changed():\n+ if not stages[node].changed() and not force:\n return []\n \n stages[node].reproduce(force=force)\n", "issue": "dvc repro --force does not work\nIt looks like the flag was just ignored.\n", "before_files": [{"content": "import os\nimport stat\nimport networkx as nx\n\nimport dvc.cloud.base as cloud\n\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import Stage, Output\nfrom dvc.config import Config\nfrom dvc.state import State\nfrom dvc.lock import Lock\nfrom dvc.scm import SCM\nfrom dvc.cache import Cache\nfrom dvc.cloud.data_cloud import DataCloud\nfrom dvc.system import System\n\n\nclass StageNotFoundError(DvcException):\n def __init__(self, path):\n msg = 'Stage file {} does not exist'.format(path)\n super(StageNotFoundError, self).__init__(msg)\n\n\nclass ReproductionError(DvcException):\n def __init__(self, dvc_file_name, ex):\n msg = 'Failed to reproduce \\'{}\\''.format(dvc_file_name)\n super(ReproductionError, self).__init__(msg, cause=ex)\n\n\nclass Project(object):\n DVC_DIR = '.dvc'\n\n def __init__(self, root_dir):\n self.root_dir = os.path.abspath(os.path.realpath(root_dir))\n self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)\n\n self.scm = SCM(self.root_dir)\n self.lock = Lock(self.dvc_dir)\n self.cache = Cache(self.dvc_dir)\n self.state = State(self.root_dir, self.dvc_dir)\n self.config = Config(self.dvc_dir)\n self.logger = Logger(self.config._config)\n self.cloud = DataCloud(self.cache, self.config._config)\n\n @staticmethod\n def init(root_dir=os.curdir):\n \"\"\"\n Initiate dvc project in directory.\n\n Args:\n root_dir: Path to project's root directory.\n\n Returns:\n Project instance.\n\n Raises:\n KeyError: Raises an exception.\n \"\"\"\n root_dir = os.path.abspath(root_dir)\n dvc_dir = os.path.join(root_dir, Project.DVC_DIR)\n os.mkdir(dvc_dir)\n\n config = Config.init(dvc_dir)\n cache = Cache.init(dvc_dir)\n state = State.init(root_dir, dvc_dir)\n lock = Lock(dvc_dir)\n\n scm = SCM(root_dir)\n scm.ignore_list([cache.cache_dir,\n state.state_file,\n lock.lock_file])\n\n ignore_file = os.path.join(dvc_dir, scm.ignore_file())\n scm.add([config.config_file, ignore_file])\n\n return Project(root_dir)\n\n def to_dvc_path(self, path):\n return os.path.relpath(path, self.root_dir)\n\n def add(self, fname):\n out = os.path.basename(fname)\n stage_fname = out + Stage.STAGE_FILE_SUFFIX\n cwd = os.path.dirname(os.path.abspath(fname))\n stage = Stage.loads(project=self,\n cmd=None,\n deps=[],\n outs=[out],\n fname=stage_fname,\n cwd=cwd)\n\n stage.save()\n stage.dump()\n return stage\n\n def remove(self, target):\n if not Stage.is_stage_file(target):\n raise StageNotFoundError(target)\n\n stage = Stage.load(self, target)\n for out in stage.outs:\n out.remove()\n\n return stage\n\n def run(self,\n cmd=None,\n deps=[],\n outs=[],\n outs_no_cache=[],\n fname=Stage.STAGE_FILE,\n cwd=os.curdir,\n no_exec=False):\n stage = Stage.loads(project=self,\n fname=fname,\n cmd=cmd,\n cwd=cwd,\n outs=outs,\n outs_no_cache=outs_no_cache,\n deps=deps)\n if not no_exec:\n stage.run()\n stage.dump()\n return stage\n\n def _reproduce_stage(self, stages, node, force):\n if not stages[node].changed():\n return []\n\n stages[node].reproduce(force=force)\n stages[node].dump()\n return [stages[node]]\n\n def reproduce(self, target, recursive=True, force=False):\n stages = nx.get_node_attributes(self.graph(), 'stage')\n node = os.path.relpath(os.path.abspath(target), self.root_dir)\n if node not in stages:\n raise StageNotFoundError(target)\n\n if recursive:\n return self._reproduce_stages(stages, node, force)\n\n return self._reproduce_stage(stages, node, force)\n\n def _reproduce_stages(self, stages, node, force):\n result = []\n for n in nx.dfs_postorder_nodes(self.graph(), node):\n try:\n result += self._reproduce_stage(stages, n, force)\n except Exception as ex:\n raise ReproductionError(stages[n].relpath, ex)\n return result\n\n def _remove_untracked_hardlinks(self):\n untracked = self.scm.untracked_files()\n cache = dict((System.inode(c), c) for c in self.cache.all())\n for file in untracked:\n inode = System.inode(file)\n if inode not in cache.keys():\n continue\n\n Logger.info(u'Remove \\'{}\\''.format(file))\n os.remove(file)\n\n dir = os.path.dirname(file)\n if len(dir) != 0 and not os.listdir(dir):\n Logger.info(u'Remove empty directory \\'{}\\''.format(dir))\n os.removedirs(dir)\n\n def checkout(self):\n self._remove_untracked_hardlinks()\n for stage in self.stages():\n stage.checkout()\n\n def _used_cache(self, target=None):\n cache_set = set()\n\n if target:\n stages = [Stage.load(self, target)]\n else:\n stages = self.stages()\n\n for stage in stages:\n for out in stage.outs:\n if not out.use_cache:\n continue\n cache_set |= set([out.cache])\n if out.is_dir_cache(out.cache) and os.path.isfile(out.cache):\n dir_cache = out.dir_cache()\n cache_set |= set(dir_cache.values())\n\n return list(cache_set)\n\n def gc(self):\n clist = self._used_cache()\n for cache in self.cache.all():\n if cache in clist:\n continue\n os.unlink(cache)\n self.logger.info(u'\\'{}\\' was removed'.format(self.to_dvc_path(cache)))\n\n def push(self, target=None, jobs=1):\n return self.cloud.push(self._used_cache(target), jobs)\n\n def fetch(self, target=None, jobs=1):\n return self.cloud.pull(self._used_cache(target), jobs)\n\n def pull(self, target=None, jobs=1):\n ret = self.fetch(target, jobs)\n self.checkout()\n return ret\n\n def _local_status(self, target=None):\n status = {}\n\n if target:\n stages = [Stage.load(self, target)]\n else:\n stages = self.stages()\n\n for stage in self.stages():\n status.update(stage.status())\n\n return status\n\n def _cloud_status(self, target=None, jobs=1):\n status = {}\n for target, ret in self.cloud.status(self._used_cache(target), jobs):\n if ret == cloud.STATUS_UNKNOWN or ret == cloud.STATUS_OK:\n continue\n\n prefix_map = {\n cloud.STATUS_DELETED: 'deleted',\n cloud.STATUS_MODIFIED: 'modified',\n cloud.STATUS_NEW: 'new',\n }\n\n path = os.path.relpath(target, self.cache.cache_dir)\n\n status[path] = prefix_map[ret]\n\n return status\n\n def status(self, target=None, jobs=1, cloud=False):\n if cloud:\n return self._cloud_status(target, jobs)\n return self._local_status(target)\n\n def graph(self):\n G = nx.DiGraph()\n\n for stage in self.stages():\n node = os.path.relpath(stage.path, self.root_dir)\n G.add_node(node, stage=stage)\n for dep in stage.deps:\n dep_stage = dep.stage()\n if not dep_stage:\n continue\n dep_node = os.path.relpath(dep_stage.path, self.root_dir)\n G.add_node(dep_node, stage=dep_stage)\n G.add_edge(node, dep_node)\n\n return G\n\n def stages(self):\n stages = []\n for root, dirs, files in os.walk(self.root_dir):\n for fname in files:\n path = os.path.join(root, fname)\n if not Stage.is_stage_file(path):\n continue\n stages.append(Stage.load(self, path))\n return stages\n\n def outs(self):\n outs = []\n for stage in self.stages():\n outs += stage.outs\n return outs\n", "path": "dvc/project.py"}], "after_files": [{"content": "import os\nimport stat\nimport networkx as nx\n\nimport dvc.cloud.base as cloud\n\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import Stage, Output\nfrom dvc.config import Config\nfrom dvc.state import State\nfrom dvc.lock import Lock\nfrom dvc.scm import SCM\nfrom dvc.cache import Cache\nfrom dvc.cloud.data_cloud import DataCloud\nfrom dvc.system import System\n\n\nclass StageNotFoundError(DvcException):\n def __init__(self, path):\n msg = 'Stage file {} does not exist'.format(path)\n super(StageNotFoundError, self).__init__(msg)\n\n\nclass ReproductionError(DvcException):\n def __init__(self, dvc_file_name, ex):\n msg = 'Failed to reproduce \\'{}\\''.format(dvc_file_name)\n super(ReproductionError, self).__init__(msg, cause=ex)\n\n\nclass Project(object):\n DVC_DIR = '.dvc'\n\n def __init__(self, root_dir):\n self.root_dir = os.path.abspath(os.path.realpath(root_dir))\n self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)\n\n self.scm = SCM(self.root_dir)\n self.lock = Lock(self.dvc_dir)\n self.cache = Cache(self.dvc_dir)\n self.state = State(self.root_dir, self.dvc_dir)\n self.config = Config(self.dvc_dir)\n self.logger = Logger(self.config._config)\n self.cloud = DataCloud(self.cache, self.config._config)\n\n @staticmethod\n def init(root_dir=os.curdir):\n \"\"\"\n Initiate dvc project in directory.\n\n Args:\n root_dir: Path to project's root directory.\n\n Returns:\n Project instance.\n\n Raises:\n KeyError: Raises an exception.\n \"\"\"\n root_dir = os.path.abspath(root_dir)\n dvc_dir = os.path.join(root_dir, Project.DVC_DIR)\n os.mkdir(dvc_dir)\n\n config = Config.init(dvc_dir)\n cache = Cache.init(dvc_dir)\n state = State.init(root_dir, dvc_dir)\n lock = Lock(dvc_dir)\n\n scm = SCM(root_dir)\n scm.ignore_list([cache.cache_dir,\n state.state_file,\n lock.lock_file])\n\n ignore_file = os.path.join(dvc_dir, scm.ignore_file())\n scm.add([config.config_file, ignore_file])\n\n return Project(root_dir)\n\n def to_dvc_path(self, path):\n return os.path.relpath(path, self.root_dir)\n\n def add(self, fname):\n out = os.path.basename(fname)\n stage_fname = out + Stage.STAGE_FILE_SUFFIX\n cwd = os.path.dirname(os.path.abspath(fname))\n stage = Stage.loads(project=self,\n cmd=None,\n deps=[],\n outs=[out],\n fname=stage_fname,\n cwd=cwd)\n\n stage.save()\n stage.dump()\n return stage\n\n def remove(self, target):\n if not Stage.is_stage_file(target):\n raise StageNotFoundError(target)\n\n stage = Stage.load(self, target)\n for out in stage.outs:\n out.remove()\n\n return stage\n\n def run(self,\n cmd=None,\n deps=[],\n outs=[],\n outs_no_cache=[],\n fname=Stage.STAGE_FILE,\n cwd=os.curdir,\n no_exec=False):\n stage = Stage.loads(project=self,\n fname=fname,\n cmd=cmd,\n cwd=cwd,\n outs=outs,\n outs_no_cache=outs_no_cache,\n deps=deps)\n if not no_exec:\n stage.run()\n stage.dump()\n return stage\n\n def _reproduce_stage(self, stages, node, force):\n if not stages[node].changed() and not force:\n return []\n\n stages[node].reproduce(force=force)\n stages[node].dump()\n return [stages[node]]\n\n def reproduce(self, target, recursive=True, force=False):\n stages = nx.get_node_attributes(self.graph(), 'stage')\n node = os.path.relpath(os.path.abspath(target), self.root_dir)\n if node not in stages:\n raise StageNotFoundError(target)\n\n if recursive:\n return self._reproduce_stages(stages, node, force)\n\n return self._reproduce_stage(stages, node, force)\n\n def _reproduce_stages(self, stages, node, force):\n result = []\n for n in nx.dfs_postorder_nodes(self.graph(), node):\n try:\n result += self._reproduce_stage(stages, n, force)\n except Exception as ex:\n raise ReproductionError(stages[n].relpath, ex)\n return result\n\n def _remove_untracked_hardlinks(self):\n untracked = self.scm.untracked_files()\n cache = dict((System.inode(c), c) for c in self.cache.all())\n for file in untracked:\n inode = System.inode(file)\n if inode not in cache.keys():\n continue\n\n Logger.info(u'Remove \\'{}\\''.format(file))\n os.remove(file)\n\n dir = os.path.dirname(file)\n if len(dir) != 0 and not os.listdir(dir):\n Logger.info(u'Remove empty directory \\'{}\\''.format(dir))\n os.removedirs(dir)\n\n def checkout(self):\n self._remove_untracked_hardlinks()\n for stage in self.stages():\n stage.checkout()\n\n def _used_cache(self, target=None):\n cache_set = set()\n\n if target:\n stages = [Stage.load(self, target)]\n else:\n stages = self.stages()\n\n for stage in stages:\n for out in stage.outs:\n if not out.use_cache:\n continue\n cache_set |= set([out.cache])\n if out.is_dir_cache(out.cache) and os.path.isfile(out.cache):\n dir_cache = out.dir_cache()\n cache_set |= set(dir_cache.values())\n\n return list(cache_set)\n\n def gc(self):\n clist = self._used_cache()\n for cache in self.cache.all():\n if cache in clist:\n continue\n os.unlink(cache)\n self.logger.info(u'\\'{}\\' was removed'.format(self.to_dvc_path(cache)))\n\n def push(self, target=None, jobs=1):\n return self.cloud.push(self._used_cache(target), jobs)\n\n def fetch(self, target=None, jobs=1):\n return self.cloud.pull(self._used_cache(target), jobs)\n\n def pull(self, target=None, jobs=1):\n ret = self.fetch(target, jobs)\n self.checkout()\n return ret\n\n def _local_status(self, target=None):\n status = {}\n\n if target:\n stages = [Stage.load(self, target)]\n else:\n stages = self.stages()\n\n for stage in self.stages():\n status.update(stage.status())\n\n return status\n\n def _cloud_status(self, target=None, jobs=1):\n status = {}\n for target, ret in self.cloud.status(self._used_cache(target), jobs):\n if ret == cloud.STATUS_UNKNOWN or ret == cloud.STATUS_OK:\n continue\n\n prefix_map = {\n cloud.STATUS_DELETED: 'deleted',\n cloud.STATUS_MODIFIED: 'modified',\n cloud.STATUS_NEW: 'new',\n }\n\n path = os.path.relpath(target, self.cache.cache_dir)\n\n status[path] = prefix_map[ret]\n\n return status\n\n def status(self, target=None, jobs=1, cloud=False):\n if cloud:\n return self._cloud_status(target, jobs)\n return self._local_status(target)\n\n def graph(self):\n G = nx.DiGraph()\n\n for stage in self.stages():\n node = os.path.relpath(stage.path, self.root_dir)\n G.add_node(node, stage=stage)\n for dep in stage.deps:\n dep_stage = dep.stage()\n if not dep_stage:\n continue\n dep_node = os.path.relpath(dep_stage.path, self.root_dir)\n G.add_node(dep_node, stage=dep_stage)\n G.add_edge(node, dep_node)\n\n return G\n\n def stages(self):\n stages = []\n for root, dirs, files in os.walk(self.root_dir):\n for fname in files:\n path = os.path.join(root, fname)\n if not Stage.is_stage_file(path):\n continue\n stages.append(Stage.load(self, path))\n return stages\n\n def outs(self):\n outs = []\n for stage in self.stages():\n outs += stage.outs\n return outs\n", "path": "dvc/project.py"}]}
2,858
96
gh_patches_debug_31680
rasdani/github-patches
git_diff
pymeasure__pymeasure-813
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Migrating Prologix / AR488 from 0.10. to 0.11. VI_ERROR_RSRC_NFOUND Hi, I'm having an issue after updating to 0.11, I understand that the implementation of the PrologixAdapter has changed, previously I was creating an adapter with this code: `adapter = PrologixAdapter(port='/dev/cu.usbserial-AC008NYI',baudrate=115200,serial_timeout=1)` now this doesn't work anymore and as far as I understand from the tutorial it should be updated to: `adapter = PrologixAdapter('/dev/cu.usbserial-AC008NYI')` This doesn't work and cannot find a solution, this is the error that I get: `--------------------------------------------------------------------------- VisaIOError Traceback (most recent call last) /var/folders/s_/k_qsdmpn09ng7xgzq1lrkjbm0000gn/T/ipykernel_13441/652802221.py in <module> 6 import math 7 import time ----> 8 adapter = PrologixAdapter('/dev/cu.usbserial-AC008NYI') 9 #adapter = PrologixAdapter(port='/dev/cu.usbserial-AC008NYI',baudrate=115200,serial_timeout=1) 10 sourcemeter = Keithley2000(adapter.gpib(16)) ~/opt/anaconda3/lib/python3.9/site-packages/pymeasure/adapters/prologix.py in __init__(self, resource_name, address, rw_delay, serial_timeout, preprocess_reply, **kwargs) 82 warn("Use 'timeout' in ms instead", FutureWarning) 83 kwargs['timeout'] = serial_timeout ---> 84 super().__init__(resource_name, 85 asrl={ 86 'timeout': 500, ~/opt/anaconda3/lib/python3.9/site-packages/pymeasure/adapters/visa.py in __init__(self, resource_name, visa_library, preprocess_reply, query_delay, log, **kwargs) 106 107 # Clean up kwargs considering the interface type matching resource_name --> 108 if_type = self.manager.resource_info(self.resource_name).interface_type 109 for key in list(kwargs.keys()): # iterate over a copy of the keys as we modify kwargs 110 # Remove all interface-specific kwargs: ~/opt/anaconda3/lib/python3.9/site-packages/pyvisa/highlevel.py in resource_info(self, resource_name, extended) 3172 3173 if extended: -> 3174 ret, err = self.visalib.parse_resource_extended(self.session, resource_name) 3175 else: 3176 ret, err = self.visalib.parse_resource(self.session, resource_name) ~/opt/anaconda3/lib/python3.9/site-packages/pyvisa/ctwrapper/functions.py in parse_resource_extended(library, session, resource_name) 2075 # [ViSession, ViRsrc, ViPUInt16, ViPUInt16, ViAChar, ViAChar, ViAChar] 2076 # ViRsrc converts from (str, unicode, bytes) to bytes -> 2077 ret = library.viParseRsrcEx( 2078 session, 2079 resource_name, ~/opt/anaconda3/lib/python3.9/site-packages/pyvisa/ctwrapper/highlevel.py in _return_handler(self, ret_value, func, arguments) 224 session = None 225 --> 226 return self.handle_return_value(session, ret_value) # type: ignore 227 228 def list_resources( ~/opt/anaconda3/lib/python3.9/site-packages/pyvisa/highlevel.py in handle_return_value(self, session, status_code) 249 250 if rv < 0: --> 251 raise errors.VisaIOError(rv) 252 253 if rv in self.issue_warning_on: VisaIOError: VI_ERROR_RSRC_NFOUND (-1073807343): Insufficient location information or the requested device or resource is not present in the system.` Thanks for the help! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pymeasure/adapters/prologix.py` Content: ``` 1 # 2 # This file is part of the PyMeasure package. 3 # 4 # Copyright (c) 2013-2022 PyMeasure Developers 5 # 6 # Permission is hereby granted, free of charge, to any person obtaining a copy 7 # of this software and associated documentation files (the "Software"), to deal 8 # in the Software without restriction, including without limitation the rights 9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 # copies of the Software, and to permit persons to whom the Software is 11 # furnished to do so, subject to the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be included in 14 # all copies or substantial portions of the Software. 15 # 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 # THE SOFTWARE. 23 # 24 import time 25 from warnings import warn 26 27 from pymeasure.adapters import VISAAdapter 28 29 30 class PrologixAdapter(VISAAdapter): 31 """ Encapsulates the additional commands necessary 32 to communicate over a Prologix GPIB-USB Adapter, 33 using the :class:`VISAAdapter`. 34 35 Each PrologixAdapter is constructed based on a serial port or 36 connection and the GPIB address to be communicated to. 37 Serial connection sharing is achieved by using the :meth:`.gpib` 38 method to spawn new PrologixAdapters for different GPIB addresses. 39 40 :param port: The Serial port name or a connection object 41 :param address: Integer GPIB address of the desired instrument 42 :param rw_delay: An optional delay to set between a write and read call for 43 slow to respond instruments. 44 45 .. deprecated:: 0.11 46 Implement it in the instrument's `wait_until_read` method instead. 47 48 :param preprocess_reply: optional callable used to preprocess 49 strings received from the instrument. The callable returns the 50 processed string. 51 52 .. deprecated:: 0.11 53 Implement it in the instrument's `read` method instead. 54 55 :param kwargs: Key-word arguments if constructing a new serial object 56 57 :ivar address: Integer GPIB address of the desired instrument 58 59 To allow user access to the Prologix adapter in Linux, create the file: 60 :code:`/etc/udev/rules.d/51-prologix.rules`, with contents: 61 62 .. code-block:: bash 63 64 SUBSYSTEMS=="usb",ATTRS{idVendor}=="0403",ATTRS{idProduct}=="6001",MODE="0666" 65 66 Then reload the udev rules with: 67 68 .. code-block:: bash 69 70 sudo udevadm control --reload-rules 71 sudo udevadm trigger 72 73 """ 74 75 def __init__(self, resource_name, address=None, rw_delay=0, serial_timeout=None, 76 preprocess_reply=None, **kwargs): 77 # for legacy rw_delay: prefer new style over old one. 78 if rw_delay: 79 warn("Implement in Instrument's 'wait_until_read' instead.", FutureWarning) 80 kwargs['query_delay'] = rw_delay 81 if serial_timeout: 82 warn("Use 'timeout' in ms instead", FutureWarning) 83 kwargs['timeout'] = serial_timeout 84 super().__init__(resource_name, 85 asrl={ 86 'timeout': 500, 87 'write_termination': "\n", 88 }, 89 preprocess_reply=preprocess_reply, 90 **kwargs) 91 self.address = address 92 if not isinstance(resource_name, PrologixAdapter): 93 self.set_defaults() 94 95 def set_defaults(self): 96 """ Set up the default behavior of the Prologix-GPIB 97 adapter 98 """ 99 self.write("++auto 0") # Turn off auto read-after-write 100 self.write("++eoi 1") # Append end-of-line to commands 101 self.write("++eos 2") # Append line-feed to commands 102 103 def ask(self, command): 104 """ Ask the Prologix controller. 105 106 .. deprecated:: 0.11 107 Call `Instrument.ask` instead. 108 109 :param command: SCPI command string to be sent to instrument 110 """ 111 warn("Do not call `Adapter.ask`, but `Instrument.ask` instead.", 112 FutureWarning) 113 self.write(command) 114 return self.read() 115 116 def write(self, command, **kwargs): 117 """Write a string command to the instrument appending `write_termination`. 118 119 If the GPIB address in :attr:`.address` is defined, it is sent first. 120 121 :param str command: Command string to be sent to the instrument 122 (without termination). 123 :param kwargs: Keyword arguments for the connection itself. 124 """ 125 # Overrides write instead of _write in order to ensure proper logging 126 if self.address is not None and not command.startswith("++"): 127 super().write("++addr %d" % self.address, **kwargs) 128 super().write(command, **kwargs) 129 130 def _format_binary_values(self, values, datatype='f', is_big_endian=False, header_fmt="ieee"): 131 """Format values in binary format, used internally in :meth:`.write_binary_values`. 132 133 :param values: data to be writen to the device. 134 :param datatype: the format string for a single element. See struct module. 135 :param is_big_endian: boolean indicating endianess. 136 :param header_fmt: Format of the header prefixing the data ("ieee", "hp", "empty"). 137 :return: binary string. 138 :rtype: bytes 139 """ 140 block = super()._format_binary_values(values, datatype, is_big_endian, header_fmt) 141 # Prologix needs certian characters to be escaped. 142 # Special care must be taken when sending binary data to instruments. If any of the 143 # following characters occur in the binary data -- CR (ASCII 13), LF (ASCII 10), ESC 144 # (ASCII 27), '+' (ASCII 43) - they must be escaped by preceding them with an ESC 145 # character. 146 special_chars = b'\x0d\x0a\x1b\x2b' 147 new_block = b'' 148 for b in block: 149 escape = b'' 150 if b in special_chars: 151 escape = b'\x1b' 152 new_block += (escape + bytes((b,))) 153 154 return new_block 155 156 def write_binary_values(self, command, values, **kwargs): 157 """ Write binary data to the instrument, e.g. waveform for signal generators. 158 159 values are encoded in a binary format according to 160 IEEE 488.2 Definite Length Arbitrary Block Response Data block. 161 162 :param command: SCPI command to be sent to the instrument 163 :param values: iterable representing the binary values 164 :param kwargs: Key-word arguments to pass onto :meth:`._format_binary_values` 165 :returns: number of bytes written 166 """ 167 if self.address is not None: 168 address_command = "++addr %d\n" % self.address 169 self.write(address_command) 170 super().write_binary_values(command, values, "\n", **kwargs) 171 172 def _read(self, **kwargs): 173 """Read up to (excluding) `read_termination` or the whole read buffer. 174 175 :param kwargs: Keyword arguments for the connection itself. 176 :returns str: ASCII response of the instrument (excluding read_termination). 177 """ 178 self.write("++read eoi") 179 return super()._read() 180 181 def gpib(self, address, **kwargs): 182 """ Return a PrologixAdapter object that references the GPIB 183 address specified, while sharing the Serial connection with other 184 calls of this function 185 186 :param address: Integer GPIB address of the desired instrument 187 :param kwargs: Arguments for the initialization 188 :returns: PrologixAdapter for specific GPIB address 189 """ 190 return PrologixAdapter(self, address, **kwargs) 191 192 def _check_for_srq(self): 193 # it was int(self.ask("++srq")) 194 self.write("++srq") 195 return int(self.read()) 196 197 def wait_for_srq(self, timeout=25, delay=0.1): 198 """ Blocks until a SRQ, and leaves the bit high 199 200 :param timeout: Timeout duration in seconds. 201 :param delay: Time delay between checking SRQ in seconds. 202 :raises TimeoutError: "Waiting for SRQ timed out." 203 """ 204 stop = time.perf_counter() + timeout 205 while self._check_for_srq() != 1: 206 if time.perf_counter() > stop: 207 raise TimeoutError("Waiting for SRQ timed out.") 208 time.sleep(delay) 209 210 def __repr__(self): 211 if self.address is not None: 212 return (f"<PrologixAdapter(resource_name='{self.connection.resource_name}', " 213 f"address={self.address:d})>") 214 else: 215 return f"<PrologixAdapter(resource_name='{self.connection.resource_name}')>" 216 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pymeasure/adapters/prologix.py b/pymeasure/adapters/prologix.py --- a/pymeasure/adapters/prologix.py +++ b/pymeasure/adapters/prologix.py @@ -32,13 +32,16 @@ to communicate over a Prologix GPIB-USB Adapter, using the :class:`VISAAdapter`. - Each PrologixAdapter is constructed based on a serial port or - connection and the GPIB address to be communicated to. - Serial connection sharing is achieved by using the :meth:`.gpib` + Each PrologixAdapter is constructed based on a connection to the Prologix device + itself and the GPIB address of the instrument to be communicated to. + Connection sharing is achieved by using the :meth:`.gpib` method to spawn new PrologixAdapters for different GPIB addresses. - :param port: The Serial port name or a connection object - :param address: Integer GPIB address of the desired instrument + :param resource_name: A + `VISA resource string <https://pyvisa.readthedocs.io/en/latest/introduction/names.html>`__ + that identifies the connection to the Prologix device itself, for example + "ASRL5" for the 5th COM port. + :param address: Integer GPIB address of the desired instrument. :param rw_delay: An optional delay to set between a write and read call for slow to respond instruments. @@ -54,7 +57,18 @@ :param kwargs: Key-word arguments if constructing a new serial object - :ivar address: Integer GPIB address of the desired instrument + :ivar address: Integer GPIB address of the desired instrument. + + Usage example: + + .. code:: + + adapter = PrologixAdapter("ASRL5::INSTR", 7) + sourcemeter = Keithley2400(adapter) # at GPIB address 7 + # generate another instance with a different GPIB address: + adapter2 = adapter.gpib(9) + multimeter = Keithley2000(adapter2) # at GPIB address 9 + To allow user access to the Prologix adapter in Linux, create the file: :code:`/etc/udev/rules.d/51-prologix.rules`, with contents:
{"golden_diff": "diff --git a/pymeasure/adapters/prologix.py b/pymeasure/adapters/prologix.py\n--- a/pymeasure/adapters/prologix.py\n+++ b/pymeasure/adapters/prologix.py\n@@ -32,13 +32,16 @@\n to communicate over a Prologix GPIB-USB Adapter,\n using the :class:`VISAAdapter`.\n \n- Each PrologixAdapter is constructed based on a serial port or\n- connection and the GPIB address to be communicated to.\n- Serial connection sharing is achieved by using the :meth:`.gpib`\n+ Each PrologixAdapter is constructed based on a connection to the Prologix device\n+ itself and the GPIB address of the instrument to be communicated to.\n+ Connection sharing is achieved by using the :meth:`.gpib`\n method to spawn new PrologixAdapters for different GPIB addresses.\n \n- :param port: The Serial port name or a connection object\n- :param address: Integer GPIB address of the desired instrument\n+ :param resource_name: A\n+ `VISA resource string <https://pyvisa.readthedocs.io/en/latest/introduction/names.html>`__\n+ that identifies the connection to the Prologix device itself, for example\n+ \"ASRL5\" for the 5th COM port.\n+ :param address: Integer GPIB address of the desired instrument.\n :param rw_delay: An optional delay to set between a write and read call for\n slow to respond instruments.\n \n@@ -54,7 +57,18 @@\n \n :param kwargs: Key-word arguments if constructing a new serial object\n \n- :ivar address: Integer GPIB address of the desired instrument\n+ :ivar address: Integer GPIB address of the desired instrument.\n+\n+ Usage example:\n+\n+ .. code::\n+\n+ adapter = PrologixAdapter(\"ASRL5::INSTR\", 7)\n+ sourcemeter = Keithley2400(adapter) # at GPIB address 7\n+ # generate another instance with a different GPIB address:\n+ adapter2 = adapter.gpib(9)\n+ multimeter = Keithley2000(adapter2) # at GPIB address 9\n+\n \n To allow user access to the Prologix adapter in Linux, create the file:\n :code:`/etc/udev/rules.d/51-prologix.rules`, with contents:\n", "issue": "Migrating Prologix / AR488 from 0.10. to 0.11. VI_ERROR_RSRC_NFOUND \nHi, I'm having an issue after updating to 0.11, I understand that the implementation of the PrologixAdapter has changed, previously I was creating an adapter with this code:\r\n\r\n`adapter = PrologixAdapter(port='/dev/cu.usbserial-AC008NYI',baudrate=115200,serial_timeout=1)`\r\nnow this doesn't work anymore and as far as I understand from the tutorial it should be updated to:\r\n`adapter = PrologixAdapter('/dev/cu.usbserial-AC008NYI')`\r\n\r\nThis doesn't work and cannot find a solution, this is the error that I get:\r\n`---------------------------------------------------------------------------\r\nVisaIOError Traceback (most recent call last)\r\n/var/folders/s_/k_qsdmpn09ng7xgzq1lrkjbm0000gn/T/ipykernel_13441/652802221.py in <module>\r\n 6 import math\r\n 7 import time\r\n----> 8 adapter = PrologixAdapter('/dev/cu.usbserial-AC008NYI')\r\n 9 #adapter = PrologixAdapter(port='/dev/cu.usbserial-AC008NYI',baudrate=115200,serial_timeout=1)\r\n 10 sourcemeter = Keithley2000(adapter.gpib(16))\r\n\r\n~/opt/anaconda3/lib/python3.9/site-packages/pymeasure/adapters/prologix.py in __init__(self, resource_name, address, rw_delay, serial_timeout, preprocess_reply, **kwargs)\r\n 82 warn(\"Use 'timeout' in ms instead\", FutureWarning)\r\n 83 kwargs['timeout'] = serial_timeout\r\n---> 84 super().__init__(resource_name,\r\n 85 asrl={\r\n 86 'timeout': 500,\r\n\r\n~/opt/anaconda3/lib/python3.9/site-packages/pymeasure/adapters/visa.py in __init__(self, resource_name, visa_library, preprocess_reply, query_delay, log, **kwargs)\r\n 106 \r\n 107 # Clean up kwargs considering the interface type matching resource_name\r\n--> 108 if_type = self.manager.resource_info(self.resource_name).interface_type\r\n 109 for key in list(kwargs.keys()): # iterate over a copy of the keys as we modify kwargs\r\n 110 # Remove all interface-specific kwargs:\r\n\r\n~/opt/anaconda3/lib/python3.9/site-packages/pyvisa/highlevel.py in resource_info(self, resource_name, extended)\r\n 3172 \r\n 3173 if extended:\r\n-> 3174 ret, err = self.visalib.parse_resource_extended(self.session, resource_name)\r\n 3175 else:\r\n 3176 ret, err = self.visalib.parse_resource(self.session, resource_name)\r\n\r\n~/opt/anaconda3/lib/python3.9/site-packages/pyvisa/ctwrapper/functions.py in parse_resource_extended(library, session, resource_name)\r\n 2075 # [ViSession, ViRsrc, ViPUInt16, ViPUInt16, ViAChar, ViAChar, ViAChar]\r\n 2076 # ViRsrc converts from (str, unicode, bytes) to bytes\r\n-> 2077 ret = library.viParseRsrcEx(\r\n 2078 session,\r\n 2079 resource_name,\r\n\r\n~/opt/anaconda3/lib/python3.9/site-packages/pyvisa/ctwrapper/highlevel.py in _return_handler(self, ret_value, func, arguments)\r\n 224 session = None\r\n 225 \r\n--> 226 return self.handle_return_value(session, ret_value) # type: ignore\r\n 227 \r\n 228 def list_resources(\r\n\r\n~/opt/anaconda3/lib/python3.9/site-packages/pyvisa/highlevel.py in handle_return_value(self, session, status_code)\r\n 249 \r\n 250 if rv < 0:\r\n--> 251 raise errors.VisaIOError(rv)\r\n 252 \r\n 253 if rv in self.issue_warning_on:\r\n\r\nVisaIOError: VI_ERROR_RSRC_NFOUND (-1073807343): Insufficient location information or the requested device or resource is not present in the system.`\r\n\r\nThanks for the help! \n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2022 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\nimport time\nfrom warnings import warn\n\nfrom pymeasure.adapters import VISAAdapter\n\n\nclass PrologixAdapter(VISAAdapter):\n \"\"\" Encapsulates the additional commands necessary\n to communicate over a Prologix GPIB-USB Adapter,\n using the :class:`VISAAdapter`.\n\n Each PrologixAdapter is constructed based on a serial port or\n connection and the GPIB address to be communicated to.\n Serial connection sharing is achieved by using the :meth:`.gpib`\n method to spawn new PrologixAdapters for different GPIB addresses.\n\n :param port: The Serial port name or a connection object\n :param address: Integer GPIB address of the desired instrument\n :param rw_delay: An optional delay to set between a write and read call for\n slow to respond instruments.\n\n .. deprecated:: 0.11\n Implement it in the instrument's `wait_until_read` method instead.\n\n :param preprocess_reply: optional callable used to preprocess\n strings received from the instrument. The callable returns the\n processed string.\n\n .. deprecated:: 0.11\n Implement it in the instrument's `read` method instead.\n\n :param kwargs: Key-word arguments if constructing a new serial object\n\n :ivar address: Integer GPIB address of the desired instrument\n\n To allow user access to the Prologix adapter in Linux, create the file:\n :code:`/etc/udev/rules.d/51-prologix.rules`, with contents:\n\n .. code-block:: bash\n\n SUBSYSTEMS==\"usb\",ATTRS{idVendor}==\"0403\",ATTRS{idProduct}==\"6001\",MODE=\"0666\"\n\n Then reload the udev rules with:\n\n .. code-block:: bash\n\n sudo udevadm control --reload-rules\n sudo udevadm trigger\n\n \"\"\"\n\n def __init__(self, resource_name, address=None, rw_delay=0, serial_timeout=None,\n preprocess_reply=None, **kwargs):\n # for legacy rw_delay: prefer new style over old one.\n if rw_delay:\n warn(\"Implement in Instrument's 'wait_until_read' instead.\", FutureWarning)\n kwargs['query_delay'] = rw_delay\n if serial_timeout:\n warn(\"Use 'timeout' in ms instead\", FutureWarning)\n kwargs['timeout'] = serial_timeout\n super().__init__(resource_name,\n asrl={\n 'timeout': 500,\n 'write_termination': \"\\n\",\n },\n preprocess_reply=preprocess_reply,\n **kwargs)\n self.address = address\n if not isinstance(resource_name, PrologixAdapter):\n self.set_defaults()\n\n def set_defaults(self):\n \"\"\" Set up the default behavior of the Prologix-GPIB\n adapter\n \"\"\"\n self.write(\"++auto 0\") # Turn off auto read-after-write\n self.write(\"++eoi 1\") # Append end-of-line to commands\n self.write(\"++eos 2\") # Append line-feed to commands\n\n def ask(self, command):\n \"\"\" Ask the Prologix controller.\n\n .. deprecated:: 0.11\n Call `Instrument.ask` instead.\n\n :param command: SCPI command string to be sent to instrument\n \"\"\"\n warn(\"Do not call `Adapter.ask`, but `Instrument.ask` instead.\",\n FutureWarning)\n self.write(command)\n return self.read()\n\n def write(self, command, **kwargs):\n \"\"\"Write a string command to the instrument appending `write_termination`.\n\n If the GPIB address in :attr:`.address` is defined, it is sent first.\n\n :param str command: Command string to be sent to the instrument\n (without termination).\n :param kwargs: Keyword arguments for the connection itself.\n \"\"\"\n # Overrides write instead of _write in order to ensure proper logging\n if self.address is not None and not command.startswith(\"++\"):\n super().write(\"++addr %d\" % self.address, **kwargs)\n super().write(command, **kwargs)\n\n def _format_binary_values(self, values, datatype='f', is_big_endian=False, header_fmt=\"ieee\"):\n \"\"\"Format values in binary format, used internally in :meth:`.write_binary_values`.\n\n :param values: data to be writen to the device.\n :param datatype: the format string for a single element. See struct module.\n :param is_big_endian: boolean indicating endianess.\n :param header_fmt: Format of the header prefixing the data (\"ieee\", \"hp\", \"empty\").\n :return: binary string.\n :rtype: bytes\n \"\"\"\n block = super()._format_binary_values(values, datatype, is_big_endian, header_fmt)\n # Prologix needs certian characters to be escaped.\n # Special care must be taken when sending binary data to instruments. If any of the\n # following characters occur in the binary data -- CR (ASCII 13), LF (ASCII 10), ESC\n # (ASCII 27), '+' (ASCII 43) - they must be escaped by preceding them with an ESC\n # character.\n special_chars = b'\\x0d\\x0a\\x1b\\x2b'\n new_block = b''\n for b in block:\n escape = b''\n if b in special_chars:\n escape = b'\\x1b'\n new_block += (escape + bytes((b,)))\n\n return new_block\n\n def write_binary_values(self, command, values, **kwargs):\n \"\"\" Write binary data to the instrument, e.g. waveform for signal generators.\n\n values are encoded in a binary format according to\n IEEE 488.2 Definite Length Arbitrary Block Response Data block.\n\n :param command: SCPI command to be sent to the instrument\n :param values: iterable representing the binary values\n :param kwargs: Key-word arguments to pass onto :meth:`._format_binary_values`\n :returns: number of bytes written\n \"\"\"\n if self.address is not None:\n address_command = \"++addr %d\\n\" % self.address\n self.write(address_command)\n super().write_binary_values(command, values, \"\\n\", **kwargs)\n\n def _read(self, **kwargs):\n \"\"\"Read up to (excluding) `read_termination` or the whole read buffer.\n\n :param kwargs: Keyword arguments for the connection itself.\n :returns str: ASCII response of the instrument (excluding read_termination).\n \"\"\"\n self.write(\"++read eoi\")\n return super()._read()\n\n def gpib(self, address, **kwargs):\n \"\"\" Return a PrologixAdapter object that references the GPIB\n address specified, while sharing the Serial connection with other\n calls of this function\n\n :param address: Integer GPIB address of the desired instrument\n :param kwargs: Arguments for the initialization\n :returns: PrologixAdapter for specific GPIB address\n \"\"\"\n return PrologixAdapter(self, address, **kwargs)\n\n def _check_for_srq(self):\n # it was int(self.ask(\"++srq\"))\n self.write(\"++srq\")\n return int(self.read())\n\n def wait_for_srq(self, timeout=25, delay=0.1):\n \"\"\" Blocks until a SRQ, and leaves the bit high\n\n :param timeout: Timeout duration in seconds.\n :param delay: Time delay between checking SRQ in seconds.\n :raises TimeoutError: \"Waiting for SRQ timed out.\"\n \"\"\"\n stop = time.perf_counter() + timeout\n while self._check_for_srq() != 1:\n if time.perf_counter() > stop:\n raise TimeoutError(\"Waiting for SRQ timed out.\")\n time.sleep(delay)\n\n def __repr__(self):\n if self.address is not None:\n return (f\"<PrologixAdapter(resource_name='{self.connection.resource_name}', \"\n f\"address={self.address:d})>\")\n else:\n return f\"<PrologixAdapter(resource_name='{self.connection.resource_name}')>\"\n", "path": "pymeasure/adapters/prologix.py"}], "after_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2022 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\nimport time\nfrom warnings import warn\n\nfrom pymeasure.adapters import VISAAdapter\n\n\nclass PrologixAdapter(VISAAdapter):\n \"\"\" Encapsulates the additional commands necessary\n to communicate over a Prologix GPIB-USB Adapter,\n using the :class:`VISAAdapter`.\n\n Each PrologixAdapter is constructed based on a connection to the Prologix device\n itself and the GPIB address of the instrument to be communicated to.\n Connection sharing is achieved by using the :meth:`.gpib`\n method to spawn new PrologixAdapters for different GPIB addresses.\n\n :param resource_name: A\n `VISA resource string <https://pyvisa.readthedocs.io/en/latest/introduction/names.html>`__\n that identifies the connection to the Prologix device itself, for example\n \"ASRL5\" for the 5th COM port.\n :param address: Integer GPIB address of the desired instrument.\n :param rw_delay: An optional delay to set between a write and read call for\n slow to respond instruments.\n\n .. deprecated:: 0.11\n Implement it in the instrument's `wait_until_read` method instead.\n\n :param preprocess_reply: optional callable used to preprocess\n strings received from the instrument. The callable returns the\n processed string.\n\n .. deprecated:: 0.11\n Implement it in the instrument's `read` method instead.\n\n :param kwargs: Key-word arguments if constructing a new serial object\n\n :ivar address: Integer GPIB address of the desired instrument.\n\n Usage example:\n\n .. code::\n\n adapter = PrologixAdapter(\"ASRL5::INSTR\", 7)\n sourcemeter = Keithley2400(adapter) # at GPIB address 7\n # generate another instance with a different GPIB address:\n adapter2 = adapter.gpib(9)\n multimeter = Keithley2000(adapter2) # at GPIB address 9\n\n\n To allow user access to the Prologix adapter in Linux, create the file:\n :code:`/etc/udev/rules.d/51-prologix.rules`, with contents:\n\n .. code-block:: bash\n\n SUBSYSTEMS==\"usb\",ATTRS{idVendor}==\"0403\",ATTRS{idProduct}==\"6001\",MODE=\"0666\"\n\n Then reload the udev rules with:\n\n .. code-block:: bash\n\n sudo udevadm control --reload-rules\n sudo udevadm trigger\n\n \"\"\"\n\n def __init__(self, resource_name, address=None, rw_delay=0, serial_timeout=None,\n preprocess_reply=None, **kwargs):\n # for legacy rw_delay: prefer new style over old one.\n if rw_delay:\n warn(\"Implement in Instrument's 'wait_until_read' instead.\", FutureWarning)\n kwargs['query_delay'] = rw_delay\n if serial_timeout:\n warn(\"Use 'timeout' in ms instead\", FutureWarning)\n kwargs['timeout'] = serial_timeout\n super().__init__(resource_name,\n asrl={\n 'timeout': 500,\n 'write_termination': \"\\n\",\n },\n preprocess_reply=preprocess_reply,\n **kwargs)\n self.address = address\n if not isinstance(resource_name, PrologixAdapter):\n self.set_defaults()\n\n def set_defaults(self):\n \"\"\" Set up the default behavior of the Prologix-GPIB\n adapter\n \"\"\"\n self.write(\"++auto 0\") # Turn off auto read-after-write\n self.write(\"++eoi 1\") # Append end-of-line to commands\n self.write(\"++eos 2\") # Append line-feed to commands\n\n def ask(self, command):\n \"\"\" Ask the Prologix controller.\n\n .. deprecated:: 0.11\n Call `Instrument.ask` instead.\n\n :param command: SCPI command string to be sent to instrument\n \"\"\"\n warn(\"Do not call `Adapter.ask`, but `Instrument.ask` instead.\",\n FutureWarning)\n self.write(command)\n return self.read()\n\n def write(self, command, **kwargs):\n \"\"\"Write a string command to the instrument appending `write_termination`.\n\n If the GPIB address in :attr:`.address` is defined, it is sent first.\n\n :param str command: Command string to be sent to the instrument\n (without termination).\n :param kwargs: Keyword arguments for the connection itself.\n \"\"\"\n # Overrides write instead of _write in order to ensure proper logging\n if self.address is not None and not command.startswith(\"++\"):\n super().write(\"++addr %d\" % self.address, **kwargs)\n super().write(command, **kwargs)\n\n def _format_binary_values(self, values, datatype='f', is_big_endian=False, header_fmt=\"ieee\"):\n \"\"\"Format values in binary format, used internally in :meth:`.write_binary_values`.\n\n :param values: data to be writen to the device.\n :param datatype: the format string for a single element. See struct module.\n :param is_big_endian: boolean indicating endianess.\n :param header_fmt: Format of the header prefixing the data (\"ieee\", \"hp\", \"empty\").\n :return: binary string.\n :rtype: bytes\n \"\"\"\n block = super()._format_binary_values(values, datatype, is_big_endian, header_fmt)\n # Prologix needs certian characters to be escaped.\n # Special care must be taken when sending binary data to instruments. If any of the\n # following characters occur in the binary data -- CR (ASCII 13), LF (ASCII 10), ESC\n # (ASCII 27), '+' (ASCII 43) - they must be escaped by preceding them with an ESC\n # character.\n special_chars = b'\\x0d\\x0a\\x1b\\x2b'\n new_block = b''\n for b in block:\n escape = b''\n if b in special_chars:\n escape = b'\\x1b'\n new_block += (escape + bytes((b,)))\n\n return new_block\n\n def write_binary_values(self, command, values, **kwargs):\n \"\"\" Write binary data to the instrument, e.g. waveform for signal generators.\n\n values are encoded in a binary format according to\n IEEE 488.2 Definite Length Arbitrary Block Response Data block.\n\n :param command: SCPI command to be sent to the instrument\n :param values: iterable representing the binary values\n :param kwargs: Key-word arguments to pass onto :meth:`._format_binary_values`\n :returns: number of bytes written\n \"\"\"\n if self.address is not None:\n address_command = \"++addr %d\\n\" % self.address\n self.write(address_command)\n super().write_binary_values(command, values, \"\\n\", **kwargs)\n\n def _read(self, **kwargs):\n \"\"\"Read up to (excluding) `read_termination` or the whole read buffer.\n\n :param kwargs: Keyword arguments for the connection itself.\n :returns str: ASCII response of the instrument (excluding read_termination).\n \"\"\"\n self.write(\"++read eoi\")\n return super()._read()\n\n def gpib(self, address, **kwargs):\n \"\"\" Return a PrologixAdapter object that references the GPIB\n address specified, while sharing the Serial connection with other\n calls of this function\n\n :param address: Integer GPIB address of the desired instrument\n :param kwargs: Arguments for the initialization\n :returns: PrologixAdapter for specific GPIB address\n \"\"\"\n return PrologixAdapter(self, address, **kwargs)\n\n def _check_for_srq(self):\n # it was int(self.ask(\"++srq\"))\n self.write(\"++srq\")\n return int(self.read())\n\n def wait_for_srq(self, timeout=25, delay=0.1):\n \"\"\" Blocks until a SRQ, and leaves the bit high\n\n :param timeout: Timeout duration in seconds.\n :param delay: Time delay between checking SRQ in seconds.\n :raises TimeoutError: \"Waiting for SRQ timed out.\"\n \"\"\"\n stop = time.perf_counter() + timeout\n while self._check_for_srq() != 1:\n if time.perf_counter() > stop:\n raise TimeoutError(\"Waiting for SRQ timed out.\")\n time.sleep(delay)\n\n def __repr__(self):\n if self.address is not None:\n return (f\"<PrologixAdapter(resource_name='{self.connection.resource_name}', \"\n f\"address={self.address:d})>\")\n else:\n return f\"<PrologixAdapter(resource_name='{self.connection.resource_name}')>\"\n", "path": "pymeasure/adapters/prologix.py"}]}
3,844
542
gh_patches_debug_20856
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-468
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Problem building docs on Window using "make html" # Brief Description Following the instructions on the [contributions](https://pyjanitor.readthedocs.io/contributing.html) page fails when trying to `make html` a symbolic link privilege not held error # System Information <!-- System information helps us. To keep things simple, just let us know the OS and Python version first. You can provide the optional information later. --> - Operating system: Windows <!-- delete the appropriate ones --> - OS details (optional): 10 Enterprise <!-- e.g. version, or Linux distro --> - Python version (required): Python: 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)] # Minimally Reproducible Code <!-- If you provide minimal code that reproduces the problem, this makes it easier for us to debug what's going on. Minimal code should be trivially copy/pastable into a Python interpreter in its entirety. Be sure to include imports. --> after activating the env and traversing to the ..\pyjanitor-dev\docs folder `make html` # Error Messages <!-- If you get an error message, please paste it between the backticks here. --> ``` Traceback (most recent call last): File "C:\conda3x64\envs\pyjanitor-dev\lib\site-packages\sphinx\config.py", line 361, in eval_config_file execfile_(filename, namespace) File "C:\conda3x64\envs\pyjanitor-dev\lib\site-packages\sphinx\util\pycompat.py", line 86, in execfile_ exec(code, _globals) File "c:\workspace\cjmayers\CODE\git_workspaces_external\pyjanitor\docs\conf.py", line 31, in <module> notebooks.symlink_to("../examples/notebooks") File "C:\conda3x64\envs\pyjanitor-dev\lib\pathlib.py", line 1330, in symlink_to self._accessor.symlink(target, self, target_is_directory) OSError: symbolic link privilege not held ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 """Sphinx configuration.""" 2 3 # -*- coding: utf-8 -*- 4 # 5 # Configuration file for the Sphinx documentation builder. 6 # 7 # This file does only contain a selection of the most common options. For a 8 # full list see the documentation: 9 # http://www.sphinx-doc.org/en/stable/config 10 11 # -- Path setup -------------------------------------------------------------- 12 13 import datetime 14 15 # If extensions (or modules to document with autodoc) are in another directory, 16 # add these directories to sys.path here. If the directory is relative to the 17 # documentation root, use os.path.abspath to make it absolute, like shown here. 18 # 19 import os 20 import sys 21 from pathlib import Path 22 23 sys.path.insert(0, os.path.abspath(".")) 24 sys.path.insert(0, os.path.abspath("../examples")) 25 26 # Make a symlink in our sphinx source directory to the top-level 27 # examples/notebooks directory so we can include notebooks in the doc 28 notebooks = Path("./notebooks") 29 if not notebooks.exists(): 30 print("Making symlink to ../examples/notebooks") 31 notebooks.symlink_to("../examples/notebooks") 32 33 34 # -- Project information ----------------------------------------------------- 35 36 project = "pyjanitor" 37 38 39 now = datetime.datetime.now() 40 CurrentYear = str(now.year) 41 copyright = CurrentYear + ", PyJanitor devs" 42 author = "Eric J. Ma" 43 44 # The short X.Y version 45 version = "0.1.0" 46 # The full version, including alpha/beta/rc tags 47 release = "" 48 49 50 # -- General configuration --------------------------------------------------- 51 52 # If your documentation needs a minimal Sphinx version, state it here. 53 # 54 # needs_sphinx = '1.0' 55 56 # Add any Sphinx extension module names here, as strings. They can be 57 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 58 # ones. 59 extensions = [ 60 "sphinx.ext.autodoc", 61 "sphinx.ext.doctest", 62 "sphinx.ext.intersphinx", 63 "sphinx.ext.todo", 64 "sphinx.ext.coverage", 65 "sphinx.ext.viewcode", 66 "sphinx.ext.githubpages", 67 "sphinxcontrib.fulltoc", 68 "nbsphinx", 69 "sphinx.ext.autosummary", 70 ] 71 72 # Add any paths that contain templates here, relative to this directory. 73 templates_path = ["_templates"] 74 75 # The suffix(es) of source filenames. 76 # You can specify multiple suffix as a list of string: 77 # 78 # source_suffix = ['.rst', '.md'] 79 source_suffix = [".md", ".rst", ".ipynb"] 80 81 # The master toctree document. 82 master_doc = "index" 83 84 # The language for content autogenerated by Sphinx. Refer to documentation 85 # for a list of supported languages. 86 # 87 # This is also used if you do content translation via gettext catalogs. 88 # Usually you set "language" from the command line for these cases. 89 language = None 90 91 # List of patterns, relative to source directory, that match files and 92 # directories to ignore when looking for source files. 93 # This pattern also affects html_static_path and html_extra_path . 94 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] 95 96 # The name of the Pygments (syntax highlighting) style to use. 97 pygments_style = "sphinx" 98 99 100 # -- Options for HTML output ------------------------------------------------- 101 102 # The theme to use for HTML and HTML Help pages. See the documentation for 103 # a list of builtin themes. 104 # 105 html_theme = "alabaster" 106 107 # Theme options are theme-specific and customize the look and feel of a theme 108 # further. For a list of options available for each theme, see the 109 # documentation. 110 # 111 html_theme_options = {"logo": "logo_title.svg"} 112 113 # Add any paths that contain custom static files (such as style sheets) here, 114 # relative to this directory. They are copied after the builtin static files, 115 # so a file named "default.css" will overwrite the builtin "default.css". 116 html_static_path = ["_static"] 117 118 # Custom sidebar templates, must be a dictionary that maps document names 119 # to template names. 120 # 121 # The default sidebars (for documents that don't match any pattern) are 122 # defined by theme itself. Builtin themes are using these templates by 123 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 124 # 'searchbox.html']``. 125 # 126 html_sidebars = { 127 "**": ["about.html", "navigation.html", "relations.html", "searchbox.html"] 128 } 129 130 131 # -- Options for HTMLHelp output --------------------------------------------- 132 133 # Output file base name for HTML help builder. 134 htmlhelp_basename = "pyjanitordoc" 135 136 137 # -- Options for LaTeX output ------------------------------------------------ 138 139 latex_elements = { 140 # The paper size ('letterpaper' or 'a4paper'). 141 # 142 # 'papersize': 'letterpaper', 143 # The font size ('10pt', '11pt' or '12pt'). 144 # 145 # 'pointsize': '10pt', 146 # Additional stuff for the LaTeX preamble. 147 # 148 # 'preamble': '', 149 # Latex figure (float) alignment 150 # 151 # 'figure_align': 'htbp', 152 } 153 154 # Grouping the document tree into LaTeX files. List of tuples 155 # (source start file, target name, title, 156 # author, documentclass [howto, manual, or own class]). 157 latex_documents = [ 158 ( 159 master_doc, 160 "pyjanitor.tex", 161 "pyjanitor Documentation", 162 "Eric J. Ma", 163 "manual", 164 ) 165 ] 166 167 168 # -- Options for manual page output ------------------------------------------ 169 170 # One entry per manual page. List of tuples 171 # (source start file, name, description, authors, manual section). 172 man_pages = [(master_doc, "pyjanitor", "pyjanitor Documentation", [author], 1)] 173 174 175 # -- Options for Texinfo output ---------------------------------------------- 176 177 # Grouping the document tree into Texinfo files. List of tuples 178 # (source start file, target name, title, author, 179 # dir menu entry, description, category) 180 texinfo_documents = [ 181 ( 182 master_doc, 183 "pyjanitor", 184 "pyjanitor Documentation", 185 author, 186 "pyjanitor", 187 "One line description of project.", 188 "Miscellaneous", 189 ) 190 ] 191 192 193 # -- Extension configuration ------------------------------------------------- 194 195 # -- Options for intersphinx extension --------------------------------------- 196 197 # Example configuration for intersphinx: refer to the Python standard library. 198 intersphinx_mapping = { 199 "https://docs.python.org/": None, 200 "https://pandas.pydata.org/pandas-docs/stable": None, 201 } 202 203 # -- Options for todo extension ---------------------------------------------- 204 205 # If true, `todo` and `todoList` produce output, else they produce nothing. 206 todo_include_todos = True 207 208 209 # -- Other options ----------------------------------------------------------- 210 211 autosummary_generate = True # Make _autosummary files and include them 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -17,6 +17,7 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. # import os +import platform import sys from pathlib import Path @@ -26,10 +27,16 @@ # Make a symlink in our sphinx source directory to the top-level # examples/notebooks directory so we can include notebooks in the doc notebooks = Path("./notebooks") -if not notebooks.exists(): - print("Making symlink to ../examples/notebooks") - notebooks.symlink_to("../examples/notebooks") +if platform.system() == "Windows": + # Only for windows + os.system("mklink /J notebooks ..\\examples\\notebooks") +else: + try: + print("Making symlink to ../examples/notebooks") + notebooks.symlink_to("../examples/notebooks") + except FileExistsError as e: + print(f"{notebooks} directory already exists. Not creating..") # -- Project information -----------------------------------------------------
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -17,6 +17,7 @@\n # documentation root, use os.path.abspath to make it absolute, like shown here.\n #\n import os\n+import platform\n import sys\n from pathlib import Path\n \n@@ -26,10 +27,16 @@\n # Make a symlink in our sphinx source directory to the top-level\n # examples/notebooks directory so we can include notebooks in the doc\n notebooks = Path(\"./notebooks\")\n-if not notebooks.exists():\n- print(\"Making symlink to ../examples/notebooks\")\n- notebooks.symlink_to(\"../examples/notebooks\")\n \n+if platform.system() == \"Windows\":\n+ # Only for windows\n+ os.system(\"mklink /J notebooks ..\\\\examples\\\\notebooks\")\n+else:\n+ try:\n+ print(\"Making symlink to ../examples/notebooks\")\n+ notebooks.symlink_to(\"../examples/notebooks\")\n+ except FileExistsError as e:\n+ print(f\"{notebooks} directory already exists. Not creating..\")\n \n # -- Project information -----------------------------------------------------\n", "issue": "[BUG] Problem building docs on Window using \"make html\"\n# Brief Description\r\n\r\nFollowing the instructions on the [contributions](https://pyjanitor.readthedocs.io/contributing.html) page fails when trying to `make html` a symbolic link privilege not held error\r\n\r\n# System Information\r\n\r\n<!-- System information helps us. To keep things simple, just let us know the OS and Python version first.\r\nYou can provide the optional information later. -->\r\n\r\n- Operating system: Windows <!-- delete the appropriate ones -->\r\n- OS details (optional): 10 Enterprise <!-- e.g. version, or Linux distro -->\r\n- Python version (required): Python: 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]\r\n\r\n# Minimally Reproducible Code\r\n\r\n<!-- If you provide minimal code that reproduces the problem, this makes it easier for us to debug what's going on.\r\n\r\nMinimal code should be trivially copy/pastable into a Python interpreter in its entirety. Be sure to include imports.\r\n-->\r\nafter activating the env and traversing to the ..\\pyjanitor-dev\\docs folder\r\n`make html`\r\n\r\n# Error Messages\r\n\r\n<!-- If you get an error message, please paste it between the backticks here. -->\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\conda3x64\\envs\\pyjanitor-dev\\lib\\site-packages\\sphinx\\config.py\", line 361, in eval_config_file\r\n execfile_(filename, namespace)\r\n File \"C:\\conda3x64\\envs\\pyjanitor-dev\\lib\\site-packages\\sphinx\\util\\pycompat.py\", line 86, in execfile_\r\n exec(code, _globals)\r\n File \"c:\\workspace\\cjmayers\\CODE\\git_workspaces_external\\pyjanitor\\docs\\conf.py\", line 31, in <module>\r\n notebooks.symlink_to(\"../examples/notebooks\")\r\n File \"C:\\conda3x64\\envs\\pyjanitor-dev\\lib\\pathlib.py\", line 1330, in symlink_to\r\n self._accessor.symlink(target, self, target_is_directory)\r\nOSError: symbolic link privilege not held\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Sphinx configuration.\"\"\"\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\nimport datetime\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../examples\"))\n\n# Make a symlink in our sphinx source directory to the top-level\n# examples/notebooks directory so we can include notebooks in the doc\nnotebooks = Path(\"./notebooks\")\nif not notebooks.exists():\n print(\"Making symlink to ../examples/notebooks\")\n notebooks.symlink_to(\"../examples/notebooks\")\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pyjanitor\"\n\n\nnow = datetime.datetime.now()\nCurrentYear = str(now.year)\ncopyright = CurrentYear + \", PyJanitor devs\"\nauthor = \"Eric J. Ma\"\n\n# The short X.Y version\nversion = \"0.1.0\"\n# The full version, including alpha/beta/rc tags\nrelease = \"\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.fulltoc\",\n \"nbsphinx\",\n \"sphinx.ext.autosummary\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".md\", \".rst\", \".ipynb\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo\": \"logo_title.svg\"}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n \"**\": [\"about.html\", \"navigation.html\", \"relations.html\", \"searchbox.html\"]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pyjanitordoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"pyjanitor.tex\",\n \"pyjanitor Documentation\",\n \"Eric J. Ma\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyjanitor\", \"pyjanitor Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pyjanitor\",\n \"pyjanitor Documentation\",\n author,\n \"pyjanitor\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Other options -----------------------------------------------------------\n\nautosummary_generate = True # Make _autosummary files and include them\n", "path": "docs/conf.py"}], "after_files": [{"content": "\"\"\"Sphinx configuration.\"\"\"\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\nimport datetime\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../examples\"))\n\n# Make a symlink in our sphinx source directory to the top-level\n# examples/notebooks directory so we can include notebooks in the doc\nnotebooks = Path(\"./notebooks\")\n\nif platform.system() == \"Windows\":\n # Only for windows\n os.system(\"mklink /J notebooks ..\\\\examples\\\\notebooks\")\nelse:\n try:\n print(\"Making symlink to ../examples/notebooks\")\n notebooks.symlink_to(\"../examples/notebooks\")\n except FileExistsError as e:\n print(f\"{notebooks} directory already exists. Not creating..\")\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pyjanitor\"\n\n\nnow = datetime.datetime.now()\nCurrentYear = str(now.year)\ncopyright = CurrentYear + \", PyJanitor devs\"\nauthor = \"Eric J. Ma\"\n\n# The short X.Y version\nversion = \"0.1.0\"\n# The full version, including alpha/beta/rc tags\nrelease = \"\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.fulltoc\",\n \"nbsphinx\",\n \"sphinx.ext.autosummary\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".md\", \".rst\", \".ipynb\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo\": \"logo_title.svg\"}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n \"**\": [\"about.html\", \"navigation.html\", \"relations.html\", \"searchbox.html\"]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pyjanitordoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"pyjanitor.tex\",\n \"pyjanitor Documentation\",\n \"Eric J. Ma\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyjanitor\", \"pyjanitor Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pyjanitor\",\n \"pyjanitor Documentation\",\n author,\n \"pyjanitor\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Other options -----------------------------------------------------------\n\nautosummary_generate = True # Make _autosummary files and include them\n", "path": "docs/conf.py"}]}
2,767
248
gh_patches_debug_21133
rasdani/github-patches
git_diff
borgbackup__borg-5600
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cythonize crashes on macOS with py39 Found this issue on the macOS vagrant machine after upgrading python to 3.9.1: https://github.com/cython/cython/issues/3262 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # borgbackup - main setup code (see also other setup_*.py files) 2 3 import os 4 import sys 5 from collections import defaultdict 6 from glob import glob 7 8 try: 9 import multiprocessing 10 except ImportError: 11 multiprocessing = None 12 13 from setuptools.command.build_ext import build_ext 14 from setuptools import setup, find_packages, Extension 15 from setuptools.command.sdist import sdist 16 from distutils.command.clean import clean 17 18 try: 19 from Cython.Build import cythonize 20 except ImportError: 21 cythonize = None 22 23 import setup_checksums 24 import setup_compress 25 import setup_crypto 26 import setup_docs 27 28 is_win32 = sys.platform.startswith('win32') 29 30 # How the build process finds the system libs / uses the bundled code: 31 # 32 # 1. it will try to use (system) libs (see 1.1. and 1.2.), 33 # except if you use these env vars to force using the bundled code: 34 # BORG_USE_BUNDLED_XXX undefined --> try using system lib 35 # BORG_USE_BUNDLED_XXX=YES --> use the bundled code 36 # Note: do not use =NO, that is not supported! 37 # 1.1. if BORG_LIBXXX_PREFIX is set, it will use headers and libs from there. 38 # 1.2. if not and pkg-config can locate the lib, the lib located by 39 # pkg-config will be used. We use the pkg-config tool via the pkgconfig 40 # python package, which must be installed before invoking setup.py. 41 # if pkgconfig is not installed, this step is skipped. 42 # 2. if no system lib could be located via 1.1. or 1.2., it will fall back 43 # to using the bundled code. 44 45 # OpenSSL is required as a (system) lib in any case as we do not bundle it. 46 # Thus, only step 1.1. and 1.2. apply to openssl (but not 1. and 2.). 47 # needed: openssl >=1.0.2 or >=1.1.0 (or compatible) 48 system_prefix_openssl = os.environ.get('BORG_OPENSSL_PREFIX') 49 50 # needed: blake2 (>= 0.98.1) 51 prefer_system_libb2 = not bool(os.environ.get('BORG_USE_BUNDLED_B2')) 52 system_prefix_libb2 = os.environ.get('BORG_LIBB2_PREFIX') 53 54 # needed: lz4 (>= 1.7.0 / r129) 55 prefer_system_liblz4 = not bool(os.environ.get('BORG_USE_BUNDLED_LZ4')) 56 system_prefix_liblz4 = os.environ.get('BORG_LIBLZ4_PREFIX') 57 58 # needed: zstd (>= 1.3.0) 59 prefer_system_libzstd = not bool(os.environ.get('BORG_USE_BUNDLED_ZSTD')) 60 system_prefix_libzstd = os.environ.get('BORG_LIBZSTD_PREFIX') 61 62 prefer_system_libxxhash = not bool(os.environ.get('BORG_USE_BUNDLED_XXHASH')) 63 system_prefix_libxxhash = os.environ.get('BORG_LIBXXHASH_PREFIX') 64 65 # Number of threads to use for cythonize, not used on windows 66 cpu_threads = multiprocessing.cpu_count() if multiprocessing else 1 67 68 # Are we building on ReadTheDocs? 69 on_rtd = os.environ.get('READTHEDOCS') 70 71 install_requires = [ 72 # we are rather picky about msgpack versions, because a good working msgpack is 73 # very important for borg, see: https://github.com/borgbackup/borg/issues/3753 74 'msgpack >=0.5.6, <=1.0.0', 75 # Please note: 76 # using any other version is not supported by borg development and 77 # any feedback related to issues caused by this will be ignored. 78 ] 79 80 # note for package maintainers: if you package borgbackup for distribution, 81 # please (if available) add pyfuse3 (preferably) or llfuse (not maintained any more) 82 # as a *requirement*. "borg mount" needs one of them to work. 83 # if neither is available, do not require it, most of borgbackup will work. 84 extras_require = { 85 'llfuse': [ 86 'llfuse >= 1.3.8', 87 ], 88 'pyfuse3': [ 89 'pyfuse3 >= 3.1.1', 90 ], 91 'nofuse': [], 92 } 93 94 compress_source = 'src/borg/compress.pyx' 95 crypto_ll_source = 'src/borg/crypto/low_level.pyx' 96 crypto_helpers = 'src/borg/crypto/_crypto_helpers.c' 97 chunker_source = 'src/borg/chunker.pyx' 98 hashindex_source = 'src/borg/hashindex.pyx' 99 item_source = 'src/borg/item.pyx' 100 checksums_source = 'src/borg/algorithms/checksums.pyx' 101 platform_posix_source = 'src/borg/platform/posix.pyx' 102 platform_linux_source = 'src/borg/platform/linux.pyx' 103 platform_syncfilerange_source = 'src/borg/platform/syncfilerange.pyx' 104 platform_darwin_source = 'src/borg/platform/darwin.pyx' 105 platform_freebsd_source = 'src/borg/platform/freebsd.pyx' 106 platform_windows_source = 'src/borg/platform/windows.pyx' 107 108 cython_sources = [ 109 compress_source, 110 crypto_ll_source, 111 chunker_source, 112 hashindex_source, 113 item_source, 114 checksums_source, 115 116 platform_posix_source, 117 platform_linux_source, 118 platform_syncfilerange_source, 119 platform_freebsd_source, 120 platform_darwin_source, 121 platform_windows_source, 122 ] 123 124 if cythonize: 125 Sdist = sdist 126 else: 127 class Sdist(sdist): 128 def __init__(self, *args, **kwargs): 129 raise Exception('Cython is required to run sdist') 130 131 cython_c_files = [fn.replace('.pyx', '.c') for fn in cython_sources] 132 if not on_rtd and not all(os.path.exists(path) for path in cython_c_files): 133 raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.') 134 135 136 def rm(file): 137 try: 138 os.unlink(file) 139 print('rm', file) 140 except FileNotFoundError: 141 pass 142 143 144 class Clean(clean): 145 def run(self): 146 super().run() 147 for source in cython_sources: 148 genc = source.replace('.pyx', '.c') 149 rm(genc) 150 compiled_glob = source.replace('.pyx', '.cpython*') 151 for compiled in sorted(glob(compiled_glob)): 152 rm(compiled) 153 154 155 cmdclass = { 156 'build_ext': build_ext, 157 'build_usage': setup_docs.build_usage, 158 'build_man': setup_docs.build_man, 159 'sdist': Sdist, 160 'clean': Clean, 161 } 162 163 ext_modules = [] 164 if not on_rtd: 165 166 def members_appended(*ds): 167 result = defaultdict(list) 168 for d in ds: 169 for k, v in d.items(): 170 assert isinstance(v, list) 171 result[k].extend(v) 172 return result 173 174 try: 175 import pkgconfig as pc 176 except ImportError: 177 print('Warning: can not import pkgconfig python package.') 178 pc = None 179 180 crypto_ext_kwargs = members_appended( 181 dict(sources=[crypto_ll_source, crypto_helpers]), 182 setup_crypto.crypto_ext_kwargs(pc, system_prefix_openssl), 183 setup_crypto.b2_ext_kwargs(pc, prefer_system_libb2, system_prefix_libb2), 184 ) 185 186 compress_ext_kwargs = members_appended( 187 dict(sources=[compress_source]), 188 setup_compress.lz4_ext_kwargs(pc, prefer_system_liblz4, system_prefix_liblz4), 189 setup_compress.zstd_ext_kwargs(pc, prefer_system_libzstd, system_prefix_libzstd, 190 multithreaded=False, legacy=False), 191 ) 192 193 checksums_ext_kwargs = members_appended( 194 dict(sources=[checksums_source]), 195 setup_checksums.xxhash_ext_kwargs(pc, prefer_system_libxxhash, system_prefix_libxxhash), 196 ) 197 198 ext_modules += [ 199 Extension('borg.crypto.low_level', **crypto_ext_kwargs), 200 Extension('borg.compress', **compress_ext_kwargs), 201 Extension('borg.hashindex', [hashindex_source]), 202 Extension('borg.item', [item_source]), 203 Extension('borg.chunker', [chunker_source]), 204 Extension('borg.algorithms.checksums', **checksums_ext_kwargs), 205 ] 206 207 posix_ext = Extension('borg.platform.posix', [platform_posix_source]) 208 linux_ext = Extension('borg.platform.linux', [platform_linux_source], libraries=['acl']) 209 syncfilerange_ext = Extension('borg.platform.syncfilerange', [platform_syncfilerange_source]) 210 freebsd_ext = Extension('borg.platform.freebsd', [platform_freebsd_source]) 211 darwin_ext = Extension('borg.platform.darwin', [platform_darwin_source]) 212 windows_ext = Extension('borg.platform.windows', [platform_windows_source]) 213 214 if not is_win32: 215 ext_modules.append(posix_ext) 216 else: 217 ext_modules.append(windows_ext) 218 if sys.platform == 'linux': 219 ext_modules.append(linux_ext) 220 ext_modules.append(syncfilerange_ext) 221 elif sys.platform.startswith('freebsd'): 222 ext_modules.append(freebsd_ext) 223 elif sys.platform == 'darwin': 224 ext_modules.append(darwin_ext) 225 226 # sometimes there's no need to cythonize 227 # this breaks chained commands like 'clean sdist' 228 cythonizing = len(sys.argv) > 1 and sys.argv[1] not in ('clean', 'egg_info', '--help-commands', '--version') \ 229 and '--help' not in sys.argv[1:] 230 231 if cythonize and cythonizing: 232 cython_opts = dict( 233 # default language_level will be '3str' starting from Cython 3.0.0, 234 # but old cython versions (< 0.29) do not know that, thus we use 3 for now. 235 compiler_directives={'language_level': 3}, 236 ) 237 if not is_win32: 238 # compile .pyx extensions to .c in parallel, does not work on windows 239 cython_opts['nthreads'] = cpu_threads + 1 240 241 # generate C code from Cython for ALL supported platforms, so we have them in the sdist. 242 # the sdist does not require Cython at install time, so we need all as C. 243 cythonize([posix_ext, linux_ext, syncfilerange_ext, freebsd_ext, darwin_ext, windows_ext], **cython_opts) 244 # generate C code from Cython for THIS platform (and for all platform-independent Cython parts). 245 ext_modules = cythonize(ext_modules, **cython_opts) 246 247 248 setup( 249 name='borgbackup', 250 use_scm_version={ 251 'write_to': 'src/borg/_version.py', 252 }, 253 author='The Borg Collective (see AUTHORS file)', 254 author_email='borgbackup@python.org', 255 url='https://borgbackup.readthedocs.io/', 256 description='Deduplicated, encrypted, authenticated and compressed backups', 257 long_description=setup_docs.long_desc_from_readme(), 258 license='BSD', 259 platforms=['Linux', 'MacOS X', 'FreeBSD', 'OpenBSD', 'NetBSD', ], 260 classifiers=[ 261 'Development Status :: 3 - Alpha', 262 'Environment :: Console', 263 'Intended Audience :: System Administrators', 264 'License :: OSI Approved :: BSD License', 265 'Operating System :: POSIX :: BSD :: FreeBSD', 266 'Operating System :: POSIX :: BSD :: OpenBSD', 267 'Operating System :: POSIX :: BSD :: NetBSD', 268 'Operating System :: MacOS :: MacOS X', 269 'Operating System :: POSIX :: Linux', 270 'Programming Language :: Python', 271 'Programming Language :: Python :: 3', 272 'Programming Language :: Python :: 3.6', 273 'Programming Language :: Python :: 3.7', 274 'Programming Language :: Python :: 3.8', 275 'Programming Language :: Python :: 3.9', 276 'Topic :: Security :: Cryptography', 277 'Topic :: System :: Archiving :: Backup', 278 ], 279 packages=find_packages('src'), 280 package_dir={'': 'src'}, 281 zip_safe=False, 282 entry_points={ 283 'console_scripts': [ 284 'borg = borg.archiver:main', 285 'borgfs = borg.archiver:main', 286 ] 287 }, 288 # See also the MANIFEST.in file. 289 # We want to install all the files in the package directories... 290 include_package_data=True, 291 # ...except the source files which have been compiled (C extensions): 292 exclude_package_data={ 293 '': ['*.c', '*.h', '*.pyx', ], 294 }, 295 cmdclass=cmdclass, 296 ext_modules=ext_modules, 297 setup_requires=['setuptools_scm>=1.7'], 298 install_requires=install_requires, 299 extras_require=extras_require, 300 python_requires='>=3.6', 301 ) 302 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ system_prefix_libxxhash = os.environ.get('BORG_LIBXXHASH_PREFIX') # Number of threads to use for cythonize, not used on windows -cpu_threads = multiprocessing.cpu_count() if multiprocessing else 1 +cpu_threads = multiprocessing.cpu_count() if multiprocessing and multiprocessing.get_start_method() != 'spawn' else None # Are we building on ReadTheDocs? on_rtd = os.environ.get('READTHEDOCS') @@ -236,7 +236,7 @@ ) if not is_win32: # compile .pyx extensions to .c in parallel, does not work on windows - cython_opts['nthreads'] = cpu_threads + 1 + cython_opts['nthreads'] = cpu_threads # generate C code from Cython for ALL supported platforms, so we have them in the sdist. # the sdist does not require Cython at install time, so we need all as C.
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,7 @@\n system_prefix_libxxhash = os.environ.get('BORG_LIBXXHASH_PREFIX')\n \n # Number of threads to use for cythonize, not used on windows\n-cpu_threads = multiprocessing.cpu_count() if multiprocessing else 1\n+cpu_threads = multiprocessing.cpu_count() if multiprocessing and multiprocessing.get_start_method() != 'spawn' else None\n \n # Are we building on ReadTheDocs?\n on_rtd = os.environ.get('READTHEDOCS')\n@@ -236,7 +236,7 @@\n )\n if not is_win32:\n # compile .pyx extensions to .c in parallel, does not work on windows\n- cython_opts['nthreads'] = cpu_threads + 1\n+ cython_opts['nthreads'] = cpu_threads\n \n # generate C code from Cython for ALL supported platforms, so we have them in the sdist.\n # the sdist does not require Cython at install time, so we need all as C.\n", "issue": "cythonize crashes on macOS with py39\nFound this issue on the macOS vagrant machine after upgrading python to 3.9.1:\r\n\r\nhttps://github.com/cython/cython/issues/3262\r\n\n", "before_files": [{"content": "# borgbackup - main setup code (see also other setup_*.py files)\n\nimport os\nimport sys\nfrom collections import defaultdict\nfrom glob import glob\n\ntry:\n import multiprocessing\nexcept ImportError:\n multiprocessing = None\n\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.sdist import sdist\nfrom distutils.command.clean import clean\n\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\nimport setup_checksums\nimport setup_compress\nimport setup_crypto\nimport setup_docs\n\nis_win32 = sys.platform.startswith('win32')\n\n# How the build process finds the system libs / uses the bundled code:\n#\n# 1. it will try to use (system) libs (see 1.1. and 1.2.),\n# except if you use these env vars to force using the bundled code:\n# BORG_USE_BUNDLED_XXX undefined --> try using system lib\n# BORG_USE_BUNDLED_XXX=YES --> use the bundled code\n# Note: do not use =NO, that is not supported!\n# 1.1. if BORG_LIBXXX_PREFIX is set, it will use headers and libs from there.\n# 1.2. if not and pkg-config can locate the lib, the lib located by\n# pkg-config will be used. We use the pkg-config tool via the pkgconfig\n# python package, which must be installed before invoking setup.py.\n# if pkgconfig is not installed, this step is skipped.\n# 2. if no system lib could be located via 1.1. or 1.2., it will fall back\n# to using the bundled code.\n\n# OpenSSL is required as a (system) lib in any case as we do not bundle it.\n# Thus, only step 1.1. and 1.2. apply to openssl (but not 1. and 2.).\n# needed: openssl >=1.0.2 or >=1.1.0 (or compatible)\nsystem_prefix_openssl = os.environ.get('BORG_OPENSSL_PREFIX')\n\n# needed: blake2 (>= 0.98.1)\nprefer_system_libb2 = not bool(os.environ.get('BORG_USE_BUNDLED_B2'))\nsystem_prefix_libb2 = os.environ.get('BORG_LIBB2_PREFIX')\n\n# needed: lz4 (>= 1.7.0 / r129)\nprefer_system_liblz4 = not bool(os.environ.get('BORG_USE_BUNDLED_LZ4'))\nsystem_prefix_liblz4 = os.environ.get('BORG_LIBLZ4_PREFIX')\n\n# needed: zstd (>= 1.3.0)\nprefer_system_libzstd = not bool(os.environ.get('BORG_USE_BUNDLED_ZSTD'))\nsystem_prefix_libzstd = os.environ.get('BORG_LIBZSTD_PREFIX')\n\nprefer_system_libxxhash = not bool(os.environ.get('BORG_USE_BUNDLED_XXHASH'))\nsystem_prefix_libxxhash = os.environ.get('BORG_LIBXXHASH_PREFIX')\n\n# Number of threads to use for cythonize, not used on windows\ncpu_threads = multiprocessing.cpu_count() if multiprocessing else 1\n\n# Are we building on ReadTheDocs?\non_rtd = os.environ.get('READTHEDOCS')\n\ninstall_requires = [\n # we are rather picky about msgpack versions, because a good working msgpack is\n # very important for borg, see: https://github.com/borgbackup/borg/issues/3753\n 'msgpack >=0.5.6, <=1.0.0',\n # Please note:\n # using any other version is not supported by borg development and\n # any feedback related to issues caused by this will be ignored.\n]\n\n# note for package maintainers: if you package borgbackup for distribution,\n# please (if available) add pyfuse3 (preferably) or llfuse (not maintained any more)\n# as a *requirement*. \"borg mount\" needs one of them to work.\n# if neither is available, do not require it, most of borgbackup will work.\nextras_require = {\n 'llfuse': [\n 'llfuse >= 1.3.8',\n ],\n 'pyfuse3': [\n 'pyfuse3 >= 3.1.1',\n ],\n 'nofuse': [],\n}\n\ncompress_source = 'src/borg/compress.pyx'\ncrypto_ll_source = 'src/borg/crypto/low_level.pyx'\ncrypto_helpers = 'src/borg/crypto/_crypto_helpers.c'\nchunker_source = 'src/borg/chunker.pyx'\nhashindex_source = 'src/borg/hashindex.pyx'\nitem_source = 'src/borg/item.pyx'\nchecksums_source = 'src/borg/algorithms/checksums.pyx'\nplatform_posix_source = 'src/borg/platform/posix.pyx'\nplatform_linux_source = 'src/borg/platform/linux.pyx'\nplatform_syncfilerange_source = 'src/borg/platform/syncfilerange.pyx'\nplatform_darwin_source = 'src/borg/platform/darwin.pyx'\nplatform_freebsd_source = 'src/borg/platform/freebsd.pyx'\nplatform_windows_source = 'src/borg/platform/windows.pyx'\n\ncython_sources = [\n compress_source,\n crypto_ll_source,\n chunker_source,\n hashindex_source,\n item_source,\n checksums_source,\n\n platform_posix_source,\n platform_linux_source,\n platform_syncfilerange_source,\n platform_freebsd_source,\n platform_darwin_source,\n platform_windows_source,\n]\n\nif cythonize:\n Sdist = sdist\nelse:\n class Sdist(sdist):\n def __init__(self, *args, **kwargs):\n raise Exception('Cython is required to run sdist')\n\n cython_c_files = [fn.replace('.pyx', '.c') for fn in cython_sources]\n if not on_rtd and not all(os.path.exists(path) for path in cython_c_files):\n raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')\n\n\ndef rm(file):\n try:\n os.unlink(file)\n print('rm', file)\n except FileNotFoundError:\n pass\n\n\nclass Clean(clean):\n def run(self):\n super().run()\n for source in cython_sources:\n genc = source.replace('.pyx', '.c')\n rm(genc)\n compiled_glob = source.replace('.pyx', '.cpython*')\n for compiled in sorted(glob(compiled_glob)):\n rm(compiled)\n\n\ncmdclass = {\n 'build_ext': build_ext,\n 'build_usage': setup_docs.build_usage,\n 'build_man': setup_docs.build_man,\n 'sdist': Sdist,\n 'clean': Clean,\n}\n\next_modules = []\nif not on_rtd:\n\n def members_appended(*ds):\n result = defaultdict(list)\n for d in ds:\n for k, v in d.items():\n assert isinstance(v, list)\n result[k].extend(v)\n return result\n\n try:\n import pkgconfig as pc\n except ImportError:\n print('Warning: can not import pkgconfig python package.')\n pc = None\n\n crypto_ext_kwargs = members_appended(\n dict(sources=[crypto_ll_source, crypto_helpers]),\n setup_crypto.crypto_ext_kwargs(pc, system_prefix_openssl),\n setup_crypto.b2_ext_kwargs(pc, prefer_system_libb2, system_prefix_libb2),\n )\n\n compress_ext_kwargs = members_appended(\n dict(sources=[compress_source]),\n setup_compress.lz4_ext_kwargs(pc, prefer_system_liblz4, system_prefix_liblz4),\n setup_compress.zstd_ext_kwargs(pc, prefer_system_libzstd, system_prefix_libzstd,\n multithreaded=False, legacy=False),\n )\n\n checksums_ext_kwargs = members_appended(\n dict(sources=[checksums_source]),\n setup_checksums.xxhash_ext_kwargs(pc, prefer_system_libxxhash, system_prefix_libxxhash),\n )\n\n ext_modules += [\n Extension('borg.crypto.low_level', **crypto_ext_kwargs),\n Extension('borg.compress', **compress_ext_kwargs),\n Extension('borg.hashindex', [hashindex_source]),\n Extension('borg.item', [item_source]),\n Extension('borg.chunker', [chunker_source]),\n Extension('borg.algorithms.checksums', **checksums_ext_kwargs),\n ]\n\n posix_ext = Extension('borg.platform.posix', [platform_posix_source])\n linux_ext = Extension('borg.platform.linux', [platform_linux_source], libraries=['acl'])\n syncfilerange_ext = Extension('borg.platform.syncfilerange', [platform_syncfilerange_source])\n freebsd_ext = Extension('borg.platform.freebsd', [platform_freebsd_source])\n darwin_ext = Extension('borg.platform.darwin', [platform_darwin_source])\n windows_ext = Extension('borg.platform.windows', [platform_windows_source])\n\n if not is_win32:\n ext_modules.append(posix_ext)\n else:\n ext_modules.append(windows_ext)\n if sys.platform == 'linux':\n ext_modules.append(linux_ext)\n ext_modules.append(syncfilerange_ext)\n elif sys.platform.startswith('freebsd'):\n ext_modules.append(freebsd_ext)\n elif sys.platform == 'darwin':\n ext_modules.append(darwin_ext)\n\n # sometimes there's no need to cythonize\n # this breaks chained commands like 'clean sdist'\n cythonizing = len(sys.argv) > 1 and sys.argv[1] not in ('clean', 'egg_info', '--help-commands', '--version') \\\n and '--help' not in sys.argv[1:]\n\n if cythonize and cythonizing:\n cython_opts = dict(\n # default language_level will be '3str' starting from Cython 3.0.0,\n # but old cython versions (< 0.29) do not know that, thus we use 3 for now.\n compiler_directives={'language_level': 3},\n )\n if not is_win32:\n # compile .pyx extensions to .c in parallel, does not work on windows\n cython_opts['nthreads'] = cpu_threads + 1\n\n # generate C code from Cython for ALL supported platforms, so we have them in the sdist.\n # the sdist does not require Cython at install time, so we need all as C.\n cythonize([posix_ext, linux_ext, syncfilerange_ext, freebsd_ext, darwin_ext, windows_ext], **cython_opts)\n # generate C code from Cython for THIS platform (and for all platform-independent Cython parts).\n ext_modules = cythonize(ext_modules, **cython_opts)\n\n\nsetup(\n name='borgbackup',\n use_scm_version={\n 'write_to': 'src/borg/_version.py',\n },\n author='The Borg Collective (see AUTHORS file)',\n author_email='borgbackup@python.org',\n url='https://borgbackup.readthedocs.io/',\n description='Deduplicated, encrypted, authenticated and compressed backups',\n long_description=setup_docs.long_desc_from_readme(),\n license='BSD',\n platforms=['Linux', 'MacOS X', 'FreeBSD', 'OpenBSD', 'NetBSD', ],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: BSD :: OpenBSD',\n 'Operating System :: POSIX :: BSD :: NetBSD',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security :: Cryptography',\n 'Topic :: System :: Archiving :: Backup',\n ],\n packages=find_packages('src'),\n package_dir={'': 'src'},\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'borg = borg.archiver:main',\n 'borgfs = borg.archiver:main',\n ]\n },\n # See also the MANIFEST.in file.\n # We want to install all the files in the package directories...\n include_package_data=True,\n # ...except the source files which have been compiled (C extensions):\n exclude_package_data={\n '': ['*.c', '*.h', '*.pyx', ],\n },\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n setup_requires=['setuptools_scm>=1.7'],\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}], "after_files": [{"content": "# borgbackup - main setup code (see also other setup_*.py files)\n\nimport os\nimport sys\nfrom collections import defaultdict\nfrom glob import glob\n\ntry:\n import multiprocessing\nexcept ImportError:\n multiprocessing = None\n\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.sdist import sdist\nfrom distutils.command.clean import clean\n\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\nimport setup_checksums\nimport setup_compress\nimport setup_crypto\nimport setup_docs\n\nis_win32 = sys.platform.startswith('win32')\n\n# How the build process finds the system libs / uses the bundled code:\n#\n# 1. it will try to use (system) libs (see 1.1. and 1.2.),\n# except if you use these env vars to force using the bundled code:\n# BORG_USE_BUNDLED_XXX undefined --> try using system lib\n# BORG_USE_BUNDLED_XXX=YES --> use the bundled code\n# Note: do not use =NO, that is not supported!\n# 1.1. if BORG_LIBXXX_PREFIX is set, it will use headers and libs from there.\n# 1.2. if not and pkg-config can locate the lib, the lib located by\n# pkg-config will be used. We use the pkg-config tool via the pkgconfig\n# python package, which must be installed before invoking setup.py.\n# if pkgconfig is not installed, this step is skipped.\n# 2. if no system lib could be located via 1.1. or 1.2., it will fall back\n# to using the bundled code.\n\n# OpenSSL is required as a (system) lib in any case as we do not bundle it.\n# Thus, only step 1.1. and 1.2. apply to openssl (but not 1. and 2.).\n# needed: openssl >=1.0.2 or >=1.1.0 (or compatible)\nsystem_prefix_openssl = os.environ.get('BORG_OPENSSL_PREFIX')\n\n# needed: blake2 (>= 0.98.1)\nprefer_system_libb2 = not bool(os.environ.get('BORG_USE_BUNDLED_B2'))\nsystem_prefix_libb2 = os.environ.get('BORG_LIBB2_PREFIX')\n\n# needed: lz4 (>= 1.7.0 / r129)\nprefer_system_liblz4 = not bool(os.environ.get('BORG_USE_BUNDLED_LZ4'))\nsystem_prefix_liblz4 = os.environ.get('BORG_LIBLZ4_PREFIX')\n\n# needed: zstd (>= 1.3.0)\nprefer_system_libzstd = not bool(os.environ.get('BORG_USE_BUNDLED_ZSTD'))\nsystem_prefix_libzstd = os.environ.get('BORG_LIBZSTD_PREFIX')\n\nprefer_system_libxxhash = not bool(os.environ.get('BORG_USE_BUNDLED_XXHASH'))\nsystem_prefix_libxxhash = os.environ.get('BORG_LIBXXHASH_PREFIX')\n\n# Number of threads to use for cythonize, not used on windows\ncpu_threads = multiprocessing.cpu_count() if multiprocessing and multiprocessing.get_start_method() != 'spawn' else None\n\n# Are we building on ReadTheDocs?\non_rtd = os.environ.get('READTHEDOCS')\n\ninstall_requires = [\n # we are rather picky about msgpack versions, because a good working msgpack is\n # very important for borg, see: https://github.com/borgbackup/borg/issues/3753\n 'msgpack >=0.5.6, <=1.0.0',\n # Please note:\n # using any other version is not supported by borg development and\n # any feedback related to issues caused by this will be ignored.\n]\n\n# note for package maintainers: if you package borgbackup for distribution,\n# please (if available) add pyfuse3 (preferably) or llfuse (not maintained any more)\n# as a *requirement*. \"borg mount\" needs one of them to work.\n# if neither is available, do not require it, most of borgbackup will work.\nextras_require = {\n 'llfuse': [\n 'llfuse >= 1.3.8',\n ],\n 'pyfuse3': [\n 'pyfuse3 >= 3.1.1',\n ],\n 'nofuse': [],\n}\n\ncompress_source = 'src/borg/compress.pyx'\ncrypto_ll_source = 'src/borg/crypto/low_level.pyx'\ncrypto_helpers = 'src/borg/crypto/_crypto_helpers.c'\nchunker_source = 'src/borg/chunker.pyx'\nhashindex_source = 'src/borg/hashindex.pyx'\nitem_source = 'src/borg/item.pyx'\nchecksums_source = 'src/borg/algorithms/checksums.pyx'\nplatform_posix_source = 'src/borg/platform/posix.pyx'\nplatform_linux_source = 'src/borg/platform/linux.pyx'\nplatform_syncfilerange_source = 'src/borg/platform/syncfilerange.pyx'\nplatform_darwin_source = 'src/borg/platform/darwin.pyx'\nplatform_freebsd_source = 'src/borg/platform/freebsd.pyx'\nplatform_windows_source = 'src/borg/platform/windows.pyx'\n\ncython_sources = [\n compress_source,\n crypto_ll_source,\n chunker_source,\n hashindex_source,\n item_source,\n checksums_source,\n\n platform_posix_source,\n platform_linux_source,\n platform_syncfilerange_source,\n platform_freebsd_source,\n platform_darwin_source,\n platform_windows_source,\n]\n\nif cythonize:\n Sdist = sdist\nelse:\n class Sdist(sdist):\n def __init__(self, *args, **kwargs):\n raise Exception('Cython is required to run sdist')\n\n cython_c_files = [fn.replace('.pyx', '.c') for fn in cython_sources]\n if not on_rtd and not all(os.path.exists(path) for path in cython_c_files):\n raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')\n\n\ndef rm(file):\n try:\n os.unlink(file)\n print('rm', file)\n except FileNotFoundError:\n pass\n\n\nclass Clean(clean):\n def run(self):\n super().run()\n for source in cython_sources:\n genc = source.replace('.pyx', '.c')\n rm(genc)\n compiled_glob = source.replace('.pyx', '.cpython*')\n for compiled in sorted(glob(compiled_glob)):\n rm(compiled)\n\n\ncmdclass = {\n 'build_ext': build_ext,\n 'build_usage': setup_docs.build_usage,\n 'build_man': setup_docs.build_man,\n 'sdist': Sdist,\n 'clean': Clean,\n}\n\next_modules = []\nif not on_rtd:\n\n def members_appended(*ds):\n result = defaultdict(list)\n for d in ds:\n for k, v in d.items():\n assert isinstance(v, list)\n result[k].extend(v)\n return result\n\n try:\n import pkgconfig as pc\n except ImportError:\n print('Warning: can not import pkgconfig python package.')\n pc = None\n\n crypto_ext_kwargs = members_appended(\n dict(sources=[crypto_ll_source, crypto_helpers]),\n setup_crypto.crypto_ext_kwargs(pc, system_prefix_openssl),\n setup_crypto.b2_ext_kwargs(pc, prefer_system_libb2, system_prefix_libb2),\n )\n\n compress_ext_kwargs = members_appended(\n dict(sources=[compress_source]),\n setup_compress.lz4_ext_kwargs(pc, prefer_system_liblz4, system_prefix_liblz4),\n setup_compress.zstd_ext_kwargs(pc, prefer_system_libzstd, system_prefix_libzstd,\n multithreaded=False, legacy=False),\n )\n\n checksums_ext_kwargs = members_appended(\n dict(sources=[checksums_source]),\n setup_checksums.xxhash_ext_kwargs(pc, prefer_system_libxxhash, system_prefix_libxxhash),\n )\n\n ext_modules += [\n Extension('borg.crypto.low_level', **crypto_ext_kwargs),\n Extension('borg.compress', **compress_ext_kwargs),\n Extension('borg.hashindex', [hashindex_source]),\n Extension('borg.item', [item_source]),\n Extension('borg.chunker', [chunker_source]),\n Extension('borg.algorithms.checksums', **checksums_ext_kwargs),\n ]\n\n posix_ext = Extension('borg.platform.posix', [platform_posix_source])\n linux_ext = Extension('borg.platform.linux', [platform_linux_source], libraries=['acl'])\n syncfilerange_ext = Extension('borg.platform.syncfilerange', [platform_syncfilerange_source])\n freebsd_ext = Extension('borg.platform.freebsd', [platform_freebsd_source])\n darwin_ext = Extension('borg.platform.darwin', [platform_darwin_source])\n windows_ext = Extension('borg.platform.windows', [platform_windows_source])\n\n if not is_win32:\n ext_modules.append(posix_ext)\n else:\n ext_modules.append(windows_ext)\n if sys.platform == 'linux':\n ext_modules.append(linux_ext)\n ext_modules.append(syncfilerange_ext)\n elif sys.platform.startswith('freebsd'):\n ext_modules.append(freebsd_ext)\n elif sys.platform == 'darwin':\n ext_modules.append(darwin_ext)\n\n # sometimes there's no need to cythonize\n # this breaks chained commands like 'clean sdist'\n cythonizing = len(sys.argv) > 1 and sys.argv[1] not in ('clean', 'egg_info', '--help-commands', '--version') \\\n and '--help' not in sys.argv[1:]\n\n if cythonize and cythonizing:\n cython_opts = dict(\n # default language_level will be '3str' starting from Cython 3.0.0,\n # but old cython versions (< 0.29) do not know that, thus we use 3 for now.\n compiler_directives={'language_level': 3},\n )\n if not is_win32:\n # compile .pyx extensions to .c in parallel, does not work on windows\n cython_opts['nthreads'] = cpu_threads\n\n # generate C code from Cython for ALL supported platforms, so we have them in the sdist.\n # the sdist does not require Cython at install time, so we need all as C.\n cythonize([posix_ext, linux_ext, syncfilerange_ext, freebsd_ext, darwin_ext, windows_ext], **cython_opts)\n # generate C code from Cython for THIS platform (and for all platform-independent Cython parts).\n ext_modules = cythonize(ext_modules, **cython_opts)\n\n\nsetup(\n name='borgbackup',\n use_scm_version={\n 'write_to': 'src/borg/_version.py',\n },\n author='The Borg Collective (see AUTHORS file)',\n author_email='borgbackup@python.org',\n url='https://borgbackup.readthedocs.io/',\n description='Deduplicated, encrypted, authenticated and compressed backups',\n long_description=setup_docs.long_desc_from_readme(),\n license='BSD',\n platforms=['Linux', 'MacOS X', 'FreeBSD', 'OpenBSD', 'NetBSD', ],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: BSD :: OpenBSD',\n 'Operating System :: POSIX :: BSD :: NetBSD',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security :: Cryptography',\n 'Topic :: System :: Archiving :: Backup',\n ],\n packages=find_packages('src'),\n package_dir={'': 'src'},\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'borg = borg.archiver:main',\n 'borgfs = borg.archiver:main',\n ]\n },\n # See also the MANIFEST.in file.\n # We want to install all the files in the package directories...\n include_package_data=True,\n # ...except the source files which have been compiled (C extensions):\n exclude_package_data={\n '': ['*.c', '*.h', '*.pyx', ],\n },\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n setup_requires=['setuptools_scm>=1.7'],\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]}
3,916
240
gh_patches_debug_3412
rasdani/github-patches
git_diff
dynaconf__dynaconf-767
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [bug] filter_strategy config not working **Describe the bug** It seems that the `filter_strategy` config (which by the way is misspelled int the [docs](https://github.com/rochacbruno/dynaconf/blob/master/docs/configuration.md)) is not taken into account when used in the `Dynaconf` constructor. **To Reproduce** Steps to reproduce the behavior: 1. Having the following folder structure Just a plain python script. 2. Having the following config files: No config file, just using env variables 4. Having the following app code: <details> <summary> Code </summary> **/test/test.py** ```python import dynaconf class CustomFilter: def __call__(self, data): print("this is never called") return { k: v for k, v in data.items() if k.startswith("PREFIX") } if __name__ == "__main__": dc = dynaconf.Dynaconf( envvar_prefix=False, filter_strategy=CustomFilter(), ) print(dc.as_dict()) ``` </details> 5. Executing under the following environment <details> <summary> Execution </summary> ```bash PREFIX_VAR="HELLO" OTHER_VAR="WORLD" python test.py ``` </details> **Expected behavior** `CustomFilter` should be called ("this is never called" should be displayed) and only the `PREFIX_VAR` should be in dict, not `OTHER_VAR` **Environment (please complete the following information):** - OS: Linux version 5.10.60.1-microsoft-standard-WSL2 - Dynaconf Version 3.1.9 - Framework: None **Context** I was looking for a way to filter out empty environment variables. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dynaconf/loaders/env_loader.py` Content: ``` 1 from __future__ import annotations 2 3 from os import environ 4 5 from dynaconf.utils import missing 6 from dynaconf.utils import upperfy 7 from dynaconf.utils.parse_conf import parse_conf_data 8 from dynaconf.vendor.dotenv import cli as dotenv_cli 9 10 11 IDENTIFIER = "env" 12 13 14 def load(obj, env=None, silent=True, key=None): 15 """Loads envvars with prefixes: 16 17 `DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_` 18 """ 19 global_prefix = obj.get("ENVVAR_PREFIX_FOR_DYNACONF") 20 if global_prefix is False or global_prefix.upper() != "DYNACONF": 21 load_from_env(obj, "DYNACONF", key, silent, IDENTIFIER + "_global") 22 23 # Load the global env if exists and overwrite everything 24 load_from_env(obj, global_prefix, key, silent, IDENTIFIER + "_global") 25 26 27 def load_from_env( 28 obj, 29 prefix=False, 30 key=None, 31 silent=False, 32 identifier=IDENTIFIER, 33 env=False, # backwards compatibility bc renamed param 34 ): 35 if prefix is False and env is not False: 36 prefix = env 37 38 env_ = "" 39 if prefix is not False: 40 if not isinstance(prefix, str): 41 raise TypeError("`prefix/env` must be str or False") 42 43 prefix = prefix.upper() 44 env_ = f"{prefix}_" 45 46 # Load a single environment variable explicitly. 47 if key: 48 key = upperfy(key) 49 value = environ.get(f"{env_}{key}") 50 if value: 51 try: # obj is a Settings 52 obj.set(key, value, loader_identifier=identifier, tomlfy=True) 53 except AttributeError: # obj is a dict 54 obj[key] = parse_conf_data( 55 value, tomlfy=True, box_settings=obj 56 ) 57 58 # Load environment variables in bulk (when matching). 59 else: 60 # Only known variables should be loaded from environment? 61 ignore_unknown = obj.get("IGNORE_UNKNOWN_ENVVARS_FOR_DYNACONF") 62 63 trim_len = len(env_) 64 data = { 65 key[trim_len:]: parse_conf_data( 66 data, tomlfy=True, box_settings=obj 67 ) 68 for key, data in environ.items() 69 if key.startswith(env_) 70 and not ( 71 # Ignore environment variables that haven't been 72 # pre-defined in settings space. 73 ignore_unknown 74 and obj.get(key[trim_len:], default=missing) is missing 75 ) 76 } 77 # Update the settings space based on gathered data from environment. 78 if data: 79 obj.update(data, loader_identifier=identifier) 80 81 82 def write(settings_path, settings_data, **kwargs): 83 """Write data to .env file""" 84 for key, value in settings_data.items(): 85 quote_mode = ( 86 isinstance(value, str) 87 and (value.startswith("'") or value.startswith('"')) 88 ) or isinstance(value, (list, dict)) 89 dotenv_cli.set_key( 90 str(settings_path), 91 key, 92 str(value), 93 quote_mode="always" if quote_mode else "none", 94 ) 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dynaconf/loaders/env_loader.py b/dynaconf/loaders/env_loader.py --- a/dynaconf/loaders/env_loader.py +++ b/dynaconf/loaders/env_loader.py @@ -76,6 +76,9 @@ } # Update the settings space based on gathered data from environment. if data: + filter_strategy = obj.get("FILTER_STRATEGY") + if filter_strategy: + data = filter_strategy(data) obj.update(data, loader_identifier=identifier)
{"golden_diff": "diff --git a/dynaconf/loaders/env_loader.py b/dynaconf/loaders/env_loader.py\n--- a/dynaconf/loaders/env_loader.py\n+++ b/dynaconf/loaders/env_loader.py\n@@ -76,6 +76,9 @@\n }\n # Update the settings space based on gathered data from environment.\n if data:\n+ filter_strategy = obj.get(\"FILTER_STRATEGY\")\n+ if filter_strategy:\n+ data = filter_strategy(data)\n obj.update(data, loader_identifier=identifier)\n", "issue": "[bug] filter_strategy config not working\n**Describe the bug**\r\nIt seems that the `filter_strategy` config (which by the way is misspelled int the [docs](https://github.com/rochacbruno/dynaconf/blob/master/docs/configuration.md)) is not taken into account when used in the `Dynaconf` constructor.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Having the following folder structure\r\nJust a plain python script.\r\n\r\n2. Having the following config files:\r\nNo config file, just using env variables\r\n\r\n4. Having the following app code:\r\n\r\n<details>\r\n<summary> Code </summary>\r\n\r\n**/test/test.py**\r\n```python\r\nimport dynaconf\r\n\r\nclass CustomFilter:\r\n def __call__(self, data):\r\n print(\"this is never called\")\r\n return {\r\n k: v\r\n for k, v in data.items()\r\n if k.startswith(\"PREFIX\")\r\n }\r\n\r\n\r\nif __name__ == \"__main__\":\r\n dc = dynaconf.Dynaconf(\r\n envvar_prefix=False,\r\n filter_strategy=CustomFilter(),\r\n )\r\n print(dc.as_dict())\r\n```\r\n\r\n</details>\r\n\r\n5. Executing under the following environment\r\n\r\n<details>\r\n<summary> Execution </summary>\r\n\r\n```bash\r\nPREFIX_VAR=\"HELLO\" OTHER_VAR=\"WORLD\" python test.py\r\n```\r\n\r\n</details>\r\n\r\n**Expected behavior**\r\n`CustomFilter` should be called (\"this is never called\" should be displayed) and only the `PREFIX_VAR` should be in dict, not `OTHER_VAR`\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Linux version 5.10.60.1-microsoft-standard-WSL2 \r\n - Dynaconf Version 3.1.9\r\n - Framework: None\r\n\r\n**Context**\r\n\r\nI was looking for a way to filter out empty environment variables.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom os import environ\n\nfrom dynaconf.utils import missing\nfrom dynaconf.utils import upperfy\nfrom dynaconf.utils.parse_conf import parse_conf_data\nfrom dynaconf.vendor.dotenv import cli as dotenv_cli\n\n\nIDENTIFIER = \"env\"\n\n\ndef load(obj, env=None, silent=True, key=None):\n \"\"\"Loads envvars with prefixes:\n\n `DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_`\n \"\"\"\n global_prefix = obj.get(\"ENVVAR_PREFIX_FOR_DYNACONF\")\n if global_prefix is False or global_prefix.upper() != \"DYNACONF\":\n load_from_env(obj, \"DYNACONF\", key, silent, IDENTIFIER + \"_global\")\n\n # Load the global env if exists and overwrite everything\n load_from_env(obj, global_prefix, key, silent, IDENTIFIER + \"_global\")\n\n\ndef load_from_env(\n obj,\n prefix=False,\n key=None,\n silent=False,\n identifier=IDENTIFIER,\n env=False, # backwards compatibility bc renamed param\n):\n if prefix is False and env is not False:\n prefix = env\n\n env_ = \"\"\n if prefix is not False:\n if not isinstance(prefix, str):\n raise TypeError(\"`prefix/env` must be str or False\")\n\n prefix = prefix.upper()\n env_ = f\"{prefix}_\"\n\n # Load a single environment variable explicitly.\n if key:\n key = upperfy(key)\n value = environ.get(f\"{env_}{key}\")\n if value:\n try: # obj is a Settings\n obj.set(key, value, loader_identifier=identifier, tomlfy=True)\n except AttributeError: # obj is a dict\n obj[key] = parse_conf_data(\n value, tomlfy=True, box_settings=obj\n )\n\n # Load environment variables in bulk (when matching).\n else:\n # Only known variables should be loaded from environment?\n ignore_unknown = obj.get(\"IGNORE_UNKNOWN_ENVVARS_FOR_DYNACONF\")\n\n trim_len = len(env_)\n data = {\n key[trim_len:]: parse_conf_data(\n data, tomlfy=True, box_settings=obj\n )\n for key, data in environ.items()\n if key.startswith(env_)\n and not (\n # Ignore environment variables that haven't been\n # pre-defined in settings space.\n ignore_unknown\n and obj.get(key[trim_len:], default=missing) is missing\n )\n }\n # Update the settings space based on gathered data from environment.\n if data:\n obj.update(data, loader_identifier=identifier)\n\n\ndef write(settings_path, settings_data, **kwargs):\n \"\"\"Write data to .env file\"\"\"\n for key, value in settings_data.items():\n quote_mode = (\n isinstance(value, str)\n and (value.startswith(\"'\") or value.startswith('\"'))\n ) or isinstance(value, (list, dict))\n dotenv_cli.set_key(\n str(settings_path),\n key,\n str(value),\n quote_mode=\"always\" if quote_mode else \"none\",\n )\n", "path": "dynaconf/loaders/env_loader.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom os import environ\n\nfrom dynaconf.utils import missing\nfrom dynaconf.utils import upperfy\nfrom dynaconf.utils.parse_conf import parse_conf_data\nfrom dynaconf.vendor.dotenv import cli as dotenv_cli\n\n\nIDENTIFIER = \"env\"\n\n\ndef load(obj, env=None, silent=True, key=None):\n \"\"\"Loads envvars with prefixes:\n\n `DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_`\n \"\"\"\n global_prefix = obj.get(\"ENVVAR_PREFIX_FOR_DYNACONF\")\n if global_prefix is False or global_prefix.upper() != \"DYNACONF\":\n load_from_env(obj, \"DYNACONF\", key, silent, IDENTIFIER + \"_global\")\n\n # Load the global env if exists and overwrite everything\n load_from_env(obj, global_prefix, key, silent, IDENTIFIER + \"_global\")\n\n\ndef load_from_env(\n obj,\n prefix=False,\n key=None,\n silent=False,\n identifier=IDENTIFIER,\n env=False, # backwards compatibility bc renamed param\n):\n if prefix is False and env is not False:\n prefix = env\n\n env_ = \"\"\n if prefix is not False:\n if not isinstance(prefix, str):\n raise TypeError(\"`prefix/env` must be str or False\")\n\n prefix = prefix.upper()\n env_ = f\"{prefix}_\"\n\n # Load a single environment variable explicitly.\n if key:\n key = upperfy(key)\n value = environ.get(f\"{env_}{key}\")\n if value:\n try: # obj is a Settings\n obj.set(key, value, loader_identifier=identifier, tomlfy=True)\n except AttributeError: # obj is a dict\n obj[key] = parse_conf_data(\n value, tomlfy=True, box_settings=obj\n )\n\n # Load environment variables in bulk (when matching).\n else:\n # Only known variables should be loaded from environment?\n ignore_unknown = obj.get(\"IGNORE_UNKNOWN_ENVVARS_FOR_DYNACONF\")\n\n trim_len = len(env_)\n data = {\n key[trim_len:]: parse_conf_data(\n data, tomlfy=True, box_settings=obj\n )\n for key, data in environ.items()\n if key.startswith(env_)\n and not (\n # Ignore environment variables that haven't been\n # pre-defined in settings space.\n ignore_unknown\n and obj.get(key[trim_len:], default=missing) is missing\n )\n }\n # Update the settings space based on gathered data from environment.\n if data:\n filter_strategy = obj.get(\"FILTER_STRATEGY\")\n if filter_strategy:\n data = filter_strategy(data)\n obj.update(data, loader_identifier=identifier)\n\n\ndef write(settings_path, settings_data, **kwargs):\n \"\"\"Write data to .env file\"\"\"\n for key, value in settings_data.items():\n quote_mode = (\n isinstance(value, str)\n and (value.startswith(\"'\") or value.startswith('\"'))\n ) or isinstance(value, (list, dict))\n dotenv_cli.set_key(\n str(settings_path),\n key,\n str(value),\n quote_mode=\"always\" if quote_mode else \"none\",\n )\n", "path": "dynaconf/loaders/env_loader.py"}]}
1,514
111
gh_patches_debug_37526
rasdani/github-patches
git_diff
freqtrade__freqtrade-8273
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- stoploss_from_open has some ambiguous with leverage <!-- Have you searched for similar issues before posting it? If you have discovered a bug in the bot, please [search the issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). If it hasn't been reported, please create a new issue. Please do not use bug reports to request new features. --> ## Describe your environment * Operating system: ____ Linux * Python Version: _____ (`python -V`) 3.10 * CCXT version: _____ (`pip freeze | grep ccxt`) 1.95 * Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker) Note: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out. ## Describe the problem: In the doc explanation, profit and stoploss is calculated with leverage. And this function `stoploss_from_open` (in freqtrade/strategy/strategy_helper.py) may wrong when enter short with leverage. Because it's profit_rate may large than 1 , and but open_relative_stop still less than 1, and stoploss be always negative, and function return 0.0. But in fact, it has positive profit. *Explain the problem you have encountered* ### Steps to reproduce: 1. _____ 2. _____ 3. _____ ### Observed Results: * What happened? stoploss_from_open may have ambiguous when used leverage * What did you expect to happen? In function dive leverage, or argument explicitly explanates without leverage ### Relevant code exceptions or logs ``` Python def stoploss_from_open( open_relative_stop: float, current_profit: float, is_short: bool = False ) -> float: """ Given the current profit, and a desired stop loss value relative to the open price, return a stop loss value that is relative to the current price, and which can be returned from `custom_stoploss`. The requested stop can be positive for a stop above the open price, or negative for a stop below the open price. The return value is always >= 0. Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price :param open_relative_stop: Desired stop loss percentage relative to open price :param current_profit: The current profit percentage :param is_short: When true, perform the calculation for short instead of long :return: Stop loss value relative to current price """ # formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value if (current_profit == -1 and not is_short) or (is_short and current_profit == 1): return 1 if is_short is True: stoploss = -1 + ((1 - open_relative_stop) / (1 - current_profit)) else: stoploss = 1 - ((1 + open_relative_stop) / (1 + current_profit)) # negative stoploss values indicate the requested stop price is higher/lower # (long/short) than the current price return max(stoploss, 0.0) ``` Note: Please copy/paste text of the messages, no screenshots of logs please. ``` // paste your log here ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `freqtrade/strategy/strategy_helper.py` Content: ``` 1 from typing import Optional 2 3 import pandas as pd 4 5 from freqtrade.exchange import timeframe_to_minutes 6 7 8 def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, 9 timeframe: str, timeframe_inf: str, ffill: bool = True, 10 append_timeframe: bool = True, 11 date_column: str = 'date', 12 suffix: Optional[str] = None) -> pd.DataFrame: 13 """ 14 Correctly merge informative samples to the original dataframe, avoiding lookahead bias. 15 16 Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a 17 1h candle that starts at 15:00 will result in all candles to know the close at 16:00 18 which they should not know. 19 20 Moves the date of the informative pair by 1 time interval forward. 21 This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the 22 last candle that's closed at 15:00, 15:15, 15:30 or 15:45. 23 24 Assuming inf_tf = '1d' - then the resulting columns will be: 25 date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d 26 27 :param dataframe: Original dataframe 28 :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe 29 :param timeframe: Timeframe of the original pair sample. 30 :param timeframe_inf: Timeframe of the informative pair sample. 31 :param ffill: Forwardfill missing values - optional but usually required 32 :param append_timeframe: Rename columns by appending timeframe. 33 :param date_column: A custom date column name. 34 :param suffix: A string suffix to add at the end of the informative columns. If specified, 35 append_timeframe must be false. 36 :return: Merged dataframe 37 :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe 38 """ 39 40 minutes_inf = timeframe_to_minutes(timeframe_inf) 41 minutes = timeframe_to_minutes(timeframe) 42 if minutes == minutes_inf: 43 # No need to forwardshift if the timeframes are identical 44 informative['date_merge'] = informative[date_column] 45 elif minutes < minutes_inf: 46 # Subtract "small" timeframe so merging is not delayed by 1 small candle 47 # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073 48 informative['date_merge'] = ( 49 informative[date_column] + pd.to_timedelta(minutes_inf, 'm') - 50 pd.to_timedelta(minutes, 'm') 51 ) 52 else: 53 raise ValueError("Tried to merge a faster timeframe to a slower timeframe." 54 "This would create new rows, and can throw off your regular indicators.") 55 56 # Rename columns to be unique 57 date_merge = 'date_merge' 58 if suffix and append_timeframe: 59 raise ValueError("You can not specify `append_timeframe` as True and a `suffix`.") 60 elif append_timeframe: 61 date_merge = f'date_merge_{timeframe_inf}' 62 informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns] 63 64 elif suffix: 65 date_merge = f'date_merge_{suffix}' 66 informative.columns = [f"{col}_{suffix}" for col in informative.columns] 67 68 # Combine the 2 dataframes 69 # all indicators on the informative sample MUST be calculated before this point 70 if ffill: 71 # https://pandas.pydata.org/docs/user_guide/merging.html#timeseries-friendly-merging 72 # merge_ordered - ffill method is 2.5x faster than seperate ffill() 73 dataframe = pd.merge_ordered(dataframe, informative, fill_method="ffill", left_on='date', 74 right_on=date_merge, how='left') 75 else: 76 dataframe = pd.merge(dataframe, informative, left_on='date', 77 right_on=date_merge, how='left') 78 dataframe = dataframe.drop(date_merge, axis=1) 79 80 # if ffill: 81 # dataframe = dataframe.ffill() 82 83 return dataframe 84 85 86 def stoploss_from_open( 87 open_relative_stop: float, 88 current_profit: float, 89 is_short: bool = False 90 ) -> float: 91 """ 92 93 Given the current profit, and a desired stop loss value relative to the open price, 94 return a stop loss value that is relative to the current price, and which can be 95 returned from `custom_stoploss`. 96 97 The requested stop can be positive for a stop above the open price, or negative for 98 a stop below the open price. The return value is always >= 0. 99 100 Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price 101 102 :param open_relative_stop: Desired stop loss percentage relative to open price 103 :param current_profit: The current profit percentage 104 :param is_short: When true, perform the calculation for short instead of long 105 :return: Stop loss value relative to current price 106 """ 107 108 # formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value 109 if (current_profit == -1 and not is_short) or (is_short and current_profit == 1): 110 return 1 111 112 if is_short is True: 113 stoploss = -1 + ((1 - open_relative_stop) / (1 - current_profit)) 114 else: 115 stoploss = 1 - ((1 + open_relative_stop) / (1 + current_profit)) 116 117 # negative stoploss values indicate the requested stop price is higher/lower 118 # (long/short) than the current price 119 return max(stoploss, 0.0) 120 121 122 def stoploss_from_absolute(stop_rate: float, current_rate: float, is_short: bool = False) -> float: 123 """ 124 Given current price and desired stop price, return a stop loss value that is relative to current 125 price. 126 127 The requested stop can be positive for a stop above the open price, or negative for 128 a stop below the open price. The return value is always >= 0. 129 130 Returns 0 if the resulting stop price would be above the current price. 131 132 :param stop_rate: Stop loss price. 133 :param current_rate: Current asset price. 134 :param is_short: When true, perform the calculation for short instead of long 135 :return: Positive stop loss value relative to current price 136 """ 137 138 # formula is undefined for current_rate 0, return maximum value 139 if current_rate == 0: 140 return 1 141 142 stoploss = 1 - (stop_rate / current_rate) 143 if is_short: 144 stoploss = -stoploss 145 146 # negative stoploss values indicate the requested stop price is higher/lower 147 # (long/short) than the current price 148 # shorts can yield stoploss values higher than 1, so limit that as well 149 return max(min(stoploss, 1.0), 0.0) 150 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py --- a/freqtrade/strategy/strategy_helper.py +++ b/freqtrade/strategy/strategy_helper.py @@ -86,37 +86,41 @@ def stoploss_from_open( open_relative_stop: float, current_profit: float, - is_short: bool = False + is_short: bool = False, + leverage: float = 1.0 ) -> float: """ - - Given the current profit, and a desired stop loss value relative to the open price, + Given the current profit, and a desired stop loss value relative to the trade entry price, return a stop loss value that is relative to the current price, and which can be returned from `custom_stoploss`. The requested stop can be positive for a stop above the open price, or negative for a stop below the open price. The return value is always >= 0. + `open_relative_stop` will be considered as adjusted for leverage if leverage is provided.. Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price - :param open_relative_stop: Desired stop loss percentage relative to open price + :param open_relative_stop: Desired stop loss percentage, relative to the open price, + adjusted for leverage :param current_profit: The current profit percentage :param is_short: When true, perform the calculation for short instead of long + :param leverage: Leverage to use for the calculation :return: Stop loss value relative to current price """ # formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value - if (current_profit == -1 and not is_short) or (is_short and current_profit == 1): + _current_profit = current_profit / leverage + if (_current_profit == -1 and not is_short) or (is_short and _current_profit == 1): return 1 if is_short is True: - stoploss = -1 + ((1 - open_relative_stop) / (1 - current_profit)) + stoploss = -1 + ((1 - open_relative_stop / leverage) / (1 - _current_profit)) else: - stoploss = 1 - ((1 + open_relative_stop) / (1 + current_profit)) + stoploss = 1 - ((1 + open_relative_stop / leverage) / (1 + _current_profit)) # negative stoploss values indicate the requested stop price is higher/lower # (long/short) than the current price - return max(stoploss, 0.0) + return max(stoploss * leverage, 0.0) def stoploss_from_absolute(stop_rate: float, current_rate: float, is_short: bool = False) -> float:
{"golden_diff": "diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py\n--- a/freqtrade/strategy/strategy_helper.py\n+++ b/freqtrade/strategy/strategy_helper.py\n@@ -86,37 +86,41 @@\n def stoploss_from_open(\n open_relative_stop: float,\n current_profit: float,\n- is_short: bool = False\n+ is_short: bool = False,\n+ leverage: float = 1.0\n ) -> float:\n \"\"\"\n-\n- Given the current profit, and a desired stop loss value relative to the open price,\n+ Given the current profit, and a desired stop loss value relative to the trade entry price,\n return a stop loss value that is relative to the current price, and which can be\n returned from `custom_stoploss`.\n \n The requested stop can be positive for a stop above the open price, or negative for\n a stop below the open price. The return value is always >= 0.\n+ `open_relative_stop` will be considered as adjusted for leverage if leverage is provided..\n \n Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price\n \n- :param open_relative_stop: Desired stop loss percentage relative to open price\n+ :param open_relative_stop: Desired stop loss percentage, relative to the open price,\n+ adjusted for leverage\n :param current_profit: The current profit percentage\n :param is_short: When true, perform the calculation for short instead of long\n+ :param leverage: Leverage to use for the calculation\n :return: Stop loss value relative to current price\n \"\"\"\n \n # formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value\n- if (current_profit == -1 and not is_short) or (is_short and current_profit == 1):\n+ _current_profit = current_profit / leverage\n+ if (_current_profit == -1 and not is_short) or (is_short and _current_profit == 1):\n return 1\n \n if is_short is True:\n- stoploss = -1 + ((1 - open_relative_stop) / (1 - current_profit))\n+ stoploss = -1 + ((1 - open_relative_stop / leverage) / (1 - _current_profit))\n else:\n- stoploss = 1 - ((1 + open_relative_stop) / (1 + current_profit))\n+ stoploss = 1 - ((1 + open_relative_stop / leverage) / (1 + _current_profit))\n \n # negative stoploss values indicate the requested stop price is higher/lower\n # (long/short) than the current price\n- return max(stoploss, 0.0)\n+ return max(stoploss * leverage, 0.0)\n \n \n def stoploss_from_absolute(stop_rate: float, current_rate: float, is_short: bool = False) -> float:\n", "issue": "stoploss_from_open has some ambiguous with leverage\n<!-- \r\nHave you searched for similar issues before posting it?\r\n\r\nIf you have discovered a bug in the bot, please [search the issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). \r\nIf it hasn't been reported, please create a new issue.\r\n\r\nPlease do not use bug reports to request new features.\r\n-->\r\n\r\n## Describe your environment\r\n\r\n * Operating system: ____ Linux\r\n * Python Version: _____ (`python -V`) 3.10\r\n * CCXT version: _____ (`pip freeze | grep ccxt`) 1.95\r\n * Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker)\r\n \r\nNote: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out.\r\n\r\n## Describe the problem:\r\n\r\nIn the doc explanation, profit and stoploss is calculated with leverage.\r\n\r\nAnd this function `stoploss_from_open` (in freqtrade/strategy/strategy_helper.py) may wrong when enter short with leverage. Because it's profit_rate may large than 1 , and but open_relative_stop still less than 1, and stoploss be always negative, and function return 0.0. But in fact, it has positive profit.\r\n\r\n*Explain the problem you have encountered*\r\n\r\n### Steps to reproduce:\r\n\r\n 1. _____\r\n 2. _____\r\n 3. _____\r\n \r\n### Observed Results:\r\n\r\n * What happened? \r\n stoploss_from_open may have ambiguous when used leverage\r\n * What did you expect to happen?\r\n In function dive leverage, or argument explicitly explanates without leverage\r\n### Relevant code exceptions or logs\r\n``` Python\r\n def stoploss_from_open(\r\n open_relative_stop: float,\r\n current_profit: float,\r\n is_short: bool = False\r\n ) -> float:\r\n \"\"\"\r\n Given the current profit, and a desired stop loss value relative to the open price,\r\n return a stop loss value that is relative to the current price, and which can be\r\n returned from `custom_stoploss`.\r\n \r\n The requested stop can be positive for a stop above the open price, or negative for\r\n a stop below the open price. The return value is always >= 0.\r\n \r\n Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price\r\n \r\n :param open_relative_stop: Desired stop loss percentage relative to open price\r\n :param current_profit: The current profit percentage\r\n :param is_short: When true, perform the calculation for short instead of long\r\n :return: Stop loss value relative to current price\r\n \"\"\"\r\n \r\n # formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value\r\n if (current_profit == -1 and not is_short) or (is_short and current_profit == 1):\r\n return 1\r\n \r\n if is_short is True:\r\n stoploss = -1 + ((1 - open_relative_stop) / (1 - current_profit))\r\n else:\r\n stoploss = 1 - ((1 + open_relative_stop) / (1 + current_profit))\r\n \r\n # negative stoploss values indicate the requested stop price is higher/lower\r\n # (long/short) than the current price\r\n return max(stoploss, 0.0)\r\n```\r\nNote: Please copy/paste text of the messages, no screenshots of logs please.\r\n\r\n ```\r\n // paste your log here\r\n ```\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport pandas as pd\n\nfrom freqtrade.exchange import timeframe_to_minutes\n\n\ndef merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,\n timeframe: str, timeframe_inf: str, ffill: bool = True,\n append_timeframe: bool = True,\n date_column: str = 'date',\n suffix: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Correctly merge informative samples to the original dataframe, avoiding lookahead bias.\n\n Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a\n 1h candle that starts at 15:00 will result in all candles to know the close at 16:00\n which they should not know.\n\n Moves the date of the informative pair by 1 time interval forward.\n This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the\n last candle that's closed at 15:00, 15:15, 15:30 or 15:45.\n\n Assuming inf_tf = '1d' - then the resulting columns will be:\n date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d\n\n :param dataframe: Original dataframe\n :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n :param append_timeframe: Rename columns by appending timeframe.\n :param date_column: A custom date column name.\n :param suffix: A string suffix to add at the end of the informative columns. If specified,\n append_timeframe must be false.\n :return: Merged dataframe\n :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe\n \"\"\"\n\n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n if minutes == minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[date_column]\n elif minutes < minutes_inf:\n # Subtract \"small\" timeframe so merging is not delayed by 1 small candle\n # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073\n informative['date_merge'] = (\n informative[date_column] + pd.to_timedelta(minutes_inf, 'm') -\n pd.to_timedelta(minutes, 'm')\n )\n else:\n raise ValueError(\"Tried to merge a faster timeframe to a slower timeframe.\"\n \"This would create new rows, and can throw off your regular indicators.\")\n\n # Rename columns to be unique\n date_merge = 'date_merge'\n if suffix and append_timeframe:\n raise ValueError(\"You can not specify `append_timeframe` as True and a `suffix`.\")\n elif append_timeframe:\n date_merge = f'date_merge_{timeframe_inf}'\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n\n elif suffix:\n date_merge = f'date_merge_{suffix}'\n informative.columns = [f\"{col}_{suffix}\" for col in informative.columns]\n\n # Combine the 2 dataframes\n # all indicators on the informative sample MUST be calculated before this point\n if ffill:\n # https://pandas.pydata.org/docs/user_guide/merging.html#timeseries-friendly-merging\n # merge_ordered - ffill method is 2.5x faster than seperate ffill()\n dataframe = pd.merge_ordered(dataframe, informative, fill_method=\"ffill\", left_on='date',\n right_on=date_merge, how='left')\n else:\n dataframe = pd.merge(dataframe, informative, left_on='date',\n right_on=date_merge, how='left')\n dataframe = dataframe.drop(date_merge, axis=1)\n\n # if ffill:\n # dataframe = dataframe.ffill()\n\n return dataframe\n\n\ndef stoploss_from_open(\n open_relative_stop: float,\n current_profit: float,\n is_short: bool = False\n) -> float:\n \"\"\"\n\n Given the current profit, and a desired stop loss value relative to the open price,\n return a stop loss value that is relative to the current price, and which can be\n returned from `custom_stoploss`.\n\n The requested stop can be positive for a stop above the open price, or negative for\n a stop below the open price. The return value is always >= 0.\n\n Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price\n\n :param open_relative_stop: Desired stop loss percentage relative to open price\n :param current_profit: The current profit percentage\n :param is_short: When true, perform the calculation for short instead of long\n :return: Stop loss value relative to current price\n \"\"\"\n\n # formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value\n if (current_profit == -1 and not is_short) or (is_short and current_profit == 1):\n return 1\n\n if is_short is True:\n stoploss = -1 + ((1 - open_relative_stop) / (1 - current_profit))\n else:\n stoploss = 1 - ((1 + open_relative_stop) / (1 + current_profit))\n\n # negative stoploss values indicate the requested stop price is higher/lower\n # (long/short) than the current price\n return max(stoploss, 0.0)\n\n\ndef stoploss_from_absolute(stop_rate: float, current_rate: float, is_short: bool = False) -> float:\n \"\"\"\n Given current price and desired stop price, return a stop loss value that is relative to current\n price.\n\n The requested stop can be positive for a stop above the open price, or negative for\n a stop below the open price. The return value is always >= 0.\n\n Returns 0 if the resulting stop price would be above the current price.\n\n :param stop_rate: Stop loss price.\n :param current_rate: Current asset price.\n :param is_short: When true, perform the calculation for short instead of long\n :return: Positive stop loss value relative to current price\n \"\"\"\n\n # formula is undefined for current_rate 0, return maximum value\n if current_rate == 0:\n return 1\n\n stoploss = 1 - (stop_rate / current_rate)\n if is_short:\n stoploss = -stoploss\n\n # negative stoploss values indicate the requested stop price is higher/lower\n # (long/short) than the current price\n # shorts can yield stoploss values higher than 1, so limit that as well\n return max(min(stoploss, 1.0), 0.0)\n", "path": "freqtrade/strategy/strategy_helper.py"}], "after_files": [{"content": "from typing import Optional\n\nimport pandas as pd\n\nfrom freqtrade.exchange import timeframe_to_minutes\n\n\ndef merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,\n timeframe: str, timeframe_inf: str, ffill: bool = True,\n append_timeframe: bool = True,\n date_column: str = 'date',\n suffix: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Correctly merge informative samples to the original dataframe, avoiding lookahead bias.\n\n Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a\n 1h candle that starts at 15:00 will result in all candles to know the close at 16:00\n which they should not know.\n\n Moves the date of the informative pair by 1 time interval forward.\n This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the\n last candle that's closed at 15:00, 15:15, 15:30 or 15:45.\n\n Assuming inf_tf = '1d' - then the resulting columns will be:\n date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d\n\n :param dataframe: Original dataframe\n :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n :param append_timeframe: Rename columns by appending timeframe.\n :param date_column: A custom date column name.\n :param suffix: A string suffix to add at the end of the informative columns. If specified,\n append_timeframe must be false.\n :return: Merged dataframe\n :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe\n \"\"\"\n\n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n if minutes == minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[date_column]\n elif minutes < minutes_inf:\n # Subtract \"small\" timeframe so merging is not delayed by 1 small candle\n # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073\n informative['date_merge'] = (\n informative[date_column] + pd.to_timedelta(minutes_inf, 'm') -\n pd.to_timedelta(minutes, 'm')\n )\n else:\n raise ValueError(\"Tried to merge a faster timeframe to a slower timeframe.\"\n \"This would create new rows, and can throw off your regular indicators.\")\n\n # Rename columns to be unique\n date_merge = 'date_merge'\n if suffix and append_timeframe:\n raise ValueError(\"You can not specify `append_timeframe` as True and a `suffix`.\")\n elif append_timeframe:\n date_merge = f'date_merge_{timeframe_inf}'\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n\n elif suffix:\n date_merge = f'date_merge_{suffix}'\n informative.columns = [f\"{col}_{suffix}\" for col in informative.columns]\n\n # Combine the 2 dataframes\n # all indicators on the informative sample MUST be calculated before this point\n if ffill:\n # https://pandas.pydata.org/docs/user_guide/merging.html#timeseries-friendly-merging\n # merge_ordered - ffill method is 2.5x faster than seperate ffill()\n dataframe = pd.merge_ordered(dataframe, informative, fill_method=\"ffill\", left_on='date',\n right_on=date_merge, how='left')\n else:\n dataframe = pd.merge(dataframe, informative, left_on='date',\n right_on=date_merge, how='left')\n dataframe = dataframe.drop(date_merge, axis=1)\n\n # if ffill:\n # dataframe = dataframe.ffill()\n\n return dataframe\n\n\ndef stoploss_from_open(\n open_relative_stop: float,\n current_profit: float,\n is_short: bool = False,\n leverage: float = 1.0\n) -> float:\n \"\"\"\n Given the current profit, and a desired stop loss value relative to the trade entry price,\n return a stop loss value that is relative to the current price, and which can be\n returned from `custom_stoploss`.\n\n The requested stop can be positive for a stop above the open price, or negative for\n a stop below the open price. The return value is always >= 0.\n `open_relative_stop` will be considered as adjusted for leverage if leverage is provided..\n\n Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price\n\n :param open_relative_stop: Desired stop loss percentage, relative to the open price,\n adjusted for leverage\n :param current_profit: The current profit percentage\n :param is_short: When true, perform the calculation for short instead of long\n :param leverage: Leverage to use for the calculation\n :return: Stop loss value relative to current price\n \"\"\"\n\n # formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value\n _current_profit = current_profit / leverage\n if (_current_profit == -1 and not is_short) or (is_short and _current_profit == 1):\n return 1\n\n if is_short is True:\n stoploss = -1 + ((1 - open_relative_stop / leverage) / (1 - _current_profit))\n else:\n stoploss = 1 - ((1 + open_relative_stop / leverage) / (1 + _current_profit))\n\n # negative stoploss values indicate the requested stop price is higher/lower\n # (long/short) than the current price\n return max(stoploss * leverage, 0.0)\n\n\ndef stoploss_from_absolute(stop_rate: float, current_rate: float, is_short: bool = False) -> float:\n \"\"\"\n Given current price and desired stop price, return a stop loss value that is relative to current\n price.\n\n The requested stop can be positive for a stop above the open price, or negative for\n a stop below the open price. The return value is always >= 0.\n\n Returns 0 if the resulting stop price would be above the current price.\n\n :param stop_rate: Stop loss price.\n :param current_rate: Current asset price.\n :param is_short: When true, perform the calculation for short instead of long\n :return: Positive stop loss value relative to current price\n \"\"\"\n\n # formula is undefined for current_rate 0, return maximum value\n if current_rate == 0:\n return 1\n\n stoploss = 1 - (stop_rate / current_rate)\n if is_short:\n stoploss = -stoploss\n\n # negative stoploss values indicate the requested stop price is higher/lower\n # (long/short) than the current price\n # shorts can yield stoploss values higher than 1, so limit that as well\n return max(min(stoploss, 1.0), 0.0)\n", "path": "freqtrade/strategy/strategy_helper.py"}]}
2,934
642
gh_patches_debug_16832
rasdani/github-patches
git_diff
pantsbuild__pants-20984
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `stevedore_namespace` documentation shows `str`'s doc string **Describe the bug** The `stevedore_namespace` BUILD file symbol has a doc-string, but it isn't shown in `pants help-all`. It instead shows what looks like the doc string for `str`. https://github.com/pantsbuild/pants/blob/ec86d19cd954cd49a9562880a7c0dbc45632778c/src/python/pants/backend/python/framework/stevedore/target_types.py#L13-L30 To reproduce, enable the stevedore backend and look at `help` or `help-all`: ```shell PANTS_VERSION=2.22.0.dev3 pants --backend-packages=pants.backend.experimental.python.framework.stevedore help stevedore_namespace ``` ``` `stevedore_namespace` BUILD file symbol --------------------------------------- str(object='') -> str str(bytes_or_buffer[, encoding[, errors]]) -> str Create a new string object from the given object. If encoding or errors is specified, then the object must expose a data buffer that will be decoded using the given encoding and error handler. Otherwise, returns the result of object.__str__() (if defined) or repr(object). encoding defaults to sys.getdefaultencoding(). errors defaults to 'strict'. ``` ```shell PANTS_VERSION=2.22.0.dev3 pants --backend-packages=pants.backend.experimental.python.framework.stevedore help-all | \ jq .name_to_build_file_info.stevedore_namespace ``` ```json { "documentation": "str(object='') -> str\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\n\nCreate a new string object from the given object. If encoding or\nerrors is specified, then the object must expose a data buffer\nthat will be decoded using the given encoding and error handler.\nOtherwise, returns the result of object.__str__() (if defined)\nor repr(object).\nencoding defaults to sys.getdefaultencoding().\nerrors defaults to 'strict'.", "is_target": false, "name": "stevedore_namespace", "signature": null } ``` **Pants version** Seems to be visible in 2.16 through to the currently latest. **OS** both **Additional info** - Will appear in online docs too after https://github.com/pantsbuild/pantsbuild.org/pull/216 - Relevant issues: - https://github.com/pantsbuild/pants/discussions/18117 - https://github.com/pantsbuild/pants/issues/14832 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/backend/python/framework/stevedore/target_types.py` Content: ``` 1 # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 from __future__ import annotations 5 6 from dataclasses import dataclass 7 8 from pants.backend.python.target_types import PythonDistribution 9 from pants.engine.target import StringSequenceField, Targets 10 from pants.util.strutil import help_text 11 12 13 class StevedoreNamespace(str): 14 f"""Tag a namespace in entry_points as a stevedore namespace. 15 16 This is required for the entry_point to be visible to dep inference 17 based on the `stevedore_namespaces` field. 18 19 For example: 20 {PythonDistribution.alias}( 21 ... 22 entry_points={{ 23 stevedore_namespace("a.b.c"): {{ 24 "plugin_name": "some.entry:point", 25 }}, 26 }}, 27 ) 28 """ 29 30 alias = "stevedore_namespace" 31 32 33 # This is a lot like a SpecialCasedDependencies field, but it doesn't list targets directly. 34 class StevedoreNamespacesField(StringSequenceField): 35 alias = "stevedore_namespaces" 36 help = help_text( 37 f""" 38 List the stevedore namespaces required by this target. 39 40 Code for all `entry_points` on `{PythonDistribution.alias}` targets with 41 these namespaces will be added as dependencies so that they are 42 available on PYTHONPATH during tests. Note that this is only a subset 43 of the `{PythonDistribution.alias}`s dependencies, so the `entry_points` 44 only need to be defined on one `{PythonDistribution.alias}` even if the 45 test only needs some of the `entry_points` namespaces on it. 46 47 Plus, an `entry_points.txt` file will be generated in the sandbox so that 48 each of the `{PythonDistribution.alias}`s appear to be "installed". The 49 `entry_points.txt` file will only include the namespaces requested on this 50 field. Without this, stevedore would not be able to look up plugins in 51 the setuptools `entry_points` metadata. 52 53 NOTE: Each `{PythonDistribution.alias}` must opt-in to being included in 54 this repo-wide inference by tagging the namespaces with 55 `{StevedoreNamespace.alias}("my.stevedore.extension")`. 56 57 The stevedore namespace format (`my.stevedore.extension`) is similar 58 to a Python namespace. 59 """ 60 ) 61 62 63 class AllStevedoreExtensionTargets(Targets): 64 pass 65 66 67 @dataclass(frozen=True) 68 class StevedoreNamespacesProviderTargetsRequest: 69 stevedore_namespaces: StevedoreNamespacesField 70 71 72 class StevedoreExtensionTargets(Targets): 73 pass 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/backend/python/framework/stevedore/target_types.py b/src/python/pants/backend/python/framework/stevedore/target_types.py --- a/src/python/pants/backend/python/framework/stevedore/target_types.py +++ b/src/python/pants/backend/python/framework/stevedore/target_types.py @@ -11,20 +11,22 @@ class StevedoreNamespace(str): - f"""Tag a namespace in entry_points as a stevedore namespace. + """Tag a namespace in entry_points as a stevedore namespace. This is required for the entry_point to be visible to dep inference based on the `stevedore_namespaces` field. For example: - {PythonDistribution.alias}( - ... - entry_points={{ - stevedore_namespace("a.b.c"): {{ - "plugin_name": "some.entry:point", - }}, - }}, - ) + ```python + python_distribution( + ... + entry_points={ + stevedore_namespace("a.b.c"): { + "plugin_name": "some.entry:point", + }, + }, + ) + ``` """ alias = "stevedore_namespace"
{"golden_diff": "diff --git a/src/python/pants/backend/python/framework/stevedore/target_types.py b/src/python/pants/backend/python/framework/stevedore/target_types.py\n--- a/src/python/pants/backend/python/framework/stevedore/target_types.py\n+++ b/src/python/pants/backend/python/framework/stevedore/target_types.py\n@@ -11,20 +11,22 @@\n \n \n class StevedoreNamespace(str):\n- f\"\"\"Tag a namespace in entry_points as a stevedore namespace.\n+ \"\"\"Tag a namespace in entry_points as a stevedore namespace.\n \n This is required for the entry_point to be visible to dep inference\n based on the `stevedore_namespaces` field.\n \n For example:\n- {PythonDistribution.alias}(\n- ...\n- entry_points={{\n- stevedore_namespace(\"a.b.c\"): {{\n- \"plugin_name\": \"some.entry:point\",\n- }},\n- }},\n- )\n+ ```python\n+ python_distribution(\n+ ...\n+ entry_points={\n+ stevedore_namespace(\"a.b.c\"): {\n+ \"plugin_name\": \"some.entry:point\",\n+ },\n+ },\n+ )\n+ ```\n \"\"\"\n \n alias = \"stevedore_namespace\"\n", "issue": "`stevedore_namespace` documentation shows `str`'s doc string\n**Describe the bug**\r\n\r\nThe `stevedore_namespace` BUILD file symbol has a doc-string, but it isn't shown in `pants help-all`. It instead shows what looks like the doc string for `str`.\r\n\r\nhttps://github.com/pantsbuild/pants/blob/ec86d19cd954cd49a9562880a7c0dbc45632778c/src/python/pants/backend/python/framework/stevedore/target_types.py#L13-L30\r\n\r\nTo reproduce, enable the stevedore backend and look at `help` or `help-all`:\r\n\r\n```shell\r\nPANTS_VERSION=2.22.0.dev3 pants --backend-packages=pants.backend.experimental.python.framework.stevedore help stevedore_namespace\r\n```\r\n```\r\n`stevedore_namespace` BUILD file symbol\r\n---------------------------------------\r\n\r\nstr(object='') -> str\r\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\r\n\r\nCreate a new string object from the given object. If encoding or\r\nerrors is specified, then the object must expose a data buffer\r\nthat will be decoded using the given encoding and error handler.\r\nOtherwise, returns the result of object.__str__() (if defined)\r\nor repr(object).\r\nencoding defaults to sys.getdefaultencoding().\r\nerrors defaults to 'strict'.\r\n```\r\n\r\n```shell\r\nPANTS_VERSION=2.22.0.dev3 pants --backend-packages=pants.backend.experimental.python.framework.stevedore help-all | \\\r\n jq .name_to_build_file_info.stevedore_namespace\r\n```\r\n```json\r\n{\r\n \"documentation\": \"str(object='') -> str\\nstr(bytes_or_buffer[, encoding[, errors]]) -> str\\n\\nCreate a new string object from the given object. If encoding or\\nerrors is specified, then the object must expose a data buffer\\nthat will be decoded using the given encoding and error handler.\\nOtherwise, returns the result of object.__str__() (if defined)\\nor repr(object).\\nencoding defaults to sys.getdefaultencoding().\\nerrors defaults to 'strict'.\",\r\n \"is_target\": false,\r\n \"name\": \"stevedore_namespace\",\r\n \"signature\": null\r\n}\r\n```\r\n\r\n**Pants version**\r\nSeems to be visible in 2.16 through to the currently latest.\r\n\r\n**OS**\r\nboth\r\n\r\n**Additional info**\r\n\r\n- Will appear in online docs too after https://github.com/pantsbuild/pantsbuild.org/pull/216\r\n- Relevant issues:\r\n - https://github.com/pantsbuild/pants/discussions/18117\r\n - https://github.com/pantsbuild/pants/issues/14832\n", "before_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom pants.backend.python.target_types import PythonDistribution\nfrom pants.engine.target import StringSequenceField, Targets\nfrom pants.util.strutil import help_text\n\n\nclass StevedoreNamespace(str):\n f\"\"\"Tag a namespace in entry_points as a stevedore namespace.\n\n This is required for the entry_point to be visible to dep inference\n based on the `stevedore_namespaces` field.\n\n For example:\n {PythonDistribution.alias}(\n ...\n entry_points={{\n stevedore_namespace(\"a.b.c\"): {{\n \"plugin_name\": \"some.entry:point\",\n }},\n }},\n )\n \"\"\"\n\n alias = \"stevedore_namespace\"\n\n\n# This is a lot like a SpecialCasedDependencies field, but it doesn't list targets directly.\nclass StevedoreNamespacesField(StringSequenceField):\n alias = \"stevedore_namespaces\"\n help = help_text(\n f\"\"\"\n List the stevedore namespaces required by this target.\n\n Code for all `entry_points` on `{PythonDistribution.alias}` targets with\n these namespaces will be added as dependencies so that they are\n available on PYTHONPATH during tests. Note that this is only a subset\n of the `{PythonDistribution.alias}`s dependencies, so the `entry_points`\n only need to be defined on one `{PythonDistribution.alias}` even if the\n test only needs some of the `entry_points` namespaces on it.\n\n Plus, an `entry_points.txt` file will be generated in the sandbox so that\n each of the `{PythonDistribution.alias}`s appear to be \"installed\". The\n `entry_points.txt` file will only include the namespaces requested on this\n field. Without this, stevedore would not be able to look up plugins in\n the setuptools `entry_points` metadata.\n\n NOTE: Each `{PythonDistribution.alias}` must opt-in to being included in\n this repo-wide inference by tagging the namespaces with\n `{StevedoreNamespace.alias}(\"my.stevedore.extension\")`.\n\n The stevedore namespace format (`my.stevedore.extension`) is similar\n to a Python namespace.\n \"\"\"\n )\n\n\nclass AllStevedoreExtensionTargets(Targets):\n pass\n\n\n@dataclass(frozen=True)\nclass StevedoreNamespacesProviderTargetsRequest:\n stevedore_namespaces: StevedoreNamespacesField\n\n\nclass StevedoreExtensionTargets(Targets):\n pass\n", "path": "src/python/pants/backend/python/framework/stevedore/target_types.py"}], "after_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom pants.backend.python.target_types import PythonDistribution\nfrom pants.engine.target import StringSequenceField, Targets\nfrom pants.util.strutil import help_text\n\n\nclass StevedoreNamespace(str):\n \"\"\"Tag a namespace in entry_points as a stevedore namespace.\n\n This is required for the entry_point to be visible to dep inference\n based on the `stevedore_namespaces` field.\n\n For example:\n ```python\n python_distribution(\n ...\n entry_points={\n stevedore_namespace(\"a.b.c\"): {\n \"plugin_name\": \"some.entry:point\",\n },\n },\n )\n ```\n \"\"\"\n\n alias = \"stevedore_namespace\"\n\n\n# This is a lot like a SpecialCasedDependencies field, but it doesn't list targets directly.\nclass StevedoreNamespacesField(StringSequenceField):\n alias = \"stevedore_namespaces\"\n help = help_text(\n f\"\"\"\n List the stevedore namespaces required by this target.\n\n Code for all `entry_points` on `{PythonDistribution.alias}` targets with\n these namespaces will be added as dependencies so that they are\n available on PYTHONPATH during tests. Note that this is only a subset\n of the `{PythonDistribution.alias}`s dependencies, so the `entry_points`\n only need to be defined on one `{PythonDistribution.alias}` even if the\n test only needs some of the `entry_points` namespaces on it.\n\n Plus, an `entry_points.txt` file will be generated in the sandbox so that\n each of the `{PythonDistribution.alias}`s appear to be \"installed\". The\n `entry_points.txt` file will only include the namespaces requested on this\n field. Without this, stevedore would not be able to look up plugins in\n the setuptools `entry_points` metadata.\n\n NOTE: Each `{PythonDistribution.alias}` must opt-in to being included in\n this repo-wide inference by tagging the namespaces with\n `{StevedoreNamespace.alias}(\"my.stevedore.extension\")`.\n\n The stevedore namespace format (`my.stevedore.extension`) is similar\n to a Python namespace.\n \"\"\"\n )\n\n\nclass AllStevedoreExtensionTargets(Targets):\n pass\n\n\n@dataclass(frozen=True)\nclass StevedoreNamespacesProviderTargetsRequest:\n stevedore_namespaces: StevedoreNamespacesField\n\n\nclass StevedoreExtensionTargets(Targets):\n pass\n", "path": "src/python/pants/backend/python/framework/stevedore/target_types.py"}]}
1,552
272
gh_patches_debug_37847
rasdani/github-patches
git_diff
kubeflow__pipelines-3912
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- mount_pvc failing when pipeline includes ResourceOps ### What steps did you take: Created a persistent volume claim using a `VolumeOp`: ```python vop = dsl.VolumeOp( name="shared-data", resource_name="shared-data", size="100G", storage_class="nfs", ).set_display_name("Prepare volume") ``` Attempted to mount the PVC to every operation in a pipeline using: ```python dsl.get_pipeline_conf().add_op_transformer( mount_pvc(pvc_name=vop.volume.persistent_volume_claim.claim_name, volume_mount_path=MOUNT_PATH) ) ``` ### What happened: During compilation, the `add_op_transformer` attempts to invoke the `add_volume_mount` method on the `VolumeOp`, producing an error: ``` AttributeError: 'VolumeOp' object has no attribute 'add_volume_mount' ``` ### What did you expect to happen: The documentation states that transfomers take and return a `ContainerOp`: https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.dsl.html#kfp.dsl.PipelineConf.add_op_transformer Given that, I would expect them to be bypassed for `VolumeOp` (or other `ResourceOp` operations), and hence this compilation should succeed. ### Environment: <!-- Please fill in those that seem relevant. --> KFP version: Build commit: 9c16e12 KFP SDK version: <!-- Please attach the output of this shell command: $pip list | grep kfp --> kfp 0.5.1 kfp-server-api 0.3.0 ### Anything else you would like to add: [Miscellaneous information that will assist in solving the issue.] /kind bug <!-- Please include labels by uncommenting them to help us better triage issues, choose from the following --> <!-- // /area frontend // /area backend // /area sdk // /area testing // /area engprod --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/kfp/dsl/_pipeline.py` Content: ``` 1 # Copyright 2018-2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 from . import _container_op 17 from . import _resource_op 18 from . import _ops_group 19 from ._component_bridge import _create_container_op_from_component_and_arguments 20 from ..components import _components 21 from ..components._naming import _make_name_unique_by_adding_index 22 import sys 23 24 25 # This handler is called whenever the @pipeline decorator is applied. 26 # It can be used by command-line DSL compiler to inject code that runs for every pipeline definition. 27 _pipeline_decorator_handler = None 28 29 30 def pipeline(name : str = None, description : str = None): 31 """Decorator of pipeline functions. 32 33 Usage: 34 ```python 35 @pipeline( 36 name='my awesome pipeline', 37 description='Is it really awesome?' 38 ) 39 def my_pipeline(a: PipelineParam, b: PipelineParam): 40 ... 41 ``` 42 """ 43 def _pipeline(func): 44 if name: 45 func._component_human_name = name 46 if description: 47 func._component_description = description 48 49 if _pipeline_decorator_handler: 50 return _pipeline_decorator_handler(func) or func 51 else: 52 return func 53 54 return _pipeline 55 56 class PipelineConf(): 57 """PipelineConf contains pipeline level settings 58 """ 59 def __init__(self): 60 self.image_pull_secrets = [] 61 self.timeout = 0 62 self.ttl_seconds_after_finished = -1 63 self.op_transformers = [] 64 self.default_pod_node_selector = {} 65 self.image_pull_policy = None 66 self.parallelism = None 67 68 def set_image_pull_secrets(self, image_pull_secrets): 69 """Configures the pipeline level imagepullsecret 70 71 Args: 72 image_pull_secrets: a list of Kubernetes V1LocalObjectReference 73 For detailed description, check Kubernetes V1LocalObjectReference definition 74 https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1LocalObjectReference.md 75 """ 76 self.image_pull_secrets = image_pull_secrets 77 return self 78 79 def set_timeout(self, seconds: int): 80 """Configures the pipeline level timeout 81 82 Args: 83 seconds: number of seconds for timeout 84 """ 85 self.timeout = seconds 86 return self 87 88 def set_parallelism(self, max_num_pods: int): 89 """Configures the max number of total parallel pods that can execute at the same time in a workflow. 90 91 Args: 92 max_num_pods (int): max number of total parallel pods. 93 """ 94 self.parallelism = max_num_pods 95 return self 96 97 def set_ttl_seconds_after_finished(self, seconds: int): 98 """Configures the ttl after the pipeline has finished. 99 100 Args: 101 seconds: number of seconds for the workflow to be garbage collected after it is finished. 102 """ 103 self.ttl_seconds_after_finished = seconds 104 return self 105 106 def set_default_pod_node_selector(self, label_name: str, value: str): 107 """Add a constraint for nodeSelector for a pipeline. Each constraint is a key-value pair label. For the 108 container to be eligible to run on a node, the node must have each of the constraints appeared 109 as labels. 110 111 Args: 112 label_name: The name of the constraint label. 113 value: The value of the constraint label. 114 """ 115 self.default_pod_node_selector[label_name] = value 116 return self 117 118 119 def set_image_pull_policy(self, policy: str): 120 """Configures the default image pull policy 121 122 Args: 123 policy: the pull policy, has to be one of: Always, Never, IfNotPresent. 124 For more info: https://github.com/kubernetes-client/python/blob/10a7f95435c0b94a6d949ba98375f8cc85a70e5a/kubernetes/docs/V1Container.md 125 """ 126 self.image_pull_policy = policy 127 return self 128 129 def add_op_transformer(self, transformer): 130 """Configures the op_transformers which will be applied to all ops in the pipeline. 131 132 Args: 133 transformer: a function that takes a ContainOp as input and returns a ContainerOp 134 """ 135 self.op_transformers.append(transformer) 136 137 138 def get_pipeline_conf(): 139 """Configure the pipeline level setting to the current pipeline 140 Note: call the function inside the user defined pipeline function. 141 """ 142 return Pipeline.get_default_pipeline().conf 143 144 #TODO: Pipeline is in fact an opsgroup, refactor the code. 145 class Pipeline(): 146 """A pipeline contains a list of operators. 147 148 This class is not supposed to be used by pipeline authors since pipeline authors can use 149 pipeline functions (decorated with @pipeline) to reference their pipelines. This class 150 is useful for implementing a compiler. For example, the compiler can use the following 151 to get the pipeline object and its ops: 152 153 ```python 154 with Pipeline() as p: 155 pipeline_func(*args_list) 156 157 traverse(p.ops) 158 ``` 159 """ 160 161 # _default_pipeline is set when it (usually a compiler) runs "with Pipeline()" 162 _default_pipeline = None 163 164 @staticmethod 165 def get_default_pipeline(): 166 """Get default pipeline. """ 167 return Pipeline._default_pipeline 168 169 @staticmethod 170 def add_pipeline(name, description, func): 171 """Add a pipeline function with the specified name and description.""" 172 # Applying the @pipeline decorator to the pipeline function 173 func = pipeline(name=name, description=description)(func) 174 175 def __init__(self, name: str): 176 """Create a new instance of Pipeline. 177 178 Args: 179 name: the name of the pipeline. Once deployed, the name will show up in Pipeline System UI. 180 """ 181 self.name = name 182 self.ops = {} 183 # Add the root group. 184 self.groups = [_ops_group.OpsGroup('pipeline', name=name)] 185 self.group_id = 0 186 self.conf = PipelineConf() 187 self._metadata = None 188 189 def __enter__(self): 190 if Pipeline._default_pipeline: 191 raise Exception('Nested pipelines are not allowed.') 192 193 Pipeline._default_pipeline = self 194 self._old_container_task_constructor = _components._container_task_constructor 195 _components._container_task_constructor = _create_container_op_from_component_and_arguments 196 197 def register_op_and_generate_id(op): 198 return self.add_op(op, op.is_exit_handler) 199 200 self._old__register_op_handler = _container_op._register_op_handler 201 _container_op._register_op_handler = register_op_and_generate_id 202 return self 203 204 def __exit__(self, *args): 205 Pipeline._default_pipeline = None 206 _container_op._register_op_handler = self._old__register_op_handler 207 _components._container_task_constructor = self._old_container_task_constructor 208 209 def add_op(self, op: _container_op.BaseOp, define_only: bool): 210 """Add a new operator. 211 212 Args: 213 op: An operator of ContainerOp, ResourceOp or their inherited types. 214 215 Returns 216 op_name: a unique op name. 217 """ 218 #If there is an existing op with this name then generate a new name. 219 op_name = _make_name_unique_by_adding_index(op.human_name, list(self.ops.keys()), ' ') 220 221 self.ops[op_name] = op 222 if not define_only: 223 self.groups[-1].ops.append(op) 224 225 return op_name 226 227 def push_ops_group(self, group: _ops_group.OpsGroup): 228 """Push an OpsGroup into the stack. 229 230 Args: 231 group: An OpsGroup. Typically it is one of ExitHandler, Branch, and Loop. 232 """ 233 self.groups[-1].groups.append(group) 234 self.groups.append(group) 235 236 def pop_ops_group(self): 237 """Remove the current OpsGroup from the stack.""" 238 del self.groups[-1] 239 240 def remove_op_from_groups(self, op): 241 for group in self.groups: 242 group.remove_op_recursive(op) 243 244 def get_next_group_id(self): 245 """Get next id for a new group. """ 246 247 self.group_id += 1 248 return self.group_id 249 250 def _set_metadata(self, metadata): 251 '''_set_metadata passes the containerop the metadata information 252 Args: 253 metadata (ComponentMeta): component metadata 254 ''' 255 self._metadata = metadata 256 ``` Path: `sdk/python/kfp/onprem.py` Content: ``` 1 2 def mount_pvc(pvc_name='pipeline-claim', volume_name='pipeline', volume_mount_path='/mnt/pipeline'): 3 """ 4 Modifier function to apply to a Container Op to simplify volume, volume mount addition and 5 enable better reuse of volumes, volume claims across container ops. 6 Usage: 7 train = train_op(...) 8 train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline')) 9 """ 10 def _mount_pvc(task): 11 from kubernetes import client as k8s_client 12 local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name) 13 return ( 14 task 15 .add_volume( 16 k8s_client.V1Volume(name=volume_name, persistent_volume_claim=local_pvc) 17 ) 18 .add_volume_mount( 19 k8s_client.V1VolumeMount(mount_path=volume_mount_path, name=volume_name) 20 ) 21 ) 22 return _mount_pvc 23 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/kfp/dsl/_pipeline.py b/sdk/python/kfp/dsl/_pipeline.py --- a/sdk/python/kfp/dsl/_pipeline.py +++ b/sdk/python/kfp/dsl/_pipeline.py @@ -102,9 +102,9 @@ """ self.ttl_seconds_after_finished = seconds return self - - def set_default_pod_node_selector(self, label_name: str, value: str): - """Add a constraint for nodeSelector for a pipeline. Each constraint is a key-value pair label. For the + + def set_default_pod_node_selector(self, label_name: str, value: str): + """Add a constraint for nodeSelector for a pipeline. Each constraint is a key-value pair label. For the container to be eligible to run on a node, the node must have each of the constraints appeared as labels. @@ -114,7 +114,7 @@ """ self.default_pod_node_selector[label_name] = value return self - + def set_image_pull_policy(self, policy: str): """Configures the default image pull policy @@ -128,9 +128,10 @@ def add_op_transformer(self, transformer): """Configures the op_transformers which will be applied to all ops in the pipeline. + The ops can be ResourceOp, VolumenOp, or ContainerOp. Args: - transformer: a function that takes a ContainOp as input and returns a ContainerOp + transformer: a function that takes a kfp Op as input and returns a kfp Op """ self.op_transformers.append(transformer) diff --git a/sdk/python/kfp/onprem.py b/sdk/python/kfp/onprem.py --- a/sdk/python/kfp/onprem.py +++ b/sdk/python/kfp/onprem.py @@ -1,7 +1,7 @@ def mount_pvc(pvc_name='pipeline-claim', volume_name='pipeline', volume_mount_path='/mnt/pipeline'): """ - Modifier function to apply to a Container Op to simplify volume, volume mount addition and + Modifier function to apply to a Container Op to simplify volume, volume mount addition and enable better reuse of volumes, volume claims across container ops. Usage: train = train_op(...) @@ -9,6 +9,10 @@ """ def _mount_pvc(task): from kubernetes import client as k8s_client + # there can be other ops in a pipeline (e.g. ResourceOp, VolumeOp) + # refer to #3906 + if not hasattr(task, "add_volume") or not hasattr(task, "add_volume_mount"): + return task local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name) return ( task
{"golden_diff": "diff --git a/sdk/python/kfp/dsl/_pipeline.py b/sdk/python/kfp/dsl/_pipeline.py\n--- a/sdk/python/kfp/dsl/_pipeline.py\n+++ b/sdk/python/kfp/dsl/_pipeline.py\n@@ -102,9 +102,9 @@\n \"\"\"\n self.ttl_seconds_after_finished = seconds\n return self\n- \n- def set_default_pod_node_selector(self, label_name: str, value: str): \n- \"\"\"Add a constraint for nodeSelector for a pipeline. Each constraint is a key-value pair label. For the \n+\n+ def set_default_pod_node_selector(self, label_name: str, value: str):\n+ \"\"\"Add a constraint for nodeSelector for a pipeline. Each constraint is a key-value pair label. For the\n container to be eligible to run on a node, the node must have each of the constraints appeared\n as labels.\n \n@@ -114,7 +114,7 @@\n \"\"\"\n self.default_pod_node_selector[label_name] = value\n return self\n- \n+\n \n def set_image_pull_policy(self, policy: str):\n \"\"\"Configures the default image pull policy\n@@ -128,9 +128,10 @@\n \n def add_op_transformer(self, transformer):\n \"\"\"Configures the op_transformers which will be applied to all ops in the pipeline.\n+ The ops can be ResourceOp, VolumenOp, or ContainerOp.\n \n Args:\n- transformer: a function that takes a ContainOp as input and returns a ContainerOp\n+ transformer: a function that takes a kfp Op as input and returns a kfp Op\n \"\"\"\n self.op_transformers.append(transformer)\n \ndiff --git a/sdk/python/kfp/onprem.py b/sdk/python/kfp/onprem.py\n--- a/sdk/python/kfp/onprem.py\n+++ b/sdk/python/kfp/onprem.py\n@@ -1,7 +1,7 @@\n \n def mount_pvc(pvc_name='pipeline-claim', volume_name='pipeline', volume_mount_path='/mnt/pipeline'):\n \"\"\"\n- Modifier function to apply to a Container Op to simplify volume, volume mount addition and \n+ Modifier function to apply to a Container Op to simplify volume, volume mount addition and\n enable better reuse of volumes, volume claims across container ops.\n Usage:\n train = train_op(...)\n@@ -9,6 +9,10 @@\n \"\"\"\n def _mount_pvc(task):\n from kubernetes import client as k8s_client\n+ # there can be other ops in a pipeline (e.g. ResourceOp, VolumeOp)\n+ # refer to #3906\n+ if not hasattr(task, \"add_volume\") or not hasattr(task, \"add_volume_mount\"):\n+ return task\n local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)\n return (\n task\n", "issue": "mount_pvc failing when pipeline includes ResourceOps\n### What steps did you take:\r\n\r\nCreated a persistent volume claim using a `VolumeOp`:\r\n\r\n```python\r\n vop = dsl.VolumeOp(\r\n name=\"shared-data\",\r\n resource_name=\"shared-data\",\r\n size=\"100G\",\r\n storage_class=\"nfs\",\r\n ).set_display_name(\"Prepare volume\")\r\n```\r\n\r\nAttempted to mount the PVC to every operation in a pipeline using:\r\n\r\n```python\r\n dsl.get_pipeline_conf().add_op_transformer(\r\n mount_pvc(pvc_name=vop.volume.persistent_volume_claim.claim_name, volume_mount_path=MOUNT_PATH)\r\n )\r\n```\r\n\r\n### What happened:\r\nDuring compilation, the `add_op_transformer` attempts to invoke the `add_volume_mount` method on the `VolumeOp`, producing an error:\r\n\r\n```\r\nAttributeError: 'VolumeOp' object has no attribute 'add_volume_mount'\r\n```\r\n\r\n### What did you expect to happen:\r\n\r\nThe documentation states that transfomers take and return a `ContainerOp`:\r\n\r\nhttps://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.dsl.html#kfp.dsl.PipelineConf.add_op_transformer\r\n\r\nGiven that, I would expect them to be bypassed for `VolumeOp` (or other `ResourceOp` operations), and hence this compilation should succeed.\r\n\r\n### Environment:\r\n<!-- Please fill in those that seem relevant. -->\r\n\r\nKFP version: Build commit: 9c16e12\r\n\r\nKFP SDK version: <!-- Please attach the output of this shell command: $pip list | grep kfp -->\r\nkfp 0.5.1\r\nkfp-server-api 0.3.0\r\n\r\n### Anything else you would like to add:\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n/kind bug\r\n<!-- Please include labels by uncommenting them to help us better triage issues, choose from the following -->\r\n<!--\r\n// /area frontend\r\n// /area backend\r\n// /area sdk\r\n// /area testing\r\n// /area engprod\r\n-->\r\n\n", "before_files": [{"content": "# Copyright 2018-2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom . import _container_op\nfrom . import _resource_op\nfrom . import _ops_group\nfrom ._component_bridge import _create_container_op_from_component_and_arguments\nfrom ..components import _components\nfrom ..components._naming import _make_name_unique_by_adding_index\nimport sys\n\n\n# This handler is called whenever the @pipeline decorator is applied.\n# It can be used by command-line DSL compiler to inject code that runs for every pipeline definition.\n_pipeline_decorator_handler = None\n\n\ndef pipeline(name : str = None, description : str = None):\n \"\"\"Decorator of pipeline functions.\n\n Usage:\n ```python\n @pipeline(\n name='my awesome pipeline',\n description='Is it really awesome?'\n )\n def my_pipeline(a: PipelineParam, b: PipelineParam):\n ...\n ```\n \"\"\"\n def _pipeline(func):\n if name:\n func._component_human_name = name\n if description:\n func._component_description = description\n\n if _pipeline_decorator_handler:\n return _pipeline_decorator_handler(func) or func\n else:\n return func\n\n return _pipeline\n\nclass PipelineConf():\n \"\"\"PipelineConf contains pipeline level settings\n \"\"\"\n def __init__(self):\n self.image_pull_secrets = []\n self.timeout = 0\n self.ttl_seconds_after_finished = -1\n self.op_transformers = []\n self.default_pod_node_selector = {}\n self.image_pull_policy = None\n self.parallelism = None\n\n def set_image_pull_secrets(self, image_pull_secrets):\n \"\"\"Configures the pipeline level imagepullsecret\n\n Args:\n image_pull_secrets: a list of Kubernetes V1LocalObjectReference\n For detailed description, check Kubernetes V1LocalObjectReference definition\n https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1LocalObjectReference.md\n \"\"\"\n self.image_pull_secrets = image_pull_secrets\n return self\n\n def set_timeout(self, seconds: int):\n \"\"\"Configures the pipeline level timeout\n\n Args:\n seconds: number of seconds for timeout\n \"\"\"\n self.timeout = seconds\n return self\n\n def set_parallelism(self, max_num_pods: int):\n \"\"\"Configures the max number of total parallel pods that can execute at the same time in a workflow.\n\n Args:\n max_num_pods (int): max number of total parallel pods.\n \"\"\"\n self.parallelism = max_num_pods\n return self\n\n def set_ttl_seconds_after_finished(self, seconds: int):\n \"\"\"Configures the ttl after the pipeline has finished.\n\n Args:\n seconds: number of seconds for the workflow to be garbage collected after it is finished.\n \"\"\"\n self.ttl_seconds_after_finished = seconds\n return self\n \n def set_default_pod_node_selector(self, label_name: str, value: str): \n \"\"\"Add a constraint for nodeSelector for a pipeline. Each constraint is a key-value pair label. For the \n container to be eligible to run on a node, the node must have each of the constraints appeared\n as labels.\n\n Args:\n label_name: The name of the constraint label.\n value: The value of the constraint label.\n \"\"\"\n self.default_pod_node_selector[label_name] = value\n return self\n \n\n def set_image_pull_policy(self, policy: str):\n \"\"\"Configures the default image pull policy\n\n Args:\n policy: the pull policy, has to be one of: Always, Never, IfNotPresent.\n For more info: https://github.com/kubernetes-client/python/blob/10a7f95435c0b94a6d949ba98375f8cc85a70e5a/kubernetes/docs/V1Container.md\n \"\"\"\n self.image_pull_policy = policy\n return self\n\n def add_op_transformer(self, transformer):\n \"\"\"Configures the op_transformers which will be applied to all ops in the pipeline.\n\n Args:\n transformer: a function that takes a ContainOp as input and returns a ContainerOp\n \"\"\"\n self.op_transformers.append(transformer)\n\n\ndef get_pipeline_conf():\n \"\"\"Configure the pipeline level setting to the current pipeline\n Note: call the function inside the user defined pipeline function.\n \"\"\"\n return Pipeline.get_default_pipeline().conf\n\n#TODO: Pipeline is in fact an opsgroup, refactor the code.\nclass Pipeline():\n \"\"\"A pipeline contains a list of operators.\n\n This class is not supposed to be used by pipeline authors since pipeline authors can use\n pipeline functions (decorated with @pipeline) to reference their pipelines. This class\n is useful for implementing a compiler. For example, the compiler can use the following\n to get the pipeline object and its ops:\n\n ```python\n with Pipeline() as p:\n pipeline_func(*args_list)\n\n traverse(p.ops)\n ```\n \"\"\"\n\n # _default_pipeline is set when it (usually a compiler) runs \"with Pipeline()\"\n _default_pipeline = None\n\n @staticmethod\n def get_default_pipeline():\n \"\"\"Get default pipeline. \"\"\"\n return Pipeline._default_pipeline\n\n @staticmethod\n def add_pipeline(name, description, func):\n \"\"\"Add a pipeline function with the specified name and description.\"\"\"\n # Applying the @pipeline decorator to the pipeline function\n func = pipeline(name=name, description=description)(func)\n\n def __init__(self, name: str):\n \"\"\"Create a new instance of Pipeline.\n\n Args:\n name: the name of the pipeline. Once deployed, the name will show up in Pipeline System UI.\n \"\"\"\n self.name = name\n self.ops = {}\n # Add the root group.\n self.groups = [_ops_group.OpsGroup('pipeline', name=name)]\n self.group_id = 0\n self.conf = PipelineConf()\n self._metadata = None\n\n def __enter__(self):\n if Pipeline._default_pipeline:\n raise Exception('Nested pipelines are not allowed.')\n\n Pipeline._default_pipeline = self\n self._old_container_task_constructor = _components._container_task_constructor\n _components._container_task_constructor = _create_container_op_from_component_and_arguments\n\n def register_op_and_generate_id(op):\n return self.add_op(op, op.is_exit_handler)\n\n self._old__register_op_handler = _container_op._register_op_handler\n _container_op._register_op_handler = register_op_and_generate_id\n return self\n\n def __exit__(self, *args):\n Pipeline._default_pipeline = None\n _container_op._register_op_handler = self._old__register_op_handler\n _components._container_task_constructor = self._old_container_task_constructor\n\n def add_op(self, op: _container_op.BaseOp, define_only: bool):\n \"\"\"Add a new operator.\n\n Args:\n op: An operator of ContainerOp, ResourceOp or their inherited types.\n\n Returns\n op_name: a unique op name.\n \"\"\"\n #If there is an existing op with this name then generate a new name.\n op_name = _make_name_unique_by_adding_index(op.human_name, list(self.ops.keys()), ' ')\n\n self.ops[op_name] = op\n if not define_only:\n self.groups[-1].ops.append(op)\n\n return op_name\n\n def push_ops_group(self, group: _ops_group.OpsGroup):\n \"\"\"Push an OpsGroup into the stack.\n\n Args:\n group: An OpsGroup. Typically it is one of ExitHandler, Branch, and Loop.\n \"\"\"\n self.groups[-1].groups.append(group)\n self.groups.append(group)\n\n def pop_ops_group(self):\n \"\"\"Remove the current OpsGroup from the stack.\"\"\"\n del self.groups[-1]\n\n def remove_op_from_groups(self, op):\n for group in self.groups:\n group.remove_op_recursive(op)\n\n def get_next_group_id(self):\n \"\"\"Get next id for a new group. \"\"\"\n\n self.group_id += 1\n return self.group_id\n\n def _set_metadata(self, metadata):\n '''_set_metadata passes the containerop the metadata information\n Args:\n metadata (ComponentMeta): component metadata\n '''\n self._metadata = metadata\n", "path": "sdk/python/kfp/dsl/_pipeline.py"}, {"content": "\ndef mount_pvc(pvc_name='pipeline-claim', volume_name='pipeline', volume_mount_path='/mnt/pipeline'):\n \"\"\"\n Modifier function to apply to a Container Op to simplify volume, volume mount addition and \n enable better reuse of volumes, volume claims across container ops.\n Usage:\n train = train_op(...)\n train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))\n \"\"\"\n def _mount_pvc(task):\n from kubernetes import client as k8s_client\n local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)\n return (\n task\n .add_volume(\n k8s_client.V1Volume(name=volume_name, persistent_volume_claim=local_pvc)\n )\n .add_volume_mount(\n k8s_client.V1VolumeMount(mount_path=volume_mount_path, name=volume_name)\n )\n )\n return _mount_pvc\n", "path": "sdk/python/kfp/onprem.py"}], "after_files": [{"content": "# Copyright 2018-2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom . import _container_op\nfrom . import _resource_op\nfrom . import _ops_group\nfrom ._component_bridge import _create_container_op_from_component_and_arguments\nfrom ..components import _components\nfrom ..components._naming import _make_name_unique_by_adding_index\nimport sys\n\n\n# This handler is called whenever the @pipeline decorator is applied.\n# It can be used by command-line DSL compiler to inject code that runs for every pipeline definition.\n_pipeline_decorator_handler = None\n\n\ndef pipeline(name : str = None, description : str = None):\n \"\"\"Decorator of pipeline functions.\n\n Usage:\n ```python\n @pipeline(\n name='my awesome pipeline',\n description='Is it really awesome?'\n )\n def my_pipeline(a: PipelineParam, b: PipelineParam):\n ...\n ```\n \"\"\"\n def _pipeline(func):\n if name:\n func._component_human_name = name\n if description:\n func._component_description = description\n\n if _pipeline_decorator_handler:\n return _pipeline_decorator_handler(func) or func\n else:\n return func\n\n return _pipeline\n\nclass PipelineConf():\n \"\"\"PipelineConf contains pipeline level settings\n \"\"\"\n def __init__(self):\n self.image_pull_secrets = []\n self.timeout = 0\n self.ttl_seconds_after_finished = -1\n self.op_transformers = []\n self.default_pod_node_selector = {}\n self.image_pull_policy = None\n self.parallelism = None\n\n def set_image_pull_secrets(self, image_pull_secrets):\n \"\"\"Configures the pipeline level imagepullsecret\n\n Args:\n image_pull_secrets: a list of Kubernetes V1LocalObjectReference\n For detailed description, check Kubernetes V1LocalObjectReference definition\n https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1LocalObjectReference.md\n \"\"\"\n self.image_pull_secrets = image_pull_secrets\n return self\n\n def set_timeout(self, seconds: int):\n \"\"\"Configures the pipeline level timeout\n\n Args:\n seconds: number of seconds for timeout\n \"\"\"\n self.timeout = seconds\n return self\n\n def set_parallelism(self, max_num_pods: int):\n \"\"\"Configures the max number of total parallel pods that can execute at the same time in a workflow.\n\n Args:\n max_num_pods (int): max number of total parallel pods.\n \"\"\"\n self.parallelism = max_num_pods\n return self\n\n def set_ttl_seconds_after_finished(self, seconds: int):\n \"\"\"Configures the ttl after the pipeline has finished.\n\n Args:\n seconds: number of seconds for the workflow to be garbage collected after it is finished.\n \"\"\"\n self.ttl_seconds_after_finished = seconds\n return self\n\n def set_default_pod_node_selector(self, label_name: str, value: str):\n \"\"\"Add a constraint for nodeSelector for a pipeline. Each constraint is a key-value pair label. For the\n container to be eligible to run on a node, the node must have each of the constraints appeared\n as labels.\n\n Args:\n label_name: The name of the constraint label.\n value: The value of the constraint label.\n \"\"\"\n self.default_pod_node_selector[label_name] = value\n return self\n\n\n def set_image_pull_policy(self, policy: str):\n \"\"\"Configures the default image pull policy\n\n Args:\n policy: the pull policy, has to be one of: Always, Never, IfNotPresent.\n For more info: https://github.com/kubernetes-client/python/blob/10a7f95435c0b94a6d949ba98375f8cc85a70e5a/kubernetes/docs/V1Container.md\n \"\"\"\n self.image_pull_policy = policy\n return self\n\n def add_op_transformer(self, transformer):\n \"\"\"Configures the op_transformers which will be applied to all ops in the pipeline.\n The ops can be ResourceOp, VolumenOp, or ContainerOp.\n\n Args:\n transformer: a function that takes a kfp Op as input and returns a kfp Op\n \"\"\"\n self.op_transformers.append(transformer)\n\n\ndef get_pipeline_conf():\n \"\"\"Configure the pipeline level setting to the current pipeline\n Note: call the function inside the user defined pipeline function.\n \"\"\"\n return Pipeline.get_default_pipeline().conf\n\n#TODO: Pipeline is in fact an opsgroup, refactor the code.\nclass Pipeline():\n \"\"\"A pipeline contains a list of operators.\n\n This class is not supposed to be used by pipeline authors since pipeline authors can use\n pipeline functions (decorated with @pipeline) to reference their pipelines. This class\n is useful for implementing a compiler. For example, the compiler can use the following\n to get the pipeline object and its ops:\n\n ```python\n with Pipeline() as p:\n pipeline_func(*args_list)\n\n traverse(p.ops)\n ```\n \"\"\"\n\n # _default_pipeline is set when it (usually a compiler) runs \"with Pipeline()\"\n _default_pipeline = None\n\n @staticmethod\n def get_default_pipeline():\n \"\"\"Get default pipeline. \"\"\"\n return Pipeline._default_pipeline\n\n @staticmethod\n def add_pipeline(name, description, func):\n \"\"\"Add a pipeline function with the specified name and description.\"\"\"\n # Applying the @pipeline decorator to the pipeline function\n func = pipeline(name=name, description=description)(func)\n\n def __init__(self, name: str):\n \"\"\"Create a new instance of Pipeline.\n\n Args:\n name: the name of the pipeline. Once deployed, the name will show up in Pipeline System UI.\n \"\"\"\n self.name = name\n self.ops = {}\n # Add the root group.\n self.groups = [_ops_group.OpsGroup('pipeline', name=name)]\n self.group_id = 0\n self.conf = PipelineConf()\n self._metadata = None\n\n def __enter__(self):\n if Pipeline._default_pipeline:\n raise Exception('Nested pipelines are not allowed.')\n\n Pipeline._default_pipeline = self\n self._old_container_task_constructor = _components._container_task_constructor\n _components._container_task_constructor = _create_container_op_from_component_and_arguments\n\n def register_op_and_generate_id(op):\n return self.add_op(op, op.is_exit_handler)\n\n self._old__register_op_handler = _container_op._register_op_handler\n _container_op._register_op_handler = register_op_and_generate_id\n return self\n\n def __exit__(self, *args):\n Pipeline._default_pipeline = None\n _container_op._register_op_handler = self._old__register_op_handler\n _components._container_task_constructor = self._old_container_task_constructor\n\n def add_op(self, op: _container_op.BaseOp, define_only: bool):\n \"\"\"Add a new operator.\n\n Args:\n op: An operator of ContainerOp, ResourceOp or their inherited types.\n\n Returns\n op_name: a unique op name.\n \"\"\"\n #If there is an existing op with this name then generate a new name.\n op_name = _make_name_unique_by_adding_index(op.human_name, list(self.ops.keys()), ' ')\n\n self.ops[op_name] = op\n if not define_only:\n self.groups[-1].ops.append(op)\n\n return op_name\n\n def push_ops_group(self, group: _ops_group.OpsGroup):\n \"\"\"Push an OpsGroup into the stack.\n\n Args:\n group: An OpsGroup. Typically it is one of ExitHandler, Branch, and Loop.\n \"\"\"\n self.groups[-1].groups.append(group)\n self.groups.append(group)\n\n def pop_ops_group(self):\n \"\"\"Remove the current OpsGroup from the stack.\"\"\"\n del self.groups[-1]\n\n def remove_op_from_groups(self, op):\n for group in self.groups:\n group.remove_op_recursive(op)\n\n def get_next_group_id(self):\n \"\"\"Get next id for a new group. \"\"\"\n\n self.group_id += 1\n return self.group_id\n\n def _set_metadata(self, metadata):\n '''_set_metadata passes the containerop the metadata information\n Args:\n metadata (ComponentMeta): component metadata\n '''\n self._metadata = metadata\n", "path": "sdk/python/kfp/dsl/_pipeline.py"}, {"content": "\ndef mount_pvc(pvc_name='pipeline-claim', volume_name='pipeline', volume_mount_path='/mnt/pipeline'):\n \"\"\"\n Modifier function to apply to a Container Op to simplify volume, volume mount addition and\n enable better reuse of volumes, volume claims across container ops.\n Usage:\n train = train_op(...)\n train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))\n \"\"\"\n def _mount_pvc(task):\n from kubernetes import client as k8s_client\n # there can be other ops in a pipeline (e.g. ResourceOp, VolumeOp)\n # refer to #3906\n if not hasattr(task, \"add_volume\") or not hasattr(task, \"add_volume_mount\"):\n return task\n local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)\n return (\n task\n .add_volume(\n k8s_client.V1Volume(name=volume_name, persistent_volume_claim=local_pvc)\n )\n .add_volume_mount(\n k8s_client.V1VolumeMount(mount_path=volume_mount_path, name=volume_name)\n )\n )\n return _mount_pvc\n", "path": "sdk/python/kfp/onprem.py"}]}
3,531
636
gh_patches_debug_18816
rasdani/github-patches
git_diff
encode__uvicorn-646
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add support for --reload to monitor additional file types. The "reload" process currently only monitors ".py" files in various directories. I have a changes that will pass in a list of additional "reload_suffixes" that the process will monitor. This allows the service to monitor data files in addition to code files. Any feedback on whether this is useful to others? <!-- POLAR PLEDGE BADGE START --> > [!IMPORTANT] > - We're using [Polar.sh](https://polar.sh/encode) so you can upvote and help fund this issue. > - We receive the funding once the issue is completed & confirmed by you. > - Thank you in advance for helping prioritize & fund our backlog. <a href="https://polar.sh/encode/uvicorn/issues/528"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/encode/uvicorn/issues/528/pledge.svg?darkmode=1"> <img alt="Fund with Polar" src="https://polar.sh/api/github/encode/uvicorn/issues/528/pledge.svg"> </picture> </a> <!-- POLAR PLEDGE BADGE END --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `uvicorn/supervisors/statreload.py` Content: ``` 1 import logging 2 import os 3 from pathlib import Path 4 5 from uvicorn.supervisors.basereload import BaseReload 6 7 logger = logging.getLogger("uvicorn.error") 8 9 10 class StatReload(BaseReload): 11 def __init__(self, config, target, sockets): 12 super().__init__(config, target, sockets) 13 self.reloader_name = "statreload" 14 self.mtimes = {} 15 16 def should_restart(self): 17 for filename in self.iter_py_files(): 18 try: 19 mtime = os.path.getmtime(filename) 20 except OSError: # pragma: nocover 21 continue 22 23 old_time = self.mtimes.get(filename) 24 if old_time is None: 25 self.mtimes[filename] = mtime 26 continue 27 elif mtime > old_time: 28 display_path = os.path.normpath(filename) 29 if Path.cwd() in Path(filename).parents: 30 display_path = os.path.normpath(os.path.relpath(filename)) 31 message = "Detected file change in '%s'. Reloading..." 32 logger.warning(message, display_path) 33 return True 34 return False 35 36 def iter_py_files(self): 37 for reload_dir in self.config.reload_dirs: 38 for subdir, dirs, files in os.walk(reload_dir): 39 for file in files: 40 if file.endswith(".py"): 41 yield subdir + os.sep + file 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/uvicorn/supervisors/statreload.py b/uvicorn/supervisors/statreload.py --- a/uvicorn/supervisors/statreload.py +++ b/uvicorn/supervisors/statreload.py @@ -14,7 +14,7 @@ self.mtimes = {} def should_restart(self): - for filename in self.iter_py_files(): + for filename in self.iter_files(): try: mtime = os.path.getmtime(filename) except OSError: # pragma: nocover @@ -33,9 +33,9 @@ return True return False - def iter_py_files(self): + def iter_files(self): for reload_dir in self.config.reload_dirs: for subdir, dirs, files in os.walk(reload_dir): for file in files: - if file.endswith(".py"): + if not file.startswith("."): yield subdir + os.sep + file
{"golden_diff": "diff --git a/uvicorn/supervisors/statreload.py b/uvicorn/supervisors/statreload.py\n--- a/uvicorn/supervisors/statreload.py\n+++ b/uvicorn/supervisors/statreload.py\n@@ -14,7 +14,7 @@\n self.mtimes = {}\n \n def should_restart(self):\n- for filename in self.iter_py_files():\n+ for filename in self.iter_files():\n try:\n mtime = os.path.getmtime(filename)\n except OSError: # pragma: nocover\n@@ -33,9 +33,9 @@\n return True\n return False\n \n- def iter_py_files(self):\n+ def iter_files(self):\n for reload_dir in self.config.reload_dirs:\n for subdir, dirs, files in os.walk(reload_dir):\n for file in files:\n- if file.endswith(\".py\"):\n+ if not file.startswith(\".\"):\n yield subdir + os.sep + file\n", "issue": "Add support for --reload to monitor additional file types.\nThe \"reload\" process currently only monitors \".py\" files in various directories. I have a changes that will pass in a list of additional \"reload_suffixes\" that the process will monitor. This allows the service to monitor data files in addition to code files.\r\n\r\nAny feedback on whether this is useful to others?\n\n<!-- POLAR PLEDGE BADGE START -->\n> [!IMPORTANT]\n> - We're using [Polar.sh](https://polar.sh/encode) so you can upvote and help fund this issue.\n> - We receive the funding once the issue is completed & confirmed by you.\n> - Thank you in advance for helping prioritize & fund our backlog.\n\n<a href=\"https://polar.sh/encode/uvicorn/issues/528\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/encode/uvicorn/issues/528/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/encode/uvicorn/issues/528/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "import logging\nimport os\nfrom pathlib import Path\n\nfrom uvicorn.supervisors.basereload import BaseReload\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\nclass StatReload(BaseReload):\n def __init__(self, config, target, sockets):\n super().__init__(config, target, sockets)\n self.reloader_name = \"statreload\"\n self.mtimes = {}\n\n def should_restart(self):\n for filename in self.iter_py_files():\n try:\n mtime = os.path.getmtime(filename)\n except OSError: # pragma: nocover\n continue\n\n old_time = self.mtimes.get(filename)\n if old_time is None:\n self.mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n display_path = os.path.normpath(filename)\n if Path.cwd() in Path(filename).parents:\n display_path = os.path.normpath(os.path.relpath(filename))\n message = \"Detected file change in '%s'. Reloading...\"\n logger.warning(message, display_path)\n return True\n return False\n\n def iter_py_files(self):\n for reload_dir in self.config.reload_dirs:\n for subdir, dirs, files in os.walk(reload_dir):\n for file in files:\n if file.endswith(\".py\"):\n yield subdir + os.sep + file\n", "path": "uvicorn/supervisors/statreload.py"}], "after_files": [{"content": "import logging\nimport os\nfrom pathlib import Path\n\nfrom uvicorn.supervisors.basereload import BaseReload\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\nclass StatReload(BaseReload):\n def __init__(self, config, target, sockets):\n super().__init__(config, target, sockets)\n self.reloader_name = \"statreload\"\n self.mtimes = {}\n\n def should_restart(self):\n for filename in self.iter_files():\n try:\n mtime = os.path.getmtime(filename)\n except OSError: # pragma: nocover\n continue\n\n old_time = self.mtimes.get(filename)\n if old_time is None:\n self.mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n display_path = os.path.normpath(filename)\n if Path.cwd() in Path(filename).parents:\n display_path = os.path.normpath(os.path.relpath(filename))\n message = \"Detected file change in '%s'. Reloading...\"\n logger.warning(message, display_path)\n return True\n return False\n\n def iter_files(self):\n for reload_dir in self.config.reload_dirs:\n for subdir, dirs, files in os.walk(reload_dir):\n for file in files:\n if not file.startswith(\".\"):\n yield subdir + os.sep + file\n", "path": "uvicorn/supervisors/statreload.py"}]}
887
210
gh_patches_debug_12915
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-792
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add support for PyMySQL v1.x series pymysql instrumentation does not work with 1.x series as it lists <1.0 as the compatible versions. I've tested the instrumentatoin with a simple application that uses PyMySQL 1.0 and it seems to work. We should update the instrumentation to support <2.0. original issue: https://github.com/open-telemetry/opentelemetry-python/issues/2259 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 _instruments = ("PyMySQL ~= 0.10.1",) 17 ``` Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES. 16 # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE. 17 18 libraries = { 19 "aiohttp": { 20 "library": "aiohttp ~= 3.0", 21 "instrumentation": "opentelemetry-instrumentation-aiohttp-client==0.25b2", 22 }, 23 "aiopg": { 24 "library": "aiopg >= 0.13.0, < 1.3.0", 25 "instrumentation": "opentelemetry-instrumentation-aiopg==0.25b2", 26 }, 27 "asgiref": { 28 "library": "asgiref ~= 3.0", 29 "instrumentation": "opentelemetry-instrumentation-asgi==0.25b2", 30 }, 31 "asyncpg": { 32 "library": "asyncpg >= 0.12.0", 33 "instrumentation": "opentelemetry-instrumentation-asyncpg==0.25b2", 34 }, 35 "boto": { 36 "library": "boto~=2.0", 37 "instrumentation": "opentelemetry-instrumentation-boto==0.25b2", 38 }, 39 "botocore": { 40 "library": "botocore ~= 1.0", 41 "instrumentation": "opentelemetry-instrumentation-botocore==0.25b2", 42 }, 43 "celery": { 44 "library": "celery >= 4.0, < 6.0", 45 "instrumentation": "opentelemetry-instrumentation-celery==0.25b2", 46 }, 47 "django": { 48 "library": "django >= 1.10", 49 "instrumentation": "opentelemetry-instrumentation-django==0.25b2", 50 }, 51 "elasticsearch": { 52 "library": "elasticsearch >= 2.0", 53 "instrumentation": "opentelemetry-instrumentation-elasticsearch==0.25b2", 54 }, 55 "falcon": { 56 "library": "falcon >= 2.0.0, < 4.0.0", 57 "instrumentation": "opentelemetry-instrumentation-falcon==0.25b2", 58 }, 59 "fastapi": { 60 "library": "fastapi ~= 0.58", 61 "instrumentation": "opentelemetry-instrumentation-fastapi==0.25b2", 62 }, 63 "flask": { 64 "library": "flask >= 1.0, < 3.0", 65 "instrumentation": "opentelemetry-instrumentation-flask==0.25b2", 66 }, 67 "grpcio": { 68 "library": "grpcio ~= 1.27", 69 "instrumentation": "opentelemetry-instrumentation-grpc==0.25b2", 70 }, 71 "httpx": { 72 "library": "httpx >= 0.18.0, < 0.19.0", 73 "instrumentation": "opentelemetry-instrumentation-httpx==0.25b2", 74 }, 75 "jinja2": { 76 "library": "jinja2 >= 2.7, < 4.0", 77 "instrumentation": "opentelemetry-instrumentation-jinja2==0.25b2", 78 }, 79 "mysql-connector-python": { 80 "library": "mysql-connector-python ~= 8.0", 81 "instrumentation": "opentelemetry-instrumentation-mysql==0.25b2", 82 }, 83 "pika": { 84 "library": "pika >= 1.1.0", 85 "instrumentation": "opentelemetry-instrumentation-pika==0.25b2", 86 }, 87 "psycopg2": { 88 "library": "psycopg2 >= 2.7.3.1", 89 "instrumentation": "opentelemetry-instrumentation-psycopg2==0.25b2", 90 }, 91 "pymemcache": { 92 "library": "pymemcache ~= 1.3", 93 "instrumentation": "opentelemetry-instrumentation-pymemcache==0.25b2", 94 }, 95 "pymongo": { 96 "library": "pymongo ~= 3.1", 97 "instrumentation": "opentelemetry-instrumentation-pymongo==0.25b2", 98 }, 99 "PyMySQL": { 100 "library": "PyMySQL ~= 0.10.1", 101 "instrumentation": "opentelemetry-instrumentation-pymysql==0.25b2", 102 }, 103 "pyramid": { 104 "library": "pyramid >= 1.7", 105 "instrumentation": "opentelemetry-instrumentation-pyramid==0.25b2", 106 }, 107 "redis": { 108 "library": "redis >= 2.6", 109 "instrumentation": "opentelemetry-instrumentation-redis==0.25b2", 110 }, 111 "requests": { 112 "library": "requests ~= 2.0", 113 "instrumentation": "opentelemetry-instrumentation-requests==0.25b2", 114 }, 115 "scikit-learn": { 116 "library": "scikit-learn ~= 0.24.0", 117 "instrumentation": "opentelemetry-instrumentation-sklearn==0.25b2", 118 }, 119 "sqlalchemy": { 120 "library": "sqlalchemy", 121 "instrumentation": "opentelemetry-instrumentation-sqlalchemy==0.25b2", 122 }, 123 "starlette": { 124 "library": "starlette ~= 0.13.0", 125 "instrumentation": "opentelemetry-instrumentation-starlette==0.25b2", 126 }, 127 "tornado": { 128 "library": "tornado >= 6.0", 129 "instrumentation": "opentelemetry-instrumentation-tornado==0.25b2", 130 }, 131 "urllib3": { 132 "library": "urllib3 >= 1.0.0, < 2.0.0", 133 "instrumentation": "opentelemetry-instrumentation-urllib3==0.25b2", 134 }, 135 } 136 default_instrumentations = [ 137 "opentelemetry-instrumentation-aws-lambda==0.25b2", 138 "opentelemetry-instrumentation-dbapi==0.25b2", 139 "opentelemetry-instrumentation-logging==0.25b2", 140 "opentelemetry-instrumentation-sqlite3==0.25b2", 141 "opentelemetry-instrumentation-urllib==0.25b2", 142 "opentelemetry-instrumentation-wsgi==0.25b2", 143 ] 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py b/instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py --- a/instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py +++ b/instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py @@ -13,4 +13,4 @@ # limitations under the License. -_instruments = ("PyMySQL ~= 0.10.1",) +_instruments = ("PyMySQL < 2",) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -97,7 +97,7 @@ "instrumentation": "opentelemetry-instrumentation-pymongo==0.25b2", }, "PyMySQL": { - "library": "PyMySQL ~= 0.10.1", + "library": "PyMySQL < 2", "instrumentation": "opentelemetry-instrumentation-pymysql==0.25b2", }, "pyramid": {
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py b/instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py\n--- a/instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py\n+++ b/instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py\n@@ -13,4 +13,4 @@\n # limitations under the License.\n \n \n-_instruments = (\"PyMySQL ~= 0.10.1\",)\n+_instruments = (\"PyMySQL < 2\",)\ndiff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n@@ -97,7 +97,7 @@\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.25b2\",\n },\n \"PyMySQL\": {\n- \"library\": \"PyMySQL ~= 0.10.1\",\n+ \"library\": \"PyMySQL < 2\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.25b2\",\n },\n \"pyramid\": {\n", "issue": "Add support for PyMySQL v1.x series\npymysql instrumentation does not work with 1.x series as it lists <1.0 as the compatible versions. I've tested the instrumentatoin with a simple application that uses PyMySQL 1.0 and it seems to work. We should update the instrumentation to support <2.0.\r\n\r\noriginal issue: https://github.com/open-telemetry/opentelemetry-python/issues/2259\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n_instruments = (\"PyMySQL ~= 0.10.1\",)\n", "path": "instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.\n# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.\n\nlibraries = {\n \"aiohttp\": {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-client==0.25b2\",\n },\n \"aiopg\": {\n \"library\": \"aiopg >= 0.13.0, < 1.3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiopg==0.25b2\",\n },\n \"asgiref\": {\n \"library\": \"asgiref ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asgi==0.25b2\",\n },\n \"asyncpg\": {\n \"library\": \"asyncpg >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asyncpg==0.25b2\",\n },\n \"boto\": {\n \"library\": \"boto~=2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto==0.25b2\",\n },\n \"botocore\": {\n \"library\": \"botocore ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-botocore==0.25b2\",\n },\n \"celery\": {\n \"library\": \"celery >= 4.0, < 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-celery==0.25b2\",\n },\n \"django\": {\n \"library\": \"django >= 1.10\",\n \"instrumentation\": \"opentelemetry-instrumentation-django==0.25b2\",\n },\n \"elasticsearch\": {\n \"library\": \"elasticsearch >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-elasticsearch==0.25b2\",\n },\n \"falcon\": {\n \"library\": \"falcon >= 2.0.0, < 4.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-falcon==0.25b2\",\n },\n \"fastapi\": {\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.25b2\",\n },\n \"flask\": {\n \"library\": \"flask >= 1.0, < 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.25b2\",\n },\n \"grpcio\": {\n \"library\": \"grpcio ~= 1.27\",\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.25b2\",\n },\n \"httpx\": {\n \"library\": \"httpx >= 0.18.0, < 0.19.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.25b2\",\n },\n \"jinja2\": {\n \"library\": \"jinja2 >= 2.7, < 4.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-jinja2==0.25b2\",\n },\n \"mysql-connector-python\": {\n \"library\": \"mysql-connector-python ~= 8.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysql==0.25b2\",\n },\n \"pika\": {\n \"library\": \"pika >= 1.1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pika==0.25b2\",\n },\n \"psycopg2\": {\n \"library\": \"psycopg2 >= 2.7.3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.25b2\",\n },\n \"pymemcache\": {\n \"library\": \"pymemcache ~= 1.3\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.25b2\",\n },\n \"pymongo\": {\n \"library\": \"pymongo ~= 3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.25b2\",\n },\n \"PyMySQL\": {\n \"library\": \"PyMySQL ~= 0.10.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.25b2\",\n },\n \"pyramid\": {\n \"library\": \"pyramid >= 1.7\",\n \"instrumentation\": \"opentelemetry-instrumentation-pyramid==0.25b2\",\n },\n \"redis\": {\n \"library\": \"redis >= 2.6\",\n \"instrumentation\": \"opentelemetry-instrumentation-redis==0.25b2\",\n },\n \"requests\": {\n \"library\": \"requests ~= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-requests==0.25b2\",\n },\n \"scikit-learn\": {\n \"library\": \"scikit-learn ~= 0.24.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-sklearn==0.25b2\",\n },\n \"sqlalchemy\": {\n \"library\": \"sqlalchemy\",\n \"instrumentation\": \"opentelemetry-instrumentation-sqlalchemy==0.25b2\",\n },\n \"starlette\": {\n \"library\": \"starlette ~= 0.13.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-starlette==0.25b2\",\n },\n \"tornado\": {\n \"library\": \"tornado >= 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-tornado==0.25b2\",\n },\n \"urllib3\": {\n \"library\": \"urllib3 >= 1.0.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-urllib3==0.25b2\",\n },\n}\ndefault_instrumentations = [\n \"opentelemetry-instrumentation-aws-lambda==0.25b2\",\n \"opentelemetry-instrumentation-dbapi==0.25b2\",\n \"opentelemetry-instrumentation-logging==0.25b2\",\n \"opentelemetry-instrumentation-sqlite3==0.25b2\",\n \"opentelemetry-instrumentation-urllib==0.25b2\",\n \"opentelemetry-instrumentation-wsgi==0.25b2\",\n]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n_instruments = (\"PyMySQL < 2\",)\n", "path": "instrumentation/opentelemetry-instrumentation-pymysql/src/opentelemetry/instrumentation/pymysql/package.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.\n# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.\n\nlibraries = {\n \"aiohttp\": {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-client==0.25b2\",\n },\n \"aiopg\": {\n \"library\": \"aiopg >= 0.13.0, < 1.3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiopg==0.25b2\",\n },\n \"asgiref\": {\n \"library\": \"asgiref ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asgi==0.25b2\",\n },\n \"asyncpg\": {\n \"library\": \"asyncpg >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asyncpg==0.25b2\",\n },\n \"boto\": {\n \"library\": \"boto~=2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto==0.25b2\",\n },\n \"botocore\": {\n \"library\": \"botocore ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-botocore==0.25b2\",\n },\n \"celery\": {\n \"library\": \"celery >= 4.0, < 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-celery==0.25b2\",\n },\n \"django\": {\n \"library\": \"django >= 1.10\",\n \"instrumentation\": \"opentelemetry-instrumentation-django==0.25b2\",\n },\n \"elasticsearch\": {\n \"library\": \"elasticsearch >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-elasticsearch==0.25b2\",\n },\n \"falcon\": {\n \"library\": \"falcon >= 2.0.0, < 4.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-falcon==0.25b2\",\n },\n \"fastapi\": {\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.25b2\",\n },\n \"flask\": {\n \"library\": \"flask >= 1.0, < 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.25b2\",\n },\n \"grpcio\": {\n \"library\": \"grpcio ~= 1.27\",\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.25b2\",\n },\n \"httpx\": {\n \"library\": \"httpx >= 0.18.0, < 0.19.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.25b2\",\n },\n \"jinja2\": {\n \"library\": \"jinja2 >= 2.7, < 4.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-jinja2==0.25b2\",\n },\n \"mysql-connector-python\": {\n \"library\": \"mysql-connector-python ~= 8.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysql==0.25b2\",\n },\n \"pika\": {\n \"library\": \"pika >= 1.1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pika==0.25b2\",\n },\n \"psycopg2\": {\n \"library\": \"psycopg2 >= 2.7.3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.25b2\",\n },\n \"pymemcache\": {\n \"library\": \"pymemcache ~= 1.3\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.25b2\",\n },\n \"pymongo\": {\n \"library\": \"pymongo ~= 3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.25b2\",\n },\n \"PyMySQL\": {\n \"library\": \"PyMySQL < 2\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.25b2\",\n },\n \"pyramid\": {\n \"library\": \"pyramid >= 1.7\",\n \"instrumentation\": \"opentelemetry-instrumentation-pyramid==0.25b2\",\n },\n \"redis\": {\n \"library\": \"redis >= 2.6\",\n \"instrumentation\": \"opentelemetry-instrumentation-redis==0.25b2\",\n },\n \"requests\": {\n \"library\": \"requests ~= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-requests==0.25b2\",\n },\n \"scikit-learn\": {\n \"library\": \"scikit-learn ~= 0.24.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-sklearn==0.25b2\",\n },\n \"sqlalchemy\": {\n \"library\": \"sqlalchemy\",\n \"instrumentation\": \"opentelemetry-instrumentation-sqlalchemy==0.25b2\",\n },\n \"starlette\": {\n \"library\": \"starlette ~= 0.13.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-starlette==0.25b2\",\n },\n \"tornado\": {\n \"library\": \"tornado >= 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-tornado==0.25b2\",\n },\n \"urllib3\": {\n \"library\": \"urllib3 >= 1.0.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-urllib3==0.25b2\",\n },\n}\ndefault_instrumentations = [\n \"opentelemetry-instrumentation-aws-lambda==0.25b2\",\n \"opentelemetry-instrumentation-dbapi==0.25b2\",\n \"opentelemetry-instrumentation-logging==0.25b2\",\n \"opentelemetry-instrumentation-sqlite3==0.25b2\",\n \"opentelemetry-instrumentation-urllib==0.25b2\",\n \"opentelemetry-instrumentation-wsgi==0.25b2\",\n]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py"}]}
2,547
335
gh_patches_debug_14284
rasdani/github-patches
git_diff
inventree__InvenTree-4721
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "Install Plugin" via GUI fails to add plugin to plugins.txt correctly for first plugin ### Please verify that this bug has NOT been raised before. - [X] I checked and didn't find a similar issue ### Describe the bug* When trying to install a plugin via the GUI function "Install Plugin" without any prior installed plugins, the plugin is appended to the first line of the plugins.txt. As the first line is already populated with `# InvenTree Plugins (uses PIP framework to install)` the result is e.g. `# InvenTree Plugins (uses PIP framework to install)inventree-zebra-plugin@git+https://github.com/SergeoLacruz/inventree-zebra-plugin # Installed 2023-04-28 06:55:40.146423+00:00 by xyz` thus ignoring the plugin in a docker env restart because of the `#`. Subsequently installed plugins are correctly placed in an empty line of the plugins.txt. ### Steps to Reproduce 1. Have a fresh install without installed plugins. 2. Go to "Settings" > "Plugin Settings" > "Plugins" > "Install Plugin" 3. Fill "Package Name" with `inventree-zebra-plugin` 4. Fill "Source URL" with `git+https://github.com/SergeoLacruz/inventree-zebra-plugin` 5. "Submit" 6. Check plugins.txt file ### Expected behaviour The installed plugin should be added below the comment such that it actually will be parsed by the plugin management. ### Deployment Method - [X] Docker - [ ] Bare metal ### Version Information # Version Information: InvenTree-Version: 0.11.0 Django Version: 3.2.18 Commit Hash: 593a716 Commit Date: 2023-04-18 Database: postgresql Debug-Mode: False Deployed using Docker: True Active plugins: [{'name': 'InvenTreeBarcode', 'slug': 'inventreebarcode', 'version': '2.0.0'}, {'name': 'InvenTreeCoreNotificationsPlugin', 'slug': 'inventreecorenotificationsplugin', 'version': '1.0.0'}] ### Relevant log output _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `InvenTree/plugin/serializers.py` Content: ``` 1 """JSON serializers for plugin app.""" 2 3 import subprocess 4 5 from django.conf import settings 6 from django.core.exceptions import ValidationError 7 from django.utils import timezone 8 from django.utils.translation import gettext_lazy as _ 9 10 from rest_framework import serializers 11 12 from common.serializers import GenericReferencedSettingSerializer 13 from InvenTree.tasks import check_for_migrations, offload_task 14 from plugin.models import NotificationUserSetting, PluginConfig, PluginSetting 15 16 17 class MetadataSerializer(serializers.ModelSerializer): 18 """Serializer class for model metadata API access.""" 19 20 metadata = serializers.JSONField(required=True) 21 22 class Meta: 23 """Metaclass options.""" 24 25 fields = [ 26 'metadata', 27 ] 28 29 def __init__(self, model_type, *args, **kwargs): 30 """Initialize the metadata serializer with information on the model type""" 31 self.Meta.model = model_type 32 super().__init__(*args, **kwargs) 33 34 def update(self, instance, data): 35 """Perform update on the metadata field: 36 37 - If this is a partial (PATCH) update, try to 'merge' data in 38 - Else, if it is a PUT update, overwrite any existing metadata 39 """ 40 if self.partial: 41 # Default behaviour is to "merge" new data in 42 metadata = instance.metadata.copy() if instance.metadata else {} 43 metadata.update(data['metadata']) 44 data['metadata'] = metadata 45 46 return super().update(instance, data) 47 48 49 class PluginConfigSerializer(serializers.ModelSerializer): 50 """Serializer for a PluginConfig.""" 51 52 class Meta: 53 """Meta for serializer.""" 54 model = PluginConfig 55 fields = [ 56 'key', 57 'name', 58 'active', 59 'meta', 60 'mixins', 61 ] 62 63 meta = serializers.DictField(read_only=True) 64 mixins = serializers.DictField(read_only=True) 65 66 67 class PluginConfigInstallSerializer(serializers.Serializer): 68 """Serializer for installing a new plugin.""" 69 70 class Meta: 71 """Meta for serializer.""" 72 fields = [ 73 'url', 74 'packagename', 75 'confirm', 76 ] 77 78 url = serializers.CharField( 79 required=False, 80 allow_blank=True, 81 label=_('Source URL'), 82 help_text=_('Source for the package - this can be a custom registry or a VCS path') 83 ) 84 packagename = serializers.CharField( 85 required=False, 86 allow_blank=True, 87 label=_('Package Name'), 88 help_text=_('Name for the Plugin Package - can also contain a version indicator'), 89 ) 90 confirm = serializers.BooleanField( 91 label=_('Confirm plugin installation'), 92 help_text=_('This will install this plugin now into the current instance. The instance will go into maintenance.') 93 ) 94 95 def validate(self, data): 96 """Validate inputs. 97 98 Make sure both confirm and url are provided. 99 """ 100 super().validate(data) 101 102 # check the base requirements are met 103 if not data.get('confirm'): 104 raise ValidationError({'confirm': _('Installation not confirmed')}) 105 if (not data.get('url')) and (not data.get('packagename')): 106 msg = _('Either packagename of URL must be provided') 107 raise ValidationError({'url': msg, 'packagename': msg}) 108 109 return data 110 111 def save(self): 112 """Install a plugin from a package registry and set operational results as instance data.""" 113 data = self.validated_data 114 115 packagename = data.get('packagename', '') 116 url = data.get('url', '') 117 118 # build up the command 119 install_name = [] 120 121 if url: 122 # use custom registration / VCS 123 if True in [identifier in url for identifier in ['git+https', 'hg+https', 'svn+svn', ]]: 124 # using a VCS provider 125 if packagename: 126 install_name.append(f'{packagename}@{url}') 127 else: 128 install_name.append(url) 129 else: # pragma: no cover 130 # using a custom package repositories 131 # This is only for pypa compliant directory services (all current are tested above) 132 # and not covered by tests. 133 install_name.append('-i') 134 install_name.append(url) 135 install_name.append(packagename) 136 137 elif packagename: 138 # use pypi 139 install_name.append(packagename) 140 141 command = 'python -m pip install'.split() 142 command.extend(install_name) 143 ret = {'command': ' '.join(command)} 144 success = False 145 # execute pypi 146 try: 147 result = subprocess.check_output(command, cwd=settings.BASE_DIR.parent) 148 ret['result'] = str(result, 'utf-8') 149 ret['success'] = True 150 success = True 151 except subprocess.CalledProcessError as error: # pragma: no cover 152 ret['result'] = str(error.output, 'utf-8') 153 ret['error'] = True 154 155 # save plugin to plugin_file if installed successfull 156 if success: 157 with open(settings.PLUGIN_FILE, "a") as plugin_file: 158 plugin_file.write(f'{" ".join(install_name)} # Installed {timezone.now()} by {str(self.context["request"].user)}\n') 159 160 # Check for migrations 161 offload_task(check_for_migrations, worker=True) 162 163 return ret 164 165 166 class PluginConfigEmptySerializer(serializers.Serializer): 167 """Serializer for a PluginConfig.""" 168 169 170 class PluginSettingSerializer(GenericReferencedSettingSerializer): 171 """Serializer for the PluginSetting model.""" 172 173 MODEL = PluginSetting 174 EXTRA_FIELDS = [ 175 'plugin', 176 ] 177 178 plugin = serializers.CharField(source='plugin.key', read_only=True) 179 180 181 class NotificationUserSettingSerializer(GenericReferencedSettingSerializer): 182 """Serializer for the PluginSetting model.""" 183 184 MODEL = NotificationUserSetting 185 EXTRA_FIELDS = ['method', ] 186 187 method = serializers.CharField(read_only=True) 188 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/InvenTree/plugin/serializers.py b/InvenTree/plugin/serializers.py --- a/InvenTree/plugin/serializers.py +++ b/InvenTree/plugin/serializers.py @@ -154,7 +154,13 @@ # save plugin to plugin_file if installed successfull if success: + # Read content of plugin file + plg_lines = open(settings.PLUGIN_FILE).readlines() with open(settings.PLUGIN_FILE, "a") as plugin_file: + # Check if last line has a newline + if plg_lines[-1][-1:] != '\n': + plugin_file.write('\n') + # Write new plugin to file plugin_file.write(f'{" ".join(install_name)} # Installed {timezone.now()} by {str(self.context["request"].user)}\n') # Check for migrations
{"golden_diff": "diff --git a/InvenTree/plugin/serializers.py b/InvenTree/plugin/serializers.py\n--- a/InvenTree/plugin/serializers.py\n+++ b/InvenTree/plugin/serializers.py\n@@ -154,7 +154,13 @@\n \n # save plugin to plugin_file if installed successfull\n if success:\n+ # Read content of plugin file\n+ plg_lines = open(settings.PLUGIN_FILE).readlines()\n with open(settings.PLUGIN_FILE, \"a\") as plugin_file:\n+ # Check if last line has a newline\n+ if plg_lines[-1][-1:] != '\\n':\n+ plugin_file.write('\\n')\n+ # Write new plugin to file\n plugin_file.write(f'{\" \".join(install_name)} # Installed {timezone.now()} by {str(self.context[\"request\"].user)}\\n')\n \n # Check for migrations\n", "issue": "\"Install Plugin\" via GUI fails to add plugin to plugins.txt correctly for first plugin\n### Please verify that this bug has NOT been raised before.\n\n- [X] I checked and didn't find a similar issue\n\n### Describe the bug*\n\nWhen trying to install a plugin via the GUI function \"Install Plugin\" without any prior installed plugins, the plugin is appended to the first line of the plugins.txt.\r\n\r\nAs the first line is already populated with `# InvenTree Plugins (uses PIP framework to install)` the result is e.g. `# InvenTree Plugins (uses PIP framework to install)inventree-zebra-plugin@git+https://github.com/SergeoLacruz/inventree-zebra-plugin # Installed 2023-04-28 06:55:40.146423+00:00 by xyz` thus ignoring the plugin in a docker env restart because of the `#`.\r\n\r\nSubsequently installed plugins are correctly placed in an empty line of the plugins.txt.\n\n### Steps to Reproduce\n\n1. Have a fresh install without installed plugins.\r\n2. Go to \"Settings\" > \"Plugin Settings\" > \"Plugins\" > \"Install Plugin\"\r\n3. Fill \"Package Name\" with `inventree-zebra-plugin`\r\n4. Fill \"Source URL\" with `git+https://github.com/SergeoLacruz/inventree-zebra-plugin`\r\n5. \"Submit\"\r\n6. Check plugins.txt file\n\n### Expected behaviour\n\nThe installed plugin should be added below the comment such that it actually will be parsed by the plugin management.\n\n### Deployment Method\n\n- [X] Docker\n- [ ] Bare metal\n\n### Version Information\n\n# Version Information:\r\nInvenTree-Version: 0.11.0\r\nDjango Version: 3.2.18\r\nCommit Hash: 593a716\r\nCommit Date: 2023-04-18\r\nDatabase: postgresql\r\nDebug-Mode: False\r\nDeployed using Docker: True\r\nActive plugins: [{'name': 'InvenTreeBarcode', 'slug': 'inventreebarcode', 'version': '2.0.0'}, {'name': 'InvenTreeCoreNotificationsPlugin', 'slug': 'inventreecorenotificationsplugin', 'version': '1.0.0'}]\r\n\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "\"\"\"JSON serializers for plugin app.\"\"\"\n\nimport subprocess\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rest_framework import serializers\n\nfrom common.serializers import GenericReferencedSettingSerializer\nfrom InvenTree.tasks import check_for_migrations, offload_task\nfrom plugin.models import NotificationUserSetting, PluginConfig, PluginSetting\n\n\nclass MetadataSerializer(serializers.ModelSerializer):\n \"\"\"Serializer class for model metadata API access.\"\"\"\n\n metadata = serializers.JSONField(required=True)\n\n class Meta:\n \"\"\"Metaclass options.\"\"\"\n\n fields = [\n 'metadata',\n ]\n\n def __init__(self, model_type, *args, **kwargs):\n \"\"\"Initialize the metadata serializer with information on the model type\"\"\"\n self.Meta.model = model_type\n super().__init__(*args, **kwargs)\n\n def update(self, instance, data):\n \"\"\"Perform update on the metadata field:\n\n - If this is a partial (PATCH) update, try to 'merge' data in\n - Else, if it is a PUT update, overwrite any existing metadata\n \"\"\"\n if self.partial:\n # Default behaviour is to \"merge\" new data in\n metadata = instance.metadata.copy() if instance.metadata else {}\n metadata.update(data['metadata'])\n data['metadata'] = metadata\n\n return super().update(instance, data)\n\n\nclass PluginConfigSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for a PluginConfig.\"\"\"\n\n class Meta:\n \"\"\"Meta for serializer.\"\"\"\n model = PluginConfig\n fields = [\n 'key',\n 'name',\n 'active',\n 'meta',\n 'mixins',\n ]\n\n meta = serializers.DictField(read_only=True)\n mixins = serializers.DictField(read_only=True)\n\n\nclass PluginConfigInstallSerializer(serializers.Serializer):\n \"\"\"Serializer for installing a new plugin.\"\"\"\n\n class Meta:\n \"\"\"Meta for serializer.\"\"\"\n fields = [\n 'url',\n 'packagename',\n 'confirm',\n ]\n\n url = serializers.CharField(\n required=False,\n allow_blank=True,\n label=_('Source URL'),\n help_text=_('Source for the package - this can be a custom registry or a VCS path')\n )\n packagename = serializers.CharField(\n required=False,\n allow_blank=True,\n label=_('Package Name'),\n help_text=_('Name for the Plugin Package - can also contain a version indicator'),\n )\n confirm = serializers.BooleanField(\n label=_('Confirm plugin installation'),\n help_text=_('This will install this plugin now into the current instance. The instance will go into maintenance.')\n )\n\n def validate(self, data):\n \"\"\"Validate inputs.\n\n Make sure both confirm and url are provided.\n \"\"\"\n super().validate(data)\n\n # check the base requirements are met\n if not data.get('confirm'):\n raise ValidationError({'confirm': _('Installation not confirmed')})\n if (not data.get('url')) and (not data.get('packagename')):\n msg = _('Either packagename of URL must be provided')\n raise ValidationError({'url': msg, 'packagename': msg})\n\n return data\n\n def save(self):\n \"\"\"Install a plugin from a package registry and set operational results as instance data.\"\"\"\n data = self.validated_data\n\n packagename = data.get('packagename', '')\n url = data.get('url', '')\n\n # build up the command\n install_name = []\n\n if url:\n # use custom registration / VCS\n if True in [identifier in url for identifier in ['git+https', 'hg+https', 'svn+svn', ]]:\n # using a VCS provider\n if packagename:\n install_name.append(f'{packagename}@{url}')\n else:\n install_name.append(url)\n else: # pragma: no cover\n # using a custom package repositories\n # This is only for pypa compliant directory services (all current are tested above)\n # and not covered by tests.\n install_name.append('-i')\n install_name.append(url)\n install_name.append(packagename)\n\n elif packagename:\n # use pypi\n install_name.append(packagename)\n\n command = 'python -m pip install'.split()\n command.extend(install_name)\n ret = {'command': ' '.join(command)}\n success = False\n # execute pypi\n try:\n result = subprocess.check_output(command, cwd=settings.BASE_DIR.parent)\n ret['result'] = str(result, 'utf-8')\n ret['success'] = True\n success = True\n except subprocess.CalledProcessError as error: # pragma: no cover\n ret['result'] = str(error.output, 'utf-8')\n ret['error'] = True\n\n # save plugin to plugin_file if installed successfull\n if success:\n with open(settings.PLUGIN_FILE, \"a\") as plugin_file:\n plugin_file.write(f'{\" \".join(install_name)} # Installed {timezone.now()} by {str(self.context[\"request\"].user)}\\n')\n\n # Check for migrations\n offload_task(check_for_migrations, worker=True)\n\n return ret\n\n\nclass PluginConfigEmptySerializer(serializers.Serializer):\n \"\"\"Serializer for a PluginConfig.\"\"\"\n\n\nclass PluginSettingSerializer(GenericReferencedSettingSerializer):\n \"\"\"Serializer for the PluginSetting model.\"\"\"\n\n MODEL = PluginSetting\n EXTRA_FIELDS = [\n 'plugin',\n ]\n\n plugin = serializers.CharField(source='plugin.key', read_only=True)\n\n\nclass NotificationUserSettingSerializer(GenericReferencedSettingSerializer):\n \"\"\"Serializer for the PluginSetting model.\"\"\"\n\n MODEL = NotificationUserSetting\n EXTRA_FIELDS = ['method', ]\n\n method = serializers.CharField(read_only=True)\n", "path": "InvenTree/plugin/serializers.py"}], "after_files": [{"content": "\"\"\"JSON serializers for plugin app.\"\"\"\n\nimport subprocess\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rest_framework import serializers\n\nfrom common.serializers import GenericReferencedSettingSerializer\nfrom InvenTree.tasks import check_for_migrations, offload_task\nfrom plugin.models import NotificationUserSetting, PluginConfig, PluginSetting\n\n\nclass MetadataSerializer(serializers.ModelSerializer):\n \"\"\"Serializer class for model metadata API access.\"\"\"\n\n metadata = serializers.JSONField(required=True)\n\n class Meta:\n \"\"\"Metaclass options.\"\"\"\n\n fields = [\n 'metadata',\n ]\n\n def __init__(self, model_type, *args, **kwargs):\n \"\"\"Initialize the metadata serializer with information on the model type\"\"\"\n self.Meta.model = model_type\n super().__init__(*args, **kwargs)\n\n def update(self, instance, data):\n \"\"\"Perform update on the metadata field:\n\n - If this is a partial (PATCH) update, try to 'merge' data in\n - Else, if it is a PUT update, overwrite any existing metadata\n \"\"\"\n if self.partial:\n # Default behaviour is to \"merge\" new data in\n metadata = instance.metadata.copy() if instance.metadata else {}\n metadata.update(data['metadata'])\n data['metadata'] = metadata\n\n return super().update(instance, data)\n\n\nclass PluginConfigSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for a PluginConfig.\"\"\"\n\n class Meta:\n \"\"\"Meta for serializer.\"\"\"\n model = PluginConfig\n fields = [\n 'key',\n 'name',\n 'active',\n 'meta',\n 'mixins',\n ]\n\n meta = serializers.DictField(read_only=True)\n mixins = serializers.DictField(read_only=True)\n\n\nclass PluginConfigInstallSerializer(serializers.Serializer):\n \"\"\"Serializer for installing a new plugin.\"\"\"\n\n class Meta:\n \"\"\"Meta for serializer.\"\"\"\n fields = [\n 'url',\n 'packagename',\n 'confirm',\n ]\n\n url = serializers.CharField(\n required=False,\n allow_blank=True,\n label=_('Source URL'),\n help_text=_('Source for the package - this can be a custom registry or a VCS path')\n )\n packagename = serializers.CharField(\n required=False,\n allow_blank=True,\n label=_('Package Name'),\n help_text=_('Name for the Plugin Package - can also contain a version indicator'),\n )\n confirm = serializers.BooleanField(\n label=_('Confirm plugin installation'),\n help_text=_('This will install this plugin now into the current instance. The instance will go into maintenance.')\n )\n\n def validate(self, data):\n \"\"\"Validate inputs.\n\n Make sure both confirm and url are provided.\n \"\"\"\n super().validate(data)\n\n # check the base requirements are met\n if not data.get('confirm'):\n raise ValidationError({'confirm': _('Installation not confirmed')})\n if (not data.get('url')) and (not data.get('packagename')):\n msg = _('Either packagename of URL must be provided')\n raise ValidationError({'url': msg, 'packagename': msg})\n\n return data\n\n def save(self):\n \"\"\"Install a plugin from a package registry and set operational results as instance data.\"\"\"\n data = self.validated_data\n\n packagename = data.get('packagename', '')\n url = data.get('url', '')\n\n # build up the command\n install_name = []\n\n if url:\n # use custom registration / VCS\n if True in [identifier in url for identifier in ['git+https', 'hg+https', 'svn+svn', ]]:\n # using a VCS provider\n if packagename:\n install_name.append(f'{packagename}@{url}')\n else:\n install_name.append(url)\n else: # pragma: no cover\n # using a custom package repositories\n # This is only for pypa compliant directory services (all current are tested above)\n # and not covered by tests.\n install_name.append('-i')\n install_name.append(url)\n install_name.append(packagename)\n\n elif packagename:\n # use pypi\n install_name.append(packagename)\n\n command = 'python -m pip install'.split()\n command.extend(install_name)\n ret = {'command': ' '.join(command)}\n success = False\n # execute pypi\n try:\n result = subprocess.check_output(command, cwd=settings.BASE_DIR.parent)\n ret['result'] = str(result, 'utf-8')\n ret['success'] = True\n success = True\n except subprocess.CalledProcessError as error: # pragma: no cover\n ret['result'] = str(error.output, 'utf-8')\n ret['error'] = True\n\n # save plugin to plugin_file if installed successfull\n if success:\n # Read content of plugin file\n plg_lines = open(settings.PLUGIN_FILE).readlines()\n with open(settings.PLUGIN_FILE, \"a\") as plugin_file:\n # Check if last line has a newline\n if plg_lines[-1][-1:] != '\\n':\n plugin_file.write('\\n')\n # Write new plugin to file\n plugin_file.write(f'{\" \".join(install_name)} # Installed {timezone.now()} by {str(self.context[\"request\"].user)}\\n')\n\n # Check for migrations\n offload_task(check_for_migrations, worker=True)\n\n return ret\n\n\nclass PluginConfigEmptySerializer(serializers.Serializer):\n \"\"\"Serializer for a PluginConfig.\"\"\"\n\n\nclass PluginSettingSerializer(GenericReferencedSettingSerializer):\n \"\"\"Serializer for the PluginSetting model.\"\"\"\n\n MODEL = PluginSetting\n EXTRA_FIELDS = [\n 'plugin',\n ]\n\n plugin = serializers.CharField(source='plugin.key', read_only=True)\n\n\nclass NotificationUserSettingSerializer(GenericReferencedSettingSerializer):\n \"\"\"Serializer for the PluginSetting model.\"\"\"\n\n MODEL = NotificationUserSetting\n EXTRA_FIELDS = ['method', ]\n\n method = serializers.CharField(read_only=True)\n", "path": "InvenTree/plugin/serializers.py"}]}
2,468
202
gh_patches_debug_31192
rasdani/github-patches
git_diff
meltano__meltano-6118
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Verify `meltano install` telemetry fires for malformed plugin entries In #6109 @pnadolny13 noticed that with the following entry no events where fired: ```yaml - name: tap-something-invalid variant: meltanolabs pip_url: git+https://github.com/foo/tap-something-invalid.git ``` I wasn't able to reproduce that at the time and did see two events (started/aborted) come across. We should double check though, its entirely possible that my local dev setup had a seperate issue that was triggering the `aborted` event. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/meltano/cli/install.py` Content: ``` 1 """CLI command `meltano install`.""" 2 from __future__ import annotations 3 4 import click 5 6 from meltano.core.legacy_tracking import LegacyTracker 7 from meltano.core.plugin import PluginType 8 from meltano.core.plugin.error import PluginNotFoundError 9 from meltano.core.project_plugins_service import ProjectPluginsService 10 from meltano.core.tracking import PluginsTrackingContext, Tracker 11 from meltano.core.tracking import cli as cli_tracking 12 from meltano.core.tracking import cli_context_builder 13 14 from . import cli 15 from .params import pass_project 16 from .utils import CliError, install_plugins 17 18 19 @cli.command(short_help="Install project dependencies.") 20 @click.argument( 21 "plugin_type", type=click.Choice(PluginType.cli_arguments()), required=False 22 ) 23 @click.argument("plugin_name", nargs=-1, required=False) 24 @click.option( 25 "--clean", 26 is_flag=True, 27 help="Completely reinstall a plugin rather than simply upgrading if necessary.", 28 ) 29 @click.option( 30 "--parallelism", 31 "-p", 32 type=click.INT, 33 default=None, 34 help="Limit the number of plugins to install in parallel. Defaults to the number of cores.", 35 ) 36 @pass_project(migrate=True) 37 def install(project, plugin_type, plugin_name, clean, parallelism): 38 """ 39 Install all the dependencies of your project based on the meltano.yml file. 40 41 \b\nRead more at https://www.meltano.com/docs/command-line-interface.html#install 42 """ 43 tracker = Tracker(project) 44 tracker.add_contexts( 45 cli_context_builder( 46 "install", 47 None, 48 clean=clean, 49 parallelism=parallelism, 50 ) 51 ) 52 53 plugins_service = ProjectPluginsService(project) 54 55 if plugin_type: 56 try: 57 plugin_type = PluginType.from_cli_argument(plugin_type) 58 except ValueError: 59 # if we fail because plugin_type is not valid we have no plugins to instrument 60 tracker.track_command_event(cli_tracking.STARTED) 61 tracker.track_command_event(cli_tracking.ABORTED) 62 raise 63 plugins = plugins_service.get_plugins_of_type(plugin_type) 64 if plugin_name: 65 plugins = [plugin for plugin in plugins if plugin.name in plugin_name] 66 else: 67 try: 68 plugins = list(plugins_service.plugins()) 69 except PluginNotFoundError: 70 tracker.track_command_event(cli_tracking.STARTED) 71 tracker.track_command_event(cli_tracking.ABORTED) 72 raise 73 74 click.echo(f"Installing {len(plugins)} plugins...") 75 tracker.add_contexts( 76 PluginsTrackingContext([(candidate, None) for candidate in plugins]) 77 ) 78 tracker.track_command_event(cli_tracking.STARTED) 79 80 success = install_plugins(project, plugins, parallelism=parallelism, clean=clean) 81 82 legacy_tracker = LegacyTracker(project) 83 legacy_tracker.track_meltano_install() 84 85 if not success: 86 tracker.track_command_event(cli_tracking.FAILED) 87 raise CliError("Failed to install plugin(s)") 88 tracker.track_command_event(cli_tracking.COMPLETED) 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/meltano/cli/install.py b/src/meltano/cli/install.py --- a/src/meltano/cli/install.py +++ b/src/meltano/cli/install.py @@ -5,7 +5,6 @@ from meltano.core.legacy_tracking import LegacyTracker from meltano.core.plugin import PluginType -from meltano.core.plugin.error import PluginNotFoundError from meltano.core.project_plugins_service import ProjectPluginsService from meltano.core.tracking import PluginsTrackingContext, Tracker from meltano.core.tracking import cli as cli_tracking @@ -52,24 +51,18 @@ plugins_service = ProjectPluginsService(project) - if plugin_type: - try: + try: + if plugin_type: plugin_type = PluginType.from_cli_argument(plugin_type) - except ValueError: - # if we fail because plugin_type is not valid we have no plugins to instrument - tracker.track_command_event(cli_tracking.STARTED) - tracker.track_command_event(cli_tracking.ABORTED) - raise - plugins = plugins_service.get_plugins_of_type(plugin_type) - if plugin_name: - plugins = [plugin for plugin in plugins if plugin.name in plugin_name] - else: - try: + plugins = plugins_service.get_plugins_of_type(plugin_type) + if plugin_name: + plugins = [plugin for plugin in plugins if plugin.name in plugin_name] + else: plugins = list(plugins_service.plugins()) - except PluginNotFoundError: - tracker.track_command_event(cli_tracking.STARTED) - tracker.track_command_event(cli_tracking.ABORTED) - raise + except Exception: + tracker.track_command_event(cli_tracking.STARTED) + tracker.track_command_event(cli_tracking.ABORTED) + raise click.echo(f"Installing {len(plugins)} plugins...") tracker.add_contexts(
{"golden_diff": "diff --git a/src/meltano/cli/install.py b/src/meltano/cli/install.py\n--- a/src/meltano/cli/install.py\n+++ b/src/meltano/cli/install.py\n@@ -5,7 +5,6 @@\n \n from meltano.core.legacy_tracking import LegacyTracker\n from meltano.core.plugin import PluginType\n-from meltano.core.plugin.error import PluginNotFoundError\n from meltano.core.project_plugins_service import ProjectPluginsService\n from meltano.core.tracking import PluginsTrackingContext, Tracker\n from meltano.core.tracking import cli as cli_tracking\n@@ -52,24 +51,18 @@\n \n plugins_service = ProjectPluginsService(project)\n \n- if plugin_type:\n- try:\n+ try:\n+ if plugin_type:\n plugin_type = PluginType.from_cli_argument(plugin_type)\n- except ValueError:\n- # if we fail because plugin_type is not valid we have no plugins to instrument\n- tracker.track_command_event(cli_tracking.STARTED)\n- tracker.track_command_event(cli_tracking.ABORTED)\n- raise\n- plugins = plugins_service.get_plugins_of_type(plugin_type)\n- if plugin_name:\n- plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n- else:\n- try:\n+ plugins = plugins_service.get_plugins_of_type(plugin_type)\n+ if plugin_name:\n+ plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n+ else:\n plugins = list(plugins_service.plugins())\n- except PluginNotFoundError:\n- tracker.track_command_event(cli_tracking.STARTED)\n- tracker.track_command_event(cli_tracking.ABORTED)\n- raise\n+ except Exception:\n+ tracker.track_command_event(cli_tracking.STARTED)\n+ tracker.track_command_event(cli_tracking.ABORTED)\n+ raise\n \n click.echo(f\"Installing {len(plugins)} plugins...\")\n tracker.add_contexts(\n", "issue": "Verify `meltano install` telemetry fires for malformed plugin entries\nIn #6109 @pnadolny13 noticed that with the following entry no events where fired:\r\n\r\n```yaml\r\n - name: tap-something-invalid\r\n variant: meltanolabs\r\n pip_url: git+https://github.com/foo/tap-something-invalid.git\r\n```\r\n\r\nI wasn't able to reproduce that at the time and did see two events (started/aborted) come across. We should double check though, its entirely possible that my local dev setup had a seperate issue that was triggering the `aborted` event.\n", "before_files": [{"content": "\"\"\"CLI command `meltano install`.\"\"\"\nfrom __future__ import annotations\n\nimport click\n\nfrom meltano.core.legacy_tracking import LegacyTracker\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.plugin.error import PluginNotFoundError\nfrom meltano.core.project_plugins_service import ProjectPluginsService\nfrom meltano.core.tracking import PluginsTrackingContext, Tracker\nfrom meltano.core.tracking import cli as cli_tracking\nfrom meltano.core.tracking import cli_context_builder\n\nfrom . import cli\nfrom .params import pass_project\nfrom .utils import CliError, install_plugins\n\n\n@cli.command(short_help=\"Install project dependencies.\")\n@click.argument(\n \"plugin_type\", type=click.Choice(PluginType.cli_arguments()), required=False\n)\n@click.argument(\"plugin_name\", nargs=-1, required=False)\n@click.option(\n \"--clean\",\n is_flag=True,\n help=\"Completely reinstall a plugin rather than simply upgrading if necessary.\",\n)\n@click.option(\n \"--parallelism\",\n \"-p\",\n type=click.INT,\n default=None,\n help=\"Limit the number of plugins to install in parallel. Defaults to the number of cores.\",\n)\n@pass_project(migrate=True)\ndef install(project, plugin_type, plugin_name, clean, parallelism):\n \"\"\"\n Install all the dependencies of your project based on the meltano.yml file.\n\n \\b\\nRead more at https://www.meltano.com/docs/command-line-interface.html#install\n \"\"\"\n tracker = Tracker(project)\n tracker.add_contexts(\n cli_context_builder(\n \"install\",\n None,\n clean=clean,\n parallelism=parallelism,\n )\n )\n\n plugins_service = ProjectPluginsService(project)\n\n if plugin_type:\n try:\n plugin_type = PluginType.from_cli_argument(plugin_type)\n except ValueError:\n # if we fail because plugin_type is not valid we have no plugins to instrument\n tracker.track_command_event(cli_tracking.STARTED)\n tracker.track_command_event(cli_tracking.ABORTED)\n raise\n plugins = plugins_service.get_plugins_of_type(plugin_type)\n if plugin_name:\n plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n else:\n try:\n plugins = list(plugins_service.plugins())\n except PluginNotFoundError:\n tracker.track_command_event(cli_tracking.STARTED)\n tracker.track_command_event(cli_tracking.ABORTED)\n raise\n\n click.echo(f\"Installing {len(plugins)} plugins...\")\n tracker.add_contexts(\n PluginsTrackingContext([(candidate, None) for candidate in plugins])\n )\n tracker.track_command_event(cli_tracking.STARTED)\n\n success = install_plugins(project, plugins, parallelism=parallelism, clean=clean)\n\n legacy_tracker = LegacyTracker(project)\n legacy_tracker.track_meltano_install()\n\n if not success:\n tracker.track_command_event(cli_tracking.FAILED)\n raise CliError(\"Failed to install plugin(s)\")\n tracker.track_command_event(cli_tracking.COMPLETED)\n", "path": "src/meltano/cli/install.py"}], "after_files": [{"content": "\"\"\"CLI command `meltano install`.\"\"\"\nfrom __future__ import annotations\n\nimport click\n\nfrom meltano.core.legacy_tracking import LegacyTracker\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.project_plugins_service import ProjectPluginsService\nfrom meltano.core.tracking import PluginsTrackingContext, Tracker\nfrom meltano.core.tracking import cli as cli_tracking\nfrom meltano.core.tracking import cli_context_builder\n\nfrom . import cli\nfrom .params import pass_project\nfrom .utils import CliError, install_plugins\n\n\n@cli.command(short_help=\"Install project dependencies.\")\n@click.argument(\n \"plugin_type\", type=click.Choice(PluginType.cli_arguments()), required=False\n)\n@click.argument(\"plugin_name\", nargs=-1, required=False)\n@click.option(\n \"--clean\",\n is_flag=True,\n help=\"Completely reinstall a plugin rather than simply upgrading if necessary.\",\n)\n@click.option(\n \"--parallelism\",\n \"-p\",\n type=click.INT,\n default=None,\n help=\"Limit the number of plugins to install in parallel. Defaults to the number of cores.\",\n)\n@pass_project(migrate=True)\ndef install(project, plugin_type, plugin_name, clean, parallelism):\n \"\"\"\n Install all the dependencies of your project based on the meltano.yml file.\n\n \\b\\nRead more at https://www.meltano.com/docs/command-line-interface.html#install\n \"\"\"\n tracker = Tracker(project)\n tracker.add_contexts(\n cli_context_builder(\n \"install\",\n None,\n clean=clean,\n parallelism=parallelism,\n )\n )\n\n plugins_service = ProjectPluginsService(project)\n\n try:\n if plugin_type:\n plugin_type = PluginType.from_cli_argument(plugin_type)\n plugins = plugins_service.get_plugins_of_type(plugin_type)\n if plugin_name:\n plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n else:\n plugins = list(plugins_service.plugins())\n except Exception:\n tracker.track_command_event(cli_tracking.STARTED)\n tracker.track_command_event(cli_tracking.ABORTED)\n raise\n\n click.echo(f\"Installing {len(plugins)} plugins...\")\n tracker.add_contexts(\n PluginsTrackingContext([(candidate, None) for candidate in plugins])\n )\n tracker.track_command_event(cli_tracking.STARTED)\n\n success = install_plugins(project, plugins, parallelism=parallelism, clean=clean)\n\n legacy_tracker = LegacyTracker(project)\n legacy_tracker.track_meltano_install()\n\n if not success:\n tracker.track_command_event(cli_tracking.FAILED)\n raise CliError(\"Failed to install plugin(s)\")\n tracker.track_command_event(cli_tracking.COMPLETED)\n", "path": "src/meltano/cli/install.py"}]}
1,187
406
gh_patches_debug_5622
rasdani/github-patches
git_diff
obspy__obspy-3127
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- IRIS federator routing client does not match https:// URLs — affects RASPISHAKE ### Avoid duplicates - [X] I searched existing issues ### Bug Summary I noticed that the `RoutingClient('iris-federator')` was never returning stations / data from the RASPISHAKE data center. Digging in, I found this line in the **federator** `_split_routing_response()` method: https://github.com/obspy/obspy/blob/9659cf3e4499fe15fa71bd0bd3a9600aca7f180e/obspy/clients/fdsn/routing/federator_routing_client.py#L167 Compare to this line in the **eidaws** `_split_routing_response()` method: https://github.com/obspy/obspy/blob/9659cf3e4499fe15fa71bd0bd3a9600aca7f180e/obspy/clients/fdsn/routing/eidaws_routing_client.py#L195 Note the difference is that the **federator** ignores `https://` URLs. Well, RASPISHAKE's URL is `https://`, see [here](http://service.iris.edu/irisws/fedcatalog/1/query?net=AM&sta=RAF63&format=request&includeoverlaps=false&nodata=404): > ``` > DATASELECTSERVICE=https://fdsnws.raspberryshakedata.com/fdsnws/dataselect/1/ > STATIONSERVICE=https://fdsnws.raspberryshakedata.com/fdsnws/station/1/ > ``` ### Possible fix? I imagine this is a quick fix, changing `"http://"` to `"http"` on line 167 to match how the **eidaws** URLs are handled. If this is indeed the case, I'm happy to make a PR. ### Code to Reproduce ```python from obspy.clients.fdsn import Client, RoutingClient kwargs = dict(network='AM', station='RAF63') Client('RASPISHAKE').get_stations(**kwargs) # GOOD: Returns Inventory with "Sending institution: SeisComP (RaspberryShake)" RoutingClient('iris-federator').get_stations(**kwargs) # BAD: Raises FDSNNoDataException, see attached error traceback ``` ### Error Traceback ```Python traceback --------------------------------------------------------------------------- FDSNNoDataException Traceback (most recent call last) Input In [7], in <cell line: 1>() ----> 1 RoutingClient('iris-federator').get_stations(**kwargs) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/decorator.py:232, in decorate.<locals>.fun(*args, **kw) 230 if not kwsyntax: 231 args, kw = fix(args, kw, sig) --> 232 return caller(func, *(extras + args), **kw) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:78, in _assert_filename_not_in_kwargs(f, *args, **kwargs) 76 if "filename" in kwargs: 77 raise ValueError("The `filename` argument is not supported") ---> 78 return f(*args, **kwargs) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/federator_routing_client.py:105, in FederatorRoutingClient.get_stations(self, **kwargs) 88 @_assert_filename_not_in_kwargs 89 def get_stations(self, **kwargs): 90 """ 91 Get stations from multiple data centers. 92 (...) 103 for details. 104 """ --> 105 return super(FederatorRoutingClient, self).get_stations(**kwargs) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/decorator.py:232, in decorate.<locals>.fun(*args, **kw) 230 if not kwsyntax: 231 args, kw = fix(args, kw, sig) --> 232 return caller(func, *(extras + args), **kw) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:78, in _assert_filename_not_in_kwargs(f, *args, **kwargs) 76 if "filename" in kwargs: 77 raise ValueError("The `filename` argument is not supported") ---> 78 return f(*args, **kwargs) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:367, in BaseRoutingClient.get_stations(self, **kwargs) 363 # Just pass these to the bulk request. 364 bulk = [kwargs.pop(key, '*') for key in ( 365 "network", "station", "location", "channel", "starttime", 366 "endtime")] --> 367 return self.get_stations_bulk([bulk], **kwargs) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/decorator.py:232, in decorate.<locals>.fun(*args, **kw) 230 if not kwsyntax: 231 args, kw = fix(args, kw, sig) --> 232 return caller(func, *(extras + args), **kw) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:78, in _assert_filename_not_in_kwargs(f, *args, **kwargs) 76 if "filename" in kwargs: 77 raise ValueError("The `filename` argument is not supported") ---> 78 return f(*args, **kwargs) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/federator_routing_client.py:142, in FederatorRoutingClient.get_stations_bulk(self, bulk, **kwargs) 138 r = self._download(self._url + "/query", data=bulk_str) 139 split = self._split_routing_response( 140 r.content.decode() if hasattr(r.content, "decode") else r.content, 141 service="station") --> 142 return self._download_stations(split, **kwargs) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:238, in BaseRoutingClient._download_stations(self, split, **kwargs) 237 def _download_stations(self, split, **kwargs): --> 238 return self._download_parallel(split, data_type="station", **kwargs) File ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:245, in BaseRoutingClient._download_parallel(self, split, data_type, **kwargs) 242 split = self._filter_requests(split) 244 if not split: --> 245 raise FDSNNoDataException( 246 "Nothing remains to download after the provider " 247 "inclusion/exclusion filters have been applied.") 249 if data_type not in ["waveform", "station"]: # pragma: no cover 250 raise ValueError("Invalid data type.") FDSNNoDataException: Nothing remains to download after the provider inclusion/exclusion filters have been applied. ``` ### ObsPy Version? 1.3.0 ### Operating System? macOS ### Python Version? 3.9.12 ### Installation Method? conda --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `obspy/clients/fdsn/routing/federator_routing_client.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 Routing client for the IRIS federator routing service. 4 5 :copyright: 6 The ObsPy Development Team (devs@obspy.org) 7 Celso G Reyes, 2017 8 IRIS-DMC 9 :license: 10 GNU Lesser General Public License, Version 3 11 (https://www.gnu.org/copyleft/lesser.html) 12 """ 13 import collections 14 15 from ..client import get_bulk_string 16 from .routing_client import ( 17 BaseRoutingClient, _assert_filename_not_in_kwargs, 18 _assert_attach_response_not_in_kwargs) 19 20 21 class FederatorRoutingClient(BaseRoutingClient): 22 def __init__(self, url="http://service.iris.edu/irisws/fedcatalog/1", 23 include_providers=None, exclude_providers=None, 24 debug=False, timeout=120, **kwargs): 25 """ 26 Initialize a federated routing client. 27 28 All parameters except ``url`` are passed on to the 29 :class:`~obspy.clients.fdsn.routing.routing_client.BaseRoutingClient` 30 parent class 31 32 :param url: The URL of the routing service. 33 :type url: str 34 """ 35 BaseRoutingClient.__init__(self, debug=debug, timeout=timeout, 36 include_providers=include_providers, 37 exclude_providers=exclude_providers, 38 **kwargs) 39 self._url = url 40 41 # Parameters the routing service can work with. If this becomes a 42 # standardized service and more implementations show up we should also 43 # parse the WADL files - but right now this would just complicate 44 # things. 45 self.kwargs_of_interest = [ 46 "includeoverlaps", "level", "network", "station", "channel", 47 "location", "starttime", "endtime", "startbefore", "endbefore", 48 "startafter", "endafter", "minlatitude", "maxlatitude", 49 "minlongitude", "maxlongitude", "latitude", "longitude", 50 "minradius", "maxradius"] 51 52 @_assert_attach_response_not_in_kwargs 53 @_assert_filename_not_in_kwargs 54 def get_waveforms_bulk(self, bulk, **kwargs): 55 """ 56 Get waveforms from multiple data centers. 57 58 It will pass on most parameters to the federated routing service. 59 They will also be passed on to the individual FDSNWS implementations 60 if a service supports them. 61 62 The ``filename`` and ``attach_response`` parameters of the single 63 provider FDSN client are not supported. 64 65 This can route on a number of different parameters, please see the 66 web site of the 67 `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_ 68 for details. 69 """ 70 bulk_params = ["network", "station", "location", "channel", 71 "starttime", "endtime"] 72 for _i in bulk_params: 73 if _i in kwargs: 74 raise ValueError("`%s` must not be part of the optional " 75 "parameters in a bulk request." % _i) 76 77 params = {k: str(kwargs[k]) 78 for k in self.kwargs_of_interest if k in kwargs} 79 params["format"] = "request" 80 81 bulk_str = get_bulk_string(bulk, params) 82 r = self._download(self._url + "/query", data=bulk_str) 83 split = self._split_routing_response( 84 r.content.decode() if hasattr(r.content, "decode") else r.content, 85 service="dataselect") 86 return self._download_waveforms(split, **kwargs) 87 88 @_assert_filename_not_in_kwargs 89 def get_stations(self, **kwargs): 90 """ 91 Get stations from multiple data centers. 92 93 It will pass on most parameters to the federated routing service. 94 They will also be passed on to the individual FDSNWS implementations 95 if a service supports them. 96 97 The ``filename`` parameter of the single provider FDSN client is not 98 supported. 99 100 This can route on a number of different parameters, please see the 101 web site of the 102 `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_ 103 for details. 104 """ 105 return super(FederatorRoutingClient, self).get_stations(**kwargs) 106 107 @_assert_filename_not_in_kwargs 108 def get_stations_bulk(self, bulk, **kwargs): 109 """ 110 Get stations from multiple data centers. 111 112 It will pass on most parameters to the federated routing service. 113 They will also be passed on to the individual FDSNWS implementations 114 if a service supports them. 115 116 The ``filename`` parameter of the single provider FDSN client is not 117 supported. 118 119 This can route on a number of different parameters, please see the 120 web site of the 121 `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_ 122 for details. 123 """ 124 bulk_params = ["network", "station", "location", "channel", 125 "starttime", "endtime"] 126 for _i in bulk_params: 127 if _i in kwargs: 128 raise ValueError("`%s` must not be part of the optional " 129 "parameters in a bulk request." % _i) 130 131 params = collections.OrderedDict() 132 for k in self.kwargs_of_interest: 133 if k in kwargs: 134 params[k] = str(kwargs[k]) 135 params["format"] = "request" 136 137 bulk_str = get_bulk_string(bulk, params) 138 r = self._download(self._url + "/query", data=bulk_str) 139 split = self._split_routing_response( 140 r.content.decode() if hasattr(r.content, "decode") else r.content, 141 service="station") 142 return self._download_stations(split, **kwargs) 143 144 @staticmethod 145 def _split_routing_response(data, service): 146 """ 147 Splits the routing responses per data center for the federator output. 148 149 Returns a dictionary with the keys being the root URLs of the fdsnws 150 endpoints and the values the data payloads for that endpoint. 151 152 :param data: The return value from the EIDAWS routing service. 153 """ 154 if service.lower() == "dataselect": 155 key = "DATASELECTSERVICE" 156 elif service.lower() == "station": 157 key = "STATIONSERVICE" 158 else: 159 raise ValueError("Service must be 'dataselect' or 'station'.") 160 161 split = collections.defaultdict(list) 162 current_key = None 163 for line in data.splitlines(): 164 line = line.strip() 165 if not line: 166 continue 167 if "http://" in line: 168 if key not in line: 169 continue 170 current_key = line[len(key) + 1:line.rfind("/fdsnws")] 171 continue 172 # Anything before the first data center can be ignored. 173 if current_key is None: 174 continue 175 split[current_key].append(line) 176 177 return {k: "\n".join(v) for k, v in split.items()} 178 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/obspy/clients/fdsn/routing/federator_routing_client.py b/obspy/clients/fdsn/routing/federator_routing_client.py --- a/obspy/clients/fdsn/routing/federator_routing_client.py +++ b/obspy/clients/fdsn/routing/federator_routing_client.py @@ -164,7 +164,7 @@ line = line.strip() if not line: continue - if "http://" in line: + if "http://" in line or "https://" in line: if key not in line: continue current_key = line[len(key) + 1:line.rfind("/fdsnws")]
{"golden_diff": "diff --git a/obspy/clients/fdsn/routing/federator_routing_client.py b/obspy/clients/fdsn/routing/federator_routing_client.py\n--- a/obspy/clients/fdsn/routing/federator_routing_client.py\n+++ b/obspy/clients/fdsn/routing/federator_routing_client.py\n@@ -164,7 +164,7 @@\n line = line.strip()\n if not line:\n continue\n- if \"http://\" in line:\n+ if \"http://\" in line or \"https://\" in line:\n if key not in line:\n continue\n current_key = line[len(key) + 1:line.rfind(\"/fdsnws\")]\n", "issue": "IRIS federator routing client does not match https:// URLs \u2014 affects RASPISHAKE\n### Avoid duplicates\r\n\r\n- [X] I searched existing issues\r\n\r\n### Bug Summary\r\n\r\nI noticed that the `RoutingClient('iris-federator')` was never returning stations / data from the RASPISHAKE data center. Digging in, I found this line in the **federator** `_split_routing_response()` method: https://github.com/obspy/obspy/blob/9659cf3e4499fe15fa71bd0bd3a9600aca7f180e/obspy/clients/fdsn/routing/federator_routing_client.py#L167\r\n\r\nCompare to this line in the **eidaws** `_split_routing_response()` method:\r\nhttps://github.com/obspy/obspy/blob/9659cf3e4499fe15fa71bd0bd3a9600aca7f180e/obspy/clients/fdsn/routing/eidaws_routing_client.py#L195\r\n\r\nNote the difference is that the **federator** ignores `https://` URLs. Well, RASPISHAKE's URL is `https://`, see [here](http://service.iris.edu/irisws/fedcatalog/1/query?net=AM&sta=RAF63&format=request&includeoverlaps=false&nodata=404):\r\n\r\n> ```\r\n> DATASELECTSERVICE=https://fdsnws.raspberryshakedata.com/fdsnws/dataselect/1/\r\n> STATIONSERVICE=https://fdsnws.raspberryshakedata.com/fdsnws/station/1/\r\n> ```\r\n\r\n### Possible fix?\r\n\r\nI imagine this is a quick fix, changing `\"http://\"` to `\"http\"` on line 167 to match how the **eidaws** URLs are handled. If this is indeed the case, I'm happy to make a PR.\r\n\r\n### Code to Reproduce\r\n\r\n```python\r\nfrom obspy.clients.fdsn import Client, RoutingClient\r\n\r\nkwargs = dict(network='AM', station='RAF63')\r\n\r\nClient('RASPISHAKE').get_stations(**kwargs) # GOOD: Returns Inventory with \"Sending institution: SeisComP (RaspberryShake)\"\r\nRoutingClient('iris-federator').get_stations(**kwargs) # BAD: Raises FDSNNoDataException, see attached error traceback\r\n```\r\n\r\n\r\n### Error Traceback\r\n\r\n```Python traceback\r\n---------------------------------------------------------------------------\r\nFDSNNoDataException Traceback (most recent call last)\r\nInput In [7], in <cell line: 1>()\r\n----> 1 RoutingClient('iris-federator').get_stations(**kwargs)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/decorator.py:232, in decorate.<locals>.fun(*args, **kw)\r\n 230 if not kwsyntax:\r\n 231 args, kw = fix(args, kw, sig)\r\n--> 232 return caller(func, *(extras + args), **kw)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:78, in _assert_filename_not_in_kwargs(f, *args, **kwargs)\r\n 76 if \"filename\" in kwargs:\r\n 77 raise ValueError(\"The `filename` argument is not supported\")\r\n---> 78 return f(*args, **kwargs)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/federator_routing_client.py:105, in FederatorRoutingClient.get_stations(self, **kwargs)\r\n 88 @_assert_filename_not_in_kwargs\r\n 89 def get_stations(self, **kwargs):\r\n 90 \"\"\"\r\n 91 Get stations from multiple data centers.\r\n 92\r\n (...)\r\n 103 for details.\r\n 104 \"\"\"\r\n--> 105 return super(FederatorRoutingClient, self).get_stations(**kwargs)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/decorator.py:232, in decorate.<locals>.fun(*args, **kw)\r\n 230 if not kwsyntax:\r\n 231 args, kw = fix(args, kw, sig)\r\n--> 232 return caller(func, *(extras + args), **kw)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:78, in _assert_filename_not_in_kwargs(f, *args, **kwargs)\r\n 76 if \"filename\" in kwargs:\r\n 77 raise ValueError(\"The `filename` argument is not supported\")\r\n---> 78 return f(*args, **kwargs)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:367, in BaseRoutingClient.get_stations(self, **kwargs)\r\n 363 # Just pass these to the bulk request.\r\n 364 bulk = [kwargs.pop(key, '*') for key in (\r\n 365 \"network\", \"station\", \"location\", \"channel\", \"starttime\",\r\n 366 \"endtime\")]\r\n--> 367 return self.get_stations_bulk([bulk], **kwargs)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/decorator.py:232, in decorate.<locals>.fun(*args, **kw)\r\n 230 if not kwsyntax:\r\n 231 args, kw = fix(args, kw, sig)\r\n--> 232 return caller(func, *(extras + args), **kw)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:78, in _assert_filename_not_in_kwargs(f, *args, **kwargs)\r\n 76 if \"filename\" in kwargs:\r\n 77 raise ValueError(\"The `filename` argument is not supported\")\r\n---> 78 return f(*args, **kwargs)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/federator_routing_client.py:142, in FederatorRoutingClient.get_stations_bulk(self, bulk, **kwargs)\r\n 138 r = self._download(self._url + \"/query\", data=bulk_str)\r\n 139 split = self._split_routing_response(\r\n 140 r.content.decode() if hasattr(r.content, \"decode\") else r.content,\r\n 141 service=\"station\")\r\n--> 142 return self._download_stations(split, **kwargs)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:238, in BaseRoutingClient._download_stations(self, split, **kwargs)\r\n 237 def _download_stations(self, split, **kwargs):\r\n--> 238 return self._download_parallel(split, data_type=\"station\", **kwargs)\r\n\r\nFile ~/miniconda3/envs/sonify/lib/python3.9/site-packages/obspy/clients/fdsn/routing/routing_client.py:245, in BaseRoutingClient._download_parallel(self, split, data_type, **kwargs)\r\n 242 split = self._filter_requests(split)\r\n 244 if not split:\r\n--> 245 raise FDSNNoDataException(\r\n 246 \"Nothing remains to download after the provider \"\r\n 247 \"inclusion/exclusion filters have been applied.\")\r\n 249 if data_type not in [\"waveform\", \"station\"]: # pragma: no cover\r\n 250 raise ValueError(\"Invalid data type.\")\r\n\r\nFDSNNoDataException: Nothing remains to download after the provider inclusion/exclusion filters have been applied.\r\n```\r\n\r\n\r\n### ObsPy Version?\r\n\r\n1.3.0\r\n\r\n### Operating System?\r\n\r\nmacOS\r\n\r\n### Python Version?\r\n\r\n3.9.12\r\n\r\n### Installation Method?\r\n\r\nconda\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nRouting client for the IRIS federator routing service.\n\n:copyright:\n The ObsPy Development Team (devs@obspy.org)\n Celso G Reyes, 2017\n IRIS-DMC\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nimport collections\n\nfrom ..client import get_bulk_string\nfrom .routing_client import (\n BaseRoutingClient, _assert_filename_not_in_kwargs,\n _assert_attach_response_not_in_kwargs)\n\n\nclass FederatorRoutingClient(BaseRoutingClient):\n def __init__(self, url=\"http://service.iris.edu/irisws/fedcatalog/1\",\n include_providers=None, exclude_providers=None,\n debug=False, timeout=120, **kwargs):\n \"\"\"\n Initialize a federated routing client.\n\n All parameters except ``url`` are passed on to the\n :class:`~obspy.clients.fdsn.routing.routing_client.BaseRoutingClient`\n parent class\n\n :param url: The URL of the routing service.\n :type url: str\n \"\"\"\n BaseRoutingClient.__init__(self, debug=debug, timeout=timeout,\n include_providers=include_providers,\n exclude_providers=exclude_providers,\n **kwargs)\n self._url = url\n\n # Parameters the routing service can work with. If this becomes a\n # standardized service and more implementations show up we should also\n # parse the WADL files - but right now this would just complicate\n # things.\n self.kwargs_of_interest = [\n \"includeoverlaps\", \"level\", \"network\", \"station\", \"channel\",\n \"location\", \"starttime\", \"endtime\", \"startbefore\", \"endbefore\",\n \"startafter\", \"endafter\", \"minlatitude\", \"maxlatitude\",\n \"minlongitude\", \"maxlongitude\", \"latitude\", \"longitude\",\n \"minradius\", \"maxradius\"]\n\n @_assert_attach_response_not_in_kwargs\n @_assert_filename_not_in_kwargs\n def get_waveforms_bulk(self, bulk, **kwargs):\n \"\"\"\n Get waveforms from multiple data centers.\n\n It will pass on most parameters to the federated routing service.\n They will also be passed on to the individual FDSNWS implementations\n if a service supports them.\n\n The ``filename`` and ``attach_response`` parameters of the single\n provider FDSN client are not supported.\n\n This can route on a number of different parameters, please see the\n web site of the\n `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_\n for details.\n \"\"\"\n bulk_params = [\"network\", \"station\", \"location\", \"channel\",\n \"starttime\", \"endtime\"]\n for _i in bulk_params:\n if _i in kwargs:\n raise ValueError(\"`%s` must not be part of the optional \"\n \"parameters in a bulk request.\" % _i)\n\n params = {k: str(kwargs[k])\n for k in self.kwargs_of_interest if k in kwargs}\n params[\"format\"] = \"request\"\n\n bulk_str = get_bulk_string(bulk, params)\n r = self._download(self._url + \"/query\", data=bulk_str)\n split = self._split_routing_response(\n r.content.decode() if hasattr(r.content, \"decode\") else r.content,\n service=\"dataselect\")\n return self._download_waveforms(split, **kwargs)\n\n @_assert_filename_not_in_kwargs\n def get_stations(self, **kwargs):\n \"\"\"\n Get stations from multiple data centers.\n\n It will pass on most parameters to the federated routing service.\n They will also be passed on to the individual FDSNWS implementations\n if a service supports them.\n\n The ``filename`` parameter of the single provider FDSN client is not\n supported.\n\n This can route on a number of different parameters, please see the\n web site of the\n `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_\n for details.\n \"\"\"\n return super(FederatorRoutingClient, self).get_stations(**kwargs)\n\n @_assert_filename_not_in_kwargs\n def get_stations_bulk(self, bulk, **kwargs):\n \"\"\"\n Get stations from multiple data centers.\n\n It will pass on most parameters to the federated routing service.\n They will also be passed on to the individual FDSNWS implementations\n if a service supports them.\n\n The ``filename`` parameter of the single provider FDSN client is not\n supported.\n\n This can route on a number of different parameters, please see the\n web site of the\n `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_\n for details.\n \"\"\"\n bulk_params = [\"network\", \"station\", \"location\", \"channel\",\n \"starttime\", \"endtime\"]\n for _i in bulk_params:\n if _i in kwargs:\n raise ValueError(\"`%s` must not be part of the optional \"\n \"parameters in a bulk request.\" % _i)\n\n params = collections.OrderedDict()\n for k in self.kwargs_of_interest:\n if k in kwargs:\n params[k] = str(kwargs[k])\n params[\"format\"] = \"request\"\n\n bulk_str = get_bulk_string(bulk, params)\n r = self._download(self._url + \"/query\", data=bulk_str)\n split = self._split_routing_response(\n r.content.decode() if hasattr(r.content, \"decode\") else r.content,\n service=\"station\")\n return self._download_stations(split, **kwargs)\n\n @staticmethod\n def _split_routing_response(data, service):\n \"\"\"\n Splits the routing responses per data center for the federator output.\n\n Returns a dictionary with the keys being the root URLs of the fdsnws\n endpoints and the values the data payloads for that endpoint.\n\n :param data: The return value from the EIDAWS routing service.\n \"\"\"\n if service.lower() == \"dataselect\":\n key = \"DATASELECTSERVICE\"\n elif service.lower() == \"station\":\n key = \"STATIONSERVICE\"\n else:\n raise ValueError(\"Service must be 'dataselect' or 'station'.\")\n\n split = collections.defaultdict(list)\n current_key = None\n for line in data.splitlines():\n line = line.strip()\n if not line:\n continue\n if \"http://\" in line:\n if key not in line:\n continue\n current_key = line[len(key) + 1:line.rfind(\"/fdsnws\")]\n continue\n # Anything before the first data center can be ignored.\n if current_key is None:\n continue\n split[current_key].append(line)\n\n return {k: \"\\n\".join(v) for k, v in split.items()}\n", "path": "obspy/clients/fdsn/routing/federator_routing_client.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nRouting client for the IRIS federator routing service.\n\n:copyright:\n The ObsPy Development Team (devs@obspy.org)\n Celso G Reyes, 2017\n IRIS-DMC\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nimport collections\n\nfrom ..client import get_bulk_string\nfrom .routing_client import (\n BaseRoutingClient, _assert_filename_not_in_kwargs,\n _assert_attach_response_not_in_kwargs)\n\n\nclass FederatorRoutingClient(BaseRoutingClient):\n def __init__(self, url=\"http://service.iris.edu/irisws/fedcatalog/1\",\n include_providers=None, exclude_providers=None,\n debug=False, timeout=120, **kwargs):\n \"\"\"\n Initialize a federated routing client.\n\n All parameters except ``url`` are passed on to the\n :class:`~obspy.clients.fdsn.routing.routing_client.BaseRoutingClient`\n parent class\n\n :param url: The URL of the routing service.\n :type url: str\n \"\"\"\n BaseRoutingClient.__init__(self, debug=debug, timeout=timeout,\n include_providers=include_providers,\n exclude_providers=exclude_providers,\n **kwargs)\n self._url = url\n\n # Parameters the routing service can work with. If this becomes a\n # standardized service and more implementations show up we should also\n # parse the WADL files - but right now this would just complicate\n # things.\n self.kwargs_of_interest = [\n \"includeoverlaps\", \"level\", \"network\", \"station\", \"channel\",\n \"location\", \"starttime\", \"endtime\", \"startbefore\", \"endbefore\",\n \"startafter\", \"endafter\", \"minlatitude\", \"maxlatitude\",\n \"minlongitude\", \"maxlongitude\", \"latitude\", \"longitude\",\n \"minradius\", \"maxradius\"]\n\n @_assert_attach_response_not_in_kwargs\n @_assert_filename_not_in_kwargs\n def get_waveforms_bulk(self, bulk, **kwargs):\n \"\"\"\n Get waveforms from multiple data centers.\n\n It will pass on most parameters to the federated routing service.\n They will also be passed on to the individual FDSNWS implementations\n if a service supports them.\n\n The ``filename`` and ``attach_response`` parameters of the single\n provider FDSN client are not supported.\n\n This can route on a number of different parameters, please see the\n web site of the\n `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_\n for details.\n \"\"\"\n bulk_params = [\"network\", \"station\", \"location\", \"channel\",\n \"starttime\", \"endtime\"]\n for _i in bulk_params:\n if _i in kwargs:\n raise ValueError(\"`%s` must not be part of the optional \"\n \"parameters in a bulk request.\" % _i)\n\n params = {k: str(kwargs[k])\n for k in self.kwargs_of_interest if k in kwargs}\n params[\"format\"] = \"request\"\n\n bulk_str = get_bulk_string(bulk, params)\n r = self._download(self._url + \"/query\", data=bulk_str)\n split = self._split_routing_response(\n r.content.decode() if hasattr(r.content, \"decode\") else r.content,\n service=\"dataselect\")\n return self._download_waveforms(split, **kwargs)\n\n @_assert_filename_not_in_kwargs\n def get_stations(self, **kwargs):\n \"\"\"\n Get stations from multiple data centers.\n\n It will pass on most parameters to the federated routing service.\n They will also be passed on to the individual FDSNWS implementations\n if a service supports them.\n\n The ``filename`` parameter of the single provider FDSN client is not\n supported.\n\n This can route on a number of different parameters, please see the\n web site of the\n `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_\n for details.\n \"\"\"\n return super(FederatorRoutingClient, self).get_stations(**kwargs)\n\n @_assert_filename_not_in_kwargs\n def get_stations_bulk(self, bulk, **kwargs):\n \"\"\"\n Get stations from multiple data centers.\n\n It will pass on most parameters to the federated routing service.\n They will also be passed on to the individual FDSNWS implementations\n if a service supports them.\n\n The ``filename`` parameter of the single provider FDSN client is not\n supported.\n\n This can route on a number of different parameters, please see the\n web site of the\n `IRIS Federator <https://service.iris.edu/irisws/fedcatalog/1/>`_\n for details.\n \"\"\"\n bulk_params = [\"network\", \"station\", \"location\", \"channel\",\n \"starttime\", \"endtime\"]\n for _i in bulk_params:\n if _i in kwargs:\n raise ValueError(\"`%s` must not be part of the optional \"\n \"parameters in a bulk request.\" % _i)\n\n params = collections.OrderedDict()\n for k in self.kwargs_of_interest:\n if k in kwargs:\n params[k] = str(kwargs[k])\n params[\"format\"] = \"request\"\n\n bulk_str = get_bulk_string(bulk, params)\n r = self._download(self._url + \"/query\", data=bulk_str)\n split = self._split_routing_response(\n r.content.decode() if hasattr(r.content, \"decode\") else r.content,\n service=\"station\")\n return self._download_stations(split, **kwargs)\n\n @staticmethod\n def _split_routing_response(data, service):\n \"\"\"\n Splits the routing responses per data center for the federator output.\n\n Returns a dictionary with the keys being the root URLs of the fdsnws\n endpoints and the values the data payloads for that endpoint.\n\n :param data: The return value from the EIDAWS routing service.\n \"\"\"\n if service.lower() == \"dataselect\":\n key = \"DATASELECTSERVICE\"\n elif service.lower() == \"station\":\n key = \"STATIONSERVICE\"\n else:\n raise ValueError(\"Service must be 'dataselect' or 'station'.\")\n\n split = collections.defaultdict(list)\n current_key = None\n for line in data.splitlines():\n line = line.strip()\n if not line:\n continue\n if \"http://\" in line or \"https://\" in line:\n if key not in line:\n continue\n current_key = line[len(key) + 1:line.rfind(\"/fdsnws\")]\n continue\n # Anything before the first data center can be ignored.\n if current_key is None:\n continue\n split[current_key].append(line)\n\n return {k: \"\\n\".join(v) for k, v in split.items()}\n", "path": "obspy/clients/fdsn/routing/federator_routing_client.py"}]}
4,027
154
gh_patches_debug_10915
rasdani/github-patches
git_diff
psf__black-2437
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Failing to parse single-quote f-string with backslash inside it **Describe the bug** The formatter is failing to parse single quote f-string as it cannot include a backslash inside it. **To Reproduce** 1. Take this test.py containing: ```python b = "example1" sts = "example2" f"\"{b}\"{' ' * (long-len(b)+1)}: \"{sts}\",\n" ``` 2. Run black without arguments. 3. See error --> ``` File "/var/task/black/__init__.py", line 986, in assert_equivalent dst_ast = parse_ast(dst) File "/var/task/black/parsing.py", line 125, in parse_ast raise SyntaxError( f'"{b}"{\' \' * (long-len(b)+1)}: "{sts}",\n' ``` **Expected behavior** The formatter is expected to throw the said error and fail to format the said file. **Environment (please complete the following information):** - Version: [main] - OS and Python version: [Linux/Python 3.9.5] **Does this bug also happen on main?** Yes --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/black/strings.py` Content: ``` 1 """ 2 Simple formatting on strings. Further string formatting code is in trans.py. 3 """ 4 5 import regex as re 6 import sys 7 from typing import List, Pattern 8 9 10 STRING_PREFIX_CHARS = "furbFURB" # All possible string prefix characters. 11 12 13 def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: 14 """Replace `regex` with `replacement` twice on `original`. 15 16 This is used by string normalization to perform replaces on 17 overlapping matches. 18 """ 19 return regex.sub(replacement, regex.sub(replacement, original)) 20 21 22 def has_triple_quotes(string: str) -> bool: 23 """ 24 Returns: 25 True iff @string starts with three quotation characters. 26 """ 27 raw_string = string.lstrip(STRING_PREFIX_CHARS) 28 return raw_string[:3] in {'"""', "'''"} 29 30 31 def lines_with_leading_tabs_expanded(s: str) -> List[str]: 32 """ 33 Splits string into lines and expands only leading tabs (following the normal 34 Python rules) 35 """ 36 lines = [] 37 for line in s.splitlines(): 38 # Find the index of the first non-whitespace character after a string of 39 # whitespace that includes at least one tab 40 match = re.match(r"\s*\t+\s*(\S)", line) 41 if match: 42 first_non_whitespace_idx = match.start(1) 43 44 lines.append( 45 line[:first_non_whitespace_idx].expandtabs() 46 + line[first_non_whitespace_idx:] 47 ) 48 else: 49 lines.append(line) 50 return lines 51 52 53 def fix_docstring(docstring: str, prefix: str) -> str: 54 # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation 55 if not docstring: 56 return "" 57 lines = lines_with_leading_tabs_expanded(docstring) 58 # Determine minimum indentation (first line doesn't count): 59 indent = sys.maxsize 60 for line in lines[1:]: 61 stripped = line.lstrip() 62 if stripped: 63 indent = min(indent, len(line) - len(stripped)) 64 # Remove indentation (first line is special): 65 trimmed = [lines[0].strip()] 66 if indent < sys.maxsize: 67 last_line_idx = len(lines) - 2 68 for i, line in enumerate(lines[1:]): 69 stripped_line = line[indent:].rstrip() 70 if stripped_line or i == last_line_idx: 71 trimmed.append(prefix + stripped_line) 72 else: 73 trimmed.append("") 74 return "\n".join(trimmed) 75 76 77 def get_string_prefix(string: str) -> str: 78 """ 79 Pre-conditions: 80 * assert_is_leaf_string(@string) 81 82 Returns: 83 @string's prefix (e.g. '', 'r', 'f', or 'rf'). 84 """ 85 assert_is_leaf_string(string) 86 87 prefix = "" 88 prefix_idx = 0 89 while string[prefix_idx] in STRING_PREFIX_CHARS: 90 prefix += string[prefix_idx] 91 prefix_idx += 1 92 93 return prefix 94 95 96 def assert_is_leaf_string(string: str) -> None: 97 """ 98 Checks the pre-condition that @string has the format that you would expect 99 of `leaf.value` where `leaf` is some Leaf such that `leaf.type == 100 token.STRING`. A more precise description of the pre-conditions that are 101 checked are listed below. 102 103 Pre-conditions: 104 * @string starts with either ', ", <prefix>', or <prefix>" where 105 `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`. 106 * @string ends with a quote character (' or "). 107 108 Raises: 109 AssertionError(...) if the pre-conditions listed above are not 110 satisfied. 111 """ 112 dquote_idx = string.find('"') 113 squote_idx = string.find("'") 114 if -1 in [dquote_idx, squote_idx]: 115 quote_idx = max(dquote_idx, squote_idx) 116 else: 117 quote_idx = min(squote_idx, dquote_idx) 118 119 assert ( 120 0 <= quote_idx < len(string) - 1 121 ), f"{string!r} is missing a starting quote character (' or \")." 122 assert string[-1] in ( 123 "'", 124 '"', 125 ), f"{string!r} is missing an ending quote character (' or \")." 126 assert set(string[:quote_idx]).issubset( 127 set(STRING_PREFIX_CHARS) 128 ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}." 129 130 131 def normalize_string_prefix(s: str, remove_u_prefix: bool = False) -> str: 132 """Make all string prefixes lowercase. 133 134 If remove_u_prefix is given, also removes any u prefix from the string. 135 """ 136 match = re.match(r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", s, re.DOTALL) 137 assert match is not None, f"failed to match string {s!r}" 138 orig_prefix = match.group(1) 139 new_prefix = orig_prefix.replace("F", "f").replace("B", "b").replace("U", "u") 140 if remove_u_prefix: 141 new_prefix = new_prefix.replace("u", "") 142 return f"{new_prefix}{match.group(2)}" 143 144 145 def normalize_string_quotes(s: str) -> str: 146 """Prefer double quotes but only if it doesn't cause more escaping. 147 148 Adds or removes backslashes as appropriate. Doesn't parse and fix 149 strings nested in f-strings. 150 """ 151 value = s.lstrip(STRING_PREFIX_CHARS) 152 if value[:3] == '"""': 153 return s 154 155 elif value[:3] == "'''": 156 orig_quote = "'''" 157 new_quote = '"""' 158 elif value[0] == '"': 159 orig_quote = '"' 160 new_quote = "'" 161 else: 162 orig_quote = "'" 163 new_quote = '"' 164 first_quote_pos = s.find(orig_quote) 165 if first_quote_pos == -1: 166 return s # There's an internal error 167 168 prefix = s[:first_quote_pos] 169 unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}") 170 escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") 171 escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") 172 body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)] 173 if "r" in prefix.casefold(): 174 if unescaped_new_quote.search(body): 175 # There's at least one unescaped new_quote in this raw string 176 # so converting is impossible 177 return s 178 179 # Do not introduce or remove backslashes in raw strings 180 new_body = body 181 else: 182 # remove unnecessary escapes 183 new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) 184 if body != new_body: 185 # Consider the string without unnecessary escapes as the original 186 body = new_body 187 s = f"{prefix}{orig_quote}{body}{orig_quote}" 188 new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) 189 new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) 190 if "f" in prefix.casefold(): 191 matches = re.findall( 192 r""" 193 (?:[^{]|^)\{ # start of the string or a non-{ followed by a single { 194 ([^{].*?) # contents of the brackets except if begins with {{ 195 \}(?:[^}]|$) # A } followed by end of the string or a non-} 196 """, 197 new_body, 198 re.VERBOSE, 199 ) 200 for m in matches: 201 if "\\" in str(m): 202 # Do not introduce backslashes in interpolated expressions 203 return s 204 205 if new_quote == '"""' and new_body[-1:] == '"': 206 # edge case: 207 new_body = new_body[:-1] + '\\"' 208 orig_escape_count = body.count("\\") 209 new_escape_count = new_body.count("\\") 210 if new_escape_count > orig_escape_count: 211 return s # Do not introduce more escaping 212 213 if new_escape_count == orig_escape_count and orig_quote == '"': 214 return s # Prefer double quotes 215 216 return f"{prefix}{new_quote}{new_body}{new_quote}" 217 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/black/strings.py b/src/black/strings.py --- a/src/black/strings.py +++ b/src/black/strings.py @@ -190,9 +190,9 @@ if "f" in prefix.casefold(): matches = re.findall( r""" - (?:[^{]|^)\{ # start of the string or a non-{ followed by a single { + (?:(?<!\{)|^)\{ # start of the string or a non-{ followed by a single { ([^{].*?) # contents of the brackets except if begins with {{ - \}(?:[^}]|$) # A } followed by end of the string or a non-} + \}(?:(?!\})|$) # A } followed by end of the string or a non-} """, new_body, re.VERBOSE,
{"golden_diff": "diff --git a/src/black/strings.py b/src/black/strings.py\n--- a/src/black/strings.py\n+++ b/src/black/strings.py\n@@ -190,9 +190,9 @@\n if \"f\" in prefix.casefold():\n matches = re.findall(\n r\"\"\"\n- (?:[^{]|^)\\{ # start of the string or a non-{ followed by a single {\n+ (?:(?<!\\{)|^)\\{ # start of the string or a non-{ followed by a single {\n ([^{].*?) # contents of the brackets except if begins with {{\n- \\}(?:[^}]|$) # A } followed by end of the string or a non-}\n+ \\}(?:(?!\\})|$) # A } followed by end of the string or a non-}\n \"\"\",\n new_body,\n re.VERBOSE,\n", "issue": "Failing to parse single-quote f-string with backslash inside it\n**Describe the bug**\r\n\r\nThe formatter is failing to parse single quote f-string as it cannot include a backslash inside it.\r\n\r\n**To Reproduce**\r\n\r\n1. Take this test.py containing:\r\n```python\r\nb = \"example1\"\r\nsts = \"example2\"\r\nf\"\\\"{b}\\\"{' ' * (long-len(b)+1)}: \\\"{sts}\\\",\\n\"\r\n```\r\n2. Run black without arguments.\r\n3. See error -->\r\n```\r\n File \"/var/task/black/__init__.py\", line 986, in assert_equivalent\r\n dst_ast = parse_ast(dst)\r\n File \"/var/task/black/parsing.py\", line 125, in parse_ast\r\n raise SyntaxError(\r\nf'\"{b}\"{\\' \\' * (long-len(b)+1)}: \"{sts}\",\\n'\r\n```\r\n\r\n**Expected behavior**\r\nThe formatter is expected to throw the said error and fail to format the said file.\r\n\r\n**Environment (please complete the following information):**\r\n\r\n- Version: [main]\r\n- OS and Python version: [Linux/Python 3.9.5]\r\n\r\n**Does this bug also happen on main?**\r\nYes\r\n\n", "before_files": [{"content": "\"\"\"\nSimple formatting on strings. Further string formatting code is in trans.py.\n\"\"\"\n\nimport regex as re\nimport sys\nfrom typing import List, Pattern\n\n\nSTRING_PREFIX_CHARS = \"furbFURB\" # All possible string prefix characters.\n\n\ndef sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:\n \"\"\"Replace `regex` with `replacement` twice on `original`.\n\n This is used by string normalization to perform replaces on\n overlapping matches.\n \"\"\"\n return regex.sub(replacement, regex.sub(replacement, original))\n\n\ndef has_triple_quotes(string: str) -> bool:\n \"\"\"\n Returns:\n True iff @string starts with three quotation characters.\n \"\"\"\n raw_string = string.lstrip(STRING_PREFIX_CHARS)\n return raw_string[:3] in {'\"\"\"', \"'''\"}\n\n\ndef lines_with_leading_tabs_expanded(s: str) -> List[str]:\n \"\"\"\n Splits string into lines and expands only leading tabs (following the normal\n Python rules)\n \"\"\"\n lines = []\n for line in s.splitlines():\n # Find the index of the first non-whitespace character after a string of\n # whitespace that includes at least one tab\n match = re.match(r\"\\s*\\t+\\s*(\\S)\", line)\n if match:\n first_non_whitespace_idx = match.start(1)\n\n lines.append(\n line[:first_non_whitespace_idx].expandtabs()\n + line[first_non_whitespace_idx:]\n )\n else:\n lines.append(line)\n return lines\n\n\ndef fix_docstring(docstring: str, prefix: str) -> str:\n # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation\n if not docstring:\n return \"\"\n lines = lines_with_leading_tabs_expanded(docstring)\n # Determine minimum indentation (first line doesn't count):\n indent = sys.maxsize\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < sys.maxsize:\n last_line_idx = len(lines) - 2\n for i, line in enumerate(lines[1:]):\n stripped_line = line[indent:].rstrip()\n if stripped_line or i == last_line_idx:\n trimmed.append(prefix + stripped_line)\n else:\n trimmed.append(\"\")\n return \"\\n\".join(trimmed)\n\n\ndef get_string_prefix(string: str) -> str:\n \"\"\"\n Pre-conditions:\n * assert_is_leaf_string(@string)\n\n Returns:\n @string's prefix (e.g. '', 'r', 'f', or 'rf').\n \"\"\"\n assert_is_leaf_string(string)\n\n prefix = \"\"\n prefix_idx = 0\n while string[prefix_idx] in STRING_PREFIX_CHARS:\n prefix += string[prefix_idx]\n prefix_idx += 1\n\n return prefix\n\n\ndef assert_is_leaf_string(string: str) -> None:\n \"\"\"\n Checks the pre-condition that @string has the format that you would expect\n of `leaf.value` where `leaf` is some Leaf such that `leaf.type ==\n token.STRING`. A more precise description of the pre-conditions that are\n checked are listed below.\n\n Pre-conditions:\n * @string starts with either ', \", <prefix>', or <prefix>\" where\n `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.\n * @string ends with a quote character (' or \").\n\n Raises:\n AssertionError(...) if the pre-conditions listed above are not\n satisfied.\n \"\"\"\n dquote_idx = string.find('\"')\n squote_idx = string.find(\"'\")\n if -1 in [dquote_idx, squote_idx]:\n quote_idx = max(dquote_idx, squote_idx)\n else:\n quote_idx = min(squote_idx, dquote_idx)\n\n assert (\n 0 <= quote_idx < len(string) - 1\n ), f\"{string!r} is missing a starting quote character (' or \\\").\"\n assert string[-1] in (\n \"'\",\n '\"',\n ), f\"{string!r} is missing an ending quote character (' or \\\").\"\n assert set(string[:quote_idx]).issubset(\n set(STRING_PREFIX_CHARS)\n ), f\"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}.\"\n\n\ndef normalize_string_prefix(s: str, remove_u_prefix: bool = False) -> str:\n \"\"\"Make all string prefixes lowercase.\n\n If remove_u_prefix is given, also removes any u prefix from the string.\n \"\"\"\n match = re.match(r\"^([\" + STRING_PREFIX_CHARS + r\"]*)(.*)$\", s, re.DOTALL)\n assert match is not None, f\"failed to match string {s!r}\"\n orig_prefix = match.group(1)\n new_prefix = orig_prefix.replace(\"F\", \"f\").replace(\"B\", \"b\").replace(\"U\", \"u\")\n if remove_u_prefix:\n new_prefix = new_prefix.replace(\"u\", \"\")\n return f\"{new_prefix}{match.group(2)}\"\n\n\ndef normalize_string_quotes(s: str) -> str:\n \"\"\"Prefer double quotes but only if it doesn't cause more escaping.\n\n Adds or removes backslashes as appropriate. Doesn't parse and fix\n strings nested in f-strings.\n \"\"\"\n value = s.lstrip(STRING_PREFIX_CHARS)\n if value[:3] == '\"\"\"':\n return s\n\n elif value[:3] == \"'''\":\n orig_quote = \"'''\"\n new_quote = '\"\"\"'\n elif value[0] == '\"':\n orig_quote = '\"'\n new_quote = \"'\"\n else:\n orig_quote = \"'\"\n new_quote = '\"'\n first_quote_pos = s.find(orig_quote)\n if first_quote_pos == -1:\n return s # There's an internal error\n\n prefix = s[:first_quote_pos]\n unescaped_new_quote = re.compile(rf\"(([^\\\\]|^)(\\\\\\\\)*){new_quote}\")\n escaped_new_quote = re.compile(rf\"([^\\\\]|^)\\\\((?:\\\\\\\\)*){new_quote}\")\n escaped_orig_quote = re.compile(rf\"([^\\\\]|^)\\\\((?:\\\\\\\\)*){orig_quote}\")\n body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)]\n if \"r\" in prefix.casefold():\n if unescaped_new_quote.search(body):\n # There's at least one unescaped new_quote in this raw string\n # so converting is impossible\n return s\n\n # Do not introduce or remove backslashes in raw strings\n new_body = body\n else:\n # remove unnecessary escapes\n new_body = sub_twice(escaped_new_quote, rf\"\\1\\2{new_quote}\", body)\n if body != new_body:\n # Consider the string without unnecessary escapes as the original\n body = new_body\n s = f\"{prefix}{orig_quote}{body}{orig_quote}\"\n new_body = sub_twice(escaped_orig_quote, rf\"\\1\\2{orig_quote}\", new_body)\n new_body = sub_twice(unescaped_new_quote, rf\"\\1\\\\{new_quote}\", new_body)\n if \"f\" in prefix.casefold():\n matches = re.findall(\n r\"\"\"\n (?:[^{]|^)\\{ # start of the string or a non-{ followed by a single {\n ([^{].*?) # contents of the brackets except if begins with {{\n \\}(?:[^}]|$) # A } followed by end of the string or a non-}\n \"\"\",\n new_body,\n re.VERBOSE,\n )\n for m in matches:\n if \"\\\\\" in str(m):\n # Do not introduce backslashes in interpolated expressions\n return s\n\n if new_quote == '\"\"\"' and new_body[-1:] == '\"':\n # edge case:\n new_body = new_body[:-1] + '\\\\\"'\n orig_escape_count = body.count(\"\\\\\")\n new_escape_count = new_body.count(\"\\\\\")\n if new_escape_count > orig_escape_count:\n return s # Do not introduce more escaping\n\n if new_escape_count == orig_escape_count and orig_quote == '\"':\n return s # Prefer double quotes\n\n return f\"{prefix}{new_quote}{new_body}{new_quote}\"\n", "path": "src/black/strings.py"}], "after_files": [{"content": "\"\"\"\nSimple formatting on strings. Further string formatting code is in trans.py.\n\"\"\"\n\nimport regex as re\nimport sys\nfrom typing import List, Pattern\n\n\nSTRING_PREFIX_CHARS = \"furbFURB\" # All possible string prefix characters.\n\n\ndef sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:\n \"\"\"Replace `regex` with `replacement` twice on `original`.\n\n This is used by string normalization to perform replaces on\n overlapping matches.\n \"\"\"\n return regex.sub(replacement, regex.sub(replacement, original))\n\n\ndef has_triple_quotes(string: str) -> bool:\n \"\"\"\n Returns:\n True iff @string starts with three quotation characters.\n \"\"\"\n raw_string = string.lstrip(STRING_PREFIX_CHARS)\n return raw_string[:3] in {'\"\"\"', \"'''\"}\n\n\ndef lines_with_leading_tabs_expanded(s: str) -> List[str]:\n \"\"\"\n Splits string into lines and expands only leading tabs (following the normal\n Python rules)\n \"\"\"\n lines = []\n for line in s.splitlines():\n # Find the index of the first non-whitespace character after a string of\n # whitespace that includes at least one tab\n match = re.match(r\"\\s*\\t+\\s*(\\S)\", line)\n if match:\n first_non_whitespace_idx = match.start(1)\n\n lines.append(\n line[:first_non_whitespace_idx].expandtabs()\n + line[first_non_whitespace_idx:]\n )\n else:\n lines.append(line)\n return lines\n\n\ndef fix_docstring(docstring: str, prefix: str) -> str:\n # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation\n if not docstring:\n return \"\"\n lines = lines_with_leading_tabs_expanded(docstring)\n # Determine minimum indentation (first line doesn't count):\n indent = sys.maxsize\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < sys.maxsize:\n last_line_idx = len(lines) - 2\n for i, line in enumerate(lines[1:]):\n stripped_line = line[indent:].rstrip()\n if stripped_line or i == last_line_idx:\n trimmed.append(prefix + stripped_line)\n else:\n trimmed.append(\"\")\n return \"\\n\".join(trimmed)\n\n\ndef get_string_prefix(string: str) -> str:\n \"\"\"\n Pre-conditions:\n * assert_is_leaf_string(@string)\n\n Returns:\n @string's prefix (e.g. '', 'r', 'f', or 'rf').\n \"\"\"\n assert_is_leaf_string(string)\n\n prefix = \"\"\n prefix_idx = 0\n while string[prefix_idx] in STRING_PREFIX_CHARS:\n prefix += string[prefix_idx]\n prefix_idx += 1\n\n return prefix\n\n\ndef assert_is_leaf_string(string: str) -> None:\n \"\"\"\n Checks the pre-condition that @string has the format that you would expect\n of `leaf.value` where `leaf` is some Leaf such that `leaf.type ==\n token.STRING`. A more precise description of the pre-conditions that are\n checked are listed below.\n\n Pre-conditions:\n * @string starts with either ', \", <prefix>', or <prefix>\" where\n `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.\n * @string ends with a quote character (' or \").\n\n Raises:\n AssertionError(...) if the pre-conditions listed above are not\n satisfied.\n \"\"\"\n dquote_idx = string.find('\"')\n squote_idx = string.find(\"'\")\n if -1 in [dquote_idx, squote_idx]:\n quote_idx = max(dquote_idx, squote_idx)\n else:\n quote_idx = min(squote_idx, dquote_idx)\n\n assert (\n 0 <= quote_idx < len(string) - 1\n ), f\"{string!r} is missing a starting quote character (' or \\\").\"\n assert string[-1] in (\n \"'\",\n '\"',\n ), f\"{string!r} is missing an ending quote character (' or \\\").\"\n assert set(string[:quote_idx]).issubset(\n set(STRING_PREFIX_CHARS)\n ), f\"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}.\"\n\n\ndef normalize_string_prefix(s: str, remove_u_prefix: bool = False) -> str:\n \"\"\"Make all string prefixes lowercase.\n\n If remove_u_prefix is given, also removes any u prefix from the string.\n \"\"\"\n match = re.match(r\"^([\" + STRING_PREFIX_CHARS + r\"]*)(.*)$\", s, re.DOTALL)\n assert match is not None, f\"failed to match string {s!r}\"\n orig_prefix = match.group(1)\n new_prefix = orig_prefix.replace(\"F\", \"f\").replace(\"B\", \"b\").replace(\"U\", \"u\")\n if remove_u_prefix:\n new_prefix = new_prefix.replace(\"u\", \"\")\n return f\"{new_prefix}{match.group(2)}\"\n\n\ndef normalize_string_quotes(s: str) -> str:\n \"\"\"Prefer double quotes but only if it doesn't cause more escaping.\n\n Adds or removes backslashes as appropriate. Doesn't parse and fix\n strings nested in f-strings.\n \"\"\"\n value = s.lstrip(STRING_PREFIX_CHARS)\n if value[:3] == '\"\"\"':\n return s\n\n elif value[:3] == \"'''\":\n orig_quote = \"'''\"\n new_quote = '\"\"\"'\n elif value[0] == '\"':\n orig_quote = '\"'\n new_quote = \"'\"\n else:\n orig_quote = \"'\"\n new_quote = '\"'\n first_quote_pos = s.find(orig_quote)\n if first_quote_pos == -1:\n return s # There's an internal error\n\n prefix = s[:first_quote_pos]\n unescaped_new_quote = re.compile(rf\"(([^\\\\]|^)(\\\\\\\\)*){new_quote}\")\n escaped_new_quote = re.compile(rf\"([^\\\\]|^)\\\\((?:\\\\\\\\)*){new_quote}\")\n escaped_orig_quote = re.compile(rf\"([^\\\\]|^)\\\\((?:\\\\\\\\)*){orig_quote}\")\n body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)]\n if \"r\" in prefix.casefold():\n if unescaped_new_quote.search(body):\n # There's at least one unescaped new_quote in this raw string\n # so converting is impossible\n return s\n\n # Do not introduce or remove backslashes in raw strings\n new_body = body\n else:\n # remove unnecessary escapes\n new_body = sub_twice(escaped_new_quote, rf\"\\1\\2{new_quote}\", body)\n if body != new_body:\n # Consider the string without unnecessary escapes as the original\n body = new_body\n s = f\"{prefix}{orig_quote}{body}{orig_quote}\"\n new_body = sub_twice(escaped_orig_quote, rf\"\\1\\2{orig_quote}\", new_body)\n new_body = sub_twice(unescaped_new_quote, rf\"\\1\\\\{new_quote}\", new_body)\n if \"f\" in prefix.casefold():\n matches = re.findall(\n r\"\"\"\n (?:(?<!\\{)|^)\\{ # start of the string or a non-{ followed by a single {\n ([^{].*?) # contents of the brackets except if begins with {{\n \\}(?:(?!\\})|$) # A } followed by end of the string or a non-}\n \"\"\",\n new_body,\n re.VERBOSE,\n )\n for m in matches:\n if \"\\\\\" in str(m):\n # Do not introduce backslashes in interpolated expressions\n return s\n\n if new_quote == '\"\"\"' and new_body[-1:] == '\"':\n # edge case:\n new_body = new_body[:-1] + '\\\\\"'\n orig_escape_count = body.count(\"\\\\\")\n new_escape_count = new_body.count(\"\\\\\")\n if new_escape_count > orig_escape_count:\n return s # Do not introduce more escaping\n\n if new_escape_count == orig_escape_count and orig_quote == '\"':\n return s # Prefer double quotes\n\n return f\"{prefix}{new_quote}{new_body}{new_quote}\"\n", "path": "src/black/strings.py"}]}
2,898
196
gh_patches_debug_34990
rasdani/github-patches
git_diff
streamlink__streamlink-838
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- azubu.tv: remove plugin http://www.azubu.tv/ `Soon a new future for Azubu and Hitbox, together as a single force in the world of eSports and competitive gaming, will be revealed. We will be launching a new brand, website, and mobile apps. There you will find the best offerings from both Azubu and Hitbox in one new place.` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/azubutv.py` Content: ``` 1 #!/usr/bin/env python 2 import json 3 import requests 4 5 import re 6 7 from io import BytesIO 8 from time import sleep 9 10 from streamlink.exceptions import PluginError 11 12 from streamlink.plugin import Plugin 13 from streamlink.plugin.api import http, validate 14 from streamlink.stream import HLSStream 15 16 17 HTTP_HEADERS = { 18 "User-Agent": ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 " 19 "(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36"), 20 'Accept': 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV' 21 22 } 23 24 _url_re = re.compile(r"http(s)?://(\w+\.)?azubu.tv/(?P<domain>\w+)") 25 26 PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)" 27 stream_video_url = "http://api.azubu.tv/public/channel/{}/player" 28 29 30 class AzubuTV(Plugin): 31 @classmethod 32 def can_handle_url(cls, url): 33 return _url_re.match(url) 34 35 @classmethod 36 def stream_weight(cls, stream): 37 if stream == "source": 38 weight = 1080 39 else: 40 weight, group = Plugin.stream_weight(stream) 41 42 return weight, "azubutv" 43 44 def _parse_params(self, params): 45 rval = {} 46 matches = re.findall(PARAMS_REGEX, params) 47 48 for key, value in matches: 49 try: 50 value = ast.literal_eval(value) 51 except Exception: 52 pass 53 54 rval[key] = value 55 56 return rval 57 58 def _get_stream_url(self, o): 59 60 match = _url_re.match(self.url) 61 channel = match.group('domain') 62 63 channel_info = requests.get(stream_video_url.format(channel)) 64 j = json.loads(channel_info.text) 65 66 if j["data"]["is_live"] is not True: 67 return "", False 68 else: 69 is_live = True 70 71 stream_url = 'https://edge.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:{0}' 72 73 r = requests.get(stream_url.format(j["data"]["stream_video"]["reference_id"]), headers=HTTP_HEADERS) 74 t = json.loads(r.text) 75 76 stream_url = t["sources"][0]["src"] 77 return stream_url, is_live 78 79 def _get_streams(self): 80 hls_url, is_live = self._get_stream_url(self) 81 82 if not is_live: 83 return 84 85 split = self.url.split(" ") 86 params = (" ").join(split[1:]) 87 params = self._parse_params(params) 88 89 try: 90 streams = HLSStream.parse_variant_playlist(self.session, hls_url, **params) 91 except IOError as err: 92 raise PluginError(err) 93 94 return streams 95 96 97 __plugin__ = AzubuTV 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/azubutv.py b/src/streamlink/plugins/azubutv.py deleted file mode 100644 --- a/src/streamlink/plugins/azubutv.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python -import json -import requests - -import re - -from io import BytesIO -from time import sleep - -from streamlink.exceptions import PluginError - -from streamlink.plugin import Plugin -from streamlink.plugin.api import http, validate -from streamlink.stream import HLSStream - - -HTTP_HEADERS = { - "User-Agent": ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 " - "(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36"), - 'Accept': 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV' - -} - -_url_re = re.compile(r"http(s)?://(\w+\.)?azubu.tv/(?P<domain>\w+)") - -PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)" -stream_video_url = "http://api.azubu.tv/public/channel/{}/player" - - -class AzubuTV(Plugin): - @classmethod - def can_handle_url(cls, url): - return _url_re.match(url) - - @classmethod - def stream_weight(cls, stream): - if stream == "source": - weight = 1080 - else: - weight, group = Plugin.stream_weight(stream) - - return weight, "azubutv" - - def _parse_params(self, params): - rval = {} - matches = re.findall(PARAMS_REGEX, params) - - for key, value in matches: - try: - value = ast.literal_eval(value) - except Exception: - pass - - rval[key] = value - - return rval - - def _get_stream_url(self, o): - - match = _url_re.match(self.url) - channel = match.group('domain') - - channel_info = requests.get(stream_video_url.format(channel)) - j = json.loads(channel_info.text) - - if j["data"]["is_live"] is not True: - return "", False - else: - is_live = True - - stream_url = 'https://edge.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:{0}' - - r = requests.get(stream_url.format(j["data"]["stream_video"]["reference_id"]), headers=HTTP_HEADERS) - t = json.loads(r.text) - - stream_url = t["sources"][0]["src"] - return stream_url, is_live - - def _get_streams(self): - hls_url, is_live = self._get_stream_url(self) - - if not is_live: - return - - split = self.url.split(" ") - params = (" ").join(split[1:]) - params = self._parse_params(params) - - try: - streams = HLSStream.parse_variant_playlist(self.session, hls_url, **params) - except IOError as err: - raise PluginError(err) - - return streams - - -__plugin__ = AzubuTV
{"golden_diff": "diff --git a/src/streamlink/plugins/azubutv.py b/src/streamlink/plugins/azubutv.py\ndeleted file mode 100644\n--- a/src/streamlink/plugins/azubutv.py\n+++ /dev/null\n@@ -1,97 +0,0 @@\n-#!/usr/bin/env python\n-import json\n-import requests\n-\n-import re\n-\n-from io import BytesIO\n-from time import sleep\n-\n-from streamlink.exceptions import PluginError\n-\n-from streamlink.plugin import Plugin\n-from streamlink.plugin.api import http, validate\n-from streamlink.stream import HLSStream\n-\n-\n-HTTP_HEADERS = {\n- \"User-Agent\": (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n- \"(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36\"),\n- 'Accept': 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV'\n-\n-}\n-\n-_url_re = re.compile(r\"http(s)?://(\\w+\\.)?azubu.tv/(?P<domain>\\w+)\")\n-\n-PARAMS_REGEX = r\"(\\w+)=({.+?}|\\[.+?\\]|\\(.+?\\)|'(?:[^'\\\\]|\\\\')*'|\\\"(?:[^\\\"\\\\]|\\\\\\\")*\\\"|\\S+)\"\n-stream_video_url = \"http://api.azubu.tv/public/channel/{}/player\"\n-\n-\n-class AzubuTV(Plugin):\n- @classmethod\n- def can_handle_url(cls, url):\n- return _url_re.match(url)\n-\n- @classmethod\n- def stream_weight(cls, stream):\n- if stream == \"source\":\n- weight = 1080\n- else:\n- weight, group = Plugin.stream_weight(stream)\n-\n- return weight, \"azubutv\"\n-\n- def _parse_params(self, params):\n- rval = {}\n- matches = re.findall(PARAMS_REGEX, params)\n-\n- for key, value in matches:\n- try:\n- value = ast.literal_eval(value)\n- except Exception:\n- pass\n-\n- rval[key] = value\n-\n- return rval\n-\n- def _get_stream_url(self, o):\n-\n- match = _url_re.match(self.url)\n- channel = match.group('domain')\n-\n- channel_info = requests.get(stream_video_url.format(channel))\n- j = json.loads(channel_info.text)\n-\n- if j[\"data\"][\"is_live\"] is not True:\n- return \"\", False\n- else:\n- is_live = True\n-\n- stream_url = 'https://edge.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:{0}'\n-\n- r = requests.get(stream_url.format(j[\"data\"][\"stream_video\"][\"reference_id\"]), headers=HTTP_HEADERS)\n- t = json.loads(r.text)\n-\n- stream_url = t[\"sources\"][0][\"src\"]\n- return stream_url, is_live\n-\n- def _get_streams(self):\n- hls_url, is_live = self._get_stream_url(self)\n-\n- if not is_live:\n- return\n-\n- split = self.url.split(\" \")\n- params = (\" \").join(split[1:])\n- params = self._parse_params(params)\n-\n- try:\n- streams = HLSStream.parse_variant_playlist(self.session, hls_url, **params)\n- except IOError as err:\n- raise PluginError(err)\n-\n- return streams\n-\n-\n-__plugin__ = AzubuTV\n", "issue": "azubu.tv: remove plugin\nhttp://www.azubu.tv/\r\n`Soon a new future for Azubu and Hitbox, together as a single force in the world of eSports and competitive gaming, will be revealed. We will be launching a new brand, website, and mobile apps. There you will find the best offerings from both Azubu and Hitbox in one new place.`\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport json\nimport requests\n\nimport re\n\nfrom io import BytesIO\nfrom time import sleep\n\nfrom streamlink.exceptions import PluginError\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.stream import HLSStream\n\n\nHTTP_HEADERS = {\n \"User-Agent\": (\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36\"),\n 'Accept': 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV'\n\n}\n\n_url_re = re.compile(r\"http(s)?://(\\w+\\.)?azubu.tv/(?P<domain>\\w+)\")\n\nPARAMS_REGEX = r\"(\\w+)=({.+?}|\\[.+?\\]|\\(.+?\\)|'(?:[^'\\\\]|\\\\')*'|\\\"(?:[^\\\"\\\\]|\\\\\\\")*\\\"|\\S+)\"\nstream_video_url = \"http://api.azubu.tv/public/channel/{}/player\"\n\n\nclass AzubuTV(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @classmethod\n def stream_weight(cls, stream):\n if stream == \"source\":\n weight = 1080\n else:\n weight, group = Plugin.stream_weight(stream)\n\n return weight, \"azubutv\"\n\n def _parse_params(self, params):\n rval = {}\n matches = re.findall(PARAMS_REGEX, params)\n\n for key, value in matches:\n try:\n value = ast.literal_eval(value)\n except Exception:\n pass\n\n rval[key] = value\n\n return rval\n\n def _get_stream_url(self, o):\n\n match = _url_re.match(self.url)\n channel = match.group('domain')\n\n channel_info = requests.get(stream_video_url.format(channel))\n j = json.loads(channel_info.text)\n\n if j[\"data\"][\"is_live\"] is not True:\n return \"\", False\n else:\n is_live = True\n\n stream_url = 'https://edge.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:{0}'\n\n r = requests.get(stream_url.format(j[\"data\"][\"stream_video\"][\"reference_id\"]), headers=HTTP_HEADERS)\n t = json.loads(r.text)\n\n stream_url = t[\"sources\"][0][\"src\"]\n return stream_url, is_live\n\n def _get_streams(self):\n hls_url, is_live = self._get_stream_url(self)\n\n if not is_live:\n return\n\n split = self.url.split(\" \")\n params = (\" \").join(split[1:])\n params = self._parse_params(params)\n\n try:\n streams = HLSStream.parse_variant_playlist(self.session, hls_url, **params)\n except IOError as err:\n raise PluginError(err)\n\n return streams\n\n\n__plugin__ = AzubuTV\n", "path": "src/streamlink/plugins/azubutv.py"}], "after_files": [{"content": null, "path": "src/streamlink/plugins/azubutv.py"}]}
1,314
898
gh_patches_debug_5927
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-3504
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Old version banner uses a version number format present nowhere else ## Details - Project URL: https://github.com/dfhack/dfhack/ - Build URL: http://dfhack.readthedocs.io/en/v0.40.24-r5/ and http://dfhack.readthedocs.io/en/0.42.06-r1/ - Read the Docs username: lethosor ## Expected result The banner on documentation pages for old versions of our project should refer to the newest version using the same version number format present used elsewhere - in this case, "0.43.03-r1" ## Actual result The banner refers to "0.43.3-post1", which isn't used anywhere else, as far as I can tell. ## Additional Information It would be great if this banner could use the version numbers that our project uses instead of the ones with "post1" at the end. I realize the scheme we use is a bit unconventional, but it's tied to another project which uses the "0.43.03" format that we don't really have control over. I made a tentative patch at https://github.com/lethosor/readthedocs.org/commit/7d84130471885905a3f663324af602b7be1f7f64, although I haven't tested it enough to be confident that it'll work for other projects. In particular, I'm not sure in what situations (if any) `slugs` could contain multiple items. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/restapi/views/footer_views.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """Endpoint to generate footer HTML.""" 3 4 from __future__ import ( 5 absolute_import, division, print_function, unicode_literals) 6 7 import six 8 from django.conf import settings 9 from django.shortcuts import get_object_or_404 10 from django.template import loader as template_loader 11 from rest_framework import decorators, permissions 12 from rest_framework.renderers import JSONRenderer 13 from rest_framework.response import Response 14 from rest_framework_jsonp.renderers import JSONPRenderer 15 16 from readthedocs.builds.constants import LATEST, TAG 17 from readthedocs.builds.models import Version 18 from readthedocs.projects.models import Project 19 from readthedocs.projects.version_handling import ( 20 highest_version, parse_version_failsafe) 21 from readthedocs.restapi.signals import footer_response 22 23 24 def get_version_compare_data(project, base_version=None): 25 """ 26 Retrieve metadata about the highest version available for this project. 27 28 :param base_version: We assert whether or not the base_version is also the 29 highest version in the resulting "is_highest" value. 30 """ 31 versions_qs = project.versions.public().filter(active=True) 32 33 # Take preferences over tags only if the project has at least one tag 34 if versions_qs.filter(type=TAG).exists(): 35 versions_qs = versions_qs.filter(type=TAG) 36 37 highest_version_obj, highest_version_comparable = highest_version( 38 versions_qs) 39 ret_val = { 40 'project': six.text_type(highest_version_obj), 41 'version': six.text_type(highest_version_comparable), 42 'is_highest': True, 43 } 44 if highest_version_obj: 45 ret_val['url'] = highest_version_obj.get_absolute_url() 46 ret_val['slug'] = (highest_version_obj.slug,) 47 if base_version and base_version.slug != LATEST: 48 try: 49 base_version_comparable = parse_version_failsafe( 50 base_version.verbose_name) 51 if base_version_comparable: 52 # This is only place where is_highest can get set. All error 53 # cases will be set to True, for non- standard versions. 54 ret_val['is_highest'] = ( 55 base_version_comparable >= highest_version_comparable) 56 else: 57 ret_val['is_highest'] = True 58 except (Version.DoesNotExist, TypeError): 59 ret_val['is_highest'] = True 60 return ret_val 61 62 63 @decorators.api_view(['GET']) 64 @decorators.permission_classes((permissions.AllowAny,)) 65 @decorators.renderer_classes((JSONRenderer, JSONPRenderer)) 66 def footer_html(request): 67 """Render and return footer markup.""" 68 # TODO refactor this function 69 # pylint: disable=too-many-locals 70 project_slug = request.GET.get('project', None) 71 version_slug = request.GET.get('version', None) 72 page_slug = request.GET.get('page', None) 73 theme = request.GET.get('theme', False) 74 docroot = request.GET.get('docroot', '') 75 subproject = request.GET.get('subproject', False) 76 source_suffix = request.GET.get('source_suffix', '.rst') 77 78 # Hack in a fix for missing version slug deploy that went out a while back 79 if version_slug == '': 80 version_slug = LATEST 81 82 new_theme = (theme == 'sphinx_rtd_theme') 83 using_theme = (theme == 'default') 84 project = get_object_or_404(Project, slug=project_slug) 85 version = get_object_or_404( 86 Version.objects.public( 87 request.user, project=project, only_active=False), 88 slug__iexact=version_slug) 89 main_project = project.main_language_project or project 90 91 if page_slug and page_slug != 'index': 92 if (main_project.documentation_type == 'sphinx_htmldir' or 93 main_project.documentation_type == 'mkdocs'): 94 path = page_slug + '/' 95 elif main_project.documentation_type == 'sphinx_singlehtml': 96 path = 'index.html#document-' + page_slug 97 else: 98 path = page_slug + '.html' 99 else: 100 path = '' 101 102 version_compare_data = get_version_compare_data(project, version) 103 104 context = { 105 'project': project, 106 'version': version, 107 'path': path, 108 'downloads': version.get_downloads(pretty=True), 109 'current_version': version.verbose_name, 110 'versions': project.ordered_active_versions(user=request.user), 111 'main_project': main_project, 112 'translations': main_project.translations.all(), 113 'current_language': project.language, 114 'using_theme': using_theme, 115 'new_theme': new_theme, 116 'settings': settings, 117 'subproject': subproject, 118 'github_edit_url': version.get_github_url( 119 docroot, 120 page_slug, 121 source_suffix, 122 'edit', 123 ), 124 'github_view_url': version.get_github_url( 125 docroot, 126 page_slug, 127 source_suffix, 128 'view', 129 ), 130 'gitlab_edit_url': version.get_gitlab_url( 131 docroot, 132 page_slug, 133 source_suffix, 134 'edit', 135 ), 136 'gitlab_view_url': version.get_gitlab_url( 137 docroot, 138 page_slug, 139 source_suffix, 140 'view', 141 ), 142 'bitbucket_url': version.get_bitbucket_url( 143 docroot, 144 page_slug, 145 source_suffix, 146 ), 147 'theme': theme, 148 } 149 150 html = template_loader.get_template('restapi/footer.html').render( 151 context, 152 request, 153 ) 154 resp_data = { 155 'html': html, 156 'version_active': version.active, 157 'version_compare': version_compare_data, 158 'version_supported': version.supported, 159 } 160 161 # Allow folks to hook onto the footer response for various information 162 # collection, or to modify the resp_data. 163 footer_response.send( 164 sender=None, 165 request=request, 166 context=context, 167 resp_data=resp_data, 168 ) 169 170 return Response(resp_data) 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/restapi/views/footer_views.py b/readthedocs/restapi/views/footer_views.py --- a/readthedocs/restapi/views/footer_views.py +++ b/readthedocs/restapi/views/footer_views.py @@ -43,7 +43,7 @@ } if highest_version_obj: ret_val['url'] = highest_version_obj.get_absolute_url() - ret_val['slug'] = (highest_version_obj.slug,) + ret_val['slug'] = highest_version_obj.slug if base_version and base_version.slug != LATEST: try: base_version_comparable = parse_version_failsafe(
{"golden_diff": "diff --git a/readthedocs/restapi/views/footer_views.py b/readthedocs/restapi/views/footer_views.py\n--- a/readthedocs/restapi/views/footer_views.py\n+++ b/readthedocs/restapi/views/footer_views.py\n@@ -43,7 +43,7 @@\n }\n if highest_version_obj:\n ret_val['url'] = highest_version_obj.get_absolute_url()\n- ret_val['slug'] = (highest_version_obj.slug,)\n+ ret_val['slug'] = highest_version_obj.slug\n if base_version and base_version.slug != LATEST:\n try:\n base_version_comparable = parse_version_failsafe(\n", "issue": "Old version banner uses a version number format present nowhere else\n## Details\n- Project URL: https://github.com/dfhack/dfhack/\n- Build URL: http://dfhack.readthedocs.io/en/v0.40.24-r5/ and http://dfhack.readthedocs.io/en/0.42.06-r1/\n- Read the Docs username: lethosor\n## Expected result\n\nThe banner on documentation pages for old versions of our project should refer to the newest version using the same version number format present used elsewhere - in this case, \"0.43.03-r1\"\n## Actual result\n\nThe banner refers to \"0.43.3-post1\", which isn't used anywhere else, as far as I can tell.\n## Additional Information\n\nIt would be great if this banner could use the version numbers that our project uses instead of the ones with \"post1\" at the end. I realize the scheme we use is a bit unconventional, but it's tied to another project which uses the \"0.43.03\" format that we don't really have control over.\n\nI made a tentative patch at https://github.com/lethosor/readthedocs.org/commit/7d84130471885905a3f663324af602b7be1f7f64, although I haven't tested it enough to be confident that it'll work for other projects. In particular, I'm not sure in what situations (if any) `slugs` could contain multiple items.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Endpoint to generate footer HTML.\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport six\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.template import loader as template_loader\nfrom rest_framework import decorators, permissions\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework_jsonp.renderers import JSONPRenderer\n\nfrom readthedocs.builds.constants import LATEST, TAG\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import Project\nfrom readthedocs.projects.version_handling import (\n highest_version, parse_version_failsafe)\nfrom readthedocs.restapi.signals import footer_response\n\n\ndef get_version_compare_data(project, base_version=None):\n \"\"\"\n Retrieve metadata about the highest version available for this project.\n\n :param base_version: We assert whether or not the base_version is also the\n highest version in the resulting \"is_highest\" value.\n \"\"\"\n versions_qs = project.versions.public().filter(active=True)\n\n # Take preferences over tags only if the project has at least one tag\n if versions_qs.filter(type=TAG).exists():\n versions_qs = versions_qs.filter(type=TAG)\n\n highest_version_obj, highest_version_comparable = highest_version(\n versions_qs)\n ret_val = {\n 'project': six.text_type(highest_version_obj),\n 'version': six.text_type(highest_version_comparable),\n 'is_highest': True,\n }\n if highest_version_obj:\n ret_val['url'] = highest_version_obj.get_absolute_url()\n ret_val['slug'] = (highest_version_obj.slug,)\n if base_version and base_version.slug != LATEST:\n try:\n base_version_comparable = parse_version_failsafe(\n base_version.verbose_name)\n if base_version_comparable:\n # This is only place where is_highest can get set. All error\n # cases will be set to True, for non- standard versions.\n ret_val['is_highest'] = (\n base_version_comparable >= highest_version_comparable)\n else:\n ret_val['is_highest'] = True\n except (Version.DoesNotExist, TypeError):\n ret_val['is_highest'] = True\n return ret_val\n\n\n@decorators.api_view(['GET'])\n@decorators.permission_classes((permissions.AllowAny,))\n@decorators.renderer_classes((JSONRenderer, JSONPRenderer))\ndef footer_html(request):\n \"\"\"Render and return footer markup.\"\"\"\n # TODO refactor this function\n # pylint: disable=too-many-locals\n project_slug = request.GET.get('project', None)\n version_slug = request.GET.get('version', None)\n page_slug = request.GET.get('page', None)\n theme = request.GET.get('theme', False)\n docroot = request.GET.get('docroot', '')\n subproject = request.GET.get('subproject', False)\n source_suffix = request.GET.get('source_suffix', '.rst')\n\n # Hack in a fix for missing version slug deploy that went out a while back\n if version_slug == '':\n version_slug = LATEST\n\n new_theme = (theme == 'sphinx_rtd_theme')\n using_theme = (theme == 'default')\n project = get_object_or_404(Project, slug=project_slug)\n version = get_object_or_404(\n Version.objects.public(\n request.user, project=project, only_active=False),\n slug__iexact=version_slug)\n main_project = project.main_language_project or project\n\n if page_slug and page_slug != 'index':\n if (main_project.documentation_type == 'sphinx_htmldir' or\n main_project.documentation_type == 'mkdocs'):\n path = page_slug + '/'\n elif main_project.documentation_type == 'sphinx_singlehtml':\n path = 'index.html#document-' + page_slug\n else:\n path = page_slug + '.html'\n else:\n path = ''\n\n version_compare_data = get_version_compare_data(project, version)\n\n context = {\n 'project': project,\n 'version': version,\n 'path': path,\n 'downloads': version.get_downloads(pretty=True),\n 'current_version': version.verbose_name,\n 'versions': project.ordered_active_versions(user=request.user),\n 'main_project': main_project,\n 'translations': main_project.translations.all(),\n 'current_language': project.language,\n 'using_theme': using_theme,\n 'new_theme': new_theme,\n 'settings': settings,\n 'subproject': subproject,\n 'github_edit_url': version.get_github_url(\n docroot,\n page_slug,\n source_suffix,\n 'edit',\n ),\n 'github_view_url': version.get_github_url(\n docroot,\n page_slug,\n source_suffix,\n 'view',\n ),\n 'gitlab_edit_url': version.get_gitlab_url(\n docroot,\n page_slug,\n source_suffix,\n 'edit',\n ),\n 'gitlab_view_url': version.get_gitlab_url(\n docroot,\n page_slug,\n source_suffix,\n 'view',\n ),\n 'bitbucket_url': version.get_bitbucket_url(\n docroot,\n page_slug,\n source_suffix,\n ),\n 'theme': theme,\n }\n\n html = template_loader.get_template('restapi/footer.html').render(\n context,\n request,\n )\n resp_data = {\n 'html': html,\n 'version_active': version.active,\n 'version_compare': version_compare_data,\n 'version_supported': version.supported,\n }\n\n # Allow folks to hook onto the footer response for various information\n # collection, or to modify the resp_data.\n footer_response.send(\n sender=None,\n request=request,\n context=context,\n resp_data=resp_data,\n )\n\n return Response(resp_data)\n", "path": "readthedocs/restapi/views/footer_views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Endpoint to generate footer HTML.\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport six\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.template import loader as template_loader\nfrom rest_framework import decorators, permissions\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework_jsonp.renderers import JSONPRenderer\n\nfrom readthedocs.builds.constants import LATEST, TAG\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import Project\nfrom readthedocs.projects.version_handling import (\n highest_version, parse_version_failsafe)\nfrom readthedocs.restapi.signals import footer_response\n\n\ndef get_version_compare_data(project, base_version=None):\n \"\"\"\n Retrieve metadata about the highest version available for this project.\n\n :param base_version: We assert whether or not the base_version is also the\n highest version in the resulting \"is_highest\" value.\n \"\"\"\n versions_qs = project.versions.public().filter(active=True)\n\n # Take preferences over tags only if the project has at least one tag\n if versions_qs.filter(type=TAG).exists():\n versions_qs = versions_qs.filter(type=TAG)\n\n highest_version_obj, highest_version_comparable = highest_version(\n versions_qs)\n ret_val = {\n 'project': six.text_type(highest_version_obj),\n 'version': six.text_type(highest_version_comparable),\n 'is_highest': True,\n }\n if highest_version_obj:\n ret_val['url'] = highest_version_obj.get_absolute_url()\n ret_val['slug'] = highest_version_obj.slug\n if base_version and base_version.slug != LATEST:\n try:\n base_version_comparable = parse_version_failsafe(\n base_version.verbose_name)\n if base_version_comparable:\n # This is only place where is_highest can get set. All error\n # cases will be set to True, for non- standard versions.\n ret_val['is_highest'] = (\n base_version_comparable >= highest_version_comparable)\n else:\n ret_val['is_highest'] = True\n except (Version.DoesNotExist, TypeError):\n ret_val['is_highest'] = True\n return ret_val\n\n\n@decorators.api_view(['GET'])\n@decorators.permission_classes((permissions.AllowAny,))\n@decorators.renderer_classes((JSONRenderer, JSONPRenderer))\ndef footer_html(request):\n \"\"\"Render and return footer markup.\"\"\"\n # TODO refactor this function\n # pylint: disable=too-many-locals\n project_slug = request.GET.get('project', None)\n version_slug = request.GET.get('version', None)\n page_slug = request.GET.get('page', None)\n theme = request.GET.get('theme', False)\n docroot = request.GET.get('docroot', '')\n subproject = request.GET.get('subproject', False)\n source_suffix = request.GET.get('source_suffix', '.rst')\n\n # Hack in a fix for missing version slug deploy that went out a while back\n if version_slug == '':\n version_slug = LATEST\n\n new_theme = (theme == 'sphinx_rtd_theme')\n using_theme = (theme == 'default')\n project = get_object_or_404(Project, slug=project_slug)\n version = get_object_or_404(\n Version.objects.public(\n request.user, project=project, only_active=False),\n slug__iexact=version_slug)\n main_project = project.main_language_project or project\n\n if page_slug and page_slug != 'index':\n if (main_project.documentation_type == 'sphinx_htmldir' or\n main_project.documentation_type == 'mkdocs'):\n path = page_slug + '/'\n elif main_project.documentation_type == 'sphinx_singlehtml':\n path = 'index.html#document-' + page_slug\n else:\n path = page_slug + '.html'\n else:\n path = ''\n\n version_compare_data = get_version_compare_data(project, version)\n\n context = {\n 'project': project,\n 'version': version,\n 'path': path,\n 'downloads': version.get_downloads(pretty=True),\n 'current_version': version.verbose_name,\n 'versions': project.ordered_active_versions(user=request.user),\n 'main_project': main_project,\n 'translations': main_project.translations.all(),\n 'current_language': project.language,\n 'using_theme': using_theme,\n 'new_theme': new_theme,\n 'settings': settings,\n 'subproject': subproject,\n 'github_edit_url': version.get_github_url(\n docroot,\n page_slug,\n source_suffix,\n 'edit',\n ),\n 'github_view_url': version.get_github_url(\n docroot,\n page_slug,\n source_suffix,\n 'view',\n ),\n 'gitlab_edit_url': version.get_gitlab_url(\n docroot,\n page_slug,\n source_suffix,\n 'edit',\n ),\n 'gitlab_view_url': version.get_gitlab_url(\n docroot,\n page_slug,\n source_suffix,\n 'view',\n ),\n 'bitbucket_url': version.get_bitbucket_url(\n docroot,\n page_slug,\n source_suffix,\n ),\n 'theme': theme,\n }\n\n html = template_loader.get_template('restapi/footer.html').render(\n context,\n request,\n )\n resp_data = {\n 'html': html,\n 'version_active': version.active,\n 'version_compare': version_compare_data,\n 'version_supported': version.supported,\n }\n\n # Allow folks to hook onto the footer response for various information\n # collection, or to modify the resp_data.\n footer_response.send(\n sender=None,\n request=request,\n context=context,\n resp_data=resp_data,\n )\n\n return Response(resp_data)\n", "path": "readthedocs/restapi/views/footer_views.py"}]}
2,274
136
gh_patches_debug_48199
rasdani/github-patches
git_diff
secdev__scapy-1779
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Scapy crashes when tcpdump is not in $PATH Here is the fix: ```diff git diff scapy/arch/common.py diff --git a/scapy/arch/common.py b/scapy/arch/common.py index 9da19141..f103bebe 100644 --- a/scapy/arch/common.py +++ b/scapy/arch/common.py @@ -27,8 +27,11 @@ import scapy.modules.six as six def _check_tcpdump(): with open(os.devnull, 'wb') as devnull: - proc = subprocess.Popen([conf.prog.tcpdump, "--version"], - stdout=devnull, stderr=subprocess.STDOUT) + try: + proc = subprocess.Popen([conf.prog.tcpdump, "--version"], + stdout=devnull, stderr=subprocess.STDOUT) + except OSError: + return False return proc.wait() == 0 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scapy/arch/common.py` Content: ``` 1 # This file is part of Scapy 2 # See http://www.secdev.org/projects/scapy for more information 3 # Copyright (C) Philippe Biondi <phil@secdev.org> 4 # This program is published under a GPLv2 license 5 6 """ 7 Functions common to different architectures 8 """ 9 10 # Important Note: This file is not needed on Windows, and mustn't be loaded 11 12 import socket 13 import subprocess 14 from fcntl import ioctl 15 import os 16 import struct 17 import ctypes 18 from ctypes import POINTER, Structure 19 from ctypes import c_uint, c_uint32, c_ushort, c_ubyte 20 from scapy.config import conf 21 from scapy.data import MTU 22 from scapy.error import Scapy_Exception 23 import scapy.modules.six as six 24 25 # BOOT 26 27 28 def _check_tcpdump(): 29 with open(os.devnull, 'wb') as devnull: 30 proc = subprocess.Popen([conf.prog.tcpdump, "--version"], 31 stdout=devnull, stderr=subprocess.STDOUT) 32 return proc.wait() == 0 33 34 35 TCPDUMP = _check_tcpdump() 36 37 # UTILS 38 39 40 def get_if(iff, cmd): 41 """Ease SIOCGIF* ioctl calls""" 42 43 sck = socket.socket() 44 ifreq = ioctl(sck, cmd, struct.pack("16s16x", iff.encode("utf8"))) 45 sck.close() 46 return ifreq 47 48 # BPF HANDLERS 49 50 51 class bpf_insn(Structure): 52 """"The BPF instruction data structure""" 53 _fields_ = [("code", c_ushort), 54 ("jt", c_ubyte), 55 ("jf", c_ubyte), 56 ("k", c_uint32)] 57 58 59 class bpf_program(Structure): 60 """"Structure for BIOCSETF""" 61 _fields_ = [("bf_len", c_uint), 62 ("bf_insns", POINTER(bpf_insn))] 63 64 65 def _legacy_bpf_pointer(tcpdump_lines): 66 """Get old-format BPF Pointer. Deprecated""" 67 X86_64 = os.uname()[4] in ['x86_64', 'aarch64'] 68 size = int(tcpdump_lines[0]) 69 bpf = b"" 70 for l in tcpdump_lines[1:]: 71 if six.PY2: 72 int_type = long # noqa: F821 73 else: 74 int_type = int 75 bpf += struct.pack("HBBI", *map(int_type, l.split())) 76 77 # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501 78 if conf.use_pypy: 79 str_buffer = ctypes.create_string_buffer(bpf) 80 return struct.pack('HL', size, ctypes.addressof(str_buffer)) 81 else: 82 # XXX. Argl! We need to give the kernel a pointer on the BPF, 83 # Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501 84 if X86_64: 85 return struct.pack("HL", size, id(bpf) + 36) 86 else: 87 return struct.pack("HI", size, id(bpf) + 20) 88 89 90 def get_bpf_pointer(tcpdump_lines): 91 """Create a BPF Pointer for TCPDump filter""" 92 if conf.use_pypy: 93 return _legacy_bpf_pointer(tcpdump_lines) 94 95 # Allocate BPF instructions 96 size = int(tcpdump_lines[0]) 97 bpf_insn_a = bpf_insn * size 98 bip = bpf_insn_a() 99 100 # Fill the BPF instruction structures with the byte code 101 tcpdump_lines = tcpdump_lines[1:] 102 i = 0 103 for line in tcpdump_lines: 104 values = [int(v) for v in line.split()] 105 bip[i].code = c_ushort(values[0]) 106 bip[i].jt = c_ubyte(values[1]) 107 bip[i].jf = c_ubyte(values[2]) 108 bip[i].k = c_uint(values[3]) 109 i += 1 110 111 # Create the BPF program 112 return bpf_program(size, bip) 113 114 115 def compile_filter(bpf_filter, iface=None): 116 """Asks Tcpdump to parse the filter, then build the matching 117 BPF bytecode using get_bpf_pointer. 118 """ 119 if not TCPDUMP: 120 raise Scapy_Exception("tcpdump is not available. Cannot use filter !") 121 try: 122 process = subprocess.Popen([ 123 conf.prog.tcpdump, 124 "-p", 125 "-i", (conf.iface if iface is None else iface), 126 "-ddd", 127 "-s", str(MTU), 128 bpf_filter], 129 stdout=subprocess.PIPE, 130 stderr=subprocess.PIPE 131 ) 132 except OSError as ex: 133 raise Scapy_Exception("Failed to attach filter: %s" % ex) 134 lines, err = process.communicate() 135 ret = process.returncode 136 if ret: 137 raise Scapy_Exception( 138 "Failed to attach filter: tcpdump returned: %s" % err 139 ) 140 lines = lines.strip().split(b"\n") 141 return get_bpf_pointer(lines) 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scapy/arch/common.py b/scapy/arch/common.py --- a/scapy/arch/common.py +++ b/scapy/arch/common.py @@ -26,9 +26,15 @@ def _check_tcpdump(): + """ + Return True if the tcpdump command can be started + """ with open(os.devnull, 'wb') as devnull: - proc = subprocess.Popen([conf.prog.tcpdump, "--version"], - stdout=devnull, stderr=subprocess.STDOUT) + try: + proc = subprocess.Popen([conf.prog.tcpdump, "--version"], + stdout=devnull, stderr=subprocess.STDOUT) + except OSError: + return False return proc.wait() == 0
{"golden_diff": "diff --git a/scapy/arch/common.py b/scapy/arch/common.py\n--- a/scapy/arch/common.py\n+++ b/scapy/arch/common.py\n@@ -26,9 +26,15 @@\n \n \n def _check_tcpdump():\n+ \"\"\"\n+ Return True if the tcpdump command can be started\n+ \"\"\"\n with open(os.devnull, 'wb') as devnull:\n- proc = subprocess.Popen([conf.prog.tcpdump, \"--version\"],\n- stdout=devnull, stderr=subprocess.STDOUT)\n+ try:\n+ proc = subprocess.Popen([conf.prog.tcpdump, \"--version\"],\n+ stdout=devnull, stderr=subprocess.STDOUT)\n+ except OSError:\n+ return False\n return proc.wait() == 0\n", "issue": "Scapy crashes when tcpdump is not in $PATH\nHere is the fix:\r\n\r\n```diff\r\ngit diff scapy/arch/common.py\r\ndiff --git a/scapy/arch/common.py b/scapy/arch/common.py\r\nindex 9da19141..f103bebe 100644\r\n--- a/scapy/arch/common.py\r\n+++ b/scapy/arch/common.py\r\n@@ -27,8 +27,11 @@ import scapy.modules.six as six\r\n \r\n def _check_tcpdump():\r\n with open(os.devnull, 'wb') as devnull:\r\n- proc = subprocess.Popen([conf.prog.tcpdump, \"--version\"],\r\n- stdout=devnull, stderr=subprocess.STDOUT)\r\n+ try:\r\n+ proc = subprocess.Popen([conf.prog.tcpdump, \"--version\"],\r\n+ stdout=devnull, stderr=subprocess.STDOUT)\r\n+ except OSError:\r\n+ return False\r\n return proc.wait() == 0\r\n```\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <phil@secdev.org>\n# This program is published under a GPLv2 license\n\n\"\"\"\nFunctions common to different architectures\n\"\"\"\n\n# Important Note: This file is not needed on Windows, and mustn't be loaded\n\nimport socket\nimport subprocess\nfrom fcntl import ioctl\nimport os\nimport struct\nimport ctypes\nfrom ctypes import POINTER, Structure\nfrom ctypes import c_uint, c_uint32, c_ushort, c_ubyte\nfrom scapy.config import conf\nfrom scapy.data import MTU\nfrom scapy.error import Scapy_Exception\nimport scapy.modules.six as six\n\n# BOOT\n\n\ndef _check_tcpdump():\n with open(os.devnull, 'wb') as devnull:\n proc = subprocess.Popen([conf.prog.tcpdump, \"--version\"],\n stdout=devnull, stderr=subprocess.STDOUT)\n return proc.wait() == 0\n\n\nTCPDUMP = _check_tcpdump()\n\n# UTILS\n\n\ndef get_if(iff, cmd):\n \"\"\"Ease SIOCGIF* ioctl calls\"\"\"\n\n sck = socket.socket()\n ifreq = ioctl(sck, cmd, struct.pack(\"16s16x\", iff.encode(\"utf8\")))\n sck.close()\n return ifreq\n\n# BPF HANDLERS\n\n\nclass bpf_insn(Structure):\n \"\"\"\"The BPF instruction data structure\"\"\"\n _fields_ = [(\"code\", c_ushort),\n (\"jt\", c_ubyte),\n (\"jf\", c_ubyte),\n (\"k\", c_uint32)]\n\n\nclass bpf_program(Structure):\n \"\"\"\"Structure for BIOCSETF\"\"\"\n _fields_ = [(\"bf_len\", c_uint),\n (\"bf_insns\", POINTER(bpf_insn))]\n\n\ndef _legacy_bpf_pointer(tcpdump_lines):\n \"\"\"Get old-format BPF Pointer. Deprecated\"\"\"\n X86_64 = os.uname()[4] in ['x86_64', 'aarch64']\n size = int(tcpdump_lines[0])\n bpf = b\"\"\n for l in tcpdump_lines[1:]:\n if six.PY2:\n int_type = long # noqa: F821\n else:\n int_type = int\n bpf += struct.pack(\"HBBI\", *map(int_type, l.split()))\n\n # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501\n if conf.use_pypy:\n str_buffer = ctypes.create_string_buffer(bpf)\n return struct.pack('HL', size, ctypes.addressof(str_buffer))\n else:\n # XXX. Argl! We need to give the kernel a pointer on the BPF,\n # Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501\n if X86_64:\n return struct.pack(\"HL\", size, id(bpf) + 36)\n else:\n return struct.pack(\"HI\", size, id(bpf) + 20)\n\n\ndef get_bpf_pointer(tcpdump_lines):\n \"\"\"Create a BPF Pointer for TCPDump filter\"\"\"\n if conf.use_pypy:\n return _legacy_bpf_pointer(tcpdump_lines)\n\n # Allocate BPF instructions\n size = int(tcpdump_lines[0])\n bpf_insn_a = bpf_insn * size\n bip = bpf_insn_a()\n\n # Fill the BPF instruction structures with the byte code\n tcpdump_lines = tcpdump_lines[1:]\n i = 0\n for line in tcpdump_lines:\n values = [int(v) for v in line.split()]\n bip[i].code = c_ushort(values[0])\n bip[i].jt = c_ubyte(values[1])\n bip[i].jf = c_ubyte(values[2])\n bip[i].k = c_uint(values[3])\n i += 1\n\n # Create the BPF program\n return bpf_program(size, bip)\n\n\ndef compile_filter(bpf_filter, iface=None):\n \"\"\"Asks Tcpdump to parse the filter, then build the matching\n BPF bytecode using get_bpf_pointer.\n \"\"\"\n if not TCPDUMP:\n raise Scapy_Exception(\"tcpdump is not available. Cannot use filter !\")\n try:\n process = subprocess.Popen([\n conf.prog.tcpdump,\n \"-p\",\n \"-i\", (conf.iface if iface is None else iface),\n \"-ddd\",\n \"-s\", str(MTU),\n bpf_filter],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n except OSError as ex:\n raise Scapy_Exception(\"Failed to attach filter: %s\" % ex)\n lines, err = process.communicate()\n ret = process.returncode\n if ret:\n raise Scapy_Exception(\n \"Failed to attach filter: tcpdump returned: %s\" % err\n )\n lines = lines.strip().split(b\"\\n\")\n return get_bpf_pointer(lines)\n", "path": "scapy/arch/common.py"}], "after_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <phil@secdev.org>\n# This program is published under a GPLv2 license\n\n\"\"\"\nFunctions common to different architectures\n\"\"\"\n\n# Important Note: This file is not needed on Windows, and mustn't be loaded\n\nimport socket\nimport subprocess\nfrom fcntl import ioctl\nimport os\nimport struct\nimport ctypes\nfrom ctypes import POINTER, Structure\nfrom ctypes import c_uint, c_uint32, c_ushort, c_ubyte\nfrom scapy.config import conf\nfrom scapy.data import MTU\nfrom scapy.error import Scapy_Exception\nimport scapy.modules.six as six\n\n# BOOT\n\n\ndef _check_tcpdump():\n \"\"\"\n Return True if the tcpdump command can be started\n \"\"\"\n with open(os.devnull, 'wb') as devnull:\n try:\n proc = subprocess.Popen([conf.prog.tcpdump, \"--version\"],\n stdout=devnull, stderr=subprocess.STDOUT)\n except OSError:\n return False\n return proc.wait() == 0\n\n\nTCPDUMP = _check_tcpdump()\n\n# UTILS\n\n\ndef get_if(iff, cmd):\n \"\"\"Ease SIOCGIF* ioctl calls\"\"\"\n\n sck = socket.socket()\n ifreq = ioctl(sck, cmd, struct.pack(\"16s16x\", iff.encode(\"utf8\")))\n sck.close()\n return ifreq\n\n# BPF HANDLERS\n\n\nclass bpf_insn(Structure):\n \"\"\"\"The BPF instruction data structure\"\"\"\n _fields_ = [(\"code\", c_ushort),\n (\"jt\", c_ubyte),\n (\"jf\", c_ubyte),\n (\"k\", c_uint32)]\n\n\nclass bpf_program(Structure):\n \"\"\"\"Structure for BIOCSETF\"\"\"\n _fields_ = [(\"bf_len\", c_uint),\n (\"bf_insns\", POINTER(bpf_insn))]\n\n\ndef _legacy_bpf_pointer(tcpdump_lines):\n \"\"\"Get old-format BPF Pointer. Deprecated\"\"\"\n X86_64 = os.uname()[4] in ['x86_64', 'aarch64']\n size = int(tcpdump_lines[0])\n bpf = b\"\"\n for l in tcpdump_lines[1:]:\n if six.PY2:\n int_type = long # noqa: F821\n else:\n int_type = int\n bpf += struct.pack(\"HBBI\", *map(int_type, l.split()))\n\n # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501\n if conf.use_pypy:\n str_buffer = ctypes.create_string_buffer(bpf)\n return struct.pack('HL', size, ctypes.addressof(str_buffer))\n else:\n # XXX. Argl! We need to give the kernel a pointer on the BPF,\n # Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501\n if X86_64:\n return struct.pack(\"HL\", size, id(bpf) + 36)\n else:\n return struct.pack(\"HI\", size, id(bpf) + 20)\n\n\ndef get_bpf_pointer(tcpdump_lines):\n \"\"\"Create a BPF Pointer for TCPDump filter\"\"\"\n if conf.use_pypy:\n return _legacy_bpf_pointer(tcpdump_lines)\n\n # Allocate BPF instructions\n size = int(tcpdump_lines[0])\n bpf_insn_a = bpf_insn * size\n bip = bpf_insn_a()\n\n # Fill the BPF instruction structures with the byte code\n tcpdump_lines = tcpdump_lines[1:]\n i = 0\n for line in tcpdump_lines:\n values = [int(v) for v in line.split()]\n bip[i].code = c_ushort(values[0])\n bip[i].jt = c_ubyte(values[1])\n bip[i].jf = c_ubyte(values[2])\n bip[i].k = c_uint(values[3])\n i += 1\n\n # Create the BPF program\n return bpf_program(size, bip)\n\n\ndef compile_filter(bpf_filter, iface=None):\n \"\"\"Asks Tcpdump to parse the filter, then build the matching\n BPF bytecode using get_bpf_pointer.\n \"\"\"\n if not TCPDUMP:\n raise Scapy_Exception(\"tcpdump is not available. Cannot use filter !\")\n try:\n process = subprocess.Popen([\n conf.prog.tcpdump,\n \"-p\",\n \"-i\", (conf.iface if iface is None else iface),\n \"-ddd\",\n \"-s\", str(MTU),\n bpf_filter],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n except OSError as ex:\n raise Scapy_Exception(\"Failed to attach filter: %s\" % ex)\n lines, err = process.communicate()\n ret = process.returncode\n if ret:\n raise Scapy_Exception(\n \"Failed to attach filter: tcpdump returned: %s\" % err\n )\n lines = lines.strip().split(b\"\\n\")\n return get_bpf_pointer(lines)\n", "path": "scapy/arch/common.py"}]}
1,917
166
gh_patches_debug_5984
rasdani/github-patches
git_diff
pydantic__pydantic-8110
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `CallableDiscriminator` isn't in the docs -- note in release notes (it's `Discriminator`) ### Initial Checks - [X] I have searched Google & GitHub for similar requests and couldn't find anything - [X] I have read and followed [the docs](https://docs.pydantic.dev) and still think this feature is missing ### Description It's a trap! The release notes for 2.5.0 say `CallableDiscriminator`, but the class was renamed to `Discriminator` before release - so searching the docs for "CallableDiscriminator" doesn't find it. Maybe worth a note in the line of the release notes to indicate it is now called `Discriminator` to direct users to this cool new feature. ### Affected Components - [ ] [Compatibility between releases](https://docs.pydantic.dev/changelog/) - [ ] [Data validation/parsing](https://docs.pydantic.dev/concepts/models/#basic-model-usage) - [ ] [Data serialization](https://docs.pydantic.dev/concepts/serialization/) - `.model_dump()` and `.model_dump_json()` - [ ] [JSON Schema](https://docs.pydantic.dev/concepts/json_schema/) - [ ] [Dataclasses](https://docs.pydantic.dev/concepts/dataclasses/) - [ ] [Model Config](https://docs.pydantic.dev/concepts/config/) - [ ] [Field Types](https://docs.pydantic.dev/api/types/) - adding or changing a particular data type - [ ] [Function validation decorator](https://docs.pydantic.dev/concepts/validation_decorator/) - [ ] [Generic Models](https://docs.pydantic.dev/concepts/models/#generic-models) - [ ] [Other Model behaviour](https://docs.pydantic.dev/concepts/models/) - `model_construct()`, pickling, private attributes, ORM mode - [ ] [Plugins](https://docs.pydantic.dev/) and integration with other tools - mypy, FastAPI, python-devtools, Hypothesis, VS Code, PyCharm, etc. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pydantic/_internal/_validate_call.py` Content: ``` 1 from __future__ import annotations as _annotations 2 3 import inspect 4 from dataclasses import dataclass 5 from functools import partial 6 from typing import Any, Awaitable, Callable 7 8 import pydantic_core 9 10 from ..config import ConfigDict 11 from ..plugin._schema_validator import create_schema_validator 12 from . import _generate_schema, _typing_extra 13 from ._config import ConfigWrapper 14 15 16 @dataclass 17 class CallMarker: 18 function: Callable[..., Any] 19 validate_return: bool 20 21 22 class ValidateCallWrapper: 23 """This is a wrapper around a function that validates the arguments passed to it, and optionally the return value. 24 25 It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so 26 these functions can be applied to instance methods, class methods, static methods, as well as normal functions. 27 """ 28 29 __slots__ = ( 30 'raw_function', 31 '_config', 32 '_validate_return', 33 '__pydantic_core_schema__', 34 '__pydantic_validator__', 35 '__signature__', 36 '__name__', 37 '__qualname__', 38 '__annotations__', 39 '__dict__', # required for __module__ 40 ) 41 42 def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool): 43 self.raw_function = function 44 self._config = config 45 self._validate_return = validate_return 46 self.__signature__ = inspect.signature(function) 47 if isinstance(function, partial): 48 func = function.func 49 schema_type = func 50 self.__name__ = f'partial({func.__name__})' 51 self.__qualname__ = f'partial({func.__qualname__})' 52 self.__annotations__ = func.__annotations__ 53 self.__module__ = func.__module__ 54 self.__doc__ = func.__doc__ 55 else: 56 schema_type = function 57 self.__name__ = function.__name__ 58 self.__qualname__ = function.__qualname__ 59 self.__annotations__ = function.__annotations__ 60 self.__module__ = function.__module__ 61 self.__doc__ = function.__doc__ 62 63 namespace = _typing_extra.add_module_globals(function, None) 64 config_wrapper = ConfigWrapper(config) 65 gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace) 66 schema = gen_schema.clean_schema(gen_schema.generate_schema(function)) 67 self.__pydantic_core_schema__ = schema 68 core_config = config_wrapper.core_config(self) 69 70 self.__pydantic_validator__ = create_schema_validator( 71 schema, 72 schema_type, 73 self.__module__, 74 self.__qualname__, 75 'validate_call', 76 core_config, 77 config_wrapper.plugin_settings, 78 ) 79 80 if self._validate_return: 81 return_type = ( 82 self.__signature__.return_annotation 83 if self.__signature__.return_annotation is not self.__signature__.empty 84 else Any 85 ) 86 gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace) 87 schema = gen_schema.clean_schema(gen_schema.generate_schema(return_type)) 88 self.__return_pydantic_core_schema__ = schema 89 validator = create_schema_validator( 90 schema, 91 schema_type, 92 self.__module__, 93 self.__qualname__, 94 'validate_call', 95 core_config, 96 config_wrapper.plugin_settings, 97 ) 98 if inspect.iscoroutinefunction(self.raw_function): 99 100 async def return_val_wrapper(aw: Awaitable[Any]) -> None: 101 return validator.validate_python(await aw) 102 103 self.__return_pydantic_validator__ = return_val_wrapper 104 else: 105 self.__return_pydantic_validator__ = validator.validate_python 106 else: 107 self.__return_pydantic_core_schema__ = None 108 self.__return_pydantic_validator__ = None 109 110 self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods 111 112 def __call__(self, *args: Any, **kwargs: Any) -> Any: 113 res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs)) 114 if self.__return_pydantic_validator__: 115 return self.__return_pydantic_validator__(res) 116 return res 117 118 def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper: 119 """Bind the raw function and return another ValidateCallWrapper wrapping that.""" 120 if obj is None: 121 try: 122 # Handle the case where a method is accessed as a class attribute 123 return objtype.__getattribute__(objtype, self._name) # type: ignore 124 except AttributeError: 125 # This will happen the first time the attribute is accessed 126 pass 127 128 bound_function = self.raw_function.__get__(obj, objtype) 129 result = self.__class__(bound_function, self._config, self._validate_return) 130 131 # skip binding to instance when obj or objtype has __slots__ attribute 132 if hasattr(obj, '__slots__') or hasattr(objtype, '__slots__'): 133 return result 134 135 if self._name is not None: 136 if obj is not None: 137 object.__setattr__(obj, self._name, result) 138 else: 139 object.__setattr__(objtype, self._name, result) 140 return result 141 142 def __set_name__(self, owner: Any, name: str) -> None: 143 self._name = name 144 145 def __repr__(self) -> str: 146 return f'ValidateCallWrapper({self.raw_function})' 147 148 def __eq__(self, other): 149 return self.raw_function == other.raw_function 150 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pydantic/_internal/_validate_call.py b/pydantic/_internal/_validate_call.py --- a/pydantic/_internal/_validate_call.py +++ b/pydantic/_internal/_validate_call.py @@ -145,5 +145,12 @@ def __repr__(self) -> str: return f'ValidateCallWrapper({self.raw_function})' - def __eq__(self, other): - return self.raw_function == other.raw_function + def __eq__(self, other) -> bool: + return ( + (self.raw_function == other.raw_function) + and (self._config == other._config) + and (self._validate_return == other._validate_return) + ) + + def __hash__(self): + return hash(self.raw_function)
{"golden_diff": "diff --git a/pydantic/_internal/_validate_call.py b/pydantic/_internal/_validate_call.py\n--- a/pydantic/_internal/_validate_call.py\n+++ b/pydantic/_internal/_validate_call.py\n@@ -145,5 +145,12 @@\n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n \n- def __eq__(self, other):\n- return self.raw_function == other.raw_function\n+ def __eq__(self, other) -> bool:\n+ return (\n+ (self.raw_function == other.raw_function)\n+ and (self._config == other._config)\n+ and (self._validate_return == other._validate_return)\n+ )\n+\n+ def __hash__(self):\n+ return hash(self.raw_function)\n", "issue": "`CallableDiscriminator` isn't in the docs -- note in release notes (it's `Discriminator`)\n### Initial Checks\n\n- [X] I have searched Google & GitHub for similar requests and couldn't find anything\n- [X] I have read and followed [the docs](https://docs.pydantic.dev) and still think this feature is missing\n\n### Description\n\nIt's a trap!\r\n\r\nThe release notes for 2.5.0 say `CallableDiscriminator`, but the class was renamed to `Discriminator` before release - so searching the docs for \"CallableDiscriminator\" doesn't find it.\r\n\r\nMaybe worth a note in the line of the release notes to indicate it is now called `Discriminator` to direct users to this cool new feature.\n\n### Affected Components\n\n- [ ] [Compatibility between releases](https://docs.pydantic.dev/changelog/)\n- [ ] [Data validation/parsing](https://docs.pydantic.dev/concepts/models/#basic-model-usage)\n- [ ] [Data serialization](https://docs.pydantic.dev/concepts/serialization/) - `.model_dump()` and `.model_dump_json()`\n- [ ] [JSON Schema](https://docs.pydantic.dev/concepts/json_schema/)\n- [ ] [Dataclasses](https://docs.pydantic.dev/concepts/dataclasses/)\n- [ ] [Model Config](https://docs.pydantic.dev/concepts/config/)\n- [ ] [Field Types](https://docs.pydantic.dev/api/types/) - adding or changing a particular data type\n- [ ] [Function validation decorator](https://docs.pydantic.dev/concepts/validation_decorator/)\n- [ ] [Generic Models](https://docs.pydantic.dev/concepts/models/#generic-models)\n- [ ] [Other Model behaviour](https://docs.pydantic.dev/concepts/models/) - `model_construct()`, pickling, private attributes, ORM mode\n- [ ] [Plugins](https://docs.pydantic.dev/) and integration with other tools - mypy, FastAPI, python-devtools, Hypothesis, VS Code, PyCharm, etc.\n", "before_files": [{"content": "from __future__ import annotations as _annotations\n\nimport inspect\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Awaitable, Callable\n\nimport pydantic_core\n\nfrom ..config import ConfigDict\nfrom ..plugin._schema_validator import create_schema_validator\nfrom . import _generate_schema, _typing_extra\nfrom ._config import ConfigWrapper\n\n\n@dataclass\nclass CallMarker:\n function: Callable[..., Any]\n validate_return: bool\n\n\nclass ValidateCallWrapper:\n \"\"\"This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.\n\n It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so\n these functions can be applied to instance methods, class methods, static methods, as well as normal functions.\n \"\"\"\n\n __slots__ = (\n 'raw_function',\n '_config',\n '_validate_return',\n '__pydantic_core_schema__',\n '__pydantic_validator__',\n '__signature__',\n '__name__',\n '__qualname__',\n '__annotations__',\n '__dict__', # required for __module__\n )\n\n def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):\n self.raw_function = function\n self._config = config\n self._validate_return = validate_return\n self.__signature__ = inspect.signature(function)\n if isinstance(function, partial):\n func = function.func\n schema_type = func\n self.__name__ = f'partial({func.__name__})'\n self.__qualname__ = f'partial({func.__qualname__})'\n self.__annotations__ = func.__annotations__\n self.__module__ = func.__module__\n self.__doc__ = func.__doc__\n else:\n schema_type = function\n self.__name__ = function.__name__\n self.__qualname__ = function.__qualname__\n self.__annotations__ = function.__annotations__\n self.__module__ = function.__module__\n self.__doc__ = function.__doc__\n\n namespace = _typing_extra.add_module_globals(function, None)\n config_wrapper = ConfigWrapper(config)\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n schema = gen_schema.clean_schema(gen_schema.generate_schema(function))\n self.__pydantic_core_schema__ = schema\n core_config = config_wrapper.core_config(self)\n\n self.__pydantic_validator__ = create_schema_validator(\n schema,\n schema_type,\n self.__module__,\n self.__qualname__,\n 'validate_call',\n core_config,\n config_wrapper.plugin_settings,\n )\n\n if self._validate_return:\n return_type = (\n self.__signature__.return_annotation\n if self.__signature__.return_annotation is not self.__signature__.empty\n else Any\n )\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n schema = gen_schema.clean_schema(gen_schema.generate_schema(return_type))\n self.__return_pydantic_core_schema__ = schema\n validator = create_schema_validator(\n schema,\n schema_type,\n self.__module__,\n self.__qualname__,\n 'validate_call',\n core_config,\n config_wrapper.plugin_settings,\n )\n if inspect.iscoroutinefunction(self.raw_function):\n\n async def return_val_wrapper(aw: Awaitable[Any]) -> None:\n return validator.validate_python(await aw)\n\n self.__return_pydantic_validator__ = return_val_wrapper\n else:\n self.__return_pydantic_validator__ = validator.validate_python\n else:\n self.__return_pydantic_core_schema__ = None\n self.__return_pydantic_validator__ = None\n\n self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))\n if self.__return_pydantic_validator__:\n return self.__return_pydantic_validator__(res)\n return res\n\n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n if obj is None:\n try:\n # Handle the case where a method is accessed as a class attribute\n return objtype.__getattribute__(objtype, self._name) # type: ignore\n except AttributeError:\n # This will happen the first time the attribute is accessed\n pass\n\n bound_function = self.raw_function.__get__(obj, objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n\n # skip binding to instance when obj or objtype has __slots__ attribute\n if hasattr(obj, '__slots__') or hasattr(objtype, '__slots__'):\n return result\n\n if self._name is not None:\n if obj is not None:\n object.__setattr__(obj, self._name, result)\n else:\n object.__setattr__(objtype, self._name, result)\n return result\n\n def __set_name__(self, owner: Any, name: str) -> None:\n self._name = name\n\n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n\n def __eq__(self, other):\n return self.raw_function == other.raw_function\n", "path": "pydantic/_internal/_validate_call.py"}], "after_files": [{"content": "from __future__ import annotations as _annotations\n\nimport inspect\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Awaitable, Callable\n\nimport pydantic_core\n\nfrom ..config import ConfigDict\nfrom ..plugin._schema_validator import create_schema_validator\nfrom . import _generate_schema, _typing_extra\nfrom ._config import ConfigWrapper\n\n\n@dataclass\nclass CallMarker:\n function: Callable[..., Any]\n validate_return: bool\n\n\nclass ValidateCallWrapper:\n \"\"\"This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.\n\n It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so\n these functions can be applied to instance methods, class methods, static methods, as well as normal functions.\n \"\"\"\n\n __slots__ = (\n 'raw_function',\n '_config',\n '_validate_return',\n '__pydantic_core_schema__',\n '__pydantic_validator__',\n '__signature__',\n '__name__',\n '__qualname__',\n '__annotations__',\n '__dict__', # required for __module__\n )\n\n def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):\n self.raw_function = function\n self._config = config\n self._validate_return = validate_return\n self.__signature__ = inspect.signature(function)\n if isinstance(function, partial):\n func = function.func\n schema_type = func\n self.__name__ = f'partial({func.__name__})'\n self.__qualname__ = f'partial({func.__qualname__})'\n self.__annotations__ = func.__annotations__\n self.__module__ = func.__module__\n self.__doc__ = func.__doc__\n else:\n schema_type = function\n self.__name__ = function.__name__\n self.__qualname__ = function.__qualname__\n self.__annotations__ = function.__annotations__\n self.__module__ = function.__module__\n self.__doc__ = function.__doc__\n\n namespace = _typing_extra.add_module_globals(function, None)\n config_wrapper = ConfigWrapper(config)\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n schema = gen_schema.clean_schema(gen_schema.generate_schema(function))\n self.__pydantic_core_schema__ = schema\n core_config = config_wrapper.core_config(self)\n\n self.__pydantic_validator__ = create_schema_validator(\n schema,\n schema_type,\n self.__module__,\n self.__qualname__,\n 'validate_call',\n core_config,\n config_wrapper.plugin_settings,\n )\n\n if self._validate_return:\n return_type = (\n self.__signature__.return_annotation\n if self.__signature__.return_annotation is not self.__signature__.empty\n else Any\n )\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n schema = gen_schema.clean_schema(gen_schema.generate_schema(return_type))\n self.__return_pydantic_core_schema__ = schema\n validator = create_schema_validator(\n schema,\n schema_type,\n self.__module__,\n self.__qualname__,\n 'validate_call',\n core_config,\n config_wrapper.plugin_settings,\n )\n if inspect.iscoroutinefunction(self.raw_function):\n\n async def return_val_wrapper(aw: Awaitable[Any]) -> None:\n return validator.validate_python(await aw)\n\n self.__return_pydantic_validator__ = return_val_wrapper\n else:\n self.__return_pydantic_validator__ = validator.validate_python\n else:\n self.__return_pydantic_core_schema__ = None\n self.__return_pydantic_validator__ = None\n\n self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))\n if self.__return_pydantic_validator__:\n return self.__return_pydantic_validator__(res)\n return res\n\n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n if obj is None:\n try:\n # Handle the case where a method is accessed as a class attribute\n return objtype.__getattribute__(objtype, self._name) # type: ignore\n except AttributeError:\n # This will happen the first time the attribute is accessed\n pass\n\n bound_function = self.raw_function.__get__(obj, objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n\n # skip binding to instance when obj or objtype has __slots__ attribute\n if hasattr(obj, '__slots__') or hasattr(objtype, '__slots__'):\n return result\n\n if self._name is not None:\n if obj is not None:\n object.__setattr__(obj, self._name, result)\n else:\n object.__setattr__(objtype, self._name, result)\n return result\n\n def __set_name__(self, owner: Any, name: str) -> None:\n self._name = name\n\n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n\n def __eq__(self, other) -> bool:\n return (\n (self.raw_function == other.raw_function)\n and (self._config == other._config)\n and (self._validate_return == other._validate_return)\n )\n\n def __hash__(self):\n return hash(self.raw_function)\n", "path": "pydantic/_internal/_validate_call.py"}]}
2,246
182
gh_patches_debug_22072
rasdani/github-patches
git_diff
dask__distributed-3056
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cpu cores estimate based on cgroups I was reading the announcement for 2.4.0 and got interested in https://github.com/dask/distributed/pull/3039 by @jcrist That did lead me to this part of the code: https://github.com/dask/distributed/blob/7d017c467590c758fa4b8cb2b1193205fe5aa7ad/distributed/system.py#L62 Just by looking at it (and although I'm not an expert I think I know what's going on), I have to observations -- half way between a question and a bugreport. 1. in my docker environment I have here (ubuntu 18.04), the filename is different: ``` ~$ cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us 100000 ~$ cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us 220000 ``` in the code is that path: ``` ~$ ls /sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us ls: cannot access '/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us': No such file or directory ``` 2. The actual calculation is `int(quota / period)`. I think this should round up, not down. The point is, if you have a fraction like "2.5 cores", it will report 2 cores and keep half a core unused. It would be better if it reports 3 cores and then cgroups limits to the actual 2.5. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `distributed/system.py` Content: ``` 1 import os 2 import sys 3 4 import psutil 5 6 __all__ = ("memory_limit", "cpu_count", "MEMORY_LIMIT", "CPU_COUNT") 7 8 9 def memory_limit(): 10 """Get the memory limit (in bytes) for this system. 11 12 Takes the minimum value from the following locations: 13 14 - Total system host memory 15 - Cgroups limit (if set) 16 - RSS rlimit (if set) 17 """ 18 limit = psutil.virtual_memory().total 19 20 # Check cgroups if available 21 if sys.platform == "linux": 22 try: 23 with open("/sys/fs/cgroup/memory/memory.limit_in_bytes") as f: 24 cgroups_limit = int(f.read()) 25 if cgroups_limit > 0: 26 limit = min(limit, cgroups_limit) 27 except Exception: 28 pass 29 30 # Check rlimit if available 31 try: 32 import resource 33 34 hard_limit = resource.getrlimit(resource.RLIMIT_RSS)[1] 35 if hard_limit > 0: 36 limit = min(limit, hard_limit) 37 except (ImportError, OSError): 38 pass 39 40 return limit 41 42 43 def cpu_count(): 44 """Get the available CPU count for this system. 45 46 Takes the minimum value from the following locations: 47 48 - Total system cpus available on the host. 49 - CPU Affinity (if set) 50 - Cgroups limit (if set) 51 """ 52 count = os.cpu_count() 53 54 # Check CPU affinity if available 55 try: 56 affinity_count = len(psutil.Process().cpu_affinity()) 57 if affinity_count > 0: 58 count = min(count, affinity_count) 59 except Exception: 60 pass 61 62 # Check cgroups if available 63 if sys.platform == "linux": 64 try: 65 with open("/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us") as f: 66 quota = int(f.read()) 67 with open("/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_period_us") as f: 68 period = int(f.read()) 69 cgroups_count = int(quota / period) 70 if cgroups_count > 0: 71 count = min(count, cgroups_count) 72 except Exception: 73 pass 74 75 return count 76 77 78 MEMORY_LIMIT = memory_limit() 79 CPU_COUNT = cpu_count() 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/system.py b/distributed/system.py --- a/distributed/system.py +++ b/distributed/system.py @@ -1,3 +1,4 @@ +import math import os import sys @@ -61,16 +62,20 @@ # Check cgroups if available if sys.platform == "linux": - try: - with open("/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us") as f: - quota = int(f.read()) - with open("/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_period_us") as f: - period = int(f.read()) - cgroups_count = int(quota / period) - if cgroups_count > 0: - count = min(count, cgroups_count) - except Exception: - pass + # The directory name isn't standardized across linux distros, check both + for dirname in ["cpuacct,cpu", "cpu,cpuacct"]: + try: + with open("/sys/fs/cgroup/%s/cpu.cfs_quota_us" % dirname) as f: + quota = int(f.read()) + with open("/sys/fs/cgroup/%s/cpu.cfs_period_us" % dirname) as f: + period = int(f.read()) + # We round up on fractional CPUs + cgroups_count = math.ceil(quota / period) + if cgroups_count > 0: + count = min(count, cgroups_count) + break + except Exception: + pass return count
{"golden_diff": "diff --git a/distributed/system.py b/distributed/system.py\n--- a/distributed/system.py\n+++ b/distributed/system.py\n@@ -1,3 +1,4 @@\n+import math\n import os\n import sys\n \n@@ -61,16 +62,20 @@\n \n # Check cgroups if available\n if sys.platform == \"linux\":\n- try:\n- with open(\"/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us\") as f:\n- quota = int(f.read())\n- with open(\"/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_period_us\") as f:\n- period = int(f.read())\n- cgroups_count = int(quota / period)\n- if cgroups_count > 0:\n- count = min(count, cgroups_count)\n- except Exception:\n- pass\n+ # The directory name isn't standardized across linux distros, check both\n+ for dirname in [\"cpuacct,cpu\", \"cpu,cpuacct\"]:\n+ try:\n+ with open(\"/sys/fs/cgroup/%s/cpu.cfs_quota_us\" % dirname) as f:\n+ quota = int(f.read())\n+ with open(\"/sys/fs/cgroup/%s/cpu.cfs_period_us\" % dirname) as f:\n+ period = int(f.read())\n+ # We round up on fractional CPUs\n+ cgroups_count = math.ceil(quota / period)\n+ if cgroups_count > 0:\n+ count = min(count, cgroups_count)\n+ break\n+ except Exception:\n+ pass\n \n return count\n", "issue": "cpu cores estimate based on cgroups\nI was reading the announcement for 2.4.0 and got interested in https://github.com/dask/distributed/pull/3039 by @jcrist \r\n\r\nThat did lead me to this part of the code:\r\nhttps://github.com/dask/distributed/blob/7d017c467590c758fa4b8cb2b1193205fe5aa7ad/distributed/system.py#L62\r\n\r\nJust by looking at it (and although I'm not an expert I think I know what's going on), I have to observations -- half way between a question and a bugreport.\r\n\r\n1. in my docker environment I have here (ubuntu 18.04), the filename is different:\r\n\r\n```\r\n~$ cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us \r\n100000\r\n~$ cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us \r\n220000\r\n```\r\nin the code is that path:\r\n```\r\n~$ ls /sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us\r\nls: cannot access '/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us': No such file or directory\r\n```\r\n\r\n2. The actual calculation is `int(quota / period)`. I think this should round up, not down. The point is, if you have a fraction like \"2.5 cores\", it will report 2 cores and keep half a core unused. It would be better if it reports 3 cores and then cgroups limits to the actual 2.5.\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\n\nimport psutil\n\n__all__ = (\"memory_limit\", \"cpu_count\", \"MEMORY_LIMIT\", \"CPU_COUNT\")\n\n\ndef memory_limit():\n \"\"\"Get the memory limit (in bytes) for this system.\n\n Takes the minimum value from the following locations:\n\n - Total system host memory\n - Cgroups limit (if set)\n - RSS rlimit (if set)\n \"\"\"\n limit = psutil.virtual_memory().total\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/memory/memory.limit_in_bytes\") as f:\n cgroups_limit = int(f.read())\n if cgroups_limit > 0:\n limit = min(limit, cgroups_limit)\n except Exception:\n pass\n\n # Check rlimit if available\n try:\n import resource\n\n hard_limit = resource.getrlimit(resource.RLIMIT_RSS)[1]\n if hard_limit > 0:\n limit = min(limit, hard_limit)\n except (ImportError, OSError):\n pass\n\n return limit\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n\n Takes the minimum value from the following locations:\n\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_quota_us\") as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/cpuacct,cpu/cpu.cfs_period_us\") as f:\n period = int(f.read())\n cgroups_count = int(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n except Exception:\n pass\n\n return count\n\n\nMEMORY_LIMIT = memory_limit()\nCPU_COUNT = cpu_count()\n", "path": "distributed/system.py"}], "after_files": [{"content": "import math\nimport os\nimport sys\n\nimport psutil\n\n__all__ = (\"memory_limit\", \"cpu_count\", \"MEMORY_LIMIT\", \"CPU_COUNT\")\n\n\ndef memory_limit():\n \"\"\"Get the memory limit (in bytes) for this system.\n\n Takes the minimum value from the following locations:\n\n - Total system host memory\n - Cgroups limit (if set)\n - RSS rlimit (if set)\n \"\"\"\n limit = psutil.virtual_memory().total\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/memory/memory.limit_in_bytes\") as f:\n cgroups_limit = int(f.read())\n if cgroups_limit > 0:\n limit = min(limit, cgroups_limit)\n except Exception:\n pass\n\n # Check rlimit if available\n try:\n import resource\n\n hard_limit = resource.getrlimit(resource.RLIMIT_RSS)[1]\n if hard_limit > 0:\n limit = min(limit, hard_limit)\n except (ImportError, OSError):\n pass\n\n return limit\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n\n Takes the minimum value from the following locations:\n\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n # The directory name isn't standardized across linux distros, check both\n for dirname in [\"cpuacct,cpu\", \"cpu,cpuacct\"]:\n try:\n with open(\"/sys/fs/cgroup/%s/cpu.cfs_quota_us\" % dirname) as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/%s/cpu.cfs_period_us\" % dirname) as f:\n period = int(f.read())\n # We round up on fractional CPUs\n cgroups_count = math.ceil(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n break\n except Exception:\n pass\n\n return count\n\n\nMEMORY_LIMIT = memory_limit()\nCPU_COUNT = cpu_count()\n", "path": "distributed/system.py"}]}
1,250
346
gh_patches_debug_1615
rasdani/github-patches
git_diff
urllib3__urllib3-987
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- urllib3 fails to install on centos7 due to old setuptools not supporting <=, < environment markers. Current urllib3 fails to install on centos7. This bug was most likely introduced after https://github.com/shazow/urllib3/commit/9f5454eac808a105307b2d363c99ce97e5109821. centos7 ships a very old version of setuptools (0.9.8) which does not support `<=` as an environment marker. See https://github.com/pypa/setuptools/issues/380. ``` $ python --version Python 2.7.5 $ rpm -qa python-setuptools python-setuptools-0.9.8-4.el7.noarch $ lsb_release -a ... Description: CentOS Linux release 7.2.1511 (Core) Release: 7.2.1511 $ virtualenv venv ... $ venv/bin/pip install urllib3 Downloading/unpacking urllib3 Downloading urllib3-1.18.tar.gz (183kB): 183kB downloaded Running setup.py egg_info for package urllib3 error in urllib3 setup command: Invalid environment marker: python_version <= "2.7" Complete output from command python setup.py egg_info: error in urllib3 setup command: Invalid environment marker: python_version <= "2.7" ---------------------------------------- Cleaning up... Command python setup.py egg_info failed with error code 1 in /home/rene/src/venv/build/urllib3 Storing complete log in /home/rene/.pip/pip.log ``` Installing https://github.com/shazow/urllib3/commit/f620d997134708b09560ca5797aa79a59a2ef4c0 (commit before 9f5454eac808a105307b2d363c99ce97e5109821) works fine. ``` $ venv/bin/pip install git+git://github.com/shazow/urllib3.git@f620d997134708b09560ca5797aa79a59a2ef4c0 ... Successfully installed urllib3 Cleaning up... ``` But 9f5454eac808a105307b2d363c99ce97e5109821 fails. ``` $ venv/bin/pip install git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821 Downloading/unpacking git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821 Cloning git://github.com/shazow/urllib3.git (to 9f5454eac808a105307b2d363c99ce97e5109821) to /tmp/pip-lnVDAG-build Could not find a tag or branch '9f5454eac808a105307b2d363c99ce97e5109821', assuming commit. Running setup.py egg_info for package from git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821 error in urllib3 setup command: Invalid environment marker: python_version < "3.3" Complete output from command python setup.py egg_info: error in urllib3 setup command: Invalid environment marker: python_version < "3.3" ---------------------------------------- Cleaning up... Command python setup.py egg_info failed with error code 1 in /tmp/pip-lnVDAG-build Storing complete log in /home/rene/.pip/pip.log ``` urllib3 1.17 setup.py does not ship with < or <= markers so my workaround right now is to install urllib3==1.17. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import setup 4 5 import os 6 import re 7 import codecs 8 9 base_path = os.path.dirname(__file__) 10 11 # Get the version (borrowed from SQLAlchemy) 12 with open(os.path.join(base_path, 'urllib3', '__init__.py')) as fp: 13 VERSION = re.compile(r".*__version__ = '(.*?)'", 14 re.S).match(fp.read()).group(1) 15 16 with codecs.open('README.rst', encoding='utf-8') as fp: 17 readme = fp.read() 18 with codecs.open('CHANGES.rst', encoding='utf-8') as fp: 19 changes = fp.read() 20 version = VERSION 21 22 setup(name='urllib3', 23 version=version, 24 description="HTTP library with thread-safe connection pooling, file post, and more.", 25 long_description=u'\n\n'.join([readme, changes]), 26 classifiers=[ 27 'Environment :: Web Environment', 28 'Intended Audience :: Developers', 29 'License :: OSI Approved :: MIT License', 30 'Operating System :: OS Independent', 31 'Programming Language :: Python', 32 'Programming Language :: Python :: 2', 33 'Programming Language :: Python :: 3', 34 'Topic :: Internet :: WWW/HTTP', 35 'Topic :: Software Development :: Libraries', 36 ], 37 keywords='urllib httplib threadsafe filepost http https ssl pooling', 38 author='Andrey Petrov', 39 author_email='andrey.petrov@shazow.net', 40 url='https://urllib3.readthedocs.io/', 41 license='MIT', 42 packages=['urllib3', 43 'urllib3.packages', 'urllib3.packages.ssl_match_hostname', 44 'urllib3.packages.backports', 'urllib3.contrib', 45 'urllib3.util', 46 ], 47 requires=[], 48 tests_require=[ 49 # These are a less-specific subset of dev-requirements.txt, for the 50 # convenience of distro package maintainers. 51 'nose', 52 'mock', 53 'tornado', 54 ], 55 test_suite='test', 56 extras_require={ 57 'secure': [ 58 'pyOpenSSL>=0.14', 59 'cryptography>=1.3.4', 60 'idna>=2.0.0', 61 'certifi', 62 ], 63 'secure:python_version <= "2.7"': [ 64 "ipaddress", 65 ], 66 'socks': [ 67 'PySocks>=1.5.6,<2.0,!=1.5.7', 68 ] 69 }, 70 ) 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -59,8 +59,6 @@ 'cryptography>=1.3.4', 'idna>=2.0.0', 'certifi', - ], - 'secure:python_version <= "2.7"': [ "ipaddress", ], 'socks': [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,8 +59,6 @@\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n- ],\n- 'secure:python_version <= \"2.7\"': [\n \"ipaddress\",\n ],\n 'socks': [\n", "issue": "urllib3 fails to install on centos7 due to old setuptools not supporting <=, < environment markers.\nCurrent urllib3 fails to install on centos7. This bug was most likely introduced after https://github.com/shazow/urllib3/commit/9f5454eac808a105307b2d363c99ce97e5109821.\n\ncentos7 ships a very old version of setuptools (0.9.8) which does not support `<=` as an environment marker. See https://github.com/pypa/setuptools/issues/380.\n\n```\n$ python --version\nPython 2.7.5\n\n$ rpm -qa python-setuptools\npython-setuptools-0.9.8-4.el7.noarch\n\n$ lsb_release -a\n...\nDescription: CentOS Linux release 7.2.1511 (Core) \nRelease: 7.2.1511\n\n$ virtualenv venv\n...\n\n$ venv/bin/pip install urllib3\nDownloading/unpacking urllib3\n Downloading urllib3-1.18.tar.gz (183kB): 183kB downloaded\n Running setup.py egg_info for package urllib3\n error in urllib3 setup command: Invalid environment marker: python_version <= \"2.7\"\n Complete output from command python setup.py egg_info:\n error in urllib3 setup command: Invalid environment marker: python_version <= \"2.7\"\n\n----------------------------------------\nCleaning up...\nCommand python setup.py egg_info failed with error code 1 in /home/rene/src/venv/build/urllib3\nStoring complete log in /home/rene/.pip/pip.log\n```\n\nInstalling https://github.com/shazow/urllib3/commit/f620d997134708b09560ca5797aa79a59a2ef4c0 (commit before 9f5454eac808a105307b2d363c99ce97e5109821) works fine.\n\n```\n$ venv/bin/pip install git+git://github.com/shazow/urllib3.git@f620d997134708b09560ca5797aa79a59a2ef4c0\n...\nSuccessfully installed urllib3\nCleaning up...\n```\n\nBut 9f5454eac808a105307b2d363c99ce97e5109821 fails.\n\n```\n$ venv/bin/pip install git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821\nDownloading/unpacking git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821\n Cloning git://github.com/shazow/urllib3.git (to 9f5454eac808a105307b2d363c99ce97e5109821) to /tmp/pip-lnVDAG-build\n Could not find a tag or branch '9f5454eac808a105307b2d363c99ce97e5109821', assuming commit.\n Running setup.py egg_info for package from git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821\n error in urllib3 setup command: Invalid environment marker: python_version < \"3.3\"\n Complete output from command python setup.py egg_info:\n error in urllib3 setup command: Invalid environment marker: python_version < \"3.3\"\n\n----------------------------------------\nCleaning up...\nCommand python setup.py egg_info failed with error code 1 in /tmp/pip-lnVDAG-build\nStoring complete log in /home/rene/.pip/pip.log\n```\n\nurllib3 1.17 setup.py does not ship with < or <= markers so my workaround right now is to install urllib3==1.17.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='andrey.petrov@shazow.net',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.contrib',\n 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n ],\n 'secure:python_version <= \"2.7\"': [\n \"ipaddress\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='andrey.petrov@shazow.net',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.contrib',\n 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n \"ipaddress\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py"}]}
1,950
90
gh_patches_debug_13457
rasdani/github-patches
git_diff
modin-project__modin-3382
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- JSON dispatcher data file split correction Originated from https://github.com/modin-project/modin/pull/2607#discussion_r571989125. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `modin/engines/base/io/text/json_dispatcher.py` Content: ``` 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 13 14 """Module houses `JSONDispatcher` class, that is used for reading `.json` files.""" 15 16 from modin.engines.base.io.text.text_file_dispatcher import TextFileDispatcher 17 from io import BytesIO 18 import pandas 19 import numpy as np 20 from csv import QUOTE_NONE 21 22 from modin.config import NPartitions 23 24 25 class JSONDispatcher(TextFileDispatcher): 26 """ 27 Class handles utils for reading `.json` files. 28 29 Inherits some common for text files util functions from `TextFileDispatcher` class. 30 """ 31 32 @classmethod 33 def _read(cls, path_or_buf, **kwargs): 34 """ 35 Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters. 36 37 Parameters 38 ---------- 39 path_or_buf : str, path object or file-like object 40 `path_or_buf` parameter of `read_json` function. 41 **kwargs : dict 42 Parameters of `read_json` function. 43 44 Returns 45 ------- 46 BaseQueryCompiler 47 Query compiler with imported data for further processing. 48 """ 49 path_or_buf = cls.get_path_or_buffer(path_or_buf) 50 if isinstance(path_or_buf, str): 51 if not cls.file_exists(path_or_buf): 52 return cls.single_worker_read(path_or_buf, **kwargs) 53 path_or_buf = cls.get_path(path_or_buf) 54 elif not cls.pathlib_or_pypath(path_or_buf): 55 return cls.single_worker_read(path_or_buf, **kwargs) 56 if not kwargs.get("lines", False): 57 return cls.single_worker_read(path_or_buf, **kwargs) 58 columns = pandas.read_json( 59 BytesIO(b"" + open(path_or_buf, "rb").readline()), lines=True 60 ).columns 61 kwargs["columns"] = columns 62 empty_pd_df = pandas.DataFrame(columns=columns) 63 64 with cls.file_open(path_or_buf, "rb", kwargs.get("compression", "infer")) as f: 65 partition_ids = [] 66 index_ids = [] 67 dtypes_ids = [] 68 69 column_widths, num_splits = cls._define_metadata(empty_pd_df, columns) 70 71 args = {"fname": path_or_buf, "num_splits": num_splits, **kwargs} 72 73 splits = cls.partitioned_file( 74 f, 75 num_partitions=NPartitions.get(), 76 is_quoting=(args.get("quoting", "") != QUOTE_NONE), 77 ) 78 for start, end in splits: 79 args.update({"start": start, "end": end}) 80 partition_id = cls.deploy(cls.parse, num_splits + 3, args) 81 partition_ids.append(partition_id[:-3]) 82 index_ids.append(partition_id[-3]) 83 dtypes_ids.append(partition_id[-2]) 84 85 # partition_id[-1] contains the columns for each partition, which will be useful 86 # for implementing when `lines=False`. 87 row_lengths = cls.materialize(index_ids) 88 new_index = pandas.RangeIndex(sum(row_lengths)) 89 90 dtypes = cls.get_dtypes(dtypes_ids) 91 partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths) 92 93 if isinstance(dtypes, pandas.Series): 94 dtypes.index = columns 95 else: 96 dtypes = pandas.Series(dtypes, index=columns) 97 98 new_frame = cls.frame_cls( 99 np.array(partition_ids), 100 new_index, 101 columns, 102 row_lengths, 103 column_widths, 104 dtypes=dtypes, 105 ) 106 new_frame.synchronize_labels(axis=0) 107 return cls.query_compiler_cls(new_frame) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/modin/engines/base/io/text/json_dispatcher.py b/modin/engines/base/io/text/json_dispatcher.py --- a/modin/engines/base/io/text/json_dispatcher.py +++ b/modin/engines/base/io/text/json_dispatcher.py @@ -17,7 +17,6 @@ from io import BytesIO import pandas import numpy as np -from csv import QUOTE_NONE from modin.config import NPartitions @@ -73,7 +72,6 @@ splits = cls.partitioned_file( f, num_partitions=NPartitions.get(), - is_quoting=(args.get("quoting", "") != QUOTE_NONE), ) for start, end in splits: args.update({"start": start, "end": end})
{"golden_diff": "diff --git a/modin/engines/base/io/text/json_dispatcher.py b/modin/engines/base/io/text/json_dispatcher.py\n--- a/modin/engines/base/io/text/json_dispatcher.py\n+++ b/modin/engines/base/io/text/json_dispatcher.py\n@@ -17,7 +17,6 @@\n from io import BytesIO\n import pandas\n import numpy as np\n-from csv import QUOTE_NONE\n \n from modin.config import NPartitions\n \n@@ -73,7 +72,6 @@\n splits = cls.partitioned_file(\n f,\n num_partitions=NPartitions.get(),\n- is_quoting=(args.get(\"quoting\", \"\") != QUOTE_NONE),\n )\n for start, end in splits:\n args.update({\"start\": start, \"end\": end})\n", "issue": "JSON dispatcher data file split correction\nOriginated from https://github.com/modin-project/modin/pull/2607#discussion_r571989125.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses `JSONDispatcher` class, that is used for reading `.json` files.\"\"\"\n\nfrom modin.engines.base.io.text.text_file_dispatcher import TextFileDispatcher\nfrom io import BytesIO\nimport pandas\nimport numpy as np\nfrom csv import QUOTE_NONE\n\nfrom modin.config import NPartitions\n\n\nclass JSONDispatcher(TextFileDispatcher):\n \"\"\"\n Class handles utils for reading `.json` files.\n\n Inherits some common for text files util functions from `TextFileDispatcher` class.\n \"\"\"\n\n @classmethod\n def _read(cls, path_or_buf, **kwargs):\n \"\"\"\n Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.\n\n Parameters\n ----------\n path_or_buf : str, path object or file-like object\n `path_or_buf` parameter of `read_json` function.\n **kwargs : dict\n Parameters of `read_json` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n \"\"\"\n path_or_buf = cls.get_path_or_buffer(path_or_buf)\n if isinstance(path_or_buf, str):\n if not cls.file_exists(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n path_or_buf = cls.get_path(path_or_buf)\n elif not cls.pathlib_or_pypath(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n if not kwargs.get(\"lines\", False):\n return cls.single_worker_read(path_or_buf, **kwargs)\n columns = pandas.read_json(\n BytesIO(b\"\" + open(path_or_buf, \"rb\").readline()), lines=True\n ).columns\n kwargs[\"columns\"] = columns\n empty_pd_df = pandas.DataFrame(columns=columns)\n\n with cls.file_open(path_or_buf, \"rb\", kwargs.get(\"compression\", \"infer\")) as f:\n partition_ids = []\n index_ids = []\n dtypes_ids = []\n\n column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)\n\n args = {\"fname\": path_or_buf, \"num_splits\": num_splits, **kwargs}\n\n splits = cls.partitioned_file(\n f,\n num_partitions=NPartitions.get(),\n is_quoting=(args.get(\"quoting\", \"\") != QUOTE_NONE),\n )\n for start, end in splits:\n args.update({\"start\": start, \"end\": end})\n partition_id = cls.deploy(cls.parse, num_splits + 3, args)\n partition_ids.append(partition_id[:-3])\n index_ids.append(partition_id[-3])\n dtypes_ids.append(partition_id[-2])\n\n # partition_id[-1] contains the columns for each partition, which will be useful\n # for implementing when `lines=False`.\n row_lengths = cls.materialize(index_ids)\n new_index = pandas.RangeIndex(sum(row_lengths))\n\n dtypes = cls.get_dtypes(dtypes_ids)\n partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)\n\n if isinstance(dtypes, pandas.Series):\n dtypes.index = columns\n else:\n dtypes = pandas.Series(dtypes, index=columns)\n\n new_frame = cls.frame_cls(\n np.array(partition_ids),\n new_index,\n columns,\n row_lengths,\n column_widths,\n dtypes=dtypes,\n )\n new_frame.synchronize_labels(axis=0)\n return cls.query_compiler_cls(new_frame)\n", "path": "modin/engines/base/io/text/json_dispatcher.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses `JSONDispatcher` class, that is used for reading `.json` files.\"\"\"\n\nfrom modin.engines.base.io.text.text_file_dispatcher import TextFileDispatcher\nfrom io import BytesIO\nimport pandas\nimport numpy as np\n\nfrom modin.config import NPartitions\n\n\nclass JSONDispatcher(TextFileDispatcher):\n \"\"\"\n Class handles utils for reading `.json` files.\n\n Inherits some common for text files util functions from `TextFileDispatcher` class.\n \"\"\"\n\n @classmethod\n def _read(cls, path_or_buf, **kwargs):\n \"\"\"\n Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.\n\n Parameters\n ----------\n path_or_buf : str, path object or file-like object\n `path_or_buf` parameter of `read_json` function.\n **kwargs : dict\n Parameters of `read_json` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n \"\"\"\n path_or_buf = cls.get_path_or_buffer(path_or_buf)\n if isinstance(path_or_buf, str):\n if not cls.file_exists(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n path_or_buf = cls.get_path(path_or_buf)\n elif not cls.pathlib_or_pypath(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n if not kwargs.get(\"lines\", False):\n return cls.single_worker_read(path_or_buf, **kwargs)\n columns = pandas.read_json(\n BytesIO(b\"\" + open(path_or_buf, \"rb\").readline()), lines=True\n ).columns\n kwargs[\"columns\"] = columns\n empty_pd_df = pandas.DataFrame(columns=columns)\n\n with cls.file_open(path_or_buf, \"rb\", kwargs.get(\"compression\", \"infer\")) as f:\n partition_ids = []\n index_ids = []\n dtypes_ids = []\n\n column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)\n\n args = {\"fname\": path_or_buf, \"num_splits\": num_splits, **kwargs}\n\n splits = cls.partitioned_file(\n f,\n num_partitions=NPartitions.get(),\n )\n for start, end in splits:\n args.update({\"start\": start, \"end\": end})\n partition_id = cls.deploy(cls.parse, num_splits + 3, args)\n partition_ids.append(partition_id[:-3])\n index_ids.append(partition_id[-3])\n dtypes_ids.append(partition_id[-2])\n\n # partition_id[-1] contains the columns for each partition, which will be useful\n # for implementing when `lines=False`.\n row_lengths = cls.materialize(index_ids)\n new_index = pandas.RangeIndex(sum(row_lengths))\n\n dtypes = cls.get_dtypes(dtypes_ids)\n partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)\n\n if isinstance(dtypes, pandas.Series):\n dtypes.index = columns\n else:\n dtypes = pandas.Series(dtypes, index=columns)\n\n new_frame = cls.frame_cls(\n np.array(partition_ids),\n new_index,\n columns,\n row_lengths,\n column_widths,\n dtypes=dtypes,\n )\n new_frame.synchronize_labels(axis=0)\n return cls.query_compiler_cls(new_frame)\n", "path": "modin/engines/base/io/text/json_dispatcher.py"}]}
1,428
170
gh_patches_debug_7993
rasdani/github-patches
git_diff
pytorch__vision-2954
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feature Pyramid Network code bug https://github.com/pytorch/vision/blob/c2e8a00885e68ae1200eb6440f540e181d9125de/torchvision/ops/feature_pyramid_network.py#L60 this line does not work, I think it should be modified as self.modules() instead of self.children() --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/ops/feature_pyramid_network.py` Content: ``` 1 from collections import OrderedDict 2 3 import torch 4 import torch.nn.functional as F 5 from torch import nn, Tensor 6 7 from torch.jit.annotations import Tuple, List, Dict, Optional 8 9 10 class ExtraFPNBlock(nn.Module): 11 """ 12 Base class for the extra block in the FPN. 13 14 Arguments: 15 results (List[Tensor]): the result of the FPN 16 x (List[Tensor]): the original feature maps 17 names (List[str]): the names for each one of the 18 original feature maps 19 20 Returns: 21 results (List[Tensor]): the extended set of results 22 of the FPN 23 names (List[str]): the extended set of names for the results 24 """ 25 def forward( 26 self, 27 results: List[Tensor], 28 x: List[Tensor], 29 names: List[str], 30 ) -> Tuple[List[Tensor], List[str]]: 31 pass 32 33 34 class FeaturePyramidNetwork(nn.Module): 35 """ 36 Module that adds a FPN from on top of a set of feature maps. This is based on 37 `"Feature Pyramid Network for Object Detection" <https://arxiv.org/abs/1612.03144>`_. 38 39 The feature maps are currently supposed to be in increasing depth 40 order. 41 42 The input to the model is expected to be an OrderedDict[Tensor], containing 43 the feature maps on top of which the FPN will be added. 44 45 Arguments: 46 in_channels_list (list[int]): number of channels for each feature map that 47 is passed to the module 48 out_channels (int): number of channels of the FPN representation 49 extra_blocks (ExtraFPNBlock or None): if provided, extra operations will 50 be performed. It is expected to take the fpn features, the original 51 features and the names of the original features as input, and returns 52 a new list of feature maps and their corresponding names 53 54 Examples:: 55 56 >>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5) 57 >>> # get some dummy data 58 >>> x = OrderedDict() 59 >>> x['feat0'] = torch.rand(1, 10, 64, 64) 60 >>> x['feat2'] = torch.rand(1, 20, 16, 16) 61 >>> x['feat3'] = torch.rand(1, 30, 8, 8) 62 >>> # compute the FPN on top of x 63 >>> output = m(x) 64 >>> print([(k, v.shape) for k, v in output.items()]) 65 >>> # returns 66 >>> [('feat0', torch.Size([1, 5, 64, 64])), 67 >>> ('feat2', torch.Size([1, 5, 16, 16])), 68 >>> ('feat3', torch.Size([1, 5, 8, 8]))] 69 70 """ 71 def __init__( 72 self, 73 in_channels_list: List[int], 74 out_channels: int, 75 extra_blocks: Optional[ExtraFPNBlock] = None, 76 ): 77 super(FeaturePyramidNetwork, self).__init__() 78 self.inner_blocks = nn.ModuleList() 79 self.layer_blocks = nn.ModuleList() 80 for in_channels in in_channels_list: 81 if in_channels == 0: 82 raise ValueError("in_channels=0 is currently not supported") 83 inner_block_module = nn.Conv2d(in_channels, out_channels, 1) 84 layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1) 85 self.inner_blocks.append(inner_block_module) 86 self.layer_blocks.append(layer_block_module) 87 88 # initialize parameters now to avoid modifying the initialization of top_blocks 89 for m in self.children(): 90 if isinstance(m, nn.Conv2d): 91 nn.init.kaiming_uniform_(m.weight, a=1) 92 nn.init.constant_(m.bias, 0) 93 94 if extra_blocks is not None: 95 assert isinstance(extra_blocks, ExtraFPNBlock) 96 self.extra_blocks = extra_blocks 97 98 def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor: 99 """ 100 This is equivalent to self.inner_blocks[idx](x), 101 but torchscript doesn't support this yet 102 """ 103 num_blocks = 0 104 for m in self.inner_blocks: 105 num_blocks += 1 106 if idx < 0: 107 idx += num_blocks 108 i = 0 109 out = x 110 for module in self.inner_blocks: 111 if i == idx: 112 out = module(x) 113 i += 1 114 return out 115 116 def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor: 117 """ 118 This is equivalent to self.layer_blocks[idx](x), 119 but torchscript doesn't support this yet 120 """ 121 num_blocks = 0 122 for m in self.layer_blocks: 123 num_blocks += 1 124 if idx < 0: 125 idx += num_blocks 126 i = 0 127 out = x 128 for module in self.layer_blocks: 129 if i == idx: 130 out = module(x) 131 i += 1 132 return out 133 134 def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]: 135 """ 136 Computes the FPN for a set of feature maps. 137 138 Arguments: 139 x (OrderedDict[Tensor]): feature maps for each feature level. 140 141 Returns: 142 results (OrderedDict[Tensor]): feature maps after FPN layers. 143 They are ordered from highest resolution first. 144 """ 145 # unpack OrderedDict into two lists for easier handling 146 names = list(x.keys()) 147 x = list(x.values()) 148 149 last_inner = self.get_result_from_inner_blocks(x[-1], -1) 150 results = [] 151 results.append(self.get_result_from_layer_blocks(last_inner, -1)) 152 153 for idx in range(len(x) - 2, -1, -1): 154 inner_lateral = self.get_result_from_inner_blocks(x[idx], idx) 155 feat_shape = inner_lateral.shape[-2:] 156 inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="nearest") 157 last_inner = inner_lateral + inner_top_down 158 results.insert(0, self.get_result_from_layer_blocks(last_inner, idx)) 159 160 if self.extra_blocks is not None: 161 results, names = self.extra_blocks(results, x, names) 162 163 # make it back an OrderedDict 164 out = OrderedDict([(k, v) for k, v in zip(names, results)]) 165 166 return out 167 168 169 class LastLevelMaxPool(ExtraFPNBlock): 170 """ 171 Applies a max_pool2d on top of the last feature map 172 """ 173 def forward( 174 self, 175 x: List[Tensor], 176 y: List[Tensor], 177 names: List[str], 178 ) -> Tuple[List[Tensor], List[str]]: 179 names.append("pool") 180 x.append(F.max_pool2d(x[-1], 1, 2, 0)) 181 return x, names 182 183 184 class LastLevelP6P7(ExtraFPNBlock): 185 """ 186 This module is used in RetinaNet to generate extra layers, P6 and P7. 187 """ 188 def __init__(self, in_channels: int, out_channels: int): 189 super(LastLevelP6P7, self).__init__() 190 self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) 191 self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) 192 for module in [self.p6, self.p7]: 193 nn.init.kaiming_uniform_(module.weight, a=1) 194 nn.init.constant_(module.bias, 0) 195 self.use_P5 = in_channels == out_channels 196 197 def forward( 198 self, 199 p: List[Tensor], 200 c: List[Tensor], 201 names: List[str], 202 ) -> Tuple[List[Tensor], List[str]]: 203 p5, c5 = p[-1], c[-1] 204 x = p5 if self.use_P5 else c5 205 p6 = self.p6(x) 206 p7 = self.p7(F.relu(p6)) 207 p.extend([p6, p7]) 208 names.extend(["p6", "p7"]) 209 return p, names 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/ops/feature_pyramid_network.py b/torchvision/ops/feature_pyramid_network.py --- a/torchvision/ops/feature_pyramid_network.py +++ b/torchvision/ops/feature_pyramid_network.py @@ -86,7 +86,7 @@ self.layer_blocks.append(layer_block_module) # initialize parameters now to avoid modifying the initialization of top_blocks - for m in self.children(): + for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=1) nn.init.constant_(m.bias, 0)
{"golden_diff": "diff --git a/torchvision/ops/feature_pyramid_network.py b/torchvision/ops/feature_pyramid_network.py\n--- a/torchvision/ops/feature_pyramid_network.py\n+++ b/torchvision/ops/feature_pyramid_network.py\n@@ -86,7 +86,7 @@\n self.layer_blocks.append(layer_block_module)\n \n # initialize parameters now to avoid modifying the initialization of top_blocks\n- for m in self.children():\n+ for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_uniform_(m.weight, a=1)\n nn.init.constant_(m.bias, 0)\n", "issue": "Feature Pyramid Network code bug\nhttps://github.com/pytorch/vision/blob/c2e8a00885e68ae1200eb6440f540e181d9125de/torchvision/ops/feature_pyramid_network.py#L60\r\n\r\nthis line does not work, I think it should be modified as self.modules() instead of self.children()\n", "before_files": [{"content": "from collections import OrderedDict\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\n\nfrom torch.jit.annotations import Tuple, List, Dict, Optional\n\n\nclass ExtraFPNBlock(nn.Module):\n \"\"\"\n Base class for the extra block in the FPN.\n\n Arguments:\n results (List[Tensor]): the result of the FPN\n x (List[Tensor]): the original feature maps\n names (List[str]): the names for each one of the\n original feature maps\n\n Returns:\n results (List[Tensor]): the extended set of results\n of the FPN\n names (List[str]): the extended set of names for the results\n \"\"\"\n def forward(\n self,\n results: List[Tensor],\n x: List[Tensor],\n names: List[str],\n ) -> Tuple[List[Tensor], List[str]]:\n pass\n\n\nclass FeaturePyramidNetwork(nn.Module):\n \"\"\"\n Module that adds a FPN from on top of a set of feature maps. This is based on\n `\"Feature Pyramid Network for Object Detection\" <https://arxiv.org/abs/1612.03144>`_.\n\n The feature maps are currently supposed to be in increasing depth\n order.\n\n The input to the model is expected to be an OrderedDict[Tensor], containing\n the feature maps on top of which the FPN will be added.\n\n Arguments:\n in_channels_list (list[int]): number of channels for each feature map that\n is passed to the module\n out_channels (int): number of channels of the FPN representation\n extra_blocks (ExtraFPNBlock or None): if provided, extra operations will\n be performed. It is expected to take the fpn features, the original\n features and the names of the original features as input, and returns\n a new list of feature maps and their corresponding names\n\n Examples::\n\n >>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5)\n >>> # get some dummy data\n >>> x = OrderedDict()\n >>> x['feat0'] = torch.rand(1, 10, 64, 64)\n >>> x['feat2'] = torch.rand(1, 20, 16, 16)\n >>> x['feat3'] = torch.rand(1, 30, 8, 8)\n >>> # compute the FPN on top of x\n >>> output = m(x)\n >>> print([(k, v.shape) for k, v in output.items()])\n >>> # returns\n >>> [('feat0', torch.Size([1, 5, 64, 64])),\n >>> ('feat2', torch.Size([1, 5, 16, 16])),\n >>> ('feat3', torch.Size([1, 5, 8, 8]))]\n\n \"\"\"\n def __init__(\n self,\n in_channels_list: List[int],\n out_channels: int,\n extra_blocks: Optional[ExtraFPNBlock] = None,\n ):\n super(FeaturePyramidNetwork, self).__init__()\n self.inner_blocks = nn.ModuleList()\n self.layer_blocks = nn.ModuleList()\n for in_channels in in_channels_list:\n if in_channels == 0:\n raise ValueError(\"in_channels=0 is currently not supported\")\n inner_block_module = nn.Conv2d(in_channels, out_channels, 1)\n layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n self.inner_blocks.append(inner_block_module)\n self.layer_blocks.append(layer_block_module)\n\n # initialize parameters now to avoid modifying the initialization of top_blocks\n for m in self.children():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_uniform_(m.weight, a=1)\n nn.init.constant_(m.bias, 0)\n\n if extra_blocks is not None:\n assert isinstance(extra_blocks, ExtraFPNBlock)\n self.extra_blocks = extra_blocks\n\n def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor:\n \"\"\"\n This is equivalent to self.inner_blocks[idx](x),\n but torchscript doesn't support this yet\n \"\"\"\n num_blocks = 0\n for m in self.inner_blocks:\n num_blocks += 1\n if idx < 0:\n idx += num_blocks\n i = 0\n out = x\n for module in self.inner_blocks:\n if i == idx:\n out = module(x)\n i += 1\n return out\n\n def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor:\n \"\"\"\n This is equivalent to self.layer_blocks[idx](x),\n but torchscript doesn't support this yet\n \"\"\"\n num_blocks = 0\n for m in self.layer_blocks:\n num_blocks += 1\n if idx < 0:\n idx += num_blocks\n i = 0\n out = x\n for module in self.layer_blocks:\n if i == idx:\n out = module(x)\n i += 1\n return out\n\n def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]:\n \"\"\"\n Computes the FPN for a set of feature maps.\n\n Arguments:\n x (OrderedDict[Tensor]): feature maps for each feature level.\n\n Returns:\n results (OrderedDict[Tensor]): feature maps after FPN layers.\n They are ordered from highest resolution first.\n \"\"\"\n # unpack OrderedDict into two lists for easier handling\n names = list(x.keys())\n x = list(x.values())\n\n last_inner = self.get_result_from_inner_blocks(x[-1], -1)\n results = []\n results.append(self.get_result_from_layer_blocks(last_inner, -1))\n\n for idx in range(len(x) - 2, -1, -1):\n inner_lateral = self.get_result_from_inner_blocks(x[idx], idx)\n feat_shape = inner_lateral.shape[-2:]\n inner_top_down = F.interpolate(last_inner, size=feat_shape, mode=\"nearest\")\n last_inner = inner_lateral + inner_top_down\n results.insert(0, self.get_result_from_layer_blocks(last_inner, idx))\n\n if self.extra_blocks is not None:\n results, names = self.extra_blocks(results, x, names)\n\n # make it back an OrderedDict\n out = OrderedDict([(k, v) for k, v in zip(names, results)])\n\n return out\n\n\nclass LastLevelMaxPool(ExtraFPNBlock):\n \"\"\"\n Applies a max_pool2d on top of the last feature map\n \"\"\"\n def forward(\n self,\n x: List[Tensor],\n y: List[Tensor],\n names: List[str],\n ) -> Tuple[List[Tensor], List[str]]:\n names.append(\"pool\")\n x.append(F.max_pool2d(x[-1], 1, 2, 0))\n return x, names\n\n\nclass LastLevelP6P7(ExtraFPNBlock):\n \"\"\"\n This module is used in RetinaNet to generate extra layers, P6 and P7.\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int):\n super(LastLevelP6P7, self).__init__()\n self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)\n self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)\n for module in [self.p6, self.p7]:\n nn.init.kaiming_uniform_(module.weight, a=1)\n nn.init.constant_(module.bias, 0)\n self.use_P5 = in_channels == out_channels\n\n def forward(\n self,\n p: List[Tensor],\n c: List[Tensor],\n names: List[str],\n ) -> Tuple[List[Tensor], List[str]]:\n p5, c5 = p[-1], c[-1]\n x = p5 if self.use_P5 else c5\n p6 = self.p6(x)\n p7 = self.p7(F.relu(p6))\n p.extend([p6, p7])\n names.extend([\"p6\", \"p7\"])\n return p, names\n", "path": "torchvision/ops/feature_pyramid_network.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\n\nfrom torch.jit.annotations import Tuple, List, Dict, Optional\n\n\nclass ExtraFPNBlock(nn.Module):\n \"\"\"\n Base class for the extra block in the FPN.\n\n Arguments:\n results (List[Tensor]): the result of the FPN\n x (List[Tensor]): the original feature maps\n names (List[str]): the names for each one of the\n original feature maps\n\n Returns:\n results (List[Tensor]): the extended set of results\n of the FPN\n names (List[str]): the extended set of names for the results\n \"\"\"\n def forward(\n self,\n results: List[Tensor],\n x: List[Tensor],\n names: List[str],\n ) -> Tuple[List[Tensor], List[str]]:\n pass\n\n\nclass FeaturePyramidNetwork(nn.Module):\n \"\"\"\n Module that adds a FPN from on top of a set of feature maps. This is based on\n `\"Feature Pyramid Network for Object Detection\" <https://arxiv.org/abs/1612.03144>`_.\n\n The feature maps are currently supposed to be in increasing depth\n order.\n\n The input to the model is expected to be an OrderedDict[Tensor], containing\n the feature maps on top of which the FPN will be added.\n\n Arguments:\n in_channels_list (list[int]): number of channels for each feature map that\n is passed to the module\n out_channels (int): number of channels of the FPN representation\n extra_blocks (ExtraFPNBlock or None): if provided, extra operations will\n be performed. It is expected to take the fpn features, the original\n features and the names of the original features as input, and returns\n a new list of feature maps and their corresponding names\n\n Examples::\n\n >>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5)\n >>> # get some dummy data\n >>> x = OrderedDict()\n >>> x['feat0'] = torch.rand(1, 10, 64, 64)\n >>> x['feat2'] = torch.rand(1, 20, 16, 16)\n >>> x['feat3'] = torch.rand(1, 30, 8, 8)\n >>> # compute the FPN on top of x\n >>> output = m(x)\n >>> print([(k, v.shape) for k, v in output.items()])\n >>> # returns\n >>> [('feat0', torch.Size([1, 5, 64, 64])),\n >>> ('feat2', torch.Size([1, 5, 16, 16])),\n >>> ('feat3', torch.Size([1, 5, 8, 8]))]\n\n \"\"\"\n def __init__(\n self,\n in_channels_list: List[int],\n out_channels: int,\n extra_blocks: Optional[ExtraFPNBlock] = None,\n ):\n super(FeaturePyramidNetwork, self).__init__()\n self.inner_blocks = nn.ModuleList()\n self.layer_blocks = nn.ModuleList()\n for in_channels in in_channels_list:\n if in_channels == 0:\n raise ValueError(\"in_channels=0 is currently not supported\")\n inner_block_module = nn.Conv2d(in_channels, out_channels, 1)\n layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n self.inner_blocks.append(inner_block_module)\n self.layer_blocks.append(layer_block_module)\n\n # initialize parameters now to avoid modifying the initialization of top_blocks\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_uniform_(m.weight, a=1)\n nn.init.constant_(m.bias, 0)\n\n if extra_blocks is not None:\n assert isinstance(extra_blocks, ExtraFPNBlock)\n self.extra_blocks = extra_blocks\n\n def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor:\n \"\"\"\n This is equivalent to self.inner_blocks[idx](x),\n but torchscript doesn't support this yet\n \"\"\"\n num_blocks = 0\n for m in self.inner_blocks:\n num_blocks += 1\n if idx < 0:\n idx += num_blocks\n i = 0\n out = x\n for module in self.inner_blocks:\n if i == idx:\n out = module(x)\n i += 1\n return out\n\n def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor:\n \"\"\"\n This is equivalent to self.layer_blocks[idx](x),\n but torchscript doesn't support this yet\n \"\"\"\n num_blocks = 0\n for m in self.layer_blocks:\n num_blocks += 1\n if idx < 0:\n idx += num_blocks\n i = 0\n out = x\n for module in self.layer_blocks:\n if i == idx:\n out = module(x)\n i += 1\n return out\n\n def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]:\n \"\"\"\n Computes the FPN for a set of feature maps.\n\n Arguments:\n x (OrderedDict[Tensor]): feature maps for each feature level.\n\n Returns:\n results (OrderedDict[Tensor]): feature maps after FPN layers.\n They are ordered from highest resolution first.\n \"\"\"\n # unpack OrderedDict into two lists for easier handling\n names = list(x.keys())\n x = list(x.values())\n\n last_inner = self.get_result_from_inner_blocks(x[-1], -1)\n results = []\n results.append(self.get_result_from_layer_blocks(last_inner, -1))\n\n for idx in range(len(x) - 2, -1, -1):\n inner_lateral = self.get_result_from_inner_blocks(x[idx], idx)\n feat_shape = inner_lateral.shape[-2:]\n inner_top_down = F.interpolate(last_inner, size=feat_shape, mode=\"nearest\")\n last_inner = inner_lateral + inner_top_down\n results.insert(0, self.get_result_from_layer_blocks(last_inner, idx))\n\n if self.extra_blocks is not None:\n results, names = self.extra_blocks(results, x, names)\n\n # make it back an OrderedDict\n out = OrderedDict([(k, v) for k, v in zip(names, results)])\n\n return out\n\n\nclass LastLevelMaxPool(ExtraFPNBlock):\n \"\"\"\n Applies a max_pool2d on top of the last feature map\n \"\"\"\n def forward(\n self,\n x: List[Tensor],\n y: List[Tensor],\n names: List[str],\n ) -> Tuple[List[Tensor], List[str]]:\n names.append(\"pool\")\n x.append(F.max_pool2d(x[-1], 1, 2, 0))\n return x, names\n\n\nclass LastLevelP6P7(ExtraFPNBlock):\n \"\"\"\n This module is used in RetinaNet to generate extra layers, P6 and P7.\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int):\n super(LastLevelP6P7, self).__init__()\n self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)\n self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)\n for module in [self.p6, self.p7]:\n nn.init.kaiming_uniform_(module.weight, a=1)\n nn.init.constant_(module.bias, 0)\n self.use_P5 = in_channels == out_channels\n\n def forward(\n self,\n p: List[Tensor],\n c: List[Tensor],\n names: List[str],\n ) -> Tuple[List[Tensor], List[str]]:\n p5, c5 = p[-1], c[-1]\n x = p5 if self.use_P5 else c5\n p6 = self.p6(x)\n p7 = self.p7(F.relu(p6))\n p.extend([p6, p7])\n names.extend([\"p6\", \"p7\"])\n return p, names\n", "path": "torchvision/ops/feature_pyramid_network.py"}]}
2,687
144
gh_patches_debug_33856
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-2674
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests [FEATURE]: Patch meta information of `torch.nn.functional.softmax()` This is a part of issue #2628, we will patch meta information of `torch.nn.functional.softmax()` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `colossalai/auto_parallel/meta_profiler/meta_registry/activation.py` Content: ``` 1 from typing import List, Tuple 2 3 import torch 4 5 from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem 6 from colossalai.fx.profiler.memory_utils import activation_size 7 from colossalai.fx.profiler.opcount import flop_mapping 8 9 from ..registry import meta_register 10 11 __all__ = ["relu_meta_info"] 12 13 14 @meta_register.register(torch.nn.ReLU) 15 def relu_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: 16 """torch.nn.ReLU metainfo generator 17 The aten graph of torch.nn.ReLU is 18 graph(): 19 %input_2 : [#users=1] = placeholder[target=placeholder](default=) 20 %relu_default : [#users=2] = call_function[target=torch.ops.aten.relu.default](args = (%input_2,), kwargs = {}) 21 %zeros_like_default : [#users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%relu_default,), kwargs = {dtype: None, layout: None, device: None, pin_memory: None}) 22 %detach_default : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%relu_default,), kwargs = {}) 23 %threshold_backward_default : [#users=1] = call_function[target=torch.ops.aten.threshold_backward.default](args = (%zeros_like_default, %detach_default, None), kwargs = {}) 24 %detach_default_1 : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%threshold_backward_default,), kwargs = {}) 25 %detach_default_2 : [#users=0] = call_function[target=torch.ops.aten.detach.default](args = (%detach_default_1,), kwargs = {}) 26 27 Returns: 28 Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs 29 """ 30 31 input_tensor = args[0].data 32 output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data 33 is_inplace = kwargs.get("inplace", False) 34 35 # construct input args for forward 36 fwd_in_args = [input_tensor] 37 38 # construct input args for backward 39 bwd_in_args = [output_tensor] 40 41 # calculate cost 42 # the fwd op with compute cost is relu.default 43 # the bwd op with compute cost is threshold_backward 44 45 # calculate compute cost 46 fwd_compute_cost = flop_mapping[torch.ops.aten.relu.default](fwd_in_args, (output_tensor,)) 47 bwd_compute_cost = flop_mapping[torch.ops.aten.threshold_backward.default](bwd_in_args, (input_tensor,)) 48 compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost) 49 50 # calculate memory cost 51 # NOTE: the inplace ReLU don't have forward memory cost 52 # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward 53 fwd_memory_cost = MemoryCost( 54 activation=activation_size(input_tensor) if is_inplace else activation_size([output_tensor, input_tensor]), 55 parameter=0, 56 temp=0, 57 buffer=0) 58 59 bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor), parameter=0, temp=0, buffer=0) 60 61 # total cost is the sum of forward and backward cost 62 total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation, 63 parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter) 64 65 memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost) 66 67 # store fwd_in, fwd_buffer, fwd_out 68 # NOTE: It might seems a little bit weird here, we just want to align it with the older version 69 # of MetaInfoProp. In the future we might modify this part to make it clearer. 70 fwd_in = [] 71 fwd_buffer = [torch.zeros_like(output_tensor, device='meta')] 72 fwd_out = [torch.zeros_like(output_tensor, device='meta')] 73 74 return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py b/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py --- a/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py +++ b/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py @@ -72,3 +72,53 @@ fwd_out = [torch.zeros_like(output_tensor, device='meta')] return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out + + +@meta_register.register(torch.nn.Softmax) +@meta_register.register(torch.nn.functional.softmax) +def softmax_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: + """torch.nn.Softmax metainfo generator + Returns: + Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs + """ + input_tensor = next( + filter( + lambda x: + (x.type == OperationDataType.ARG or x.type == OperationDataType.PARAM) and x.name != 'softmax_dim', + args)).data + output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data + softmax_dim = next(filter(lambda x: x.name == 'softmax_dim', args)).data + + # calculate cost + + # calculate compute cost + fwd_compute_cost = flop_mapping[torch.ops.aten._softmax.default]([input_tensor], [output_tensor]) + bwd_compute_cost = flop_mapping[torch.ops.aten._softmax_backward_data.default]([output_tensor], [input_tensor]) + + compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost) + + # calculate memory cost + # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward + fwd_memory_cost = MemoryCost(activation=activation_size([input_tensor, output_tensor]), + parameter=0, + temp=0, + buffer=0) + bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor), + parameter=0, + temp=activation_size(input_tensor), + buffer=0) + + # total cost is the sum of forward and backward cost + total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation, + parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter, + temp=fwd_memory_cost.temp + bwd_memory_cost.temp, + buffer=fwd_memory_cost.buffer + bwd_memory_cost.buffer) + + memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost) + + # store fwd_in, fwd_buffer, fwd_out + fwd_in = [] + fwd_buffer = [torch.zeros_like(output_tensor, device='meta')] + fwd_out = [torch.zeros_like(output_tensor, device='meta')] + + return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out
{"golden_diff": "diff --git a/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py b/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py\n--- a/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py\n+++ b/colossalai/auto_parallel/meta_profiler/meta_registry/activation.py\n@@ -72,3 +72,53 @@\n fwd_out = [torch.zeros_like(output_tensor, device='meta')]\n \n return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n+\n+\n+@meta_register.register(torch.nn.Softmax)\n+@meta_register.register(torch.nn.functional.softmax)\n+def softmax_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:\n+ \"\"\"torch.nn.Softmax metainfo generator\n+ Returns:\n+ Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs\n+ \"\"\"\n+ input_tensor = next(\n+ filter(\n+ lambda x:\n+ (x.type == OperationDataType.ARG or x.type == OperationDataType.PARAM) and x.name != 'softmax_dim',\n+ args)).data\n+ output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data\n+ softmax_dim = next(filter(lambda x: x.name == 'softmax_dim', args)).data\n+\n+ # calculate cost\n+\n+ # calculate compute cost\n+ fwd_compute_cost = flop_mapping[torch.ops.aten._softmax.default]([input_tensor], [output_tensor])\n+ bwd_compute_cost = flop_mapping[torch.ops.aten._softmax_backward_data.default]([output_tensor], [input_tensor])\n+\n+ compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)\n+\n+ # calculate memory cost\n+ # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward\n+ fwd_memory_cost = MemoryCost(activation=activation_size([input_tensor, output_tensor]),\n+ parameter=0,\n+ temp=0,\n+ buffer=0)\n+ bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor),\n+ parameter=0,\n+ temp=activation_size(input_tensor),\n+ buffer=0)\n+\n+ # total cost is the sum of forward and backward cost\n+ total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation,\n+ parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter,\n+ temp=fwd_memory_cost.temp + bwd_memory_cost.temp,\n+ buffer=fwd_memory_cost.buffer + bwd_memory_cost.buffer)\n+\n+ memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost)\n+\n+ # store fwd_in, fwd_buffer, fwd_out\n+ fwd_in = []\n+ fwd_buffer = [torch.zeros_like(output_tensor, device='meta')]\n+ fwd_out = [torch.zeros_like(output_tensor, device='meta')]\n+\n+ return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[FEATURE]: Patch meta information of `torch.nn.functional.softmax()`\nThis is a part of issue #2628, we will patch meta information of `torch.nn.functional.softmax()`\n", "before_files": [{"content": "from typing import List, Tuple\n\nimport torch\n\nfrom colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem\nfrom colossalai.fx.profiler.memory_utils import activation_size\nfrom colossalai.fx.profiler.opcount import flop_mapping\n\nfrom ..registry import meta_register\n\n__all__ = [\"relu_meta_info\"]\n\n\n@meta_register.register(torch.nn.ReLU)\ndef relu_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:\n \"\"\"torch.nn.ReLU metainfo generator\n The aten graph of torch.nn.ReLU is\n graph():\n %input_2 : [#users=1] = placeholder[target=placeholder](default=)\n %relu_default : [#users=2] = call_function[target=torch.ops.aten.relu.default](args = (%input_2,), kwargs = {})\n %zeros_like_default : [#users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%relu_default,), kwargs = {dtype: None, layout: None, device: None, pin_memory: None})\n %detach_default : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%relu_default,), kwargs = {})\n %threshold_backward_default : [#users=1] = call_function[target=torch.ops.aten.threshold_backward.default](args = (%zeros_like_default, %detach_default, None), kwargs = {})\n %detach_default_1 : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%threshold_backward_default,), kwargs = {})\n %detach_default_2 : [#users=0] = call_function[target=torch.ops.aten.detach.default](args = (%detach_default_1,), kwargs = {})\n\n Returns:\n Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs\n \"\"\"\n\n input_tensor = args[0].data\n output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data\n is_inplace = kwargs.get(\"inplace\", False)\n\n # construct input args for forward\n fwd_in_args = [input_tensor]\n\n # construct input args for backward\n bwd_in_args = [output_tensor]\n\n # calculate cost\n # the fwd op with compute cost is relu.default\n # the bwd op with compute cost is threshold_backward\n\n # calculate compute cost\n fwd_compute_cost = flop_mapping[torch.ops.aten.relu.default](fwd_in_args, (output_tensor,))\n bwd_compute_cost = flop_mapping[torch.ops.aten.threshold_backward.default](bwd_in_args, (input_tensor,))\n compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)\n\n # calculate memory cost\n # NOTE: the inplace ReLU don't have forward memory cost\n # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward\n fwd_memory_cost = MemoryCost(\n activation=activation_size(input_tensor) if is_inplace else activation_size([output_tensor, input_tensor]),\n parameter=0,\n temp=0,\n buffer=0)\n\n bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor), parameter=0, temp=0, buffer=0)\n\n # total cost is the sum of forward and backward cost\n total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation,\n parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter)\n\n memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost)\n\n # store fwd_in, fwd_buffer, fwd_out\n # NOTE: It might seems a little bit weird here, we just want to align it with the older version\n # of MetaInfoProp. In the future we might modify this part to make it clearer.\n fwd_in = []\n fwd_buffer = [torch.zeros_like(output_tensor, device='meta')]\n fwd_out = [torch.zeros_like(output_tensor, device='meta')]\n\n return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n", "path": "colossalai/auto_parallel/meta_profiler/meta_registry/activation.py"}], "after_files": [{"content": "from typing import List, Tuple\n\nimport torch\n\nfrom colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem\nfrom colossalai.fx.profiler.memory_utils import activation_size\nfrom colossalai.fx.profiler.opcount import flop_mapping\n\nfrom ..registry import meta_register\n\n__all__ = [\"relu_meta_info\"]\n\n\n@meta_register.register(torch.nn.ReLU)\ndef relu_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:\n \"\"\"torch.nn.ReLU metainfo generator\n The aten graph of torch.nn.ReLU is\n graph():\n %input_2 : [#users=1] = placeholder[target=placeholder](default=)\n %relu_default : [#users=2] = call_function[target=torch.ops.aten.relu.default](args = (%input_2,), kwargs = {})\n %zeros_like_default : [#users=1] = call_function[target=torch.ops.aten.zeros_like.default](args = (%relu_default,), kwargs = {dtype: None, layout: None, device: None, pin_memory: None})\n %detach_default : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%relu_default,), kwargs = {})\n %threshold_backward_default : [#users=1] = call_function[target=torch.ops.aten.threshold_backward.default](args = (%zeros_like_default, %detach_default, None), kwargs = {})\n %detach_default_1 : [#users=1] = call_function[target=torch.ops.aten.detach.default](args = (%threshold_backward_default,), kwargs = {})\n %detach_default_2 : [#users=0] = call_function[target=torch.ops.aten.detach.default](args = (%detach_default_1,), kwargs = {})\n\n Returns:\n Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs\n \"\"\"\n\n input_tensor = args[0].data\n output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data\n is_inplace = kwargs.get(\"inplace\", False)\n\n # construct input args for forward\n fwd_in_args = [input_tensor]\n\n # construct input args for backward\n bwd_in_args = [output_tensor]\n\n # calculate cost\n # the fwd op with compute cost is relu.default\n # the bwd op with compute cost is threshold_backward\n\n # calculate compute cost\n fwd_compute_cost = flop_mapping[torch.ops.aten.relu.default](fwd_in_args, (output_tensor,))\n bwd_compute_cost = flop_mapping[torch.ops.aten.threshold_backward.default](bwd_in_args, (input_tensor,))\n compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)\n\n # calculate memory cost\n # NOTE: the inplace ReLU don't have forward memory cost\n # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward\n fwd_memory_cost = MemoryCost(\n activation=activation_size(input_tensor) if is_inplace else activation_size([output_tensor, input_tensor]),\n parameter=0,\n temp=0,\n buffer=0)\n\n bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor), parameter=0, temp=0, buffer=0)\n\n # total cost is the sum of forward and backward cost\n total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation,\n parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter)\n\n memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost)\n\n # store fwd_in, fwd_buffer, fwd_out\n # NOTE: It might seems a little bit weird here, we just want to align it with the older version\n # of MetaInfoProp. In the future we might modify this part to make it clearer.\n fwd_in = []\n fwd_buffer = [torch.zeros_like(output_tensor, device='meta')]\n fwd_out = [torch.zeros_like(output_tensor, device='meta')]\n\n return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n\n\n@meta_register.register(torch.nn.Softmax)\n@meta_register.register(torch.nn.functional.softmax)\ndef softmax_meta_info(*args, **kwargs) -> Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]:\n \"\"\"torch.nn.Softmax metainfo generator\n Returns:\n Tuple[TrainCycleItem, TrainCycleItem, List[torch.Tensor]]: compute cost, memory cost and forward inputs\n \"\"\"\n input_tensor = next(\n filter(\n lambda x:\n (x.type == OperationDataType.ARG or x.type == OperationDataType.PARAM) and x.name != 'softmax_dim',\n args)).data\n output_tensor = next(filter(lambda x: x.type == OperationDataType.OUTPUT, args)).data\n softmax_dim = next(filter(lambda x: x.name == 'softmax_dim', args)).data\n\n # calculate cost\n\n # calculate compute cost\n fwd_compute_cost = flop_mapping[torch.ops.aten._softmax.default]([input_tensor], [output_tensor])\n bwd_compute_cost = flop_mapping[torch.ops.aten._softmax_backward_data.default]([output_tensor], [input_tensor])\n\n compute_cost = TrainCycleItem(fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost)\n\n # calculate memory cost\n # NOTE: currently in SPMD solver we always believe that there will be a new tensor created in forward\n fwd_memory_cost = MemoryCost(activation=activation_size([input_tensor, output_tensor]),\n parameter=0,\n temp=0,\n buffer=0)\n bwd_memory_cost = MemoryCost(activation=activation_size(input_tensor),\n parameter=0,\n temp=activation_size(input_tensor),\n buffer=0)\n\n # total cost is the sum of forward and backward cost\n total_cost = MemoryCost(activation=fwd_memory_cost.activation + bwd_memory_cost.activation,\n parameter=fwd_memory_cost.parameter + bwd_memory_cost.parameter,\n temp=fwd_memory_cost.temp + bwd_memory_cost.temp,\n buffer=fwd_memory_cost.buffer + bwd_memory_cost.buffer)\n\n memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_cost)\n\n # store fwd_in, fwd_buffer, fwd_out\n fwd_in = []\n fwd_buffer = [torch.zeros_like(output_tensor, device='meta')]\n fwd_out = [torch.zeros_like(output_tensor, device='meta')]\n\n return compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out\n", "path": "colossalai/auto_parallel/meta_profiler/meta_registry/activation.py"}]}
1,381
692
gh_patches_debug_37193
rasdani/github-patches
git_diff
fidals__shopelectro-233
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Change 1C export algo for Tags В этой задаче речь идёт только о файле тегов: shopelectro/management/commands/_update_catalog/update_tags.py 27.11.18 Произошла неприятная история. Контентщик выгрузил нес-ко тегов для гирлянд, но они пропали от нашей выгрузки. Сейчас выгрузка Тегов просто сносит всё что есть и заново накатывает из 1С. Мы немного изменим эту политику. Реализуй такой алгоритм: - все Теги, что зашли из 1С, заливаем в базу с перезаписью - все Теги, что есть в базе, но нет в 1С, не трогаем --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/management/commands/_update_catalog/utils.py` Content: ``` 1 import glob 2 import logging 3 import os 4 import shutil 5 import subprocess 6 import time 7 from contextlib import contextmanager 8 from itertools import chain 9 from typing import Iterator, Dict 10 from uuid import UUID 11 from xml.etree import ElementTree 12 13 import requests 14 from django.conf import settings 15 16 17 logger = logging.getLogger(__name__) 18 Data = Dict[str, str] 19 NOT_SAVE_TEMPLATE = '{entity} with name="{name}" has no {field}. It\'ll not be' \ 20 ' saved' 21 22 23 def is_correct_uuid(uuid_): 24 try: 25 val = UUID(uuid_) 26 except (ValueError, TypeError): 27 return False 28 return str(val) == uuid_ 29 30 31 class XmlFile: 32 33 namespace = '{urn:1C.ru:commerceml_2}' 34 35 def __init__(self, fetch_callback, xml_path_pattern, xpath_queries, 36 extra_options=None): 37 self.fetch_callback = fetch_callback 38 self.xml_path_pattern = xml_path_pattern 39 self.xpath_queries = xpath_queries 40 self.extra_options = extra_options or {} 41 42 @property 43 def parsed_files(self): 44 """Get parsed xml files, that matched the path pattern.""" 45 xml_files = glob.glob(os.path.join( 46 settings.ASSETS_DIR, self.xml_path_pattern 47 )) 48 assert xml_files, 'Files on path {} does not exist.'.format( 49 self.xml_path_pattern 50 ) 51 return [ElementTree.parse(file) for file in xml_files] 52 53 @property 54 def xpaths(self): 55 """Get xpath queries for xml.""" 56 return { 57 name: query.format(self.namespace) 58 for name, query in self.xpath_queries.items() 59 } 60 61 def get_data(self) -> Iterator: 62 """ 63 Get data from xml files. 64 65 Example files with products names or prices. 66 """ 67 return chain.from_iterable( 68 self.fetch_callback(file, self) 69 for file in self.parsed_files 70 ) 71 72 73 @contextmanager 74 def collect_errors(error_types: tuple): 75 errors = [] 76 77 @contextmanager 78 def collect(): 79 try: 80 yield 81 except error_types as error: 82 errors.append(error) 83 yield collect 84 if errors: 85 raise errors[0] 86 87 88 @contextmanager 89 def download_catalog(destination): 90 """Download catalog's xml files and delete after handle them.""" 91 wget_command = ( 92 'wget -r -P {} ftp://{}:{}@{}/webdata' 93 ' 2>&1 | grep "время\|time\|Downloaded"'.format( 94 destination, 95 settings.FTP_USER, 96 settings.FTP_PASS, 97 settings.FTP_IP, 98 ) 99 ) 100 101 subprocess.run(wget_command, shell=True) 102 assert os.path.exists(os.path.join( 103 destination, settings.FTP_IP)), 'Files do not downloaded...' 104 logger.info('Download catalog - completed...') 105 106 try: 107 yield 108 finally: 109 # remove downloaded data 110 shutil.rmtree(os.path.join(destination, settings.FTP_IP)) 111 112 113 def report(error): 114 report_url = getattr(settings, 'SLACK_REPORT_URL', None) 115 if report_url is not None: 116 requests.post( 117 url=report_url, 118 json={ 119 'text': '*Не удалось обновить каталог Shopelectro.*\n' 120 '*Время*: {}\n' 121 '*Ошибка*: {}'.format(time.ctime(), error), 122 } 123 ) 124 ``` Path: `shopelectro/management/commands/_update_catalog/update_tags.py` Content: ``` 1 import logging 2 from copy import deepcopy 3 from itertools import chain 4 from typing import Iterator, Dict 5 from xml.etree.ElementTree import Element 6 7 from django.db import transaction 8 9 from shopelectro.management.commands._update_catalog.utils import ( 10 XmlFile, is_correct_uuid, UUID, Data, 11 ) 12 from shopelectro.models import Tag, TagGroup 13 14 15 logger = logging.getLogger(__name__) 16 17 18 def fetch_tags(root: Element, config: XmlFile): 19 def get_uuid_name_pair( 20 element: Element, 21 uuid_xpath: str, 22 name_xpath: str, 23 ): 24 uuid = element.find(uuid_xpath).text 25 name = element.find(name_xpath).text 26 27 return uuid, name 28 29 tag_groups = root.findall(config.xpaths['tag_groups']) 30 for group in tag_groups: 31 group_uuid, group_name = get_uuid_name_pair( 32 group, 33 config.xpaths['tag_group_uuid'], 34 config.xpaths['tag_group_name'], 35 ) 36 37 tags = group.findall(config.xpaths['tags']) 38 tags_data = ( 39 get_uuid_name_pair( 40 tag, 41 config.xpaths['tag_uuid'], 42 config.xpaths['tag_name'], 43 ) for tag in tags 44 ) 45 46 yield group_uuid, { 47 'name': group_name, 48 'tags_data': tags_data, 49 } 50 51 tag_file = XmlFile( 52 fetch_callback=fetch_tags, 53 xml_path_pattern='**/webdata/**/properties/**/import*.xml', 54 xpath_queries={ 55 'tag_groups': './/{}Свойства/', 56 'tag_group_uuid': '.{}Ид', 57 'tag_group_name': '.{}Наименование', 58 'tags': '.{}ВариантыЗначений/', 59 'tag_name': '.{}Значение', 60 'tag_uuid': '.{}ИдЗначения', 61 }, 62 ) 63 64 65 @transaction.atomic 66 def create_or_update(data: Dict[UUID, Data]): 67 group_data = deepcopy(data) 68 69 created_groups_count = 0 70 created_tags_count = 0 71 72 for group_uuid, data_ in group_data.items(): 73 tags = data_.pop('tags') 74 75 group, group_created = TagGroup.objects.update_or_create( 76 uuid=group_uuid, defaults=data_ 77 ) 78 79 created_groups_count += int(group_created) 80 81 for tag_uuid, tag_data in tags.items(): 82 _, tag_created = Tag.objects.update_or_create( 83 uuid=tag_uuid, 84 defaults={**tag_data, 'group': group} 85 ) 86 87 created_tags_count += int(tag_created) 88 89 logger.info(f'{created_groups_count} tag groups were created.') 90 logger.info(f'{created_tags_count} tags were created.') 91 92 93 @transaction.atomic 94 def delete(group_data: Dict[UUID, Data]): 95 group_data = deepcopy(group_data) 96 97 group_uuids = group_data.keys() 98 tag_uuids = set(chain.from_iterable( 99 data['tags'].keys() 100 for data in group_data.values() 101 )) 102 103 if not (group_uuids and tag_uuids): 104 return 105 106 group_count, _ = TagGroup.objects.exclude(uuid__in=group_uuids).delete() 107 tag_count, _ = Tag.objects.exclude(uuid__in=tag_uuids).delete() 108 109 logger.info(f'{group_count} tag groups and {tag_count} tags were deleted.') 110 111 112 def prepare_data(group_data: Iterator) -> Dict[UUID, Data]: 113 def assembly_structure(group_uuid: str, group_data_: dict): 114 tags_data = group_data_.pop('tags_data', []) 115 tags = { 116 tag_uuid: {'name': tag_name} 117 for tag_uuid, tag_name in tags_data 118 if is_correct_uuid(tag_uuid) 119 } 120 121 return ( 122 group_uuid, { 123 **group_data_, 124 'tags': tags 125 } 126 ) 127 128 return dict( 129 assembly_structure(group_uuid, data) 130 for group_uuid, data in group_data 131 if is_correct_uuid(group_uuid) 132 ) 133 134 135 def main(*args, **kwargs): 136 cleared_group_data = prepare_data(tag_file.get_data()) 137 create_or_update(cleared_group_data) 138 delete(cleared_group_data) 139 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/management/commands/_update_catalog/update_tags.py b/shopelectro/management/commands/_update_catalog/update_tags.py --- a/shopelectro/management/commands/_update_catalog/update_tags.py +++ b/shopelectro/management/commands/_update_catalog/update_tags.py @@ -1,13 +1,12 @@ import logging from copy import deepcopy -from itertools import chain from typing import Iterator, Dict from xml.etree.ElementTree import Element from django.db import transaction from shopelectro.management.commands._update_catalog.utils import ( - XmlFile, is_correct_uuid, UUID, Data, + Data, is_correct_uuid, UUID_TYPE, XmlFile ) from shopelectro.models import Tag, TagGroup @@ -63,7 +62,7 @@ @transaction.atomic -def create_or_update(data: Dict[UUID, Data]): +def create_or_update(data: Dict[UUID_TYPE, Data]): group_data = deepcopy(data) created_groups_count = 0 @@ -90,26 +89,7 @@ logger.info(f'{created_tags_count} tags were created.') -@transaction.atomic -def delete(group_data: Dict[UUID, Data]): - group_data = deepcopy(group_data) - - group_uuids = group_data.keys() - tag_uuids = set(chain.from_iterable( - data['tags'].keys() - for data in group_data.values() - )) - - if not (group_uuids and tag_uuids): - return - - group_count, _ = TagGroup.objects.exclude(uuid__in=group_uuids).delete() - tag_count, _ = Tag.objects.exclude(uuid__in=tag_uuids).delete() - - logger.info(f'{group_count} tag groups and {tag_count} tags were deleted.') - - -def prepare_data(group_data: Iterator) -> Dict[UUID, Data]: +def prepare_data(group_data: Iterator) -> Dict[UUID_TYPE, Data]: def assembly_structure(group_uuid: str, group_data_: dict): tags_data = group_data_.pop('tags_data', []) tags = { @@ -135,4 +115,3 @@ def main(*args, **kwargs): cleared_group_data = prepare_data(tag_file.get_data()) create_or_update(cleared_group_data) - delete(cleared_group_data) diff --git a/shopelectro/management/commands/_update_catalog/utils.py b/shopelectro/management/commands/_update_catalog/utils.py --- a/shopelectro/management/commands/_update_catalog/utils.py +++ b/shopelectro/management/commands/_update_catalog/utils.py @@ -15,7 +15,8 @@ logger = logging.getLogger(__name__) -Data = Dict[str, str] +UUID_TYPE = str +Data = Dict[str, Dict[str, dict]] NOT_SAVE_TEMPLATE = '{entity} with name="{name}" has no {field}. It\'ll not be' \ ' saved'
{"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_tags.py b/shopelectro/management/commands/_update_catalog/update_tags.py\n--- a/shopelectro/management/commands/_update_catalog/update_tags.py\n+++ b/shopelectro/management/commands/_update_catalog/update_tags.py\n@@ -1,13 +1,12 @@\n import logging\n from copy import deepcopy\n-from itertools import chain\n from typing import Iterator, Dict\n from xml.etree.ElementTree import Element\n \n from django.db import transaction\n \n from shopelectro.management.commands._update_catalog.utils import (\n- XmlFile, is_correct_uuid, UUID, Data,\n+ Data, is_correct_uuid, UUID_TYPE, XmlFile\n )\n from shopelectro.models import Tag, TagGroup\n \n@@ -63,7 +62,7 @@\n \n \n @transaction.atomic\n-def create_or_update(data: Dict[UUID, Data]):\n+def create_or_update(data: Dict[UUID_TYPE, Data]):\n group_data = deepcopy(data)\n \n created_groups_count = 0\n@@ -90,26 +89,7 @@\n logger.info(f'{created_tags_count} tags were created.')\n \n \n-@transaction.atomic\n-def delete(group_data: Dict[UUID, Data]):\n- group_data = deepcopy(group_data)\n-\n- group_uuids = group_data.keys()\n- tag_uuids = set(chain.from_iterable(\n- data['tags'].keys()\n- for data in group_data.values()\n- ))\n-\n- if not (group_uuids and tag_uuids):\n- return\n-\n- group_count, _ = TagGroup.objects.exclude(uuid__in=group_uuids).delete()\n- tag_count, _ = Tag.objects.exclude(uuid__in=tag_uuids).delete()\n-\n- logger.info(f'{group_count} tag groups and {tag_count} tags were deleted.')\n-\n-\n-def prepare_data(group_data: Iterator) -> Dict[UUID, Data]:\n+def prepare_data(group_data: Iterator) -> Dict[UUID_TYPE, Data]:\n def assembly_structure(group_uuid: str, group_data_: dict):\n tags_data = group_data_.pop('tags_data', [])\n tags = {\n@@ -135,4 +115,3 @@\n def main(*args, **kwargs):\n cleared_group_data = prepare_data(tag_file.get_data())\n create_or_update(cleared_group_data)\n- delete(cleared_group_data)\ndiff --git a/shopelectro/management/commands/_update_catalog/utils.py b/shopelectro/management/commands/_update_catalog/utils.py\n--- a/shopelectro/management/commands/_update_catalog/utils.py\n+++ b/shopelectro/management/commands/_update_catalog/utils.py\n@@ -15,7 +15,8 @@\n \n \n logger = logging.getLogger(__name__)\n-Data = Dict[str, str]\n+UUID_TYPE = str\n+Data = Dict[str, Dict[str, dict]]\n NOT_SAVE_TEMPLATE = '{entity} with name=\"{name}\" has no {field}. It\\'ll not be' \\\n ' saved'\n", "issue": "Change 1C export algo for Tags\n\u0412 \u044d\u0442\u043e\u0439 \u0437\u0430\u0434\u0430\u0447\u0435 \u0440\u0435\u0447\u044c \u0438\u0434\u0451\u0442 \u0442\u043e\u043b\u044c\u043a\u043e \u043e \u0444\u0430\u0439\u043b\u0435 \u0442\u0435\u0433\u043e\u0432:\r\nshopelectro/management/commands/_update_catalog/update_tags.py\r\n\r\n27.11.18 \u041f\u0440\u043e\u0438\u0437\u043e\u0448\u043b\u0430 \u043d\u0435\u043f\u0440\u0438\u044f\u0442\u043d\u0430\u044f \u0438\u0441\u0442\u043e\u0440\u0438\u044f.\r\n\u041a\u043e\u043d\u0442\u0435\u043d\u0442\u0449\u0438\u043a \u0432\u044b\u0433\u0440\u0443\u0437\u0438\u043b \u043d\u0435\u0441-\u043a\u043e \u0442\u0435\u0433\u043e\u0432 \u0434\u043b\u044f \u0433\u0438\u0440\u043b\u044f\u043d\u0434, \u043d\u043e \u043e\u043d\u0438 \u043f\u0440\u043e\u043f\u0430\u043b\u0438 \u043e\u0442 \u043d\u0430\u0448\u0435\u0439 \u0432\u044b\u0433\u0440\u0443\u0437\u043a\u0438.\r\n\u0421\u0435\u0439\u0447\u0430\u0441 \u0432\u044b\u0433\u0440\u0443\u0437\u043a\u0430 \u0422\u0435\u0433\u043e\u0432 \u043f\u0440\u043e\u0441\u0442\u043e \u0441\u043d\u043e\u0441\u0438\u0442 \u0432\u0441\u0451 \u0447\u0442\u043e \u0435\u0441\u0442\u044c \u0438 \u0437\u0430\u043d\u043e\u0432\u043e \u043d\u0430\u043a\u0430\u0442\u044b\u0432\u0430\u0435\u0442 \u0438\u0437 1\u0421. \u041c\u044b \u043d\u0435\u043c\u043d\u043e\u0433\u043e \u0438\u0437\u043c\u0435\u043d\u0438\u043c \u044d\u0442\u0443 \u043f\u043e\u043b\u0438\u0442\u0438\u043a\u0443.\r\n\r\n\u0420\u0435\u0430\u043b\u0438\u0437\u0443\u0439 \u0442\u0430\u043a\u043e\u0439 \u0430\u043b\u0433\u043e\u0440\u0438\u0442\u043c:\r\n- \u0432\u0441\u0435 \u0422\u0435\u0433\u0438, \u0447\u0442\u043e \u0437\u0430\u0448\u043b\u0438 \u0438\u0437 1\u0421, \u0437\u0430\u043b\u0438\u0432\u0430\u0435\u043c \u0432 \u0431\u0430\u0437\u0443 \u0441 \u043f\u0435\u0440\u0435\u0437\u0430\u043f\u0438\u0441\u044c\u044e\r\n- \u0432\u0441\u0435 \u0422\u0435\u0433\u0438, \u0447\u0442\u043e \u0435\u0441\u0442\u044c \u0432 \u0431\u0430\u0437\u0435, \u043d\u043e \u043d\u0435\u0442 \u0432 1\u0421, \u043d\u0435 \u0442\u0440\u043e\u0433\u0430\u0435\u043c\r\n\r\n\n", "before_files": [{"content": "import glob\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport time\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom typing import Iterator, Dict\nfrom uuid import UUID\nfrom xml.etree import ElementTree\n\nimport requests\nfrom django.conf import settings\n\n\nlogger = logging.getLogger(__name__)\nData = Dict[str, str]\nNOT_SAVE_TEMPLATE = '{entity} with name=\"{name}\" has no {field}. It\\'ll not be' \\\n ' saved'\n\n\ndef is_correct_uuid(uuid_):\n try:\n val = UUID(uuid_)\n except (ValueError, TypeError):\n return False\n return str(val) == uuid_\n\n\nclass XmlFile:\n\n namespace = '{urn:1C.ru:commerceml_2}'\n\n def __init__(self, fetch_callback, xml_path_pattern, xpath_queries,\n extra_options=None):\n self.fetch_callback = fetch_callback\n self.xml_path_pattern = xml_path_pattern\n self.xpath_queries = xpath_queries\n self.extra_options = extra_options or {}\n\n @property\n def parsed_files(self):\n \"\"\"Get parsed xml files, that matched the path pattern.\"\"\"\n xml_files = glob.glob(os.path.join(\n settings.ASSETS_DIR, self.xml_path_pattern\n ))\n assert xml_files, 'Files on path {} does not exist.'.format(\n self.xml_path_pattern\n )\n return [ElementTree.parse(file) for file in xml_files]\n\n @property\n def xpaths(self):\n \"\"\"Get xpath queries for xml.\"\"\"\n return {\n name: query.format(self.namespace)\n for name, query in self.xpath_queries.items()\n }\n\n def get_data(self) -> Iterator:\n \"\"\"\n Get data from xml files.\n\n Example files with products names or prices.\n \"\"\"\n return chain.from_iterable(\n self.fetch_callback(file, self)\n for file in self.parsed_files\n )\n\n\n@contextmanager\ndef collect_errors(error_types: tuple):\n errors = []\n\n @contextmanager\n def collect():\n try:\n yield\n except error_types as error:\n errors.append(error)\n yield collect\n if errors:\n raise errors[0]\n\n\n@contextmanager\ndef download_catalog(destination):\n \"\"\"Download catalog's xml files and delete after handle them.\"\"\"\n wget_command = (\n 'wget -r -P {} ftp://{}:{}@{}/webdata'\n ' 2>&1 | grep \"\u0432\u0440\u0435\u043c\u044f\\|time\\|Downloaded\"'.format(\n destination,\n settings.FTP_USER,\n settings.FTP_PASS,\n settings.FTP_IP,\n )\n )\n\n subprocess.run(wget_command, shell=True)\n assert os.path.exists(os.path.join(\n destination, settings.FTP_IP)), 'Files do not downloaded...'\n logger.info('Download catalog - completed...')\n\n try:\n yield\n finally:\n # remove downloaded data\n shutil.rmtree(os.path.join(destination, settings.FTP_IP))\n\n\ndef report(error):\n report_url = getattr(settings, 'SLACK_REPORT_URL', None)\n if report_url is not None:\n requests.post(\n url=report_url,\n json={\n 'text': '*\u041d\u0435 \u0443\u0434\u0430\u043b\u043e\u0441\u044c \u043e\u0431\u043d\u043e\u0432\u0438\u0442\u044c \u043a\u0430\u0442\u0430\u043b\u043e\u0433 Shopelectro.*\\n'\n '*\u0412\u0440\u0435\u043c\u044f*: {}\\n'\n '*\u041e\u0448\u0438\u0431\u043a\u0430*: {}'.format(time.ctime(), error),\n }\n )\n", "path": "shopelectro/management/commands/_update_catalog/utils.py"}, {"content": "import logging\nfrom copy import deepcopy\nfrom itertools import chain\nfrom typing import Iterator, Dict\nfrom xml.etree.ElementTree import Element\n\nfrom django.db import transaction\n\nfrom shopelectro.management.commands._update_catalog.utils import (\n XmlFile, is_correct_uuid, UUID, Data,\n)\nfrom shopelectro.models import Tag, TagGroup\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef fetch_tags(root: Element, config: XmlFile):\n def get_uuid_name_pair(\n element: Element,\n uuid_xpath: str,\n name_xpath: str,\n ):\n uuid = element.find(uuid_xpath).text\n name = element.find(name_xpath).text\n\n return uuid, name\n\n tag_groups = root.findall(config.xpaths['tag_groups'])\n for group in tag_groups:\n group_uuid, group_name = get_uuid_name_pair(\n group,\n config.xpaths['tag_group_uuid'],\n config.xpaths['tag_group_name'],\n )\n\n tags = group.findall(config.xpaths['tags'])\n tags_data = (\n get_uuid_name_pair(\n tag,\n config.xpaths['tag_uuid'],\n config.xpaths['tag_name'],\n ) for tag in tags\n )\n\n yield group_uuid, {\n 'name': group_name,\n 'tags_data': tags_data,\n }\n\ntag_file = XmlFile(\n fetch_callback=fetch_tags,\n xml_path_pattern='**/webdata/**/properties/**/import*.xml',\n xpath_queries={\n 'tag_groups': './/{}\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u0430/',\n 'tag_group_uuid': '.{}\u0418\u0434',\n 'tag_group_name': '.{}\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435',\n 'tags': '.{}\u0412\u0430\u0440\u0438\u0430\u043d\u0442\u044b\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0439/',\n 'tag_name': '.{}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435',\n 'tag_uuid': '.{}\u0418\u0434\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u044f',\n },\n)\n\n\n@transaction.atomic\ndef create_or_update(data: Dict[UUID, Data]):\n group_data = deepcopy(data)\n\n created_groups_count = 0\n created_tags_count = 0\n\n for group_uuid, data_ in group_data.items():\n tags = data_.pop('tags')\n\n group, group_created = TagGroup.objects.update_or_create(\n uuid=group_uuid, defaults=data_\n )\n\n created_groups_count += int(group_created)\n\n for tag_uuid, tag_data in tags.items():\n _, tag_created = Tag.objects.update_or_create(\n uuid=tag_uuid,\n defaults={**tag_data, 'group': group}\n )\n\n created_tags_count += int(tag_created)\n\n logger.info(f'{created_groups_count} tag groups were created.')\n logger.info(f'{created_tags_count} tags were created.')\n\n\n@transaction.atomic\ndef delete(group_data: Dict[UUID, Data]):\n group_data = deepcopy(group_data)\n\n group_uuids = group_data.keys()\n tag_uuids = set(chain.from_iterable(\n data['tags'].keys()\n for data in group_data.values()\n ))\n\n if not (group_uuids and tag_uuids):\n return\n\n group_count, _ = TagGroup.objects.exclude(uuid__in=group_uuids).delete()\n tag_count, _ = Tag.objects.exclude(uuid__in=tag_uuids).delete()\n\n logger.info(f'{group_count} tag groups and {tag_count} tags were deleted.')\n\n\ndef prepare_data(group_data: Iterator) -> Dict[UUID, Data]:\n def assembly_structure(group_uuid: str, group_data_: dict):\n tags_data = group_data_.pop('tags_data', [])\n tags = {\n tag_uuid: {'name': tag_name}\n for tag_uuid, tag_name in tags_data\n if is_correct_uuid(tag_uuid)\n }\n\n return (\n group_uuid, {\n **group_data_,\n 'tags': tags\n }\n )\n\n return dict(\n assembly_structure(group_uuid, data)\n for group_uuid, data in group_data\n if is_correct_uuid(group_uuid)\n )\n\n\ndef main(*args, **kwargs):\n cleared_group_data = prepare_data(tag_file.get_data())\n create_or_update(cleared_group_data)\n delete(cleared_group_data)\n", "path": "shopelectro/management/commands/_update_catalog/update_tags.py"}], "after_files": [{"content": "import glob\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport time\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom typing import Iterator, Dict\nfrom uuid import UUID\nfrom xml.etree import ElementTree\n\nimport requests\nfrom django.conf import settings\n\n\nlogger = logging.getLogger(__name__)\nUUID_TYPE = str\nData = Dict[str, Dict[str, dict]]\nNOT_SAVE_TEMPLATE = '{entity} with name=\"{name}\" has no {field}. It\\'ll not be' \\\n ' saved'\n\n\ndef is_correct_uuid(uuid_):\n try:\n val = UUID(uuid_)\n except (ValueError, TypeError):\n return False\n return str(val) == uuid_\n\n\nclass XmlFile:\n\n namespace = '{urn:1C.ru:commerceml_2}'\n\n def __init__(self, fetch_callback, xml_path_pattern, xpath_queries,\n extra_options=None):\n self.fetch_callback = fetch_callback\n self.xml_path_pattern = xml_path_pattern\n self.xpath_queries = xpath_queries\n self.extra_options = extra_options or {}\n\n @property\n def parsed_files(self):\n \"\"\"Get parsed xml files, that matched the path pattern.\"\"\"\n xml_files = glob.glob(os.path.join(\n settings.ASSETS_DIR, self.xml_path_pattern\n ))\n assert xml_files, 'Files on path {} does not exist.'.format(\n self.xml_path_pattern\n )\n return [ElementTree.parse(file) for file in xml_files]\n\n @property\n def xpaths(self):\n \"\"\"Get xpath queries for xml.\"\"\"\n return {\n name: query.format(self.namespace)\n for name, query in self.xpath_queries.items()\n }\n\n def get_data(self) -> Iterator:\n \"\"\"\n Get data from xml files.\n\n Example files with products names or prices.\n \"\"\"\n return chain.from_iterable(\n self.fetch_callback(file, self)\n for file in self.parsed_files\n )\n\n\n@contextmanager\ndef collect_errors(error_types: tuple):\n errors = []\n\n @contextmanager\n def collect():\n try:\n yield\n except error_types as error:\n errors.append(error)\n yield collect\n if errors:\n raise errors[0]\n\n\n@contextmanager\ndef download_catalog(destination):\n \"\"\"Download catalog's xml files and delete after handle them.\"\"\"\n wget_command = (\n 'wget -r -P {} ftp://{}:{}@{}/webdata'\n ' 2>&1 | grep \"\u0432\u0440\u0435\u043c\u044f\\|time\\|Downloaded\"'.format(\n destination,\n settings.FTP_USER,\n settings.FTP_PASS,\n settings.FTP_IP,\n )\n )\n\n subprocess.run(wget_command, shell=True)\n assert os.path.exists(os.path.join(\n destination, settings.FTP_IP)), 'Files do not downloaded...'\n logger.info('Download catalog - completed...')\n\n try:\n yield\n finally:\n # remove downloaded data\n shutil.rmtree(os.path.join(destination, settings.FTP_IP))\n\n\ndef report(error):\n report_url = getattr(settings, 'SLACK_REPORT_URL', None)\n if report_url is not None:\n requests.post(\n url=report_url,\n json={\n 'text': '*\u041d\u0435 \u0443\u0434\u0430\u043b\u043e\u0441\u044c \u043e\u0431\u043d\u043e\u0432\u0438\u0442\u044c \u043a\u0430\u0442\u0430\u043b\u043e\u0433 Shopelectro.*\\n'\n '*\u0412\u0440\u0435\u043c\u044f*: {}\\n'\n '*\u041e\u0448\u0438\u0431\u043a\u0430*: {}'.format(time.ctime(), error),\n }\n )\n", "path": "shopelectro/management/commands/_update_catalog/utils.py"}, {"content": "import logging\nfrom copy import deepcopy\nfrom typing import Iterator, Dict\nfrom xml.etree.ElementTree import Element\n\nfrom django.db import transaction\n\nfrom shopelectro.management.commands._update_catalog.utils import (\n Data, is_correct_uuid, UUID_TYPE, XmlFile\n)\nfrom shopelectro.models import Tag, TagGroup\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef fetch_tags(root: Element, config: XmlFile):\n def get_uuid_name_pair(\n element: Element,\n uuid_xpath: str,\n name_xpath: str,\n ):\n uuid = element.find(uuid_xpath).text\n name = element.find(name_xpath).text\n\n return uuid, name\n\n tag_groups = root.findall(config.xpaths['tag_groups'])\n for group in tag_groups:\n group_uuid, group_name = get_uuid_name_pair(\n group,\n config.xpaths['tag_group_uuid'],\n config.xpaths['tag_group_name'],\n )\n\n tags = group.findall(config.xpaths['tags'])\n tags_data = (\n get_uuid_name_pair(\n tag,\n config.xpaths['tag_uuid'],\n config.xpaths['tag_name'],\n ) for tag in tags\n )\n\n yield group_uuid, {\n 'name': group_name,\n 'tags_data': tags_data,\n }\n\ntag_file = XmlFile(\n fetch_callback=fetch_tags,\n xml_path_pattern='**/webdata/**/properties/**/import*.xml',\n xpath_queries={\n 'tag_groups': './/{}\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u0430/',\n 'tag_group_uuid': '.{}\u0418\u0434',\n 'tag_group_name': '.{}\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435',\n 'tags': '.{}\u0412\u0430\u0440\u0438\u0430\u043d\u0442\u044b\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0439/',\n 'tag_name': '.{}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435',\n 'tag_uuid': '.{}\u0418\u0434\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u044f',\n },\n)\n\n\n@transaction.atomic\ndef create_or_update(data: Dict[UUID_TYPE, Data]):\n group_data = deepcopy(data)\n\n created_groups_count = 0\n created_tags_count = 0\n\n for group_uuid, data_ in group_data.items():\n tags = data_.pop('tags')\n\n group, group_created = TagGroup.objects.update_or_create(\n uuid=group_uuid, defaults=data_\n )\n\n created_groups_count += int(group_created)\n\n for tag_uuid, tag_data in tags.items():\n _, tag_created = Tag.objects.update_or_create(\n uuid=tag_uuid,\n defaults={**tag_data, 'group': group}\n )\n\n created_tags_count += int(tag_created)\n\n logger.info(f'{created_groups_count} tag groups were created.')\n logger.info(f'{created_tags_count} tags were created.')\n\n\ndef prepare_data(group_data: Iterator) -> Dict[UUID_TYPE, Data]:\n def assembly_structure(group_uuid: str, group_data_: dict):\n tags_data = group_data_.pop('tags_data', [])\n tags = {\n tag_uuid: {'name': tag_name}\n for tag_uuid, tag_name in tags_data\n if is_correct_uuid(tag_uuid)\n }\n\n return (\n group_uuid, {\n **group_data_,\n 'tags': tags\n }\n )\n\n return dict(\n assembly_structure(group_uuid, data)\n for group_uuid, data in group_data\n if is_correct_uuid(group_uuid)\n )\n\n\ndef main(*args, **kwargs):\n cleared_group_data = prepare_data(tag_file.get_data())\n create_or_update(cleared_group_data)\n", "path": "shopelectro/management/commands/_update_catalog/update_tags.py"}]}
2,668
656
gh_patches_debug_2250
rasdani/github-patches
git_diff
mozilla__bugbug-3850
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Optimize the `bug_to_types()` functions to avoid redundant the execution of string lowering The `bug["whiteboard"].lower()` statement is performed multiple times in the function. Instead, we could store the lowercase value in a variable (e.g., `bug_whiteboard`) and reuse it where needed. Examples: https://github.com/mozilla/bugbug/blob/0586c6a49732f03781daf43d8726b53e2049e3db/bugbug/models/bugtype.py#L46 https://github.com/mozilla/bugbug/blob/0586c6a49732f03781daf43d8726b53e2049e3db/bugbug/models/bugtype.py#L50 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bugbug/models/bugtype.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import logging 7 from typing import Iterable, Optional 8 9 import numpy as np 10 import xgboost 11 from sklearn.compose import ColumnTransformer 12 from sklearn.feature_extraction import DictVectorizer 13 from sklearn.multiclass import OneVsRestClassifier 14 from sklearn.pipeline import Pipeline 15 16 from bugbug import bug_features, bugzilla, feature_cleanup, utils 17 from bugbug.model import BugModel 18 19 logger = logging.getLogger(__name__) 20 21 KEYWORD_DICT = { 22 "sec-": "security", 23 "csectype-": "security", 24 "memory-": "memory", 25 "crash": "crash", 26 "crashreportid": "crash", 27 "perf": "performance", 28 "topperf": "performance", 29 "main-thread-io": "performance", 30 "power": "power", 31 } 32 TYPE_LIST = sorted(set(KEYWORD_DICT.values())) 33 34 35 def bug_to_types( 36 bug: bugzilla.BugDict, bug_map: Optional[dict[int, bugzilla.BugDict]] = None 37 ) -> list[str]: 38 types = set() 39 40 bug_whiteboard = bug["whiteboard"].lower() 41 42 if any( 43 f"{whiteboard_text}" in bug_whiteboard 44 for whiteboard_text in ("overhead", "memshrink") 45 ): 46 types.add("memory") 47 48 if "[power" in bug_whiteboard: 49 types.add("power") 50 51 if any( 52 f"[{whiteboard_text}" in bug_whiteboard 53 for whiteboard_text in ( 54 "fxperf", 55 "fxperfsize", 56 "snappy", 57 "pdfjs-c-performance", 58 "pdfjs-performance", 59 "sp3", 60 ) 61 ): 62 types.add("performance") 63 64 if any( 65 f"[{whiteboard_text}" in bug["whiteboard"].lower() 66 for whiteboard_text in ("client-bounty-form", "sec-survey") 67 ): 68 types.add("security") 69 70 if "cf_performance" in bug and bug["cf_performance"] not in ("---", "?"): 71 types.add("performance") 72 73 if "cf_crash_signature" in bug and bug["cf_crash_signature"] not in ("", "---"): 74 types.add("crash") 75 76 if bug_map is not None: 77 for bug_id in bug["blocks"]: 78 if bug_id not in bug_map: 79 continue 80 81 alias = bug_map[bug_id]["alias"] 82 if alias and alias.startswith("memshrink"): 83 types.add("memory") 84 85 for keyword_start, type in KEYWORD_DICT.items(): 86 if any(keyword.startswith(keyword_start) for keyword in bug["keywords"]): 87 types.add(type) 88 89 return list(types) 90 91 92 class BugTypeModel(BugModel): 93 def __init__(self, lemmatization=False, historical=False): 94 BugModel.__init__(self, lemmatization) 95 96 self.calculate_importance = False 97 98 feature_extractors = [ 99 bug_features.HasSTR(), 100 bug_features.Severity(), 101 # Ignore keywords that would make the ML completely skewed 102 # (we are going to use them as 100% rules in the evaluation phase). 103 bug_features.Keywords(set(KEYWORD_DICT.keys())), 104 bug_features.IsCoverityIssue(), 105 bug_features.HasCrashSignature(), 106 bug_features.HasURL(), 107 bug_features.HasW3CURL(), 108 bug_features.HasGithubURL(), 109 bug_features.Whiteboard(), 110 bug_features.Patches(), 111 bug_features.Landings(), 112 bug_features.BlockedBugsNumber(), 113 bug_features.EverAffected(), 114 bug_features.AffectedThenUnaffected(), 115 bug_features.Product(), 116 bug_features.Component(), 117 ] 118 119 cleanup_functions = [ 120 feature_cleanup.url(), 121 feature_cleanup.fileref(), 122 feature_cleanup.synonyms(), 123 ] 124 125 self.extraction_pipeline = Pipeline( 126 [ 127 ( 128 "bug_extractor", 129 bug_features.BugExtractor(feature_extractors, cleanup_functions), 130 ), 131 ( 132 "union", 133 ColumnTransformer( 134 [ 135 ("data", DictVectorizer(), "data"), 136 ("title", self.text_vectorizer(min_df=0.001), "title"), 137 ( 138 "first_comment", 139 self.text_vectorizer(min_df=0.001), 140 "first_comment", 141 ), 142 ( 143 "comments", 144 self.text_vectorizer(min_df=0.001), 145 "comments", 146 ), 147 ] 148 ), 149 ), 150 ] 151 ) 152 153 self.hyperparameter = {"n_jobs": utils.get_physical_cpu_count()} 154 self.clf = OneVsRestClassifier(xgboost.XGBClassifier(**self.hyperparameter)) 155 156 def get_labels(self) -> tuple[dict[int, np.ndarray], list[str]]: 157 classes = {} 158 159 bug_map = {bug["id"]: bug for bug in bugzilla.get_bugs()} 160 161 for bug_data in bug_map.values(): 162 target = np.zeros(len(TYPE_LIST)) 163 for type_ in bug_to_types(bug_data, bug_map): 164 target[TYPE_LIST.index(type_)] = 1 165 166 classes[int(bug_data["id"])] = target 167 168 for type_ in TYPE_LIST: 169 logger.info( 170 "%d %s bugs", 171 sum( 172 1 173 for target in classes.values() 174 if target[TYPE_LIST.index(type_)] == 1 175 ), 176 type_, 177 ) 178 179 return classes, TYPE_LIST 180 181 def get_feature_names(self): 182 return self.extraction_pipeline.named_steps["union"].get_feature_names_out() 183 184 def overwrite_classes( 185 self, 186 bugs: Iterable[bugzilla.BugDict], 187 classes: dict[int, np.ndarray], 188 probabilities: bool, 189 ): 190 for i, bug in enumerate(bugs): 191 for type_ in bug_to_types(bug): 192 if probabilities: 193 classes[i][TYPE_LIST.index(type_)] = 1.0 194 else: 195 classes[i][TYPE_LIST.index(type_)] = 1 196 197 return classes 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bugbug/models/bugtype.py b/bugbug/models/bugtype.py --- a/bugbug/models/bugtype.py +++ b/bugbug/models/bugtype.py @@ -62,7 +62,7 @@ types.add("performance") if any( - f"[{whiteboard_text}" in bug["whiteboard"].lower() + f"[{whiteboard_text}" in bug_whiteboard for whiteboard_text in ("client-bounty-form", "sec-survey") ): types.add("security")
{"golden_diff": "diff --git a/bugbug/models/bugtype.py b/bugbug/models/bugtype.py\n--- a/bugbug/models/bugtype.py\n+++ b/bugbug/models/bugtype.py\n@@ -62,7 +62,7 @@\n types.add(\"performance\")\n \n if any(\n- f\"[{whiteboard_text}\" in bug[\"whiteboard\"].lower()\n+ f\"[{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\"client-bounty-form\", \"sec-survey\")\n ):\n types.add(\"security\")\n", "issue": "Optimize the `bug_to_types()` functions to avoid redundant the execution of string lowering\nThe `bug[\"whiteboard\"].lower()` statement is performed multiple times in the function. Instead, we could store the lowercase value in a variable (e.g., `bug_whiteboard`) and reuse it where needed.\r\n\r\nExamples:\r\n\r\nhttps://github.com/mozilla/bugbug/blob/0586c6a49732f03781daf43d8726b53e2049e3db/bugbug/models/bugtype.py#L46\r\n\r\nhttps://github.com/mozilla/bugbug/blob/0586c6a49732f03781daf43d8726b53e2049e3db/bugbug/models/bugtype.py#L50\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom typing import Iterable, Optional\n\nimport numpy as np\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\nlogger = logging.getLogger(__name__)\n\nKEYWORD_DICT = {\n \"sec-\": \"security\",\n \"csectype-\": \"security\",\n \"memory-\": \"memory\",\n \"crash\": \"crash\",\n \"crashreportid\": \"crash\",\n \"perf\": \"performance\",\n \"topperf\": \"performance\",\n \"main-thread-io\": \"performance\",\n \"power\": \"power\",\n}\nTYPE_LIST = sorted(set(KEYWORD_DICT.values()))\n\n\ndef bug_to_types(\n bug: bugzilla.BugDict, bug_map: Optional[dict[int, bugzilla.BugDict]] = None\n) -> list[str]:\n types = set()\n\n bug_whiteboard = bug[\"whiteboard\"].lower()\n\n if any(\n f\"{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\"overhead\", \"memshrink\")\n ):\n types.add(\"memory\")\n\n if \"[power\" in bug_whiteboard:\n types.add(\"power\")\n\n if any(\n f\"[{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\n \"fxperf\",\n \"fxperfsize\",\n \"snappy\",\n \"pdfjs-c-performance\",\n \"pdfjs-performance\",\n \"sp3\",\n )\n ):\n types.add(\"performance\")\n\n if any(\n f\"[{whiteboard_text}\" in bug[\"whiteboard\"].lower()\n for whiteboard_text in (\"client-bounty-form\", \"sec-survey\")\n ):\n types.add(\"security\")\n\n if \"cf_performance\" in bug and bug[\"cf_performance\"] not in (\"---\", \"?\"):\n types.add(\"performance\")\n\n if \"cf_crash_signature\" in bug and bug[\"cf_crash_signature\"] not in (\"\", \"---\"):\n types.add(\"crash\")\n\n if bug_map is not None:\n for bug_id in bug[\"blocks\"]:\n if bug_id not in bug_map:\n continue\n\n alias = bug_map[bug_id][\"alias\"]\n if alias and alias.startswith(\"memshrink\"):\n types.add(\"memory\")\n\n for keyword_start, type in KEYWORD_DICT.items():\n if any(keyword.startswith(keyword_start) for keyword in bug[\"keywords\"]):\n types.add(type)\n\n return list(types)\n\n\nclass BugTypeModel(BugModel):\n def __init__(self, lemmatization=False, historical=False):\n BugModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n # Ignore keywords that would make the ML completely skewed\n # (we are going to use them as 100% rules in the evaluation phase).\n bug_features.Keywords(set(KEYWORD_DICT.keys())),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n bug_features.BlockedBugsNumber(),\n bug_features.EverAffected(),\n bug_features.AffectedThenUnaffected(),\n bug_features.Product(),\n bug_features.Component(),\n ]\n\n cleanup_functions = [\n feature_cleanup.url(),\n feature_cleanup.fileref(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.001),\n \"first_comment\",\n ),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.001),\n \"comments\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.hyperparameter = {\"n_jobs\": utils.get_physical_cpu_count()}\n self.clf = OneVsRestClassifier(xgboost.XGBClassifier(**self.hyperparameter))\n\n def get_labels(self) -> tuple[dict[int, np.ndarray], list[str]]:\n classes = {}\n\n bug_map = {bug[\"id\"]: bug for bug in bugzilla.get_bugs()}\n\n for bug_data in bug_map.values():\n target = np.zeros(len(TYPE_LIST))\n for type_ in bug_to_types(bug_data, bug_map):\n target[TYPE_LIST.index(type_)] = 1\n\n classes[int(bug_data[\"id\"])] = target\n\n for type_ in TYPE_LIST:\n logger.info(\n \"%d %s bugs\",\n sum(\n 1\n for target in classes.values()\n if target[TYPE_LIST.index(type_)] == 1\n ),\n type_,\n )\n\n return classes, TYPE_LIST\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(\n self,\n bugs: Iterable[bugzilla.BugDict],\n classes: dict[int, np.ndarray],\n probabilities: bool,\n ):\n for i, bug in enumerate(bugs):\n for type_ in bug_to_types(bug):\n if probabilities:\n classes[i][TYPE_LIST.index(type_)] = 1.0\n else:\n classes[i][TYPE_LIST.index(type_)] = 1\n\n return classes\n", "path": "bugbug/models/bugtype.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom typing import Iterable, Optional\n\nimport numpy as np\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\nlogger = logging.getLogger(__name__)\n\nKEYWORD_DICT = {\n \"sec-\": \"security\",\n \"csectype-\": \"security\",\n \"memory-\": \"memory\",\n \"crash\": \"crash\",\n \"crashreportid\": \"crash\",\n \"perf\": \"performance\",\n \"topperf\": \"performance\",\n \"main-thread-io\": \"performance\",\n \"power\": \"power\",\n}\nTYPE_LIST = sorted(set(KEYWORD_DICT.values()))\n\n\ndef bug_to_types(\n bug: bugzilla.BugDict, bug_map: Optional[dict[int, bugzilla.BugDict]] = None\n) -> list[str]:\n types = set()\n\n bug_whiteboard = bug[\"whiteboard\"].lower()\n\n if any(\n f\"{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\"overhead\", \"memshrink\")\n ):\n types.add(\"memory\")\n\n if \"[power\" in bug_whiteboard:\n types.add(\"power\")\n\n if any(\n f\"[{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\n \"fxperf\",\n \"fxperfsize\",\n \"snappy\",\n \"pdfjs-c-performance\",\n \"pdfjs-performance\",\n \"sp3\",\n )\n ):\n types.add(\"performance\")\n\n if any(\n f\"[{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\"client-bounty-form\", \"sec-survey\")\n ):\n types.add(\"security\")\n\n if \"cf_performance\" in bug and bug[\"cf_performance\"] not in (\"---\", \"?\"):\n types.add(\"performance\")\n\n if \"cf_crash_signature\" in bug and bug[\"cf_crash_signature\"] not in (\"\", \"---\"):\n types.add(\"crash\")\n\n if bug_map is not None:\n for bug_id in bug[\"blocks\"]:\n if bug_id not in bug_map:\n continue\n\n alias = bug_map[bug_id][\"alias\"]\n if alias and alias.startswith(\"memshrink\"):\n types.add(\"memory\")\n\n for keyword_start, type in KEYWORD_DICT.items():\n if any(keyword.startswith(keyword_start) for keyword in bug[\"keywords\"]):\n types.add(type)\n\n return list(types)\n\n\nclass BugTypeModel(BugModel):\n def __init__(self, lemmatization=False, historical=False):\n BugModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n # Ignore keywords that would make the ML completely skewed\n # (we are going to use them as 100% rules in the evaluation phase).\n bug_features.Keywords(set(KEYWORD_DICT.keys())),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n bug_features.BlockedBugsNumber(),\n bug_features.EverAffected(),\n bug_features.AffectedThenUnaffected(),\n bug_features.Product(),\n bug_features.Component(),\n ]\n\n cleanup_functions = [\n feature_cleanup.url(),\n feature_cleanup.fileref(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.001),\n \"first_comment\",\n ),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.001),\n \"comments\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.hyperparameter = {\"n_jobs\": utils.get_physical_cpu_count()}\n self.clf = OneVsRestClassifier(xgboost.XGBClassifier(**self.hyperparameter))\n\n def get_labels(self) -> tuple[dict[int, np.ndarray], list[str]]:\n classes = {}\n\n bug_map = {bug[\"id\"]: bug for bug in bugzilla.get_bugs()}\n\n for bug_data in bug_map.values():\n target = np.zeros(len(TYPE_LIST))\n for type_ in bug_to_types(bug_data, bug_map):\n target[TYPE_LIST.index(type_)] = 1\n\n classes[int(bug_data[\"id\"])] = target\n\n for type_ in TYPE_LIST:\n logger.info(\n \"%d %s bugs\",\n sum(\n 1\n for target in classes.values()\n if target[TYPE_LIST.index(type_)] == 1\n ),\n type_,\n )\n\n return classes, TYPE_LIST\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(\n self,\n bugs: Iterable[bugzilla.BugDict],\n classes: dict[int, np.ndarray],\n probabilities: bool,\n ):\n for i, bug in enumerate(bugs):\n for type_ in bug_to_types(bug):\n if probabilities:\n classes[i][TYPE_LIST.index(type_)] = 1.0\n else:\n classes[i][TYPE_LIST.index(type_)] = 1\n\n return classes\n", "path": "bugbug/models/bugtype.py"}]}
2,260
123
gh_patches_debug_4261
rasdani/github-patches
git_diff
Nitrate__Nitrate-406
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix call to DurationField.from_db_value() which will be removed in Django 3.0 Lots of such warning are output. ``` src/tests/xmlrpc/test_testcaseplan.py::TestCasePlanGet::test_get_with_negative_case_id /testenv/lib/python3.7/site-packages/django/db/models/sql/compiler.py:995: RemovedInDjango30Warning: Remove the context parameter from DurationField.from_db_value(). Support for it will be removed in Django 3.0. RemovedInDjango30Warning, ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/tcms/core/models/fields.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import datetime 3 import six 4 5 from django.core.exceptions import ValidationError 6 from django.db.models.fields import IntegerField 7 from django.db.models.fields import BooleanField 8 9 from tcms.core.forms.fields import DurationField as DurationFormField 10 11 try: 12 from pymysql.constants import FIELD_TYPE 13 except ImportError: 14 # Refer to tcms/__init__.py for details. 15 pass 16 else: 17 from django.db.backends.mysql.base import django_conversions 18 django_conversions.update({FIELD_TYPE.TIME: None}) 19 20 21 class DurationField(IntegerField): 22 """Duration field for test run 23 24 Value is stored as number of seconds in database and presents in Nitrate in 25 timedelta type. 26 27 Value should also be able to be serialized to integer as seconds, and then 28 deserialized from value of seconds. 29 """ 30 31 def to_python(self, value): 32 if isinstance(value, six.integer_types): 33 return datetime.timedelta(seconds=value) 34 elif isinstance(value, datetime.timedelta): 35 return value 36 else: 37 raise TypeError('Unable to convert %s to timedelta.' % value) 38 39 def from_db_value(self, value, expression, connection, context): 40 if value is None: 41 return value 42 return datetime.timedelta(seconds=value) 43 44 def get_db_prep_value(self, value, connection, prepared=True): 45 """convert datetime.timedelta to seconds. 46 47 1 day equal to 86400 seconds 48 """ 49 if isinstance(value, datetime.timedelta): 50 return value.seconds + (86400 * value.days) 51 else: 52 value = super(DurationField, self).get_db_prep_value( 53 value, connection, prepared) 54 return value 55 56 def formfield(self, form_class=DurationFormField, **kwargs): 57 defaults = {'help_text': 'Enter duration in the format: DDHHMM'} 58 defaults.update(kwargs) 59 return form_class(**defaults) 60 61 62 class NitrateBooleanField(BooleanField): 63 """Custom boolean field to allow accepting arbitrary bool values""" 64 65 def to_python(self, value): 66 if value in (1, '1', 'true', 'True', True): 67 return True 68 if value in (0, '0', 'false', 'False', False): 69 return False 70 raise ValidationError( 71 '{} is not recognized as a bool value.'.format(value)) 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/tcms/core/models/fields.py b/src/tcms/core/models/fields.py --- a/src/tcms/core/models/fields.py +++ b/src/tcms/core/models/fields.py @@ -36,7 +36,7 @@ else: raise TypeError('Unable to convert %s to timedelta.' % value) - def from_db_value(self, value, expression, connection, context): + def from_db_value(self, value, *args, **kwargs): if value is None: return value return datetime.timedelta(seconds=value)
{"golden_diff": "diff --git a/src/tcms/core/models/fields.py b/src/tcms/core/models/fields.py\n--- a/src/tcms/core/models/fields.py\n+++ b/src/tcms/core/models/fields.py\n@@ -36,7 +36,7 @@\n else:\n raise TypeError('Unable to convert %s to timedelta.' % value)\n \n- def from_db_value(self, value, expression, connection, context):\n+ def from_db_value(self, value, *args, **kwargs):\n if value is None:\n return value\n return datetime.timedelta(seconds=value)\n", "issue": "Fix call to DurationField.from_db_value() which will be removed in Django 3.0\nLots of such warning are output.\r\n\r\n```\r\nsrc/tests/xmlrpc/test_testcaseplan.py::TestCasePlanGet::test_get_with_negative_case_id\r\n /testenv/lib/python3.7/site-packages/django/db/models/sql/compiler.py:995: RemovedInDjango30Warning: Remove the context parameter from DurationField.from_db_value(). Support for it will be removed in Django 3.0.\r\n RemovedInDjango30Warning,\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport six\n\nfrom django.core.exceptions import ValidationError\nfrom django.db.models.fields import IntegerField\nfrom django.db.models.fields import BooleanField\n\nfrom tcms.core.forms.fields import DurationField as DurationFormField\n\ntry:\n from pymysql.constants import FIELD_TYPE\nexcept ImportError:\n # Refer to tcms/__init__.py for details.\n pass\nelse:\n from django.db.backends.mysql.base import django_conversions\n django_conversions.update({FIELD_TYPE.TIME: None})\n\n\nclass DurationField(IntegerField):\n \"\"\"Duration field for test run\n\n Value is stored as number of seconds in database and presents in Nitrate in\n timedelta type.\n\n Value should also be able to be serialized to integer as seconds, and then\n deserialized from value of seconds.\n \"\"\"\n\n def to_python(self, value):\n if isinstance(value, six.integer_types):\n return datetime.timedelta(seconds=value)\n elif isinstance(value, datetime.timedelta):\n return value\n else:\n raise TypeError('Unable to convert %s to timedelta.' % value)\n\n def from_db_value(self, value, expression, connection, context):\n if value is None:\n return value\n return datetime.timedelta(seconds=value)\n\n def get_db_prep_value(self, value, connection, prepared=True):\n \"\"\"convert datetime.timedelta to seconds.\n\n 1 day equal to 86400 seconds\n \"\"\"\n if isinstance(value, datetime.timedelta):\n return value.seconds + (86400 * value.days)\n else:\n value = super(DurationField, self).get_db_prep_value(\n value, connection, prepared)\n return value\n\n def formfield(self, form_class=DurationFormField, **kwargs):\n defaults = {'help_text': 'Enter duration in the format: DDHHMM'}\n defaults.update(kwargs)\n return form_class(**defaults)\n\n\nclass NitrateBooleanField(BooleanField):\n \"\"\"Custom boolean field to allow accepting arbitrary bool values\"\"\"\n\n def to_python(self, value):\n if value in (1, '1', 'true', 'True', True):\n return True\n if value in (0, '0', 'false', 'False', False):\n return False\n raise ValidationError(\n '{} is not recognized as a bool value.'.format(value))\n", "path": "src/tcms/core/models/fields.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport six\n\nfrom django.core.exceptions import ValidationError\nfrom django.db.models.fields import IntegerField\nfrom django.db.models.fields import BooleanField\n\nfrom tcms.core.forms.fields import DurationField as DurationFormField\n\ntry:\n from pymysql.constants import FIELD_TYPE\nexcept ImportError:\n # Refer to tcms/__init__.py for details.\n pass\nelse:\n from django.db.backends.mysql.base import django_conversions\n django_conversions.update({FIELD_TYPE.TIME: None})\n\n\nclass DurationField(IntegerField):\n \"\"\"Duration field for test run\n\n Value is stored as number of seconds in database and presents in Nitrate in\n timedelta type.\n\n Value should also be able to be serialized to integer as seconds, and then\n deserialized from value of seconds.\n \"\"\"\n\n def to_python(self, value):\n if isinstance(value, six.integer_types):\n return datetime.timedelta(seconds=value)\n elif isinstance(value, datetime.timedelta):\n return value\n else:\n raise TypeError('Unable to convert %s to timedelta.' % value)\n\n def from_db_value(self, value, *args, **kwargs):\n if value is None:\n return value\n return datetime.timedelta(seconds=value)\n\n def get_db_prep_value(self, value, connection, prepared=True):\n \"\"\"convert datetime.timedelta to seconds.\n\n 1 day equal to 86400 seconds\n \"\"\"\n if isinstance(value, datetime.timedelta):\n return value.seconds + (86400 * value.days)\n else:\n value = super(DurationField, self).get_db_prep_value(\n value, connection, prepared)\n return value\n\n def formfield(self, form_class=DurationFormField, **kwargs):\n defaults = {'help_text': 'Enter duration in the format: DDHHMM'}\n defaults.update(kwargs)\n return form_class(**defaults)\n\n\nclass NitrateBooleanField(BooleanField):\n \"\"\"Custom boolean field to allow accepting arbitrary bool values\"\"\"\n\n def to_python(self, value):\n if value in (1, '1', 'true', 'True', True):\n return True\n if value in (0, '0', 'false', 'False', False):\n return False\n raise ValidationError(\n '{} is not recognized as a bool value.'.format(value))\n", "path": "src/tcms/core/models/fields.py"}]}
1,007
124
gh_patches_debug_34925
rasdani/github-patches
git_diff
mosaicml__composer-1174
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Consistency in docs [State, time, callbacks] I am re-writing a callback that was working on composer 0.5 and now it does not. I see that there is a new class `time` and that `state.epoch` is not there anymore, the problem is that the docs still point to that. Like [here](https://docs.mosaicml.com/en/v0.7.1/trainer/logging.html#logging-additional-data) or that `Logger` cannot be imported from `composer` (`from composer.loggers import Logger`) I am having a hard time creating a custom callback, maybe a tutorial with the new API? The callback is for logging images to wandb: ```python import wandb from composer import Callback, State, Logger class LogPredictions(Callback): def __init__(self, num_samples=100, seed=1234): super().__init__() self.num_samples = num_samples self.data = [] def eval_batch_end(self, state: State, logger: Logger): """Compute predictions per batch and stores them on self.data""" if state.epoch == state.max_duration: #on last val epoch if len(self.data) < self.num_samples: n = self.num_samples x, y = state.batch_pair outputs = state.outputs.argmax(-1) data = [[wandb.Image(x_i), y_i, y_pred] for x_i, y_i, y_pred in list(zip(x[:n], y[:n], outputs[:n]))] self.data += data def eval_end(self, state: State, logger: Logger): "Create a wandb.Table and logs it" columns = ['image', 'ground truth', 'prediction'] table = wandb.Table(columns=columns, data=self.data[:self.num_samples]) wandb.log({'sample_table':table}, step=int(state.timer.batch)) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/doctest_cleanup.py` Content: ``` 1 # Copyright 2022 MosaicML Composer authors 2 # SPDX-License-Identifier: Apache-2.0 3 4 """Cleanup script that is executed at the end of each doctest.""" 5 6 import os 7 8 # variables are defined in doctest_fixtures.py 9 # pyright: reportUndefinedVariable=none 10 11 # tmpdir and cwd were defined in doctest_fixtures.py 12 13 os.chdir(cwd) 14 15 tmpdir.cleanup() 16 ``` Path: `composer/__init__.py` Content: ``` 1 # Copyright 2022 MosaicML Composer authors 2 # SPDX-License-Identifier: Apache-2.0 3 4 """Composer.""" 5 6 from composer import algorithms as algorithms 7 from composer import callbacks as callbacks 8 from composer import datasets as datasets 9 from composer import functional as functional 10 from composer import loggers as loggers 11 from composer import models as models 12 from composer import optim as optim 13 from composer import profiler as profiler 14 from composer import trainer as trainer 15 from composer import utils as utils 16 from composer.core import Algorithm as Algorithm 17 from composer.core import Callback as Callback 18 from composer.core import DataSpec as DataSpec 19 from composer.core import Engine as Engine 20 from composer.core import Evaluator as Evaluator 21 from composer.core import Event as Event 22 from composer.core import State as State 23 from composer.core import Time as Time 24 from composer.core import Timestamp as Timestamp 25 from composer.core import TimeUnit as TimeUnit 26 from composer.core import types as types 27 from composer.models import ComposerModel as ComposerModel 28 from composer.trainer import Trainer as Trainer 29 30 __version__ = '0.7.0' 31 ``` Path: `docs/source/doctest_fixtures.py` Content: ``` 1 # Copyright 2022 MosaicML Composer authors 2 # SPDX-License-Identifier: Apache-2.0 3 4 # disabling general type issues because of monkeypatching 5 #yright: reportGeneralTypeIssues=none 6 7 """Fixtures available in doctests. 8 9 The script is run before any doctests are executed, 10 so all imports and variables are available in any doctest. 11 The output of this setup script does not show up in the documentation. 12 """ 13 import os 14 import sys 15 import tempfile 16 from typing import Any 17 from typing import Callable as Callable 18 19 import numpy as np 20 import torch 21 import torch.optim 22 import torch.utils.data 23 from PIL import Image 24 from torch.optim.lr_scheduler import CosineAnnealingLR 25 26 import composer 27 import composer.loggers 28 import composer.loggers.object_store_logger 29 import composer.trainer 30 import composer.trainer.trainer 31 import composer.utils 32 import composer.utils.checkpoint 33 import composer.utils.file_helpers 34 from composer import Trainer 35 from composer.core import Algorithm as Algorithm 36 from composer.core import Callback as Callback 37 from composer.core import DataSpec as DataSpec 38 from composer.core import Engine as Engine 39 from composer.core import Evaluator as Evaluator 40 from composer.core import Event as Event 41 from composer.core import State as State 42 from composer.core import Time as Time 43 from composer.core import Timestamp as Timestamp 44 from composer.core import TimeUnit as TimeUnit 45 from composer.core import types as types 46 from composer.datasets.synthetic import SyntheticBatchPairDataset 47 from composer.loggers import InMemoryLogger as InMemoryLogger 48 from composer.loggers import Logger as Logger 49 from composer.loggers import LogLevel as LogLevel 50 from composer.loggers import ObjectStoreLogger 51 from composer.models import ComposerModel as ComposerModel 52 from composer.optim.scheduler import ConstantScheduler 53 from composer.utils import LibcloudObjectStore 54 from composer.utils import ensure_tuple as ensure_tuple 55 56 # Need to insert the repo root at the beginning of the path, since there may be other modules named `tests` 57 # Assuming that docs generation is running from the `docs` directory 58 _docs_dir = os.path.abspath('.') 59 _repo_root = os.path.dirname(_docs_dir) 60 if sys.path[0] != _repo_root: 61 sys.path.insert(0, _repo_root) 62 63 from tests.common import SimpleModel 64 65 # Change the cwd to be the tempfile, so we don't pollute the documentation source folder 66 tmpdir = tempfile.TemporaryDirectory() 67 cwd = os.path.abspath('.') 68 os.chdir(tmpdir.name) 69 70 num_channels = 3 71 num_classes = 10 72 data_shape = (num_channels, 5, 5) 73 74 Model = SimpleModel 75 76 model = SimpleModel(num_channels, num_classes) 77 78 optimizer = torch.optim.SGD(model.parameters(), lr=0.001) 79 80 scheduler = CosineAnnealingLR(optimizer, T_max=1) 81 82 dataset = SyntheticBatchPairDataset( 83 total_dataset_size=100, 84 data_shape=data_shape, 85 num_classes=num_classes, 86 num_unique_samples_to_create=10, 87 ) 88 89 train_dataset = dataset 90 eval_dataset = dataset 91 92 batch_size = 10 93 94 train_dataloader = torch.utils.data.DataLoader( 95 train_dataset, 96 batch_size=batch_size, 97 num_workers=0, 98 pin_memory=False, 99 drop_last=True, 100 ) 101 102 eval_dataloader = torch.utils.data.DataLoader( 103 eval_dataset, 104 batch_size=batch_size, 105 num_workers=0, 106 pin_memory=False, 107 drop_last=False, 108 ) 109 110 state = State( 111 rank_zero_seed=0, 112 model=model, 113 run_name='run_name', 114 optimizers=optimizer, 115 grad_accum=1, 116 dataloader=train_dataloader, 117 dataloader_label='train', 118 max_duration='1ep', 119 precision='fp32', 120 ) 121 122 logger = Logger(state) 123 124 engine = Engine(state, logger) 125 126 image = Image.fromarray(np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)) 127 128 # error: "randn" is not a known member of module (reportGeneralTypeIssues) 129 X_example = torch.randn(batch_size, num_channels, 32, 32) # type: ignore 130 # error: "randn" is not a known member of module (reportGeneralTypeIssues) 131 logits = torch.randn(batch_size, num_classes) # type: ignore 132 # error: "randint" is not a known member of module (reportGeneralTypeIssues) 133 y_example = torch.randint(num_classes, (batch_size,)) # type: ignore 134 135 136 def loss_fun(output, target, reduction='none'): 137 """Dummy loss function.""" 138 return torch.ones_like(target) 139 140 141 # Patch Trainer __init__ function to replace arguments while preserving type 142 _original_trainer_init = Trainer.__init__ 143 144 145 def _new_trainer_init(self, fake_ellipses: None = None, **kwargs: Any): 146 if 'model' not in kwargs: 147 kwargs['model'] = model 148 if 'optimizers' not in kwargs: 149 kwargs['optimizers'] = torch.optim.SGD(kwargs['model'].parameters(), lr=0.01) 150 if 'schedulers' not in kwargs: 151 kwargs['schedulers'] = ConstantScheduler() 152 if 'max_duration' not in kwargs: 153 kwargs['max_duration'] = '1ep' 154 if 'train_dataloader' not in kwargs: 155 kwargs['train_dataloader'] = train_dataloader 156 if 'eval_dataloader' not in kwargs: 157 kwargs['eval_dataloader'] = eval_dataloader 158 if 'progress_bar' not in kwargs: 159 kwargs['progress_bar'] = False # hide tqdm logging 160 if 'log_to_console' not in kwargs: 161 kwargs['log_to_console'] = False # hide console logging 162 _original_trainer_init(self, **kwargs) 163 164 165 Trainer.__init__ = _new_trainer_init 166 167 168 # Do not attempt to validate cloud credentials 169 def _do_not_validate(*args, **kwargs) -> None: 170 pass 171 172 173 composer.loggers.object_store_logger._validate_credentials = _do_not_validate # type: ignore 174 175 # Patch ObjectStoreLogger __init__ function to replace arguments while preserving type 176 _original_objectStoreLogger_init = ObjectStoreLogger.__init__ 177 178 179 def _new_objectStoreLogger_init(self, fake_ellipses: None = None, **kwargs: Any): 180 os.makedirs('./object_store', exist_ok=True) 181 kwargs.update(use_procs=False, 182 num_concurrent_uploads=1, 183 object_store_cls=LibcloudObjectStore, 184 object_store_kwargs={ 185 'provider': 'local', 186 'container': '.', 187 'provider_kwargs': { 188 'key': os.path.abspath('./object_store'), 189 }, 190 }) 191 _original_objectStoreLogger_init(self, **kwargs) 192 193 194 ObjectStoreLogger.__init__ = _new_objectStoreLogger_init # type: ignore 195 196 # Patch ObjectStore __init__ function to replace arguments while preserving type 197 _original_libcloudObjectStore_init = LibcloudObjectStore.__init__ 198 199 200 def _new_libcloudObjectStore_init(self, fake_ellipses: None = None, **kwargs: Any): 201 os.makedirs('./object_store', exist_ok=True) 202 kwargs.update( 203 provider='local', 204 container='.', 205 provider_kwargs={ 206 'key': os.path.abspath('./object_store'), 207 }, 208 ) 209 _original_libcloudObjectStore_init(self, **kwargs) 210 211 212 LibcloudObjectStore.__init__ = _new_libcloudObjectStore_init # type: ignore 213 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/composer/__init__.py b/composer/__init__.py --- a/composer/__init__.py +++ b/composer/__init__.py @@ -3,28 +3,25 @@ """Composer.""" -from composer import algorithms as algorithms -from composer import callbacks as callbacks -from composer import datasets as datasets -from composer import functional as functional -from composer import loggers as loggers -from composer import models as models -from composer import optim as optim -from composer import profiler as profiler -from composer import trainer as trainer -from composer import utils as utils -from composer.core import Algorithm as Algorithm -from composer.core import Callback as Callback -from composer.core import DataSpec as DataSpec -from composer.core import Engine as Engine -from composer.core import Evaluator as Evaluator -from composer.core import Event as Event -from composer.core import State as State -from composer.core import Time as Time -from composer.core import Timestamp as Timestamp -from composer.core import TimeUnit as TimeUnit -from composer.core import types as types -from composer.models import ComposerModel as ComposerModel -from composer.trainer import Trainer as Trainer +from composer.core import Algorithm, Callback, DataSpec, Engine, Evaluator, Event, State, Time, Timestamp, TimeUnit +from composer.loggers import Logger +from composer.models import ComposerModel +from composer.trainer import Trainer + +__all__ = [ + 'Algorithm', + 'Callback', + 'DataSpec', + 'Engine', + 'Evaluator', + 'Event', + 'State', + 'Time', + 'Timestamp', + 'TimeUnit', + 'Logger', + 'ComposerModel', + 'Trainer', +] __version__ = '0.7.0' diff --git a/docs/source/doctest_cleanup.py b/docs/source/doctest_cleanup.py --- a/docs/source/doctest_cleanup.py +++ b/docs/source/doctest_cleanup.py @@ -4,6 +4,7 @@ """Cleanup script that is executed at the end of each doctest.""" import os +import shutil # variables are defined in doctest_fixtures.py # pyright: reportUndefinedVariable=none @@ -12,4 +13,7 @@ os.chdir(cwd) -tmpdir.cleanup() +try: + shutil.rmtree(tmpdir) +except OSError: + pass diff --git a/docs/source/doctest_fixtures.py b/docs/source/doctest_fixtures.py --- a/docs/source/doctest_fixtures.py +++ b/docs/source/doctest_fixtures.py @@ -63,9 +63,9 @@ from tests.common import SimpleModel # Change the cwd to be the tempfile, so we don't pollute the documentation source folder -tmpdir = tempfile.TemporaryDirectory() +tmpdir = tempfile.mkdtemp() cwd = os.path.abspath('.') -os.chdir(tmpdir.name) +os.chdir(tmpdir) num_channels = 3 num_classes = 10
{"golden_diff": "diff --git a/composer/__init__.py b/composer/__init__.py\n--- a/composer/__init__.py\n+++ b/composer/__init__.py\n@@ -3,28 +3,25 @@\n \n \"\"\"Composer.\"\"\"\n \n-from composer import algorithms as algorithms\n-from composer import callbacks as callbacks\n-from composer import datasets as datasets\n-from composer import functional as functional\n-from composer import loggers as loggers\n-from composer import models as models\n-from composer import optim as optim\n-from composer import profiler as profiler\n-from composer import trainer as trainer\n-from composer import utils as utils\n-from composer.core import Algorithm as Algorithm\n-from composer.core import Callback as Callback\n-from composer.core import DataSpec as DataSpec\n-from composer.core import Engine as Engine\n-from composer.core import Evaluator as Evaluator\n-from composer.core import Event as Event\n-from composer.core import State as State\n-from composer.core import Time as Time\n-from composer.core import Timestamp as Timestamp\n-from composer.core import TimeUnit as TimeUnit\n-from composer.core import types as types\n-from composer.models import ComposerModel as ComposerModel\n-from composer.trainer import Trainer as Trainer\n+from composer.core import Algorithm, Callback, DataSpec, Engine, Evaluator, Event, State, Time, Timestamp, TimeUnit\n+from composer.loggers import Logger\n+from composer.models import ComposerModel\n+from composer.trainer import Trainer\n+\n+__all__ = [\n+ 'Algorithm',\n+ 'Callback',\n+ 'DataSpec',\n+ 'Engine',\n+ 'Evaluator',\n+ 'Event',\n+ 'State',\n+ 'Time',\n+ 'Timestamp',\n+ 'TimeUnit',\n+ 'Logger',\n+ 'ComposerModel',\n+ 'Trainer',\n+]\n \n __version__ = '0.7.0'\ndiff --git a/docs/source/doctest_cleanup.py b/docs/source/doctest_cleanup.py\n--- a/docs/source/doctest_cleanup.py\n+++ b/docs/source/doctest_cleanup.py\n@@ -4,6 +4,7 @@\n \"\"\"Cleanup script that is executed at the end of each doctest.\"\"\"\n \n import os\n+import shutil\n \n # variables are defined in doctest_fixtures.py\n # pyright: reportUndefinedVariable=none\n@@ -12,4 +13,7 @@\n \n os.chdir(cwd)\n \n-tmpdir.cleanup()\n+try:\n+ shutil.rmtree(tmpdir)\n+except OSError:\n+ pass\ndiff --git a/docs/source/doctest_fixtures.py b/docs/source/doctest_fixtures.py\n--- a/docs/source/doctest_fixtures.py\n+++ b/docs/source/doctest_fixtures.py\n@@ -63,9 +63,9 @@\n from tests.common import SimpleModel\n \n # Change the cwd to be the tempfile, so we don't pollute the documentation source folder\n-tmpdir = tempfile.TemporaryDirectory()\n+tmpdir = tempfile.mkdtemp()\n cwd = os.path.abspath('.')\n-os.chdir(tmpdir.name)\n+os.chdir(tmpdir)\n \n num_channels = 3\n num_classes = 10\n", "issue": "Consistency in docs [State, time, callbacks]\nI am re-writing a callback that was working on composer 0.5 and now it does not.\r\nI see that there is a new class `time` and that `state.epoch` is not there anymore, the problem is that the docs still point to that. Like [here](https://docs.mosaicml.com/en/v0.7.1/trainer/logging.html#logging-additional-data) or that `Logger` cannot be imported from `composer` (`from composer.loggers import Logger`)\r\nI am having a hard time creating a custom callback, maybe a tutorial with the new API?\r\n\r\nThe callback is for logging images to wandb:\r\n```python\r\nimport wandb\r\nfrom composer import Callback, State, Logger\r\n\r\nclass LogPredictions(Callback):\r\n def __init__(self, num_samples=100, seed=1234):\r\n super().__init__()\r\n self.num_samples = num_samples\r\n self.data = []\r\n \r\n def eval_batch_end(self, state: State, logger: Logger):\r\n \"\"\"Compute predictions per batch and stores them on self.data\"\"\"\r\n if state.epoch == state.max_duration: #on last val epoch\r\n if len(self.data) < self.num_samples:\r\n n = self.num_samples\r\n x, y = state.batch_pair\r\n outputs = state.outputs.argmax(-1)\r\n data = [[wandb.Image(x_i), y_i, y_pred] for x_i, y_i, y_pred in list(zip(x[:n], y[:n], outputs[:n]))]\r\n self.data += data\r\n \r\n def eval_end(self, state: State, logger: Logger):\r\n \"Create a wandb.Table and logs it\"\r\n columns = ['image', 'ground truth', 'prediction']\r\n table = wandb.Table(columns=columns, data=self.data[:self.num_samples])\r\n wandb.log({'sample_table':table}, step=int(state.timer.batch))\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Cleanup script that is executed at the end of each doctest.\"\"\"\n\nimport os\n\n# variables are defined in doctest_fixtures.py\n# pyright: reportUndefinedVariable=none\n\n# tmpdir and cwd were defined in doctest_fixtures.py\n\nos.chdir(cwd)\n\ntmpdir.cleanup()\n", "path": "docs/source/doctest_cleanup.py"}, {"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Composer.\"\"\"\n\nfrom composer import algorithms as algorithms\nfrom composer import callbacks as callbacks\nfrom composer import datasets as datasets\nfrom composer import functional as functional\nfrom composer import loggers as loggers\nfrom composer import models as models\nfrom composer import optim as optim\nfrom composer import profiler as profiler\nfrom composer import trainer as trainer\nfrom composer import utils as utils\nfrom composer.core import Algorithm as Algorithm\nfrom composer.core import Callback as Callback\nfrom composer.core import DataSpec as DataSpec\nfrom composer.core import Engine as Engine\nfrom composer.core import Evaluator as Evaluator\nfrom composer.core import Event as Event\nfrom composer.core import State as State\nfrom composer.core import Time as Time\nfrom composer.core import Timestamp as Timestamp\nfrom composer.core import TimeUnit as TimeUnit\nfrom composer.core import types as types\nfrom composer.models import ComposerModel as ComposerModel\nfrom composer.trainer import Trainer as Trainer\n\n__version__ = '0.7.0'\n", "path": "composer/__init__.py"}, {"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n# disabling general type issues because of monkeypatching\n#yright: reportGeneralTypeIssues=none\n\n\"\"\"Fixtures available in doctests.\n\nThe script is run before any doctests are executed,\nso all imports and variables are available in any doctest.\nThe output of this setup script does not show up in the documentation.\n\"\"\"\nimport os\nimport sys\nimport tempfile\nfrom typing import Any\nfrom typing import Callable as Callable\n\nimport numpy as np\nimport torch\nimport torch.optim\nimport torch.utils.data\nfrom PIL import Image\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\n\nimport composer\nimport composer.loggers\nimport composer.loggers.object_store_logger\nimport composer.trainer\nimport composer.trainer.trainer\nimport composer.utils\nimport composer.utils.checkpoint\nimport composer.utils.file_helpers\nfrom composer import Trainer\nfrom composer.core import Algorithm as Algorithm\nfrom composer.core import Callback as Callback\nfrom composer.core import DataSpec as DataSpec\nfrom composer.core import Engine as Engine\nfrom composer.core import Evaluator as Evaluator\nfrom composer.core import Event as Event\nfrom composer.core import State as State\nfrom composer.core import Time as Time\nfrom composer.core import Timestamp as Timestamp\nfrom composer.core import TimeUnit as TimeUnit\nfrom composer.core import types as types\nfrom composer.datasets.synthetic import SyntheticBatchPairDataset\nfrom composer.loggers import InMemoryLogger as InMemoryLogger\nfrom composer.loggers import Logger as Logger\nfrom composer.loggers import LogLevel as LogLevel\nfrom composer.loggers import ObjectStoreLogger\nfrom composer.models import ComposerModel as ComposerModel\nfrom composer.optim.scheduler import ConstantScheduler\nfrom composer.utils import LibcloudObjectStore\nfrom composer.utils import ensure_tuple as ensure_tuple\n\n# Need to insert the repo root at the beginning of the path, since there may be other modules named `tests`\n# Assuming that docs generation is running from the `docs` directory\n_docs_dir = os.path.abspath('.')\n_repo_root = os.path.dirname(_docs_dir)\nif sys.path[0] != _repo_root:\n sys.path.insert(0, _repo_root)\n\nfrom tests.common import SimpleModel\n\n# Change the cwd to be the tempfile, so we don't pollute the documentation source folder\ntmpdir = tempfile.TemporaryDirectory()\ncwd = os.path.abspath('.')\nos.chdir(tmpdir.name)\n\nnum_channels = 3\nnum_classes = 10\ndata_shape = (num_channels, 5, 5)\n\nModel = SimpleModel\n\nmodel = SimpleModel(num_channels, num_classes)\n\noptimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n\nscheduler = CosineAnnealingLR(optimizer, T_max=1)\n\ndataset = SyntheticBatchPairDataset(\n total_dataset_size=100,\n data_shape=data_shape,\n num_classes=num_classes,\n num_unique_samples_to_create=10,\n)\n\ntrain_dataset = dataset\neval_dataset = dataset\n\nbatch_size = 10\n\ntrain_dataloader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=0,\n pin_memory=False,\n drop_last=True,\n)\n\neval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=batch_size,\n num_workers=0,\n pin_memory=False,\n drop_last=False,\n)\n\nstate = State(\n rank_zero_seed=0,\n model=model,\n run_name='run_name',\n optimizers=optimizer,\n grad_accum=1,\n dataloader=train_dataloader,\n dataloader_label='train',\n max_duration='1ep',\n precision='fp32',\n)\n\nlogger = Logger(state)\n\nengine = Engine(state, logger)\n\nimage = Image.fromarray(np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8))\n\n# error: \"randn\" is not a known member of module (reportGeneralTypeIssues)\nX_example = torch.randn(batch_size, num_channels, 32, 32) # type: ignore\n# error: \"randn\" is not a known member of module (reportGeneralTypeIssues)\nlogits = torch.randn(batch_size, num_classes) # type: ignore\n# error: \"randint\" is not a known member of module (reportGeneralTypeIssues)\ny_example = torch.randint(num_classes, (batch_size,)) # type: ignore\n\n\ndef loss_fun(output, target, reduction='none'):\n \"\"\"Dummy loss function.\"\"\"\n return torch.ones_like(target)\n\n\n# Patch Trainer __init__ function to replace arguments while preserving type\n_original_trainer_init = Trainer.__init__\n\n\ndef _new_trainer_init(self, fake_ellipses: None = None, **kwargs: Any):\n if 'model' not in kwargs:\n kwargs['model'] = model\n if 'optimizers' not in kwargs:\n kwargs['optimizers'] = torch.optim.SGD(kwargs['model'].parameters(), lr=0.01)\n if 'schedulers' not in kwargs:\n kwargs['schedulers'] = ConstantScheduler()\n if 'max_duration' not in kwargs:\n kwargs['max_duration'] = '1ep'\n if 'train_dataloader' not in kwargs:\n kwargs['train_dataloader'] = train_dataloader\n if 'eval_dataloader' not in kwargs:\n kwargs['eval_dataloader'] = eval_dataloader\n if 'progress_bar' not in kwargs:\n kwargs['progress_bar'] = False # hide tqdm logging\n if 'log_to_console' not in kwargs:\n kwargs['log_to_console'] = False # hide console logging\n _original_trainer_init(self, **kwargs)\n\n\nTrainer.__init__ = _new_trainer_init\n\n\n# Do not attempt to validate cloud credentials\ndef _do_not_validate(*args, **kwargs) -> None:\n pass\n\n\ncomposer.loggers.object_store_logger._validate_credentials = _do_not_validate # type: ignore\n\n# Patch ObjectStoreLogger __init__ function to replace arguments while preserving type\n_original_objectStoreLogger_init = ObjectStoreLogger.__init__\n\n\ndef _new_objectStoreLogger_init(self, fake_ellipses: None = None, **kwargs: Any):\n os.makedirs('./object_store', exist_ok=True)\n kwargs.update(use_procs=False,\n num_concurrent_uploads=1,\n object_store_cls=LibcloudObjectStore,\n object_store_kwargs={\n 'provider': 'local',\n 'container': '.',\n 'provider_kwargs': {\n 'key': os.path.abspath('./object_store'),\n },\n })\n _original_objectStoreLogger_init(self, **kwargs)\n\n\nObjectStoreLogger.__init__ = _new_objectStoreLogger_init # type: ignore\n\n# Patch ObjectStore __init__ function to replace arguments while preserving type\n_original_libcloudObjectStore_init = LibcloudObjectStore.__init__\n\n\ndef _new_libcloudObjectStore_init(self, fake_ellipses: None = None, **kwargs: Any):\n os.makedirs('./object_store', exist_ok=True)\n kwargs.update(\n provider='local',\n container='.',\n provider_kwargs={\n 'key': os.path.abspath('./object_store'),\n },\n )\n _original_libcloudObjectStore_init(self, **kwargs)\n\n\nLibcloudObjectStore.__init__ = _new_libcloudObjectStore_init # type: ignore\n", "path": "docs/source/doctest_fixtures.py"}], "after_files": [{"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Cleanup script that is executed at the end of each doctest.\"\"\"\n\nimport os\nimport shutil\n\n# variables are defined in doctest_fixtures.py\n# pyright: reportUndefinedVariable=none\n\n# tmpdir and cwd were defined in doctest_fixtures.py\n\nos.chdir(cwd)\n\ntry:\n shutil.rmtree(tmpdir)\nexcept OSError:\n pass\n", "path": "docs/source/doctest_cleanup.py"}, {"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Composer.\"\"\"\n\nfrom composer.core import Algorithm, Callback, DataSpec, Engine, Evaluator, Event, State, Time, Timestamp, TimeUnit\nfrom composer.loggers import Logger\nfrom composer.models import ComposerModel\nfrom composer.trainer import Trainer\n\n__all__ = [\n 'Algorithm',\n 'Callback',\n 'DataSpec',\n 'Engine',\n 'Evaluator',\n 'Event',\n 'State',\n 'Time',\n 'Timestamp',\n 'TimeUnit',\n 'Logger',\n 'ComposerModel',\n 'Trainer',\n]\n\n__version__ = '0.7.0'\n", "path": "composer/__init__.py"}, {"content": "# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\n# disabling general type issues because of monkeypatching\n#yright: reportGeneralTypeIssues=none\n\n\"\"\"Fixtures available in doctests.\n\nThe script is run before any doctests are executed,\nso all imports and variables are available in any doctest.\nThe output of this setup script does not show up in the documentation.\n\"\"\"\nimport os\nimport sys\nimport tempfile\nfrom typing import Any\nfrom typing import Callable as Callable\n\nimport numpy as np\nimport torch\nimport torch.optim\nimport torch.utils.data\nfrom PIL import Image\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\n\nimport composer\nimport composer.loggers\nimport composer.loggers.object_store_logger\nimport composer.trainer\nimport composer.trainer.trainer\nimport composer.utils\nimport composer.utils.checkpoint\nimport composer.utils.file_helpers\nfrom composer import Trainer\nfrom composer.core import Algorithm as Algorithm\nfrom composer.core import Callback as Callback\nfrom composer.core import DataSpec as DataSpec\nfrom composer.core import Engine as Engine\nfrom composer.core import Evaluator as Evaluator\nfrom composer.core import Event as Event\nfrom composer.core import State as State\nfrom composer.core import Time as Time\nfrom composer.core import Timestamp as Timestamp\nfrom composer.core import TimeUnit as TimeUnit\nfrom composer.core import types as types\nfrom composer.datasets.synthetic import SyntheticBatchPairDataset\nfrom composer.loggers import InMemoryLogger as InMemoryLogger\nfrom composer.loggers import Logger as Logger\nfrom composer.loggers import LogLevel as LogLevel\nfrom composer.loggers import ObjectStoreLogger\nfrom composer.models import ComposerModel as ComposerModel\nfrom composer.optim.scheduler import ConstantScheduler\nfrom composer.utils import LibcloudObjectStore\nfrom composer.utils import ensure_tuple as ensure_tuple\n\n# Need to insert the repo root at the beginning of the path, since there may be other modules named `tests`\n# Assuming that docs generation is running from the `docs` directory\n_docs_dir = os.path.abspath('.')\n_repo_root = os.path.dirname(_docs_dir)\nif sys.path[0] != _repo_root:\n sys.path.insert(0, _repo_root)\n\nfrom tests.common import SimpleModel\n\n# Change the cwd to be the tempfile, so we don't pollute the documentation source folder\ntmpdir = tempfile.mkdtemp()\ncwd = os.path.abspath('.')\nos.chdir(tmpdir)\n\nnum_channels = 3\nnum_classes = 10\ndata_shape = (num_channels, 5, 5)\n\nModel = SimpleModel\n\nmodel = SimpleModel(num_channels, num_classes)\n\noptimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n\nscheduler = CosineAnnealingLR(optimizer, T_max=1)\n\ndataset = SyntheticBatchPairDataset(\n total_dataset_size=100,\n data_shape=data_shape,\n num_classes=num_classes,\n num_unique_samples_to_create=10,\n)\n\ntrain_dataset = dataset\neval_dataset = dataset\n\nbatch_size = 10\n\ntrain_dataloader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=0,\n pin_memory=False,\n drop_last=True,\n)\n\neval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=batch_size,\n num_workers=0,\n pin_memory=False,\n drop_last=False,\n)\n\nstate = State(\n rank_zero_seed=0,\n model=model,\n run_name='run_name',\n optimizers=optimizer,\n grad_accum=1,\n dataloader=train_dataloader,\n dataloader_label='train',\n max_duration='1ep',\n precision='fp32',\n)\n\nlogger = Logger(state)\n\nengine = Engine(state, logger)\n\nimage = Image.fromarray(np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8))\n\n# error: \"randn\" is not a known member of module (reportGeneralTypeIssues)\nX_example = torch.randn(batch_size, num_channels, 32, 32) # type: ignore\n# error: \"randn\" is not a known member of module (reportGeneralTypeIssues)\nlogits = torch.randn(batch_size, num_classes) # type: ignore\n# error: \"randint\" is not a known member of module (reportGeneralTypeIssues)\ny_example = torch.randint(num_classes, (batch_size,)) # type: ignore\n\n\ndef loss_fun(output, target, reduction='none'):\n \"\"\"Dummy loss function.\"\"\"\n return torch.ones_like(target)\n\n\n# Patch Trainer __init__ function to replace arguments while preserving type\n_original_trainer_init = Trainer.__init__\n\n\ndef _new_trainer_init(self, fake_ellipses: None = None, **kwargs: Any):\n if 'model' not in kwargs:\n kwargs['model'] = model\n if 'optimizers' not in kwargs:\n kwargs['optimizers'] = torch.optim.SGD(kwargs['model'].parameters(), lr=0.01)\n if 'schedulers' not in kwargs:\n kwargs['schedulers'] = ConstantScheduler()\n if 'max_duration' not in kwargs:\n kwargs['max_duration'] = '1ep'\n if 'train_dataloader' not in kwargs:\n kwargs['train_dataloader'] = train_dataloader\n if 'eval_dataloader' not in kwargs:\n kwargs['eval_dataloader'] = eval_dataloader\n if 'progress_bar' not in kwargs:\n kwargs['progress_bar'] = False # hide tqdm logging\n if 'log_to_console' not in kwargs:\n kwargs['log_to_console'] = False # hide console logging\n _original_trainer_init(self, **kwargs)\n\n\nTrainer.__init__ = _new_trainer_init\n\n\n# Do not attempt to validate cloud credentials\ndef _do_not_validate(*args, **kwargs) -> None:\n pass\n\n\ncomposer.loggers.object_store_logger._validate_credentials = _do_not_validate # type: ignore\n\n# Patch ObjectStoreLogger __init__ function to replace arguments while preserving type\n_original_objectStoreLogger_init = ObjectStoreLogger.__init__\n\n\ndef _new_objectStoreLogger_init(self, fake_ellipses: None = None, **kwargs: Any):\n os.makedirs('./object_store', exist_ok=True)\n kwargs.update(use_procs=False,\n num_concurrent_uploads=1,\n object_store_cls=LibcloudObjectStore,\n object_store_kwargs={\n 'provider': 'local',\n 'container': '.',\n 'provider_kwargs': {\n 'key': os.path.abspath('./object_store'),\n },\n })\n _original_objectStoreLogger_init(self, **kwargs)\n\n\nObjectStoreLogger.__init__ = _new_objectStoreLogger_init # type: ignore\n\n# Patch ObjectStore __init__ function to replace arguments while preserving type\n_original_libcloudObjectStore_init = LibcloudObjectStore.__init__\n\n\ndef _new_libcloudObjectStore_init(self, fake_ellipses: None = None, **kwargs: Any):\n os.makedirs('./object_store', exist_ok=True)\n kwargs.update(\n provider='local',\n container='.',\n provider_kwargs={\n 'key': os.path.abspath('./object_store'),\n },\n )\n _original_libcloudObjectStore_init(self, **kwargs)\n\n\nLibcloudObjectStore.__init__ = _new_libcloudObjectStore_init # type: ignore\n", "path": "docs/source/doctest_fixtures.py"}]}
3,224
637
gh_patches_debug_2642
rasdani/github-patches
git_diff
sunpy__sunpy-3676
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Removing astropy_helpers section in CONTRIBUTING.rst <!-- This comments are hidden when you submit the issue so you do not need to remove them! Please be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst Please be sure to check out our code of conduct: https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst --> <!-- Please have a search on our GitHub repository to see if a similar issue has already been posted. If a similar issue is closed, have a quick look to see if you are satisfied by the resolution. If not please go ahead and open an issue! --> ### Description <!-- Provide a general description of the bug. --> As of PR https://github.com/sunpy/sunpy/pull/3598, sunpy no longer needs `astropy_helpers`, and even it is removed from the package. I think there should not be a section of Astropy Helpers in contribution guidelines as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sunpy/version.py` Content: ``` 1 # This file is for compatibility with astropy_helpers 2 version = 'unknown.dev' 3 try: 4 from importlib_metadata import version as _version, PackageNotFoundError 5 version = _version('sunpy') 6 except ImportError: 7 from pkg_resources import get_distribution, DistributionNotFound 8 try: 9 version = get_distribution("sunpy").version 10 except DistributionNotFound: 11 pass 12 except PackageNotFoundError: 13 pass 14 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sunpy/version.py b/sunpy/version.py deleted file mode 100644 --- a/sunpy/version.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file is for compatibility with astropy_helpers -version = 'unknown.dev' -try: - from importlib_metadata import version as _version, PackageNotFoundError - version = _version('sunpy') -except ImportError: - from pkg_resources import get_distribution, DistributionNotFound - try: - version = get_distribution("sunpy").version - except DistributionNotFound: - pass -except PackageNotFoundError: - pass
{"golden_diff": "diff --git a/sunpy/version.py b/sunpy/version.py\ndeleted file mode 100644\n--- a/sunpy/version.py\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-# This file is for compatibility with astropy_helpers\n-version = 'unknown.dev'\n-try:\n- from importlib_metadata import version as _version, PackageNotFoundError\n- version = _version('sunpy')\n-except ImportError:\n- from pkg_resources import get_distribution, DistributionNotFound\n- try:\n- version = get_distribution(\"sunpy\").version\n- except DistributionNotFound:\n- pass\n-except PackageNotFoundError:\n- pass\n", "issue": "Removing astropy_helpers section in CONTRIBUTING.rst\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! -->\r\n\r\n### Description\r\n<!-- Provide a general description of the bug. -->\r\nAs of PR https://github.com/sunpy/sunpy/pull/3598, sunpy no longer needs `astropy_helpers`, and even it is removed from the package.\r\nI think there should not be a section of Astropy Helpers in contribution guidelines as well.\n", "before_files": [{"content": "# This file is for compatibility with astropy_helpers\nversion = 'unknown.dev'\ntry:\n from importlib_metadata import version as _version, PackageNotFoundError\n version = _version('sunpy')\nexcept ImportError:\n from pkg_resources import get_distribution, DistributionNotFound\n try:\n version = get_distribution(\"sunpy\").version\n except DistributionNotFound:\n pass\nexcept PackageNotFoundError:\n pass\n", "path": "sunpy/version.py"}], "after_files": [{"content": null, "path": "sunpy/version.py"}]}
576
148
gh_patches_debug_6051
rasdani/github-patches
git_diff
coala__coala-3888
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- aspects/meta.py: Typo error <!-- Hello! If you're filing a bug, please include every step so as to help us reproduce it on our machines. If you're unsure about how to file an issue, use the issue template. If you need any help regarding usage of coala, check out the documentation or hit us up on chat. You can ignore or delete this text, it is commented and won't appear when the issue is submitted or previewed. Chat: https://coala.io/chat Issue Template: https://github.com/coala/coala/blob/master/CONTRIBUTING.rst#filing-issues Documentation: https://docs.coala.io --> Replace `int` -> `in` in `search for tastes int the sub-aspectclass` difficulty/newcomer --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `coalib/bearlib/aspects/meta.py` Content: ``` 1 from inspect import getmembers, signature 2 3 from coala_utils.decorators import generate_repr 4 5 from .base import aspectbase 6 from .docs import Documentation 7 from .taste import Taste 8 9 10 class aspectclass(type): 11 """ 12 Metaclass for aspectclasses. 13 14 Root aspectclass is :class:`coalib.bearlib.aspectclasses.Root`. 15 """ 16 def __init__(cls, clsname, bases, clsattrs): 17 """ 18 Initializes the ``.subaspects`` dict on new aspectclasses. 19 """ 20 cls.subaspects = {} 21 22 @property 23 def tastes(cls): 24 """ 25 Get a dictionary of all taste names mapped to their 26 :class:`coalib.bearlib.aspectclasses.Taste` instances. 27 """ 28 if cls.parent: 29 return dict(cls.parent.tastes, **cls._tastes) 30 31 return dict(cls._tastes) 32 33 def subaspect(cls, subcls): 34 """ 35 The sub-aspectclass decorator. 36 37 See :class:`coalib.bearlib.aspectclasses.Root` for description 38 and usage. 39 """ 40 aspectname = subcls.__name__ 41 42 docs = getattr(subcls, 'docs', None) 43 aspectdocs = Documentation(subcls.__doc__, **{ 44 attr: getattr(docs, attr, '') for attr in 45 list(signature(Documentation).parameters.keys())[1:]}) 46 47 # search for tastes int the sub-aspectclass 48 subtastes = {} 49 for name, member in getmembers(subcls): 50 if isinstance(member, Taste): 51 # tell the taste its own name 52 member.name = name 53 subtastes[name] = member 54 55 class Sub(subcls, aspectbase, metaclass=aspectclass): 56 __module__ = subcls.__module__ 57 58 parent = cls 59 60 docs = aspectdocs 61 _tastes = subtastes 62 63 members = sorted(Sub.tastes) 64 if members: 65 Sub = generate_repr(*members)(Sub) 66 67 Sub.__name__ = aspectname 68 Sub.__qualname__ = '%s.%s' % (cls.__qualname__, aspectname) 69 cls.subaspects[aspectname] = Sub 70 setattr(cls, aspectname, Sub) 71 return Sub 72 73 def __repr__(cls): 74 return '<%s %s>' % (type(cls).__name__, repr(cls.__qualname__)) 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/coalib/bearlib/aspects/meta.py b/coalib/bearlib/aspects/meta.py --- a/coalib/bearlib/aspects/meta.py +++ b/coalib/bearlib/aspects/meta.py @@ -44,7 +44,7 @@ attr: getattr(docs, attr, '') for attr in list(signature(Documentation).parameters.keys())[1:]}) - # search for tastes int the sub-aspectclass + # search for tastes in the sub-aspectclass subtastes = {} for name, member in getmembers(subcls): if isinstance(member, Taste):
{"golden_diff": "diff --git a/coalib/bearlib/aspects/meta.py b/coalib/bearlib/aspects/meta.py\n--- a/coalib/bearlib/aspects/meta.py\n+++ b/coalib/bearlib/aspects/meta.py\n@@ -44,7 +44,7 @@\n attr: getattr(docs, attr, '') for attr in\n list(signature(Documentation).parameters.keys())[1:]})\n \n- # search for tastes int the sub-aspectclass\n+ # search for tastes in the sub-aspectclass\n subtastes = {}\n for name, member in getmembers(subcls):\n if isinstance(member, Taste):\n", "issue": "aspects/meta.py: Typo error\n<!-- Hello! If you're filing a bug, please include every step so as to help us reproduce it on our machines. If you're unsure about how to file an issue, use the issue template. If you need any help regarding usage of coala, check out the documentation or hit us up on chat. You can ignore or delete this text, it is commented and won't appear when the issue is submitted or previewed.\r\n\r\nChat: https://coala.io/chat\r\nIssue Template: https://github.com/coala/coala/blob/master/CONTRIBUTING.rst#filing-issues\r\nDocumentation: https://docs.coala.io\r\n-->\r\nReplace `int` -> `in` in `search for tastes int the sub-aspectclass`\r\n\r\ndifficulty/newcomer\n", "before_files": [{"content": "from inspect import getmembers, signature\n\nfrom coala_utils.decorators import generate_repr\n\nfrom .base import aspectbase\nfrom .docs import Documentation\nfrom .taste import Taste\n\n\nclass aspectclass(type):\n \"\"\"\n Metaclass for aspectclasses.\n\n Root aspectclass is :class:`coalib.bearlib.aspectclasses.Root`.\n \"\"\"\n def __init__(cls, clsname, bases, clsattrs):\n \"\"\"\n Initializes the ``.subaspects`` dict on new aspectclasses.\n \"\"\"\n cls.subaspects = {}\n\n @property\n def tastes(cls):\n \"\"\"\n Get a dictionary of all taste names mapped to their\n :class:`coalib.bearlib.aspectclasses.Taste` instances.\n \"\"\"\n if cls.parent:\n return dict(cls.parent.tastes, **cls._tastes)\n\n return dict(cls._tastes)\n\n def subaspect(cls, subcls):\n \"\"\"\n The sub-aspectclass decorator.\n\n See :class:`coalib.bearlib.aspectclasses.Root` for description\n and usage.\n \"\"\"\n aspectname = subcls.__name__\n\n docs = getattr(subcls, 'docs', None)\n aspectdocs = Documentation(subcls.__doc__, **{\n attr: getattr(docs, attr, '') for attr in\n list(signature(Documentation).parameters.keys())[1:]})\n\n # search for tastes int the sub-aspectclass\n subtastes = {}\n for name, member in getmembers(subcls):\n if isinstance(member, Taste):\n # tell the taste its own name\n member.name = name\n subtastes[name] = member\n\n class Sub(subcls, aspectbase, metaclass=aspectclass):\n __module__ = subcls.__module__\n\n parent = cls\n\n docs = aspectdocs\n _tastes = subtastes\n\n members = sorted(Sub.tastes)\n if members:\n Sub = generate_repr(*members)(Sub)\n\n Sub.__name__ = aspectname\n Sub.__qualname__ = '%s.%s' % (cls.__qualname__, aspectname)\n cls.subaspects[aspectname] = Sub\n setattr(cls, aspectname, Sub)\n return Sub\n\n def __repr__(cls):\n return '<%s %s>' % (type(cls).__name__, repr(cls.__qualname__))\n", "path": "coalib/bearlib/aspects/meta.py"}], "after_files": [{"content": "from inspect import getmembers, signature\n\nfrom coala_utils.decorators import generate_repr\n\nfrom .base import aspectbase\nfrom .docs import Documentation\nfrom .taste import Taste\n\n\nclass aspectclass(type):\n \"\"\"\n Metaclass for aspectclasses.\n\n Root aspectclass is :class:`coalib.bearlib.aspectclasses.Root`.\n \"\"\"\n def __init__(cls, clsname, bases, clsattrs):\n \"\"\"\n Initializes the ``.subaspects`` dict on new aspectclasses.\n \"\"\"\n cls.subaspects = {}\n\n @property\n def tastes(cls):\n \"\"\"\n Get a dictionary of all taste names mapped to their\n :class:`coalib.bearlib.aspectclasses.Taste` instances.\n \"\"\"\n if cls.parent:\n return dict(cls.parent.tastes, **cls._tastes)\n\n return dict(cls._tastes)\n\n def subaspect(cls, subcls):\n \"\"\"\n The sub-aspectclass decorator.\n\n See :class:`coalib.bearlib.aspectclasses.Root` for description\n and usage.\n \"\"\"\n aspectname = subcls.__name__\n\n docs = getattr(subcls, 'docs', None)\n aspectdocs = Documentation(subcls.__doc__, **{\n attr: getattr(docs, attr, '') for attr in\n list(signature(Documentation).parameters.keys())[1:]})\n\n # search for tastes in the sub-aspectclass\n subtastes = {}\n for name, member in getmembers(subcls):\n if isinstance(member, Taste):\n # tell the taste its own name\n member.name = name\n subtastes[name] = member\n\n class Sub(subcls, aspectbase, metaclass=aspectclass):\n __module__ = subcls.__module__\n\n parent = cls\n\n docs = aspectdocs\n _tastes = subtastes\n\n members = sorted(Sub.tastes)\n if members:\n Sub = generate_repr(*members)(Sub)\n\n Sub.__name__ = aspectname\n Sub.__qualname__ = '%s.%s' % (cls.__qualname__, aspectname)\n cls.subaspects[aspectname] = Sub\n setattr(cls, aspectname, Sub)\n return Sub\n\n def __repr__(cls):\n return '<%s %s>' % (type(cls).__name__, repr(cls.__qualname__))\n", "path": "coalib/bearlib/aspects/meta.py"}]}
1,075
139
gh_patches_debug_19130
rasdani/github-patches
git_diff
DataDog__dd-trace-py-3840
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Starlette/Fastapi: endpoint duration includes the duration of background tasks ### Which version of dd-trace-py are you using? ddtrace==0.55.4 ### Which version of pip are you using? 21.2.4 ### Which version of the libraries are you using? fastapi==0.68.2 starlette==0.14.2 ### How can we reproduce your problem? this would be a minimal proof of concept `app.py`, running through `ddtrace-run uvicorn app:app` ``` import asyncio from ddtrace import tracer from fastapi import FastAPI, BackgroundTasks app = FastAPI() async def some_background_task(): with tracer.start_span("some_background_task", activate=True): tracer.context_provider.activate(None) await asyncio.sleep(10) @app.get("/") async def main(background_tasks: BackgroundTasks): background_tasks.add_task(some_background_task) return "ok" ``` ### What is the result that you get? The duration of `/` is reported to be 10s, while the browser immediately receives the response. `some_background_task` is also reported with a duration of 10s. ### What is the result that you expected? I would expect that the reported endpoint duration matches the time it took to get the response, and that the background task is reported separately. Please don't mind that `tracer.context_provider.activate(None)` might be redundant here, adding it here to show what I have tried. FastAPI's `add_task` actually comes from starlette https://www.starlette.io/background/ I can understand why the endpoint duration includes the background task, this is the definition of starlette's `Response.__call__`: https://github.com/encode/starlette/blob/ada99beee530e7b841ce320bc6e66f6dbd9ad781/starlette/responses.py#L159 ``` async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) await send({"type": "http.response.body", "body": self.body}) if self.background is not None: await self.background() ``` The response header and body is sent, but the function itself is not finished until all background tasks have been processed. I believe that this is not what users of ddtrace would expect: the background tasks are used to return a response early without waiting for background operations to finish ; the reported endpoint duration should correspond to when the body was sent --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/asgi/middleware.py` Content: ``` 1 import sys 2 from typing import TYPE_CHECKING 3 4 import ddtrace 5 from ddtrace import config 6 from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY 7 from ddtrace.ext import SpanTypes 8 from ddtrace.ext import http 9 10 from .. import trace_utils 11 from ...internal.compat import reraise 12 from ...internal.logger import get_logger 13 from .utils import guarantee_single_callable 14 15 16 if TYPE_CHECKING: 17 from typing import Any 18 from typing import Mapping 19 from typing import Optional 20 21 from ddtrace import Span 22 23 24 log = get_logger(__name__) 25 26 config._add( 27 "asgi", 28 dict(service_name=config._get_service(default="asgi"), request_span_name="asgi.request", distributed_tracing=True), 29 ) 30 31 ASGI_VERSION = "asgi.version" 32 ASGI_SPEC_VERSION = "asgi.spec_version" 33 34 35 def bytes_to_str(str_or_bytes): 36 return str_or_bytes.decode() if isinstance(str_or_bytes, bytes) else str_or_bytes 37 38 39 def _extract_versions_from_scope(scope, integration_config): 40 tags = {} 41 42 http_version = scope.get("http_version") 43 if http_version: 44 tags[http.VERSION] = http_version 45 46 scope_asgi = scope.get("asgi") 47 48 if scope_asgi and "version" in scope_asgi: 49 tags[ASGI_VERSION] = scope_asgi["version"] 50 51 if scope_asgi and "spec_version" in scope_asgi: 52 tags[ASGI_SPEC_VERSION] = scope_asgi["spec_version"] 53 54 return tags 55 56 57 def _extract_headers(scope): 58 headers = scope.get("headers") 59 if headers: 60 # headers: (Iterable[[byte string, byte string]]) 61 return dict((bytes_to_str(k), bytes_to_str(v)) for (k, v) in headers) 62 return {} 63 64 65 def _default_handle_exception_span(exc, span): 66 """Default handler for exception for span""" 67 span.set_tag(http.STATUS_CODE, 500) 68 69 70 def span_from_scope(scope): 71 # type: (Mapping[str, Any]) -> Optional[Span] 72 """Retrieve the top-level ASGI span from the scope.""" 73 return scope.get("datadog", {}).get("request_spans", [None])[0] 74 75 76 class TraceMiddleware: 77 """ 78 ASGI application middleware that traces the requests. 79 Args: 80 app: The ASGI application. 81 tracer: Custom tracer. Defaults to the global tracer. 82 """ 83 84 def __init__( 85 self, 86 app, 87 tracer=None, 88 integration_config=config.asgi, 89 handle_exception_span=_default_handle_exception_span, 90 span_modifier=None, 91 ): 92 self.app = guarantee_single_callable(app) 93 self.tracer = tracer or ddtrace.tracer 94 self.integration_config = integration_config 95 self.handle_exception_span = handle_exception_span 96 self.span_modifier = span_modifier 97 98 async def __call__(self, scope, receive, send): 99 if scope["type"] != "http": 100 return await self.app(scope, receive, send) 101 102 try: 103 headers = _extract_headers(scope) 104 except Exception: 105 log.warning("failed to decode headers for distributed tracing", exc_info=True) 106 headers = {} 107 else: 108 trace_utils.activate_distributed_headers( 109 self.tracer, int_config=self.integration_config, request_headers=headers 110 ) 111 112 resource = "{} {}".format(scope["method"], scope["path"]) 113 114 span = self.tracer.trace( 115 name=self.integration_config.get("request_span_name", "asgi.request"), 116 service=trace_utils.int_service(None, self.integration_config), 117 resource=resource, 118 span_type=SpanTypes.WEB, 119 ) 120 121 if "datadog" not in scope: 122 scope["datadog"] = {"request_spans": [span]} 123 else: 124 scope["datadog"]["request_spans"].append(span) 125 126 if self.span_modifier: 127 self.span_modifier(span, scope) 128 129 sample_rate = self.integration_config.get_analytics_sample_rate(use_global_config=True) 130 if sample_rate is not None: 131 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate) 132 133 method = scope.get("method") 134 server = scope.get("server") 135 if server and len(server) == 2: 136 port = server[1] 137 server_host = server[0] + (":" + str(port) if port is not None and port != 80 else "") 138 full_path = scope.get("root_path", "") + scope.get("path", "") 139 url = scope.get("scheme", "http") + "://" + server_host + full_path 140 else: 141 url = None 142 143 if self.integration_config.trace_query_string: 144 query_string = scope.get("query_string") 145 if len(query_string) > 0: 146 query_string = bytes_to_str(query_string) 147 else: 148 query_string = None 149 150 trace_utils.set_http_meta( 151 span, self.integration_config, method=method, url=url, query=query_string, request_headers=headers 152 ) 153 154 tags = _extract_versions_from_scope(scope, self.integration_config) 155 span.set_tags(tags) 156 157 async def wrapped_send(message): 158 if span and message.get("type") == "http.response.start" and "status" in message: 159 status_code = message["status"] 160 else: 161 status_code = None 162 163 if "headers" in message: 164 response_headers = message["headers"] 165 else: 166 response_headers = None 167 168 trace_utils.set_http_meta( 169 span, self.integration_config, status_code=status_code, response_headers=response_headers 170 ) 171 172 return await send(message) 173 174 try: 175 return await self.app(scope, receive, wrapped_send) 176 except Exception as exc: 177 (exc_type, exc_val, exc_tb) = sys.exc_info() 178 span.set_exc_info(exc_type, exc_val, exc_tb) 179 self.handle_exception_span(exc, span) 180 reraise(exc_type, exc_val, exc_tb) 181 finally: 182 try: 183 del scope["datadog"]["request_span"] 184 except KeyError: 185 pass 186 span.finish() 187 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/asgi/middleware.py b/ddtrace/contrib/asgi/middleware.py --- a/ddtrace/contrib/asgi/middleware.py +++ b/ddtrace/contrib/asgi/middleware.py @@ -169,7 +169,13 @@ span, self.integration_config, status_code=status_code, response_headers=response_headers ) - return await send(message) + try: + return await send(message) + finally: + # Per asgi spec, "more_body" is used if there is still data to send + # Close the span if "http.response.body" has no more data left to send in the response. + if message.get("type") == "http.response.body" and not message.get("more_body", False): + span.finish() try: return await self.app(scope, receive, wrapped_send) @@ -183,4 +189,5 @@ del scope["datadog"]["request_span"] except KeyError: pass + span.finish()
{"golden_diff": "diff --git a/ddtrace/contrib/asgi/middleware.py b/ddtrace/contrib/asgi/middleware.py\n--- a/ddtrace/contrib/asgi/middleware.py\n+++ b/ddtrace/contrib/asgi/middleware.py\n@@ -169,7 +169,13 @@\n span, self.integration_config, status_code=status_code, response_headers=response_headers\n )\n \n- return await send(message)\n+ try:\n+ return await send(message)\n+ finally:\n+ # Per asgi spec, \"more_body\" is used if there is still data to send\n+ # Close the span if \"http.response.body\" has no more data left to send in the response.\n+ if message.get(\"type\") == \"http.response.body\" and not message.get(\"more_body\", False):\n+ span.finish()\n \n try:\n return await self.app(scope, receive, wrapped_send)\n@@ -183,4 +189,5 @@\n del scope[\"datadog\"][\"request_span\"]\n except KeyError:\n pass\n+\n span.finish()\n", "issue": "Starlette/Fastapi: endpoint duration includes the duration of background tasks\n### Which version of dd-trace-py are you using?\r\n\r\nddtrace==0.55.4\r\n\r\n### Which version of pip are you using?\r\n\r\n21.2.4\r\n\r\n\r\n### Which version of the libraries are you using?\r\n\r\nfastapi==0.68.2\r\nstarlette==0.14.2\r\n\r\n### How can we reproduce your problem?\r\n\r\nthis would be a minimal proof of concept `app.py`, running through `ddtrace-run uvicorn app:app`\r\n\r\n```\r\nimport asyncio\r\n\r\nfrom ddtrace import tracer\r\nfrom fastapi import FastAPI, BackgroundTasks\r\n\r\napp = FastAPI()\r\n\r\n\r\nasync def some_background_task():\r\n with tracer.start_span(\"some_background_task\", activate=True):\r\n tracer.context_provider.activate(None)\r\n await asyncio.sleep(10)\r\n\r\n\r\n@app.get(\"/\")\r\nasync def main(background_tasks: BackgroundTasks):\r\n background_tasks.add_task(some_background_task)\r\n return \"ok\"\r\n\r\n```\r\n\r\n### What is the result that you get?\r\n\r\nThe duration of `/` is reported to be 10s, while the browser immediately receives the response.\r\n`some_background_task` is also reported with a duration of 10s.\r\n\r\n### What is the result that you expected?\r\n\r\nI would expect that the reported endpoint duration matches the time it took to get the response, and that the background task is reported separately. Please don't mind that `tracer.context_provider.activate(None)` might be redundant here, adding it here to show what I have tried.\r\n\r\nFastAPI's `add_task` actually comes from starlette https://www.starlette.io/background/\r\n\r\nI can understand why the endpoint duration includes the background task, this is the definition of starlette's `Response.__call__`:\r\n\r\nhttps://github.com/encode/starlette/blob/ada99beee530e7b841ce320bc6e66f6dbd9ad781/starlette/responses.py#L159\r\n```\r\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\r\n await send(\r\n {\r\n \"type\": \"http.response.start\",\r\n \"status\": self.status_code,\r\n \"headers\": self.raw_headers,\r\n }\r\n )\r\n await send({\"type\": \"http.response.body\", \"body\": self.body})\r\n\r\n if self.background is not None:\r\n await self.background()\r\n```\r\n\r\nThe response header and body is sent, but the function itself is not finished until all background tasks have been processed.\r\n\r\nI believe that this is not what users of ddtrace would expect: the background tasks are used to return a response early without waiting for background operations to finish ; the reported endpoint duration should correspond to when the body was sent\r\n\n", "before_files": [{"content": "import sys\nfrom typing import TYPE_CHECKING\n\nimport ddtrace\nfrom ddtrace import config\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.ext import SpanTypes\nfrom ddtrace.ext import http\n\nfrom .. import trace_utils\nfrom ...internal.compat import reraise\nfrom ...internal.logger import get_logger\nfrom .utils import guarantee_single_callable\n\n\nif TYPE_CHECKING:\n from typing import Any\n from typing import Mapping\n from typing import Optional\n\n from ddtrace import Span\n\n\nlog = get_logger(__name__)\n\nconfig._add(\n \"asgi\",\n dict(service_name=config._get_service(default=\"asgi\"), request_span_name=\"asgi.request\", distributed_tracing=True),\n)\n\nASGI_VERSION = \"asgi.version\"\nASGI_SPEC_VERSION = \"asgi.spec_version\"\n\n\ndef bytes_to_str(str_or_bytes):\n return str_or_bytes.decode() if isinstance(str_or_bytes, bytes) else str_or_bytes\n\n\ndef _extract_versions_from_scope(scope, integration_config):\n tags = {}\n\n http_version = scope.get(\"http_version\")\n if http_version:\n tags[http.VERSION] = http_version\n\n scope_asgi = scope.get(\"asgi\")\n\n if scope_asgi and \"version\" in scope_asgi:\n tags[ASGI_VERSION] = scope_asgi[\"version\"]\n\n if scope_asgi and \"spec_version\" in scope_asgi:\n tags[ASGI_SPEC_VERSION] = scope_asgi[\"spec_version\"]\n\n return tags\n\n\ndef _extract_headers(scope):\n headers = scope.get(\"headers\")\n if headers:\n # headers: (Iterable[[byte string, byte string]])\n return dict((bytes_to_str(k), bytes_to_str(v)) for (k, v) in headers)\n return {}\n\n\ndef _default_handle_exception_span(exc, span):\n \"\"\"Default handler for exception for span\"\"\"\n span.set_tag(http.STATUS_CODE, 500)\n\n\ndef span_from_scope(scope):\n # type: (Mapping[str, Any]) -> Optional[Span]\n \"\"\"Retrieve the top-level ASGI span from the scope.\"\"\"\n return scope.get(\"datadog\", {}).get(\"request_spans\", [None])[0]\n\n\nclass TraceMiddleware:\n \"\"\"\n ASGI application middleware that traces the requests.\n Args:\n app: The ASGI application.\n tracer: Custom tracer. Defaults to the global tracer.\n \"\"\"\n\n def __init__(\n self,\n app,\n tracer=None,\n integration_config=config.asgi,\n handle_exception_span=_default_handle_exception_span,\n span_modifier=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = tracer or ddtrace.tracer\n self.integration_config = integration_config\n self.handle_exception_span = handle_exception_span\n self.span_modifier = span_modifier\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] != \"http\":\n return await self.app(scope, receive, send)\n\n try:\n headers = _extract_headers(scope)\n except Exception:\n log.warning(\"failed to decode headers for distributed tracing\", exc_info=True)\n headers = {}\n else:\n trace_utils.activate_distributed_headers(\n self.tracer, int_config=self.integration_config, request_headers=headers\n )\n\n resource = \"{} {}\".format(scope[\"method\"], scope[\"path\"])\n\n span = self.tracer.trace(\n name=self.integration_config.get(\"request_span_name\", \"asgi.request\"),\n service=trace_utils.int_service(None, self.integration_config),\n resource=resource,\n span_type=SpanTypes.WEB,\n )\n\n if \"datadog\" not in scope:\n scope[\"datadog\"] = {\"request_spans\": [span]}\n else:\n scope[\"datadog\"][\"request_spans\"].append(span)\n\n if self.span_modifier:\n self.span_modifier(span, scope)\n\n sample_rate = self.integration_config.get_analytics_sample_rate(use_global_config=True)\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n method = scope.get(\"method\")\n server = scope.get(\"server\")\n if server and len(server) == 2:\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port is not None and port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n else:\n url = None\n\n if self.integration_config.trace_query_string:\n query_string = scope.get(\"query_string\")\n if len(query_string) > 0:\n query_string = bytes_to_str(query_string)\n else:\n query_string = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, method=method, url=url, query=query_string, request_headers=headers\n )\n\n tags = _extract_versions_from_scope(scope, self.integration_config)\n span.set_tags(tags)\n\n async def wrapped_send(message):\n if span and message.get(\"type\") == \"http.response.start\" and \"status\" in message:\n status_code = message[\"status\"]\n else:\n status_code = None\n\n if \"headers\" in message:\n response_headers = message[\"headers\"]\n else:\n response_headers = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, status_code=status_code, response_headers=response_headers\n )\n\n return await send(message)\n\n try:\n return await self.app(scope, receive, wrapped_send)\n except Exception as exc:\n (exc_type, exc_val, exc_tb) = sys.exc_info()\n span.set_exc_info(exc_type, exc_val, exc_tb)\n self.handle_exception_span(exc, span)\n reraise(exc_type, exc_val, exc_tb)\n finally:\n try:\n del scope[\"datadog\"][\"request_span\"]\n except KeyError:\n pass\n span.finish()\n", "path": "ddtrace/contrib/asgi/middleware.py"}], "after_files": [{"content": "import sys\nfrom typing import TYPE_CHECKING\n\nimport ddtrace\nfrom ddtrace import config\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.ext import SpanTypes\nfrom ddtrace.ext import http\n\nfrom .. import trace_utils\nfrom ...internal.compat import reraise\nfrom ...internal.logger import get_logger\nfrom .utils import guarantee_single_callable\n\n\nif TYPE_CHECKING:\n from typing import Any\n from typing import Mapping\n from typing import Optional\n\n from ddtrace import Span\n\n\nlog = get_logger(__name__)\n\nconfig._add(\n \"asgi\",\n dict(service_name=config._get_service(default=\"asgi\"), request_span_name=\"asgi.request\", distributed_tracing=True),\n)\n\nASGI_VERSION = \"asgi.version\"\nASGI_SPEC_VERSION = \"asgi.spec_version\"\n\n\ndef bytes_to_str(str_or_bytes):\n return str_or_bytes.decode() if isinstance(str_or_bytes, bytes) else str_or_bytes\n\n\ndef _extract_versions_from_scope(scope, integration_config):\n tags = {}\n\n http_version = scope.get(\"http_version\")\n if http_version:\n tags[http.VERSION] = http_version\n\n scope_asgi = scope.get(\"asgi\")\n\n if scope_asgi and \"version\" in scope_asgi:\n tags[ASGI_VERSION] = scope_asgi[\"version\"]\n\n if scope_asgi and \"spec_version\" in scope_asgi:\n tags[ASGI_SPEC_VERSION] = scope_asgi[\"spec_version\"]\n\n return tags\n\n\ndef _extract_headers(scope):\n headers = scope.get(\"headers\")\n if headers:\n # headers: (Iterable[[byte string, byte string]])\n return dict((bytes_to_str(k), bytes_to_str(v)) for (k, v) in headers)\n return {}\n\n\ndef _default_handle_exception_span(exc, span):\n \"\"\"Default handler for exception for span\"\"\"\n span.set_tag(http.STATUS_CODE, 500)\n\n\ndef span_from_scope(scope):\n # type: (Mapping[str, Any]) -> Optional[Span]\n \"\"\"Retrieve the top-level ASGI span from the scope.\"\"\"\n return scope.get(\"datadog\", {}).get(\"request_spans\", [None])[0]\n\n\nclass TraceMiddleware:\n \"\"\"\n ASGI application middleware that traces the requests.\n Args:\n app: The ASGI application.\n tracer: Custom tracer. Defaults to the global tracer.\n \"\"\"\n\n def __init__(\n self,\n app,\n tracer=None,\n integration_config=config.asgi,\n handle_exception_span=_default_handle_exception_span,\n span_modifier=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = tracer or ddtrace.tracer\n self.integration_config = integration_config\n self.handle_exception_span = handle_exception_span\n self.span_modifier = span_modifier\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] != \"http\":\n return await self.app(scope, receive, send)\n\n try:\n headers = _extract_headers(scope)\n except Exception:\n log.warning(\"failed to decode headers for distributed tracing\", exc_info=True)\n headers = {}\n else:\n trace_utils.activate_distributed_headers(\n self.tracer, int_config=self.integration_config, request_headers=headers\n )\n\n resource = \"{} {}\".format(scope[\"method\"], scope[\"path\"])\n\n span = self.tracer.trace(\n name=self.integration_config.get(\"request_span_name\", \"asgi.request\"),\n service=trace_utils.int_service(None, self.integration_config),\n resource=resource,\n span_type=SpanTypes.WEB,\n )\n\n if \"datadog\" not in scope:\n scope[\"datadog\"] = {\"request_spans\": [span]}\n else:\n scope[\"datadog\"][\"request_spans\"].append(span)\n\n if self.span_modifier:\n self.span_modifier(span, scope)\n\n sample_rate = self.integration_config.get_analytics_sample_rate(use_global_config=True)\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n method = scope.get(\"method\")\n server = scope.get(\"server\")\n if server and len(server) == 2:\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port is not None and port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n else:\n url = None\n\n if self.integration_config.trace_query_string:\n query_string = scope.get(\"query_string\")\n if len(query_string) > 0:\n query_string = bytes_to_str(query_string)\n else:\n query_string = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, method=method, url=url, query=query_string, request_headers=headers\n )\n\n tags = _extract_versions_from_scope(scope, self.integration_config)\n span.set_tags(tags)\n\n async def wrapped_send(message):\n if span and message.get(\"type\") == \"http.response.start\" and \"status\" in message:\n status_code = message[\"status\"]\n else:\n status_code = None\n\n if \"headers\" in message:\n response_headers = message[\"headers\"]\n else:\n response_headers = None\n\n trace_utils.set_http_meta(\n span, self.integration_config, status_code=status_code, response_headers=response_headers\n )\n\n try:\n return await send(message)\n finally:\n # Per asgi spec, \"more_body\" is used if there is still data to send\n # Close the span if \"http.response.body\" has no more data left to send in the response.\n if message.get(\"type\") == \"http.response.body\" and not message.get(\"more_body\", False):\n span.finish()\n\n try:\n return await self.app(scope, receive, wrapped_send)\n except Exception as exc:\n (exc_type, exc_val, exc_tb) = sys.exc_info()\n span.set_exc_info(exc_type, exc_val, exc_tb)\n self.handle_exception_span(exc, span)\n reraise(exc_type, exc_val, exc_tb)\n finally:\n try:\n del scope[\"datadog\"][\"request_span\"]\n except KeyError:\n pass\n\n span.finish()\n", "path": "ddtrace/contrib/asgi/middleware.py"}]}
2,604
232
gh_patches_debug_17311
rasdani/github-patches
git_diff
vega__altair-2900
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ga: `DeprecationWarning`: SelectableGroups dict interface is deprecated. Use select. I see the following deprecation warning in our tests: ```cmd tests/vegalite/v5/tests/test_display.py::test_check_renderer_options [51](https://github.com/altair-viz/altair/actions/runs/4210647283/jobs/7308470442#step:5:52) /home/runner/work/altair/altair/altair/utils/plugin_registry.py:124: DeprecationWarning: SelectableGroups dict interface is deprecated. Use select. [52](https://github.com/altair-viz/altair/actions/runs/4210647283/jobs/7308470442#step:5:53) more_exts = [ep.name for ep in entry_points().get(self.entry_point_group, [])] ``` It is around here in the code: https://github.com/altair-viz/altair/blob/master/altair/utils/plugin_registry.py#L124 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `altair/utils/plugin_registry.py` Content: ``` 1 from typing import Any, Dict, List, Optional, Generic, TypeVar, cast 2 from types import TracebackType 3 4 try: 5 from importlib.metadata import entry_points 6 except ImportError: 7 from importlib_metadata import entry_points 8 9 from toolz import curry 10 11 12 PluginType = TypeVar("PluginType") 13 14 15 class NoSuchEntryPoint(Exception): 16 def __init__(self, group, name): 17 self.group = group 18 self.name = name 19 20 def __str__(self): 21 return f"No {self.name!r} entry point found in group {self.group!r}" 22 23 24 class PluginEnabler(object): 25 """Context manager for enabling plugins 26 27 This object lets you use enable() as a context manager to 28 temporarily enable a given plugin:: 29 30 with plugins.enable('name'): 31 do_something() # 'name' plugin temporarily enabled 32 # plugins back to original state 33 """ 34 35 def __init__(self, registry: "PluginRegistry", name: str, **options): 36 self.registry = registry # type: PluginRegistry 37 self.name = name # type: str 38 self.options = options # type: Dict[str, Any] 39 self.original_state = registry._get_state() # type: Dict[str, Any] 40 self.registry._enable(name, **options) 41 42 def __enter__(self) -> "PluginEnabler": 43 return self 44 45 def __exit__(self, typ: type, value: Exception, traceback: TracebackType) -> None: 46 self.registry._set_state(self.original_state) 47 48 def __repr__(self) -> str: 49 return "{}.enable({!r})".format(self.registry.__class__.__name__, self.name) 50 51 52 class PluginRegistry(Generic[PluginType]): 53 """A registry for plugins. 54 55 This is a plugin registry that allows plugins to be loaded/registered 56 in two ways: 57 58 1. Through an explicit call to ``.register(name, value)``. 59 2. By looking for other Python packages that are installed and provide 60 a setuptools entry point group. 61 62 When you create an instance of this class, provide the name of the 63 entry point group to use:: 64 65 reg = PluginRegister('my_entrypoint_group') 66 67 """ 68 69 # this is a mapping of name to error message to allow custom error messages 70 # in case an entrypoint is not found 71 entrypoint_err_messages = {} # type: Dict[str, str] 72 73 # global settings is a key-value mapping of settings that are stored globally 74 # in the registry rather than passed to the plugins 75 _global_settings = {} # type: Dict[str, Any] 76 77 def __init__(self, entry_point_group: str = "", plugin_type: type = object): 78 """Create a PluginRegistry for a named entry point group. 79 80 Parameters 81 ========== 82 entry_point_group: str 83 The name of the entry point group. 84 plugin_type: object 85 A type that will optionally be used for runtime type checking of 86 loaded plugins using isinstance. 87 """ 88 self.entry_point_group = entry_point_group # type: str 89 self.plugin_type = plugin_type # type: Optional[type] 90 self._active = None # type: Optional[PluginType] 91 self._active_name = "" # type: str 92 self._plugins = {} # type: Dict[str, PluginType] 93 self._options = {} # type: Dict[str, Any] 94 self._global_settings = self.__class__._global_settings.copy() # type: dict 95 96 def register(self, name: str, value: Optional[PluginType]) -> Optional[PluginType]: 97 """Register a plugin by name and value. 98 99 This method is used for explicit registration of a plugin and shouldn't be 100 used to manage entry point managed plugins, which are auto-loaded. 101 102 Parameters 103 ========== 104 name: str 105 The name of the plugin. 106 value: PluginType or None 107 The actual plugin object to register or None to unregister that plugin. 108 109 Returns 110 ======= 111 plugin: PluginType or None 112 The plugin that was registered or unregistered. 113 """ 114 if value is None: 115 return self._plugins.pop(name, None) 116 else: 117 assert isinstance(value, self.plugin_type) 118 self._plugins[name] = value 119 return value 120 121 def names(self) -> List[str]: 122 """List the names of the registered and entry points plugins.""" 123 exts = list(self._plugins.keys()) 124 more_exts = [ep.name for ep in entry_points().get(self.entry_point_group, [])] 125 exts.extend(more_exts) 126 return sorted(set(exts)) 127 128 def _get_state(self) -> Dict[str, Any]: 129 """Return a dictionary representing the current state of the registry""" 130 return { 131 "_active": self._active, 132 "_active_name": self._active_name, 133 "_plugins": self._plugins.copy(), 134 "_options": self._options.copy(), 135 "_global_settings": self._global_settings.copy(), 136 } 137 138 def _set_state(self, state: Dict[str, Any]) -> None: 139 """Reset the state of the registry""" 140 assert set(state.keys()) == { 141 "_active", 142 "_active_name", 143 "_plugins", 144 "_options", 145 "_global_settings", 146 } 147 for key, val in state.items(): 148 setattr(self, key, val) 149 150 def _enable(self, name: str, **options) -> None: 151 if name not in self._plugins: 152 try: 153 (ep,) = [ 154 ep 155 for ep in entry_points().get(self.entry_point_group, []) 156 if ep.name == name 157 ] 158 except ValueError: 159 if name in self.entrypoint_err_messages: 160 raise ValueError(self.entrypoint_err_messages[name]) 161 else: 162 raise NoSuchEntryPoint(self.entry_point_group, name) 163 value = cast(PluginType, ep.load()) 164 self.register(name, value) 165 self._active_name = name 166 self._active = self._plugins[name] 167 for key in set(options.keys()) & set(self._global_settings.keys()): 168 self._global_settings[key] = options.pop(key) 169 self._options = options 170 171 def enable(self, name: Optional[str] = None, **options) -> PluginEnabler: 172 """Enable a plugin by name. 173 174 This can be either called directly, or used as a context manager. 175 176 Parameters 177 ---------- 178 name : string (optional) 179 The name of the plugin to enable. If not specified, then use the 180 current active name. 181 **options : 182 Any additional parameters will be passed to the plugin as keyword 183 arguments 184 185 Returns 186 ------- 187 PluginEnabler: 188 An object that allows enable() to be used as a context manager 189 """ 190 if name is None: 191 name = self.active 192 return PluginEnabler(self, name, **options) 193 194 @property 195 def active(self) -> str: 196 """Return the name of the currently active plugin""" 197 return self._active_name 198 199 @property 200 def options(self) -> Dict[str, Any]: 201 """Return the current options dictionary""" 202 return self._options 203 204 def get(self) -> Optional[PluginType]: 205 """Return the currently active plugin.""" 206 if self._options: 207 return curry(self._active, **self._options) 208 else: 209 return self._active 210 211 def __repr__(self) -> str: 212 return "{}(active={!r}, registered={!r})" "".format( 213 self.__class__.__name__, self._active_name, list(self.names()) 214 ) 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/altair/utils/plugin_registry.py b/altair/utils/plugin_registry.py --- a/altair/utils/plugin_registry.py +++ b/altair/utils/plugin_registry.py @@ -1,3 +1,4 @@ +import sys from typing import Any, Dict, List, Optional, Generic, TypeVar, cast from types import TracebackType @@ -121,7 +122,11 @@ def names(self) -> List[str]: """List the names of the registered and entry points plugins.""" exts = list(self._plugins.keys()) - more_exts = [ep.name for ep in entry_points().get(self.entry_point_group, [])] + if sys.version_info.major == 3 and sys.version_info.minor < 10: + e_points = entry_points().get(self.entry_point_group, []) + else: + e_points = entry_points(group=self.entry_point_group) + more_exts = [ep.name for ep in e_points] exts.extend(more_exts) return sorted(set(exts))
{"golden_diff": "diff --git a/altair/utils/plugin_registry.py b/altair/utils/plugin_registry.py\n--- a/altair/utils/plugin_registry.py\n+++ b/altair/utils/plugin_registry.py\n@@ -1,3 +1,4 @@\n+import sys\n from typing import Any, Dict, List, Optional, Generic, TypeVar, cast\n from types import TracebackType\n \n@@ -121,7 +122,11 @@\n def names(self) -> List[str]:\n \"\"\"List the names of the registered and entry points plugins.\"\"\"\n exts = list(self._plugins.keys())\n- more_exts = [ep.name for ep in entry_points().get(self.entry_point_group, [])]\n+ if sys.version_info.major == 3 and sys.version_info.minor < 10:\n+ e_points = entry_points().get(self.entry_point_group, [])\n+ else:\n+ e_points = entry_points(group=self.entry_point_group)\n+ more_exts = [ep.name for ep in e_points]\n exts.extend(more_exts)\n return sorted(set(exts))\n", "issue": "ga: `DeprecationWarning`: SelectableGroups dict interface is deprecated. Use select.\nI see the following deprecation warning in our tests:\r\n```cmd\r\ntests/vegalite/v5/tests/test_display.py::test_check_renderer_options\r\n[51](https://github.com/altair-viz/altair/actions/runs/4210647283/jobs/7308470442#step:5:52)\r\n /home/runner/work/altair/altair/altair/utils/plugin_registry.py:124: \r\nDeprecationWarning: SelectableGroups dict interface is deprecated. Use select.\r\n\r\n[52](https://github.com/altair-viz/altair/actions/runs/4210647283/jobs/7308470442#step:5:53)\r\n\r\n more_exts = [ep.name for ep in entry_points().get(self.entry_point_group, [])]\r\n```\r\nIt is around here in the code: https://github.com/altair-viz/altair/blob/master/altair/utils/plugin_registry.py#L124\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Generic, TypeVar, cast\nfrom types import TracebackType\n\ntry:\n from importlib.metadata import entry_points\nexcept ImportError:\n from importlib_metadata import entry_points\n\nfrom toolz import curry\n\n\nPluginType = TypeVar(\"PluginType\")\n\n\nclass NoSuchEntryPoint(Exception):\n def __init__(self, group, name):\n self.group = group\n self.name = name\n\n def __str__(self):\n return f\"No {self.name!r} entry point found in group {self.group!r}\"\n\n\nclass PluginEnabler(object):\n \"\"\"Context manager for enabling plugins\n\n This object lets you use enable() as a context manager to\n temporarily enable a given plugin::\n\n with plugins.enable('name'):\n do_something() # 'name' plugin temporarily enabled\n # plugins back to original state\n \"\"\"\n\n def __init__(self, registry: \"PluginRegistry\", name: str, **options):\n self.registry = registry # type: PluginRegistry\n self.name = name # type: str\n self.options = options # type: Dict[str, Any]\n self.original_state = registry._get_state() # type: Dict[str, Any]\n self.registry._enable(name, **options)\n\n def __enter__(self) -> \"PluginEnabler\":\n return self\n\n def __exit__(self, typ: type, value: Exception, traceback: TracebackType) -> None:\n self.registry._set_state(self.original_state)\n\n def __repr__(self) -> str:\n return \"{}.enable({!r})\".format(self.registry.__class__.__name__, self.name)\n\n\nclass PluginRegistry(Generic[PluginType]):\n \"\"\"A registry for plugins.\n\n This is a plugin registry that allows plugins to be loaded/registered\n in two ways:\n\n 1. Through an explicit call to ``.register(name, value)``.\n 2. By looking for other Python packages that are installed and provide\n a setuptools entry point group.\n\n When you create an instance of this class, provide the name of the\n entry point group to use::\n\n reg = PluginRegister('my_entrypoint_group')\n\n \"\"\"\n\n # this is a mapping of name to error message to allow custom error messages\n # in case an entrypoint is not found\n entrypoint_err_messages = {} # type: Dict[str, str]\n\n # global settings is a key-value mapping of settings that are stored globally\n # in the registry rather than passed to the plugins\n _global_settings = {} # type: Dict[str, Any]\n\n def __init__(self, entry_point_group: str = \"\", plugin_type: type = object):\n \"\"\"Create a PluginRegistry for a named entry point group.\n\n Parameters\n ==========\n entry_point_group: str\n The name of the entry point group.\n plugin_type: object\n A type that will optionally be used for runtime type checking of\n loaded plugins using isinstance.\n \"\"\"\n self.entry_point_group = entry_point_group # type: str\n self.plugin_type = plugin_type # type: Optional[type]\n self._active = None # type: Optional[PluginType]\n self._active_name = \"\" # type: str\n self._plugins = {} # type: Dict[str, PluginType]\n self._options = {} # type: Dict[str, Any]\n self._global_settings = self.__class__._global_settings.copy() # type: dict\n\n def register(self, name: str, value: Optional[PluginType]) -> Optional[PluginType]:\n \"\"\"Register a plugin by name and value.\n\n This method is used for explicit registration of a plugin and shouldn't be\n used to manage entry point managed plugins, which are auto-loaded.\n\n Parameters\n ==========\n name: str\n The name of the plugin.\n value: PluginType or None\n The actual plugin object to register or None to unregister that plugin.\n\n Returns\n =======\n plugin: PluginType or None\n The plugin that was registered or unregistered.\n \"\"\"\n if value is None:\n return self._plugins.pop(name, None)\n else:\n assert isinstance(value, self.plugin_type)\n self._plugins[name] = value\n return value\n\n def names(self) -> List[str]:\n \"\"\"List the names of the registered and entry points plugins.\"\"\"\n exts = list(self._plugins.keys())\n more_exts = [ep.name for ep in entry_points().get(self.entry_point_group, [])]\n exts.extend(more_exts)\n return sorted(set(exts))\n\n def _get_state(self) -> Dict[str, Any]:\n \"\"\"Return a dictionary representing the current state of the registry\"\"\"\n return {\n \"_active\": self._active,\n \"_active_name\": self._active_name,\n \"_plugins\": self._plugins.copy(),\n \"_options\": self._options.copy(),\n \"_global_settings\": self._global_settings.copy(),\n }\n\n def _set_state(self, state: Dict[str, Any]) -> None:\n \"\"\"Reset the state of the registry\"\"\"\n assert set(state.keys()) == {\n \"_active\",\n \"_active_name\",\n \"_plugins\",\n \"_options\",\n \"_global_settings\",\n }\n for key, val in state.items():\n setattr(self, key, val)\n\n def _enable(self, name: str, **options) -> None:\n if name not in self._plugins:\n try:\n (ep,) = [\n ep\n for ep in entry_points().get(self.entry_point_group, [])\n if ep.name == name\n ]\n except ValueError:\n if name in self.entrypoint_err_messages:\n raise ValueError(self.entrypoint_err_messages[name])\n else:\n raise NoSuchEntryPoint(self.entry_point_group, name)\n value = cast(PluginType, ep.load())\n self.register(name, value)\n self._active_name = name\n self._active = self._plugins[name]\n for key in set(options.keys()) & set(self._global_settings.keys()):\n self._global_settings[key] = options.pop(key)\n self._options = options\n\n def enable(self, name: Optional[str] = None, **options) -> PluginEnabler:\n \"\"\"Enable a plugin by name.\n\n This can be either called directly, or used as a context manager.\n\n Parameters\n ----------\n name : string (optional)\n The name of the plugin to enable. If not specified, then use the\n current active name.\n **options :\n Any additional parameters will be passed to the plugin as keyword\n arguments\n\n Returns\n -------\n PluginEnabler:\n An object that allows enable() to be used as a context manager\n \"\"\"\n if name is None:\n name = self.active\n return PluginEnabler(self, name, **options)\n\n @property\n def active(self) -> str:\n \"\"\"Return the name of the currently active plugin\"\"\"\n return self._active_name\n\n @property\n def options(self) -> Dict[str, Any]:\n \"\"\"Return the current options dictionary\"\"\"\n return self._options\n\n def get(self) -> Optional[PluginType]:\n \"\"\"Return the currently active plugin.\"\"\"\n if self._options:\n return curry(self._active, **self._options)\n else:\n return self._active\n\n def __repr__(self) -> str:\n return \"{}(active={!r}, registered={!r})\" \"\".format(\n self.__class__.__name__, self._active_name, list(self.names())\n )\n", "path": "altair/utils/plugin_registry.py"}], "after_files": [{"content": "import sys\nfrom typing import Any, Dict, List, Optional, Generic, TypeVar, cast\nfrom types import TracebackType\n\ntry:\n from importlib.metadata import entry_points\nexcept ImportError:\n from importlib_metadata import entry_points\n\nfrom toolz import curry\n\n\nPluginType = TypeVar(\"PluginType\")\n\n\nclass NoSuchEntryPoint(Exception):\n def __init__(self, group, name):\n self.group = group\n self.name = name\n\n def __str__(self):\n return f\"No {self.name!r} entry point found in group {self.group!r}\"\n\n\nclass PluginEnabler(object):\n \"\"\"Context manager for enabling plugins\n\n This object lets you use enable() as a context manager to\n temporarily enable a given plugin::\n\n with plugins.enable('name'):\n do_something() # 'name' plugin temporarily enabled\n # plugins back to original state\n \"\"\"\n\n def __init__(self, registry: \"PluginRegistry\", name: str, **options):\n self.registry = registry # type: PluginRegistry\n self.name = name # type: str\n self.options = options # type: Dict[str, Any]\n self.original_state = registry._get_state() # type: Dict[str, Any]\n self.registry._enable(name, **options)\n\n def __enter__(self) -> \"PluginEnabler\":\n return self\n\n def __exit__(self, typ: type, value: Exception, traceback: TracebackType) -> None:\n self.registry._set_state(self.original_state)\n\n def __repr__(self) -> str:\n return \"{}.enable({!r})\".format(self.registry.__class__.__name__, self.name)\n\n\nclass PluginRegistry(Generic[PluginType]):\n \"\"\"A registry for plugins.\n\n This is a plugin registry that allows plugins to be loaded/registered\n in two ways:\n\n 1. Through an explicit call to ``.register(name, value)``.\n 2. By looking for other Python packages that are installed and provide\n a setuptools entry point group.\n\n When you create an instance of this class, provide the name of the\n entry point group to use::\n\n reg = PluginRegister('my_entrypoint_group')\n\n \"\"\"\n\n # this is a mapping of name to error message to allow custom error messages\n # in case an entrypoint is not found\n entrypoint_err_messages = {} # type: Dict[str, str]\n\n # global settings is a key-value mapping of settings that are stored globally\n # in the registry rather than passed to the plugins\n _global_settings = {} # type: Dict[str, Any]\n\n def __init__(self, entry_point_group: str = \"\", plugin_type: type = object):\n \"\"\"Create a PluginRegistry for a named entry point group.\n\n Parameters\n ==========\n entry_point_group: str\n The name of the entry point group.\n plugin_type: object\n A type that will optionally be used for runtime type checking of\n loaded plugins using isinstance.\n \"\"\"\n self.entry_point_group = entry_point_group # type: str\n self.plugin_type = plugin_type # type: Optional[type]\n self._active = None # type: Optional[PluginType]\n self._active_name = \"\" # type: str\n self._plugins = {} # type: Dict[str, PluginType]\n self._options = {} # type: Dict[str, Any]\n self._global_settings = self.__class__._global_settings.copy() # type: dict\n\n def register(self, name: str, value: Optional[PluginType]) -> Optional[PluginType]:\n \"\"\"Register a plugin by name and value.\n\n This method is used for explicit registration of a plugin and shouldn't be\n used to manage entry point managed plugins, which are auto-loaded.\n\n Parameters\n ==========\n name: str\n The name of the plugin.\n value: PluginType or None\n The actual plugin object to register or None to unregister that plugin.\n\n Returns\n =======\n plugin: PluginType or None\n The plugin that was registered or unregistered.\n \"\"\"\n if value is None:\n return self._plugins.pop(name, None)\n else:\n assert isinstance(value, self.plugin_type)\n self._plugins[name] = value\n return value\n\n def names(self) -> List[str]:\n \"\"\"List the names of the registered and entry points plugins.\"\"\"\n exts = list(self._plugins.keys())\n if sys.version_info.major == 3 and sys.version_info.minor < 10:\n e_points = entry_points().get(self.entry_point_group, [])\n else:\n e_points = entry_points(group=self.entry_point_group)\n more_exts = [ep.name for ep in e_points]\n exts.extend(more_exts)\n return sorted(set(exts))\n\n def _get_state(self) -> Dict[str, Any]:\n \"\"\"Return a dictionary representing the current state of the registry\"\"\"\n return {\n \"_active\": self._active,\n \"_active_name\": self._active_name,\n \"_plugins\": self._plugins.copy(),\n \"_options\": self._options.copy(),\n \"_global_settings\": self._global_settings.copy(),\n }\n\n def _set_state(self, state: Dict[str, Any]) -> None:\n \"\"\"Reset the state of the registry\"\"\"\n assert set(state.keys()) == {\n \"_active\",\n \"_active_name\",\n \"_plugins\",\n \"_options\",\n \"_global_settings\",\n }\n for key, val in state.items():\n setattr(self, key, val)\n\n def _enable(self, name: str, **options) -> None:\n if name not in self._plugins:\n try:\n (ep,) = [\n ep\n for ep in entry_points().get(self.entry_point_group, [])\n if ep.name == name\n ]\n except ValueError:\n if name in self.entrypoint_err_messages:\n raise ValueError(self.entrypoint_err_messages[name])\n else:\n raise NoSuchEntryPoint(self.entry_point_group, name)\n value = cast(PluginType, ep.load())\n self.register(name, value)\n self._active_name = name\n self._active = self._plugins[name]\n for key in set(options.keys()) & set(self._global_settings.keys()):\n self._global_settings[key] = options.pop(key)\n self._options = options\n\n def enable(self, name: Optional[str] = None, **options) -> PluginEnabler:\n \"\"\"Enable a plugin by name.\n\n This can be either called directly, or used as a context manager.\n\n Parameters\n ----------\n name : string (optional)\n The name of the plugin to enable. If not specified, then use the\n current active name.\n **options :\n Any additional parameters will be passed to the plugin as keyword\n arguments\n\n Returns\n -------\n PluginEnabler:\n An object that allows enable() to be used as a context manager\n \"\"\"\n if name is None:\n name = self.active\n return PluginEnabler(self, name, **options)\n\n @property\n def active(self) -> str:\n \"\"\"Return the name of the currently active plugin\"\"\"\n return self._active_name\n\n @property\n def options(self) -> Dict[str, Any]:\n \"\"\"Return the current options dictionary\"\"\"\n return self._options\n\n def get(self) -> Optional[PluginType]:\n \"\"\"Return the currently active plugin.\"\"\"\n if self._options:\n return curry(self._active, **self._options)\n else:\n return self._active\n\n def __repr__(self) -> str:\n return \"{}(active={!r}, registered={!r})\" \"\".format(\n self.__class__.__name__, self._active_name, list(self.names())\n )\n", "path": "altair/utils/plugin_registry.py"}]}
2,696
233
gh_patches_debug_39017
rasdani/github-patches
git_diff
pypa__pip-2464
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Interrupting a pip download with CTRL-C does not unhide the cursor Version: pep 6.0.8 Pressing CTRL-C while pip is downloading a package (and displaying its progress bar) shows an "Operation cancelled by user" message and drops the user back to the shell prompt, but does not unhide the cursor that was hidden while displaying the progress bar. Glancing at the code, it looks like the `progress` library already offers a `SigIntMixin` helper for handling this, and that pip's progress bar isn't using it. Maybe including this mixin in the appropriate place(s) is all that's needed? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/utils/ui.py` Content: ``` 1 from __future__ import absolute_import 2 from __future__ import division 3 4 import itertools 5 import sys 6 7 from pip.compat import WINDOWS 8 from pip.utils import format_size 9 from pip.utils.logging import get_indentation 10 from pip._vendor import six 11 from pip._vendor.progress.bar import Bar, IncrementalBar 12 from pip._vendor.progress.helpers import WritelnMixin 13 from pip._vendor.progress.spinner import Spinner 14 15 try: 16 from pip._vendor import colorama 17 # Lots of different errors can come from this, including SystemError and 18 # ImportError. 19 except Exception: 20 colorama = None 21 22 23 def _select_progress_class(preferred, fallback): 24 encoding = getattr(preferred.file, "encoding", None) 25 26 # If we don't know what encoding this file is in, then we'll just assume 27 # that it doesn't support unicode and use the ASCII bar. 28 if not encoding: 29 return fallback 30 31 # Collect all of the possible characters we want to use with the preferred 32 # bar. 33 characters = [ 34 getattr(preferred, "empty_fill", six.text_type()), 35 getattr(preferred, "fill", six.text_type()), 36 ] 37 characters += list(getattr(preferred, "phases", [])) 38 39 # Try to decode the characters we're using for the bar using the encoding 40 # of the given file, if this works then we'll assume that we can use the 41 # fancier bar and if not we'll fall back to the plaintext bar. 42 try: 43 six.text_type().join(characters).encode(encoding) 44 except UnicodeEncodeError: 45 return fallback 46 else: 47 return preferred 48 49 50 _BaseBar = _select_progress_class(IncrementalBar, Bar) 51 52 53 class DownloadProgressMixin(object): 54 55 def __init__(self, *args, **kwargs): 56 super(DownloadProgressMixin, self).__init__(*args, **kwargs) 57 self.message = (" " * (get_indentation() + 2)) + self.message 58 59 @property 60 def downloaded(self): 61 return format_size(self.index) 62 63 @property 64 def download_speed(self): 65 # Avoid zero division errors... 66 if self.avg == 0.0: 67 return "..." 68 return format_size(1 / self.avg) + "/s" 69 70 @property 71 def pretty_eta(self): 72 if self.eta: 73 return "eta %s" % self.eta_td 74 return "" 75 76 def iter(self, it, n=1): 77 for x in it: 78 yield x 79 self.next(n) 80 self.finish() 81 82 83 class WindowsMixin(object): 84 85 def __init__(self, *args, **kwargs): 86 # The Windows terminal does not support the hide/show cursor ANSI codes 87 # even with colorama. So we'll ensure that hide_cursor is False on 88 # Windows. 89 # This call neds to go before the super() call, so that hide_cursor 90 # is set in time. The base progress bar class writes the "hide cursor" 91 # code to the terminal in its init, so if we don't set this soon 92 # enough, we get a "hide" with no corresponding "show"... 93 if WINDOWS and self.hide_cursor: 94 self.hide_cursor = False 95 96 super(WindowsMixin, self).__init__(*args, **kwargs) 97 98 # Check if we are running on Windows and we have the colorama module, 99 # if we do then wrap our file with it. 100 if WINDOWS and colorama: 101 self.file = colorama.AnsiToWin32(self.file) 102 # The progress code expects to be able to call self.file.isatty() 103 # but the colorama.AnsiToWin32() object doesn't have that, so we'll 104 # add it. 105 self.file.isatty = lambda: self.file.wrapped.isatty() 106 # The progress code expects to be able to call self.file.flush() 107 # but the colorama.AnsiToWin32() object doesn't have that, so we'll 108 # add it. 109 self.file.flush = lambda: self.file.wrapped.flush() 110 111 112 class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, _BaseBar): 113 114 file = sys.stdout 115 message = "%(percent)d%%" 116 suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" 117 118 119 class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin, 120 WritelnMixin, Spinner): 121 122 file = sys.stdout 123 suffix = "%(downloaded)s %(download_speed)s" 124 125 def next_phase(self): 126 if not hasattr(self, "_phaser"): 127 self._phaser = itertools.cycle(self.phases) 128 return next(self._phaser) 129 130 def update(self): 131 message = self.message % self 132 phase = self.next_phase() 133 suffix = self.suffix % self 134 line = ''.join([ 135 message, 136 " " if message else "", 137 phase, 138 " " if suffix else "", 139 suffix, 140 ]) 141 142 self.writeln(line) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/utils/ui.py b/pip/utils/ui.py --- a/pip/utils/ui.py +++ b/pip/utils/ui.py @@ -3,6 +3,7 @@ import itertools import sys +from signal import signal, SIGINT, default_int_handler from pip.compat import WINDOWS from pip.utils import format_size @@ -50,6 +51,61 @@ _BaseBar = _select_progress_class(IncrementalBar, Bar) +class InterruptibleMixin(object): + """ + Helper to ensure that self.finish() gets called on keyboard interrupt. + + This allows downloads to be interrupted without leaving temporary state + (like hidden cursors) behind. + + This class is similar to the progress library's existing SigIntMixin + helper, but as of version 1.2, that helper has the following problems: + + 1. It calls sys.exit(). + 2. It discards the existing SIGINT handler completely. + 3. It leaves its own handler in place even after an uninterrupted finish, + which will have unexpected delayed effects if the user triggers an + unrelated keyboard interrupt some time after a progress-displaying + download has already completed, for example. + """ + + def __init__(self, *args, **kwargs): + """ + Save the original SIGINT handler for later. + """ + super(InterruptibleMixin, self).__init__(*args, **kwargs) + + self.original_handler = signal(SIGINT, self.handle_sigint) + + # If signal() returns None, the previous handler was not installed from + # Python, and we cannot restore it. This probably should not happen, + # but if it does, we must restore something sensible instead, at least. + # The least bad option should be Python's default SIGINT handler, which + # just raises KeyboardInterrupt. + if self.original_handler is None: + self.original_handler = default_int_handler + + def finish(self): + """ + Restore the original SIGINT handler after finishing. + + This should happen regardless of whether the progress display finishes + normally, or gets interrupted. + """ + super(InterruptibleMixin, self).finish() + signal(SIGINT, self.original_handler) + + def handle_sigint(self, signum, frame): + """ + Call self.finish() before delegating to the original SIGINT handler. + + This handler should only be in place while the progress display is + active. + """ + self.finish() + self.original_handler(signum, frame) + + class DownloadProgressMixin(object): def __init__(self, *args, **kwargs): @@ -109,15 +165,16 @@ self.file.flush = lambda: self.file.wrapped.flush() -class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, _BaseBar): +class DownloadProgressBar(WindowsMixin, InterruptibleMixin, + DownloadProgressMixin, _BaseBar): file = sys.stdout message = "%(percent)d%%" suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" -class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin, - WritelnMixin, Spinner): +class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin, + DownloadProgressMixin, WritelnMixin, Spinner): file = sys.stdout suffix = "%(downloaded)s %(download_speed)s"
{"golden_diff": "diff --git a/pip/utils/ui.py b/pip/utils/ui.py\n--- a/pip/utils/ui.py\n+++ b/pip/utils/ui.py\n@@ -3,6 +3,7 @@\n \n import itertools\n import sys\n+from signal import signal, SIGINT, default_int_handler\n \n from pip.compat import WINDOWS\n from pip.utils import format_size\n@@ -50,6 +51,61 @@\n _BaseBar = _select_progress_class(IncrementalBar, Bar)\n \n \n+class InterruptibleMixin(object):\n+ \"\"\"\n+ Helper to ensure that self.finish() gets called on keyboard interrupt.\n+\n+ This allows downloads to be interrupted without leaving temporary state\n+ (like hidden cursors) behind.\n+\n+ This class is similar to the progress library's existing SigIntMixin\n+ helper, but as of version 1.2, that helper has the following problems:\n+\n+ 1. It calls sys.exit().\n+ 2. It discards the existing SIGINT handler completely.\n+ 3. It leaves its own handler in place even after an uninterrupted finish,\n+ which will have unexpected delayed effects if the user triggers an\n+ unrelated keyboard interrupt some time after a progress-displaying\n+ download has already completed, for example.\n+ \"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ \"\"\"\n+ Save the original SIGINT handler for later.\n+ \"\"\"\n+ super(InterruptibleMixin, self).__init__(*args, **kwargs)\n+\n+ self.original_handler = signal(SIGINT, self.handle_sigint)\n+\n+ # If signal() returns None, the previous handler was not installed from\n+ # Python, and we cannot restore it. This probably should not happen,\n+ # but if it does, we must restore something sensible instead, at least.\n+ # The least bad option should be Python's default SIGINT handler, which\n+ # just raises KeyboardInterrupt.\n+ if self.original_handler is None:\n+ self.original_handler = default_int_handler\n+\n+ def finish(self):\n+ \"\"\"\n+ Restore the original SIGINT handler after finishing.\n+\n+ This should happen regardless of whether the progress display finishes\n+ normally, or gets interrupted.\n+ \"\"\"\n+ super(InterruptibleMixin, self).finish()\n+ signal(SIGINT, self.original_handler)\n+\n+ def handle_sigint(self, signum, frame):\n+ \"\"\"\n+ Call self.finish() before delegating to the original SIGINT handler.\n+\n+ This handler should only be in place while the progress display is\n+ active.\n+ \"\"\"\n+ self.finish()\n+ self.original_handler(signum, frame)\n+\n+\n class DownloadProgressMixin(object):\n \n def __init__(self, *args, **kwargs):\n@@ -109,15 +165,16 @@\n self.file.flush = lambda: self.file.wrapped.flush()\n \n \n-class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, _BaseBar):\n+class DownloadProgressBar(WindowsMixin, InterruptibleMixin,\n+ DownloadProgressMixin, _BaseBar):\n \n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n \n \n-class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin,\n- WritelnMixin, Spinner):\n+class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,\n+ DownloadProgressMixin, WritelnMixin, Spinner):\n \n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n", "issue": "Interrupting a pip download with CTRL-C does not unhide the cursor\nVersion: pep 6.0.8\n\nPressing CTRL-C while pip is downloading a package (and displaying its progress bar) shows an \"Operation cancelled by user\" message and drops the user back to the shell prompt, but does not unhide the cursor that was hidden while displaying the progress bar.\n\nGlancing at the code, it looks like the `progress` library already offers a `SigIntMixin` helper for handling this, and that pip's progress bar isn't using it. Maybe including this mixin in the appropriate place(s) is all that's needed?\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport itertools\nimport sys\n\nfrom pip.compat import WINDOWS\nfrom pip.utils import format_size\nfrom pip.utils.logging import get_indentation\nfrom pip._vendor import six\nfrom pip._vendor.progress.bar import Bar, IncrementalBar\nfrom pip._vendor.progress.helpers import WritelnMixin\nfrom pip._vendor.progress.spinner import Spinner\n\ntry:\n from pip._vendor import colorama\n# Lots of different errors can come from this, including SystemError and\n# ImportError.\nexcept Exception:\n colorama = None\n\n\ndef _select_progress_class(preferred, fallback):\n encoding = getattr(preferred.file, \"encoding\", None)\n\n # If we don't know what encoding this file is in, then we'll just assume\n # that it doesn't support unicode and use the ASCII bar.\n if not encoding:\n return fallback\n\n # Collect all of the possible characters we want to use with the preferred\n # bar.\n characters = [\n getattr(preferred, \"empty_fill\", six.text_type()),\n getattr(preferred, \"fill\", six.text_type()),\n ]\n characters += list(getattr(preferred, \"phases\", []))\n\n # Try to decode the characters we're using for the bar using the encoding\n # of the given file, if this works then we'll assume that we can use the\n # fancier bar and if not we'll fall back to the plaintext bar.\n try:\n six.text_type().join(characters).encode(encoding)\n except UnicodeEncodeError:\n return fallback\n else:\n return preferred\n\n\n_BaseBar = _select_progress_class(IncrementalBar, Bar)\n\n\nclass DownloadProgressMixin(object):\n\n def __init__(self, *args, **kwargs):\n super(DownloadProgressMixin, self).__init__(*args, **kwargs)\n self.message = (\" \" * (get_indentation() + 2)) + self.message\n\n @property\n def downloaded(self):\n return format_size(self.index)\n\n @property\n def download_speed(self):\n # Avoid zero division errors...\n if self.avg == 0.0:\n return \"...\"\n return format_size(1 / self.avg) + \"/s\"\n\n @property\n def pretty_eta(self):\n if self.eta:\n return \"eta %s\" % self.eta_td\n return \"\"\n\n def iter(self, it, n=1):\n for x in it:\n yield x\n self.next(n)\n self.finish()\n\n\nclass WindowsMixin(object):\n\n def __init__(self, *args, **kwargs):\n # The Windows terminal does not support the hide/show cursor ANSI codes\n # even with colorama. So we'll ensure that hide_cursor is False on\n # Windows.\n # This call neds to go before the super() call, so that hide_cursor\n # is set in time. The base progress bar class writes the \"hide cursor\"\n # code to the terminal in its init, so if we don't set this soon\n # enough, we get a \"hide\" with no corresponding \"show\"...\n if WINDOWS and self.hide_cursor:\n self.hide_cursor = False\n\n super(WindowsMixin, self).__init__(*args, **kwargs)\n\n # Check if we are running on Windows and we have the colorama module,\n # if we do then wrap our file with it.\n if WINDOWS and colorama:\n self.file = colorama.AnsiToWin32(self.file)\n # The progress code expects to be able to call self.file.isatty()\n # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n # add it.\n self.file.isatty = lambda: self.file.wrapped.isatty()\n # The progress code expects to be able to call self.file.flush()\n # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n # add it.\n self.file.flush = lambda: self.file.wrapped.flush()\n\n\nclass DownloadProgressBar(WindowsMixin, DownloadProgressMixin, _BaseBar):\n\n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n\n\nclass DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin,\n WritelnMixin, Spinner):\n\n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n\n def next_phase(self):\n if not hasattr(self, \"_phaser\"):\n self._phaser = itertools.cycle(self.phases)\n return next(self._phaser)\n\n def update(self):\n message = self.message % self\n phase = self.next_phase()\n suffix = self.suffix % self\n line = ''.join([\n message,\n \" \" if message else \"\",\n phase,\n \" \" if suffix else \"\",\n suffix,\n ])\n\n self.writeln(line)\n", "path": "pip/utils/ui.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport itertools\nimport sys\nfrom signal import signal, SIGINT, default_int_handler\n\nfrom pip.compat import WINDOWS\nfrom pip.utils import format_size\nfrom pip.utils.logging import get_indentation\nfrom pip._vendor import six\nfrom pip._vendor.progress.bar import Bar, IncrementalBar\nfrom pip._vendor.progress.helpers import WritelnMixin\nfrom pip._vendor.progress.spinner import Spinner\n\ntry:\n from pip._vendor import colorama\n# Lots of different errors can come from this, including SystemError and\n# ImportError.\nexcept Exception:\n colorama = None\n\n\ndef _select_progress_class(preferred, fallback):\n encoding = getattr(preferred.file, \"encoding\", None)\n\n # If we don't know what encoding this file is in, then we'll just assume\n # that it doesn't support unicode and use the ASCII bar.\n if not encoding:\n return fallback\n\n # Collect all of the possible characters we want to use with the preferred\n # bar.\n characters = [\n getattr(preferred, \"empty_fill\", six.text_type()),\n getattr(preferred, \"fill\", six.text_type()),\n ]\n characters += list(getattr(preferred, \"phases\", []))\n\n # Try to decode the characters we're using for the bar using the encoding\n # of the given file, if this works then we'll assume that we can use the\n # fancier bar and if not we'll fall back to the plaintext bar.\n try:\n six.text_type().join(characters).encode(encoding)\n except UnicodeEncodeError:\n return fallback\n else:\n return preferred\n\n\n_BaseBar = _select_progress_class(IncrementalBar, Bar)\n\n\nclass InterruptibleMixin(object):\n \"\"\"\n Helper to ensure that self.finish() gets called on keyboard interrupt.\n\n This allows downloads to be interrupted without leaving temporary state\n (like hidden cursors) behind.\n\n This class is similar to the progress library's existing SigIntMixin\n helper, but as of version 1.2, that helper has the following problems:\n\n 1. It calls sys.exit().\n 2. It discards the existing SIGINT handler completely.\n 3. It leaves its own handler in place even after an uninterrupted finish,\n which will have unexpected delayed effects if the user triggers an\n unrelated keyboard interrupt some time after a progress-displaying\n download has already completed, for example.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Save the original SIGINT handler for later.\n \"\"\"\n super(InterruptibleMixin, self).__init__(*args, **kwargs)\n\n self.original_handler = signal(SIGINT, self.handle_sigint)\n\n # If signal() returns None, the previous handler was not installed from\n # Python, and we cannot restore it. This probably should not happen,\n # but if it does, we must restore something sensible instead, at least.\n # The least bad option should be Python's default SIGINT handler, which\n # just raises KeyboardInterrupt.\n if self.original_handler is None:\n self.original_handler = default_int_handler\n\n def finish(self):\n \"\"\"\n Restore the original SIGINT handler after finishing.\n\n This should happen regardless of whether the progress display finishes\n normally, or gets interrupted.\n \"\"\"\n super(InterruptibleMixin, self).finish()\n signal(SIGINT, self.original_handler)\n\n def handle_sigint(self, signum, frame):\n \"\"\"\n Call self.finish() before delegating to the original SIGINT handler.\n\n This handler should only be in place while the progress display is\n active.\n \"\"\"\n self.finish()\n self.original_handler(signum, frame)\n\n\nclass DownloadProgressMixin(object):\n\n def __init__(self, *args, **kwargs):\n super(DownloadProgressMixin, self).__init__(*args, **kwargs)\n self.message = (\" \" * (get_indentation() + 2)) + self.message\n\n @property\n def downloaded(self):\n return format_size(self.index)\n\n @property\n def download_speed(self):\n # Avoid zero division errors...\n if self.avg == 0.0:\n return \"...\"\n return format_size(1 / self.avg) + \"/s\"\n\n @property\n def pretty_eta(self):\n if self.eta:\n return \"eta %s\" % self.eta_td\n return \"\"\n\n def iter(self, it, n=1):\n for x in it:\n yield x\n self.next(n)\n self.finish()\n\n\nclass WindowsMixin(object):\n\n def __init__(self, *args, **kwargs):\n # The Windows terminal does not support the hide/show cursor ANSI codes\n # even with colorama. So we'll ensure that hide_cursor is False on\n # Windows.\n # This call neds to go before the super() call, so that hide_cursor\n # is set in time. The base progress bar class writes the \"hide cursor\"\n # code to the terminal in its init, so if we don't set this soon\n # enough, we get a \"hide\" with no corresponding \"show\"...\n if WINDOWS and self.hide_cursor:\n self.hide_cursor = False\n\n super(WindowsMixin, self).__init__(*args, **kwargs)\n\n # Check if we are running on Windows and we have the colorama module,\n # if we do then wrap our file with it.\n if WINDOWS and colorama:\n self.file = colorama.AnsiToWin32(self.file)\n # The progress code expects to be able to call self.file.isatty()\n # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n # add it.\n self.file.isatty = lambda: self.file.wrapped.isatty()\n # The progress code expects to be able to call self.file.flush()\n # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n # add it.\n self.file.flush = lambda: self.file.wrapped.flush()\n\n\nclass DownloadProgressBar(WindowsMixin, InterruptibleMixin,\n DownloadProgressMixin, _BaseBar):\n\n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n\n\nclass DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,\n DownloadProgressMixin, WritelnMixin, Spinner):\n\n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n\n def next_phase(self):\n if not hasattr(self, \"_phaser\"):\n self._phaser = itertools.cycle(self.phases)\n return next(self._phaser)\n\n def update(self):\n message = self.message % self\n phase = self.next_phase()\n suffix = self.suffix % self\n line = ''.join([\n message,\n \" \" if message else \"\",\n phase,\n \" \" if suffix else \"\",\n suffix,\n ])\n\n self.writeln(line)\n", "path": "pip/utils/ui.py"}]}
1,786
763
gh_patches_debug_17530
rasdani/github-patches
git_diff
biopython__biopython-2513
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove use of Bio._py3k (Python 2 / 3 compatibility) As of Biopython 1.76 (released December 2019), we are dropping Python 2 support and focusing on Python 3.6 or later. This means we no longer need our (internal) Python 2 vs 3 compatibility library ``Bio._py3k`` (which is a bit like the third party library ``six``). This issue is suitable and specifically targeting first time contributors. There are lots of cases: ``` $ grep _py3k Bio*/*.py Bio/*/*.py Bio/*/*/*.py Bio/File.py:from Bio._py3k import basestring Bio/MarkovModel.py: from Bio._py3k import StringIO Bio/Seq.py:from Bio._py3k import range Bio/Seq.py:from Bio._py3k import basestring ... ``` Example One ------------ Taking the first example, ``from Bio._py3k import basestring`` we see that this is defined under Python 3 as an alias of ``str``: https://github.com/biopython/biopython/blob/biopython-176/Bio/_py3k/__init__.py#L56 ```python # Lots of our Python 2 code uses isinstance(x, basestring) # which after 2to3 becomes isinstance(x, str) basestring = str unicode = str ``` Therefore the fix for ``Bio/File.py`` is to remove the ``from Bio._py3k import basestring`` line, and update where ``basestring`` was used to instead use ``str``, which in this case means editing one line: ```python if isinstance(handleish, basestring): ``` with: ```python if isinstance(handleish, str): ``` Example Two ------------ Taking the second example, ``Bio/MarkovModel.py`` has ``from Bio._py3k import StringIO`` which we find on Python 3 can just be replaced with ``from io import StringIO`` https://github.com/biopython/biopython/blob/biopython-176/Bio/_py3k/__init__.py#L130 Contributing ----------- Could any newcomer wanting to work on this first comment on this issue saying which file(s) they are going to start with (e.g. ``Bio/File.py``, or ``BioSQL/``) to avoid duplication of effort. (*Update: The consensus was to switch to be function or constant instead, since they generally require the same technique/fix each time*) Then read https://github.com/biopython/biopython/blob/master/CONTRIBUTING.rst and setup ``flake8`` on your machine. Then make a pull request making the necessary changes so that those files no longer import from ``Bio._py3k``. Once that's done, you could pick some more to work on. Eventually there will be nothing using ``Bio._py3k`` and that code itself can be removed, and this issue closed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Bio/_py3k/__init__.py` Content: ``` 1 # Copyright 2010-2018 by Peter Cock. All rights reserved. 2 # 3 # This file is part of the Biopython distribution and governed by your 4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". 5 # Please see the LICENSE file that should have been included as part of this 6 # package. 7 """Python 3 compatibility tools (PRIVATE). 8 9 Once we drop support for Python 2, the whole of Bio._py3k will 10 go away. 11 """ 12 13 # From the point of view of pep8 and flake8, there are lots of issues with 14 # this file. This line tells flake8 to ignore it for quality assurance: 15 # flake8: noqa 16 17 import sys 18 19 import codecs 20 21 22 def _bytes_bytearray_to_str(s): 23 """If s is bytes or bytearray, convert to a unicode string (PRIVATE).""" 24 if isinstance(s, (bytes, bytearray)): 25 return s.decode() 26 return s 27 28 29 import io 30 import locale 31 32 # Python 3.4 onwards, the standard library wrappers should work: 33 def _binary_to_string_handle(handle): 34 """Treat a binary (bytes) handle like a text (unicode) handle (PRIVATE).""" 35 try: 36 # If this is a network handle from urllib, 37 # the HTTP headers may tell us the encoding. 38 encoding = handle.headers.get_content_charset() 39 except AttributeError: 40 encoding = None 41 if encoding is None: 42 # The W3C recommendation is: 43 # When no explicit charset parameter is provided by the sender, 44 # media subtypes of the "text" type are defined to have a default 45 # charset value of "ISO-8859-1" when received via HTTP. 46 # "ISO-8859-1" is also known as 'latin-1' 47 # See the following for more detail: 48 # https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1 49 encoding = "latin-1" 50 wrapped = io.TextIOWrapper(io.BufferedReader(handle), encoding=encoding) 51 try: 52 # If wrapping an online handle, this is nice to have: 53 wrapped.url = handle.url 54 except AttributeError: 55 pass 56 return wrapped 57 58 59 # On Python 3 urllib, urllib2, and urlparse were merged: 60 from urllib.request import urlopen, Request, urlparse, urlcleanup 61 from urllib.parse import urlencode, quote 62 from urllib.error import URLError, HTTPError 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Bio/_py3k/__init__.py b/Bio/_py3k/__init__.py --- a/Bio/_py3k/__init__.py +++ b/Bio/_py3k/__init__.py @@ -14,20 +14,7 @@ # this file. This line tells flake8 to ignore it for quality assurance: # flake8: noqa -import sys - -import codecs - - -def _bytes_bytearray_to_str(s): - """If s is bytes or bytearray, convert to a unicode string (PRIVATE).""" - if isinstance(s, (bytes, bytearray)): - return s.decode() - return s - - import io -import locale # Python 3.4 onwards, the standard library wrappers should work: def _binary_to_string_handle(handle): @@ -54,9 +41,3 @@ except AttributeError: pass return wrapped - - -# On Python 3 urllib, urllib2, and urlparse were merged: -from urllib.request import urlopen, Request, urlparse, urlcleanup -from urllib.parse import urlencode, quote -from urllib.error import URLError, HTTPError
{"golden_diff": "diff --git a/Bio/_py3k/__init__.py b/Bio/_py3k/__init__.py\n--- a/Bio/_py3k/__init__.py\n+++ b/Bio/_py3k/__init__.py\n@@ -14,20 +14,7 @@\n # this file. This line tells flake8 to ignore it for quality assurance:\n # flake8: noqa\n \n-import sys\n-\n-import codecs\n-\n-\n-def _bytes_bytearray_to_str(s):\n- \"\"\"If s is bytes or bytearray, convert to a unicode string (PRIVATE).\"\"\"\n- if isinstance(s, (bytes, bytearray)):\n- return s.decode()\n- return s\n-\n-\n import io\n-import locale\n \n # Python 3.4 onwards, the standard library wrappers should work:\n def _binary_to_string_handle(handle):\n@@ -54,9 +41,3 @@\n except AttributeError:\n pass\n return wrapped\n-\n-\n-# On Python 3 urllib, urllib2, and urlparse were merged:\n-from urllib.request import urlopen, Request, urlparse, urlcleanup\n-from urllib.parse import urlencode, quote\n-from urllib.error import URLError, HTTPError\n", "issue": "Remove use of Bio._py3k (Python 2 / 3 compatibility)\nAs of Biopython 1.76 (released December 2019), we are dropping Python 2 support and focusing on Python 3.6 or later. This means we no longer need our (internal) Python 2 vs 3 compatibility library ``Bio._py3k`` (which is a bit like the third party library ``six``).\r\n\r\nThis issue is suitable and specifically targeting first time contributors.\r\n\r\nThere are lots of cases:\r\n\r\n```\r\n$ grep _py3k Bio*/*.py Bio/*/*.py Bio/*/*/*.py\r\nBio/File.py:from Bio._py3k import basestring\r\nBio/MarkovModel.py: from Bio._py3k import StringIO\r\nBio/Seq.py:from Bio._py3k import range\r\nBio/Seq.py:from Bio._py3k import basestring\r\n...\r\n```\r\n\r\nExample One\r\n------------\r\n\r\nTaking the first example, ``from Bio._py3k import basestring`` we see that this is defined under Python 3 as an alias of ``str``:\r\n\r\nhttps://github.com/biopython/biopython/blob/biopython-176/Bio/_py3k/__init__.py#L56\r\n\r\n```python\r\n # Lots of our Python 2 code uses isinstance(x, basestring)\r\n # which after 2to3 becomes isinstance(x, str)\r\n basestring = str\r\n unicode = str\r\n```\r\n\r\nTherefore the fix for ``Bio/File.py`` is to remove the ``from Bio._py3k import basestring`` line, and update where ``basestring`` was used to instead use ``str``, which in this case means editing one line:\r\n\r\n```python\r\nif isinstance(handleish, basestring):\r\n```\r\n\r\nwith:\r\n\r\n```python\r\nif isinstance(handleish, str):\r\n```\r\n\r\nExample Two\r\n------------\r\n\r\nTaking the second example, ``Bio/MarkovModel.py`` has ``from Bio._py3k import StringIO`` which we find on Python 3 can just be replaced with ``from io import StringIO``\r\n\r\nhttps://github.com/biopython/biopython/blob/biopython-176/Bio/_py3k/__init__.py#L130\r\n\r\nContributing\r\n-----------\r\n\r\nCould any newcomer wanting to work on this first comment on this issue saying which file(s) they are going to start with (e.g. ``Bio/File.py``, or ``BioSQL/``) to avoid duplication of effort.\r\n\r\n(*Update: The consensus was to switch to be function or constant instead, since they generally require the same technique/fix each time*)\r\n\r\nThen read https://github.com/biopython/biopython/blob/master/CONTRIBUTING.rst and setup ``flake8`` on your machine.\r\n\r\nThen make a pull request making the necessary changes so that those files no longer import from ``Bio._py3k``. Once that's done, you could pick some more to work on.\r\n\r\nEventually there will be nothing using ``Bio._py3k`` and that code itself can be removed, and this issue closed.\n", "before_files": [{"content": "# Copyright 2010-2018 by Peter Cock. All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Python 3 compatibility tools (PRIVATE).\n\nOnce we drop support for Python 2, the whole of Bio._py3k will\ngo away.\n\"\"\"\n\n# From the point of view of pep8 and flake8, there are lots of issues with\n# this file. This line tells flake8 to ignore it for quality assurance:\n# flake8: noqa\n\nimport sys\n\nimport codecs\n\n\ndef _bytes_bytearray_to_str(s):\n \"\"\"If s is bytes or bytearray, convert to a unicode string (PRIVATE).\"\"\"\n if isinstance(s, (bytes, bytearray)):\n return s.decode()\n return s\n\n\nimport io\nimport locale\n\n# Python 3.4 onwards, the standard library wrappers should work:\ndef _binary_to_string_handle(handle):\n \"\"\"Treat a binary (bytes) handle like a text (unicode) handle (PRIVATE).\"\"\"\n try:\n # If this is a network handle from urllib,\n # the HTTP headers may tell us the encoding.\n encoding = handle.headers.get_content_charset()\n except AttributeError:\n encoding = None\n if encoding is None:\n # The W3C recommendation is:\n # When no explicit charset parameter is provided by the sender,\n # media subtypes of the \"text\" type are defined to have a default\n # charset value of \"ISO-8859-1\" when received via HTTP.\n # \"ISO-8859-1\" is also known as 'latin-1'\n # See the following for more detail:\n # https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1\n encoding = \"latin-1\"\n wrapped = io.TextIOWrapper(io.BufferedReader(handle), encoding=encoding)\n try:\n # If wrapping an online handle, this is nice to have:\n wrapped.url = handle.url\n except AttributeError:\n pass\n return wrapped\n\n\n# On Python 3 urllib, urllib2, and urlparse were merged:\nfrom urllib.request import urlopen, Request, urlparse, urlcleanup\nfrom urllib.parse import urlencode, quote\nfrom urllib.error import URLError, HTTPError\n", "path": "Bio/_py3k/__init__.py"}], "after_files": [{"content": "# Copyright 2010-2018 by Peter Cock. All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Python 3 compatibility tools (PRIVATE).\n\nOnce we drop support for Python 2, the whole of Bio._py3k will\ngo away.\n\"\"\"\n\n# From the point of view of pep8 and flake8, there are lots of issues with\n# this file. This line tells flake8 to ignore it for quality assurance:\n# flake8: noqa\n\nimport io\n\n# Python 3.4 onwards, the standard library wrappers should work:\ndef _binary_to_string_handle(handle):\n \"\"\"Treat a binary (bytes) handle like a text (unicode) handle (PRIVATE).\"\"\"\n try:\n # If this is a network handle from urllib,\n # the HTTP headers may tell us the encoding.\n encoding = handle.headers.get_content_charset()\n except AttributeError:\n encoding = None\n if encoding is None:\n # The W3C recommendation is:\n # When no explicit charset parameter is provided by the sender,\n # media subtypes of the \"text\" type are defined to have a default\n # charset value of \"ISO-8859-1\" when received via HTTP.\n # \"ISO-8859-1\" is also known as 'latin-1'\n # See the following for more detail:\n # https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1\n encoding = \"latin-1\"\n wrapped = io.TextIOWrapper(io.BufferedReader(handle), encoding=encoding)\n try:\n # If wrapping an online handle, this is nice to have:\n wrapped.url = handle.url\n except AttributeError:\n pass\n return wrapped\n", "path": "Bio/_py3k/__init__.py"}]}
1,581
252
gh_patches_debug_8524
rasdani/github-patches
git_diff
ethereum__web3.py-864
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ValueError when using IPCProvider * Version: 4.2.1 * Python: 2.7/3.4/3.5 * OS: osx/linux/win ### What was wrong? When using IPCProvider sometimes ValueError is encountered if the response is incomplete. Looks like in this pull request https://github.com/ethereum/web3.py/pull/785/files the author forgot to add `except ValueError` to IPCProvider https://github.com/ethereum/web3.py/blob/master/web3/providers/ipc.py#L176 ### How can it be fixed? Add `except ValueError` to IPCProvider.make_request https://github.com/ethereum/web3.py/blob/master/web3/providers/ipc.py#L176 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `web3/utils/encoding.py` Content: ``` 1 # String encodings and numeric representations 2 import json 3 import re 4 5 from eth_utils import ( 6 add_0x_prefix, 7 big_endian_to_int, 8 decode_hex, 9 encode_hex, 10 int_to_big_endian, 11 is_boolean, 12 is_bytes, 13 is_hex, 14 is_integer, 15 is_list_like, 16 remove_0x_prefix, 17 to_hex, 18 ) 19 20 from web3.utils.abi import ( 21 is_address_type, 22 is_array_type, 23 is_bool_type, 24 is_bytes_type, 25 is_int_type, 26 is_string_type, 27 is_uint_type, 28 size_of_type, 29 sub_type_of_array_type, 30 ) 31 from web3.utils.toolz import ( 32 curry, 33 ) 34 from web3.utils.validation import ( 35 assert_one_val, 36 validate_abi_type, 37 validate_abi_value, 38 ) 39 40 41 def hex_encode_abi_type(abi_type, value, force_size=None): 42 """ 43 Encodes value into a hex string in format of abi_type 44 """ 45 validate_abi_type(abi_type) 46 validate_abi_value(abi_type, value) 47 48 data_size = force_size or size_of_type(abi_type) 49 if is_array_type(abi_type): 50 sub_type = sub_type_of_array_type(abi_type) 51 return "".join([remove_0x_prefix(hex_encode_abi_type(sub_type, v, 256)) for v in value]) 52 elif is_bool_type(abi_type): 53 return to_hex_with_size(value, data_size) 54 elif is_uint_type(abi_type): 55 return to_hex_with_size(value, data_size) 56 elif is_int_type(abi_type): 57 return to_hex_twos_compliment(value, data_size) 58 elif is_address_type(abi_type): 59 return pad_hex(value, data_size) 60 elif is_bytes_type(abi_type): 61 if is_bytes(value): 62 return encode_hex(value) 63 else: 64 return value 65 elif is_string_type(abi_type): 66 return to_hex(text=value) 67 else: 68 raise ValueError( 69 "Unsupported ABI type: {0}".format(abi_type) 70 ) 71 72 73 def to_hex_twos_compliment(value, bit_size): 74 """ 75 Converts integer value to twos compliment hex representation with given bit_size 76 """ 77 if value >= 0: 78 return to_hex_with_size(value, bit_size) 79 80 value = (1 << bit_size) + value 81 hex_value = hex(value) 82 hex_value = hex_value.rstrip("L") 83 return hex_value 84 85 86 def to_hex_with_size(value, bit_size): 87 """ 88 Converts a value to hex with given bit_size: 89 """ 90 return pad_hex(to_hex(value), bit_size) 91 92 93 def pad_hex(value, bit_size): 94 """ 95 Pads a hex string up to the given bit_size 96 """ 97 value = remove_0x_prefix(value) 98 return add_0x_prefix(value.zfill(int(bit_size / 4))) 99 100 101 def trim_hex(hexstr): 102 if hexstr.startswith('0x0'): 103 hexstr = re.sub('^0x0+', '0x', hexstr) 104 if hexstr == '0x': 105 hexstr = '0x0' 106 return hexstr 107 108 109 def to_int(value=None, hexstr=None, text=None): 110 """ 111 Converts value to it's integer representation. 112 113 Values are converted this way: 114 115 * value: 116 * bytes: big-endian integer 117 * bool: True => 1, False => 0 118 * hexstr: interpret hex as integer 119 * text: interpret as string of digits, like '12' => 12 120 """ 121 assert_one_val(value, hexstr=hexstr, text=text) 122 123 if hexstr is not None: 124 return int(hexstr, 16) 125 elif text is not None: 126 return int(text) 127 elif isinstance(value, bytes): 128 return big_endian_to_int(value) 129 elif isinstance(value, str): 130 raise TypeError("Pass in strings with keyword hexstr or text") 131 else: 132 return int(value) 133 134 135 @curry 136 def pad_bytes(fill_with, num_bytes, unpadded): 137 return unpadded.rjust(num_bytes, fill_with) 138 139 140 zpad_bytes = pad_bytes(b'\0') 141 142 143 def to_bytes(primitive=None, hexstr=None, text=None): 144 assert_one_val(primitive, hexstr=hexstr, text=text) 145 146 if is_boolean(primitive): 147 return b'\x01' if primitive else b'\x00' 148 elif isinstance(primitive, bytes): 149 return primitive 150 elif is_integer(primitive): 151 return to_bytes(hexstr=to_hex(primitive)) 152 elif hexstr is not None: 153 if len(hexstr) % 2: 154 hexstr = '0x0' + remove_0x_prefix(hexstr) 155 return decode_hex(hexstr) 156 elif text is not None: 157 return text.encode('utf-8') 158 raise TypeError("expected an int in first arg, or keyword of hexstr or text") 159 160 161 def to_text(primitive=None, hexstr=None, text=None): 162 assert_one_val(primitive, hexstr=hexstr, text=text) 163 164 if hexstr is not None: 165 return to_bytes(hexstr=hexstr).decode('utf-8') 166 elif text is not None: 167 return text 168 elif isinstance(primitive, str): 169 return to_text(hexstr=primitive) 170 elif isinstance(primitive, bytes): 171 return primitive.decode('utf-8') 172 elif is_integer(primitive): 173 byte_encoding = int_to_big_endian(primitive) 174 return to_text(byte_encoding) 175 raise TypeError("Expected an int, bytes or hexstr.") 176 177 178 @curry 179 def text_if_str(to_type, text_or_primitive): 180 ''' 181 Convert to a type, assuming that strings can be only unicode text (not a hexstr) 182 183 @param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text), 184 eg~ to_bytes, to_text, to_hex, to_int, etc 185 @param hexstr_or_primitive in bytes, str, or int. 186 ''' 187 if isinstance(text_or_primitive, str): 188 (primitive, text) = (None, text_or_primitive) 189 else: 190 (primitive, text) = (text_or_primitive, None) 191 return to_type(primitive, text=text) 192 193 194 @curry 195 def hexstr_if_str(to_type, hexstr_or_primitive): 196 ''' 197 Convert to a type, assuming that strings can be only hexstr (not unicode text) 198 199 @param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text), 200 eg~ to_bytes, to_text, to_hex, to_int, etc 201 @param text_or_primitive in bytes, str, or int. 202 ''' 203 if isinstance(hexstr_or_primitive, str): 204 (primitive, hexstr) = (None, hexstr_or_primitive) 205 if remove_0x_prefix(hexstr) and not is_hex(hexstr): 206 raise ValueError( 207 "when sending a str, it must be a hex string. Got: {0!r}".format( 208 hexstr_or_primitive, 209 ) 210 ) 211 else: 212 (primitive, hexstr) = (hexstr_or_primitive, None) 213 return to_type(primitive, hexstr=hexstr) 214 215 216 class FriendlyJsonSerde: 217 ''' 218 Friendly JSON serializer & deserializer 219 220 When encoding or decoding fails, this class collects 221 information on which fields failed, to show more 222 helpful information in the raised error messages. 223 ''' 224 def _json_mapping_errors(self, mapping): 225 for key, val in mapping.items(): 226 try: 227 self._friendly_json_encode(val) 228 except TypeError as exc: 229 yield "%r: because (%s)" % (key, exc) 230 231 def _json_list_errors(self, iterable): 232 for index, element in enumerate(iterable): 233 try: 234 self._friendly_json_encode(element) 235 except TypeError as exc: 236 yield "%d: because (%s)" % (index, exc) 237 238 def _friendly_json_encode(self, obj): 239 try: 240 encoded = json.dumps(obj) 241 return encoded 242 except TypeError as full_exception: 243 if hasattr(obj, 'items'): 244 item_errors = '; '.join(self._json_mapping_errors(obj)) 245 raise TypeError("dict had unencodable value at keys: {{{}}}".format(item_errors)) 246 elif is_list_like(obj): 247 element_errors = '; '.join(self._json_list_errors(obj)) 248 raise TypeError("list had unencodable value at index: [{}]".format(element_errors)) 249 else: 250 raise full_exception 251 252 def json_decode(self, json_str): 253 try: 254 decoded = json.loads(json_str) 255 return decoded 256 except json.decoder.JSONDecodeError as exc: 257 err_msg = 'Could not decode {} because of {}.'.format(repr(json_str), exc) 258 raise ValueError(err_msg) 259 260 def json_encode(self, obj): 261 try: 262 return self._friendly_json_encode(obj) 263 except TypeError as exc: 264 raise TypeError("Could not encode to JSON: {}".format(exc)) 265 266 267 def to_4byte_hex(hex_or_str_or_bytes): 268 size_of_4bytes = 4 * 8 269 byte_str = hexstr_if_str(to_bytes, hex_or_str_or_bytes) 270 if len(byte_str) > 4: 271 raise ValueError( 272 'expected value of size 4 bytes. Got: %d bytes' % len(byte_str) 273 ) 274 hex_str = encode_hex(byte_str) 275 return pad_hex(hex_str, size_of_4bytes) 276 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/web3/utils/encoding.py b/web3/utils/encoding.py --- a/web3/utils/encoding.py +++ b/web3/utils/encoding.py @@ -255,7 +255,9 @@ return decoded except json.decoder.JSONDecodeError as exc: err_msg = 'Could not decode {} because of {}.'.format(repr(json_str), exc) - raise ValueError(err_msg) + # Calling code may rely on catching JSONDecodeError to recognize bad json + # so we have to re-raise the same type. + raise json.decoder.JSONDecodeError(err_msg, exc.doc, exc.pos) def json_encode(self, obj): try:
{"golden_diff": "diff --git a/web3/utils/encoding.py b/web3/utils/encoding.py\n--- a/web3/utils/encoding.py\n+++ b/web3/utils/encoding.py\n@@ -255,7 +255,9 @@\n return decoded\n except json.decoder.JSONDecodeError as exc:\n err_msg = 'Could not decode {} because of {}.'.format(repr(json_str), exc)\n- raise ValueError(err_msg)\n+ # Calling code may rely on catching JSONDecodeError to recognize bad json\n+ # so we have to re-raise the same type.\n+ raise json.decoder.JSONDecodeError(err_msg, exc.doc, exc.pos)\n \n def json_encode(self, obj):\n try:\n", "issue": "ValueError when using IPCProvider\n* Version: 4.2.1\r\n* Python: 2.7/3.4/3.5\r\n* OS: osx/linux/win\r\n\r\n\r\n### What was wrong?\r\n\r\nWhen using IPCProvider sometimes ValueError is encountered if the response is incomplete. Looks like in this pull request https://github.com/ethereum/web3.py/pull/785/files the author forgot to add `except ValueError` to IPCProvider https://github.com/ethereum/web3.py/blob/master/web3/providers/ipc.py#L176\r\n\r\n### How can it be fixed?\r\n\r\nAdd `except ValueError` to IPCProvider.make_request https://github.com/ethereum/web3.py/blob/master/web3/providers/ipc.py#L176\r\n\n", "before_files": [{"content": "# String encodings and numeric representations\nimport json\nimport re\n\nfrom eth_utils import (\n add_0x_prefix,\n big_endian_to_int,\n decode_hex,\n encode_hex,\n int_to_big_endian,\n is_boolean,\n is_bytes,\n is_hex,\n is_integer,\n is_list_like,\n remove_0x_prefix,\n to_hex,\n)\n\nfrom web3.utils.abi import (\n is_address_type,\n is_array_type,\n is_bool_type,\n is_bytes_type,\n is_int_type,\n is_string_type,\n is_uint_type,\n size_of_type,\n sub_type_of_array_type,\n)\nfrom web3.utils.toolz import (\n curry,\n)\nfrom web3.utils.validation import (\n assert_one_val,\n validate_abi_type,\n validate_abi_value,\n)\n\n\ndef hex_encode_abi_type(abi_type, value, force_size=None):\n \"\"\"\n Encodes value into a hex string in format of abi_type\n \"\"\"\n validate_abi_type(abi_type)\n validate_abi_value(abi_type, value)\n\n data_size = force_size or size_of_type(abi_type)\n if is_array_type(abi_type):\n sub_type = sub_type_of_array_type(abi_type)\n return \"\".join([remove_0x_prefix(hex_encode_abi_type(sub_type, v, 256)) for v in value])\n elif is_bool_type(abi_type):\n return to_hex_with_size(value, data_size)\n elif is_uint_type(abi_type):\n return to_hex_with_size(value, data_size)\n elif is_int_type(abi_type):\n return to_hex_twos_compliment(value, data_size)\n elif is_address_type(abi_type):\n return pad_hex(value, data_size)\n elif is_bytes_type(abi_type):\n if is_bytes(value):\n return encode_hex(value)\n else:\n return value\n elif is_string_type(abi_type):\n return to_hex(text=value)\n else:\n raise ValueError(\n \"Unsupported ABI type: {0}\".format(abi_type)\n )\n\n\ndef to_hex_twos_compliment(value, bit_size):\n \"\"\"\n Converts integer value to twos compliment hex representation with given bit_size\n \"\"\"\n if value >= 0:\n return to_hex_with_size(value, bit_size)\n\n value = (1 << bit_size) + value\n hex_value = hex(value)\n hex_value = hex_value.rstrip(\"L\")\n return hex_value\n\n\ndef to_hex_with_size(value, bit_size):\n \"\"\"\n Converts a value to hex with given bit_size:\n \"\"\"\n return pad_hex(to_hex(value), bit_size)\n\n\ndef pad_hex(value, bit_size):\n \"\"\"\n Pads a hex string up to the given bit_size\n \"\"\"\n value = remove_0x_prefix(value)\n return add_0x_prefix(value.zfill(int(bit_size / 4)))\n\n\ndef trim_hex(hexstr):\n if hexstr.startswith('0x0'):\n hexstr = re.sub('^0x0+', '0x', hexstr)\n if hexstr == '0x':\n hexstr = '0x0'\n return hexstr\n\n\ndef to_int(value=None, hexstr=None, text=None):\n \"\"\"\n Converts value to it's integer representation.\n\n Values are converted this way:\n\n * value:\n * bytes: big-endian integer\n * bool: True => 1, False => 0\n * hexstr: interpret hex as integer\n * text: interpret as string of digits, like '12' => 12\n \"\"\"\n assert_one_val(value, hexstr=hexstr, text=text)\n\n if hexstr is not None:\n return int(hexstr, 16)\n elif text is not None:\n return int(text)\n elif isinstance(value, bytes):\n return big_endian_to_int(value)\n elif isinstance(value, str):\n raise TypeError(\"Pass in strings with keyword hexstr or text\")\n else:\n return int(value)\n\n\n@curry\ndef pad_bytes(fill_with, num_bytes, unpadded):\n return unpadded.rjust(num_bytes, fill_with)\n\n\nzpad_bytes = pad_bytes(b'\\0')\n\n\ndef to_bytes(primitive=None, hexstr=None, text=None):\n assert_one_val(primitive, hexstr=hexstr, text=text)\n\n if is_boolean(primitive):\n return b'\\x01' if primitive else b'\\x00'\n elif isinstance(primitive, bytes):\n return primitive\n elif is_integer(primitive):\n return to_bytes(hexstr=to_hex(primitive))\n elif hexstr is not None:\n if len(hexstr) % 2:\n hexstr = '0x0' + remove_0x_prefix(hexstr)\n return decode_hex(hexstr)\n elif text is not None:\n return text.encode('utf-8')\n raise TypeError(\"expected an int in first arg, or keyword of hexstr or text\")\n\n\ndef to_text(primitive=None, hexstr=None, text=None):\n assert_one_val(primitive, hexstr=hexstr, text=text)\n\n if hexstr is not None:\n return to_bytes(hexstr=hexstr).decode('utf-8')\n elif text is not None:\n return text\n elif isinstance(primitive, str):\n return to_text(hexstr=primitive)\n elif isinstance(primitive, bytes):\n return primitive.decode('utf-8')\n elif is_integer(primitive):\n byte_encoding = int_to_big_endian(primitive)\n return to_text(byte_encoding)\n raise TypeError(\"Expected an int, bytes or hexstr.\")\n\n\n@curry\ndef text_if_str(to_type, text_or_primitive):\n '''\n Convert to a type, assuming that strings can be only unicode text (not a hexstr)\n\n @param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),\n eg~ to_bytes, to_text, to_hex, to_int, etc\n @param hexstr_or_primitive in bytes, str, or int.\n '''\n if isinstance(text_or_primitive, str):\n (primitive, text) = (None, text_or_primitive)\n else:\n (primitive, text) = (text_or_primitive, None)\n return to_type(primitive, text=text)\n\n\n@curry\ndef hexstr_if_str(to_type, hexstr_or_primitive):\n '''\n Convert to a type, assuming that strings can be only hexstr (not unicode text)\n\n @param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),\n eg~ to_bytes, to_text, to_hex, to_int, etc\n @param text_or_primitive in bytes, str, or int.\n '''\n if isinstance(hexstr_or_primitive, str):\n (primitive, hexstr) = (None, hexstr_or_primitive)\n if remove_0x_prefix(hexstr) and not is_hex(hexstr):\n raise ValueError(\n \"when sending a str, it must be a hex string. Got: {0!r}\".format(\n hexstr_or_primitive,\n )\n )\n else:\n (primitive, hexstr) = (hexstr_or_primitive, None)\n return to_type(primitive, hexstr=hexstr)\n\n\nclass FriendlyJsonSerde:\n '''\n Friendly JSON serializer & deserializer\n\n When encoding or decoding fails, this class collects\n information on which fields failed, to show more\n helpful information in the raised error messages.\n '''\n def _json_mapping_errors(self, mapping):\n for key, val in mapping.items():\n try:\n self._friendly_json_encode(val)\n except TypeError as exc:\n yield \"%r: because (%s)\" % (key, exc)\n\n def _json_list_errors(self, iterable):\n for index, element in enumerate(iterable):\n try:\n self._friendly_json_encode(element)\n except TypeError as exc:\n yield \"%d: because (%s)\" % (index, exc)\n\n def _friendly_json_encode(self, obj):\n try:\n encoded = json.dumps(obj)\n return encoded\n except TypeError as full_exception:\n if hasattr(obj, 'items'):\n item_errors = '; '.join(self._json_mapping_errors(obj))\n raise TypeError(\"dict had unencodable value at keys: {{{}}}\".format(item_errors))\n elif is_list_like(obj):\n element_errors = '; '.join(self._json_list_errors(obj))\n raise TypeError(\"list had unencodable value at index: [{}]\".format(element_errors))\n else:\n raise full_exception\n\n def json_decode(self, json_str):\n try:\n decoded = json.loads(json_str)\n return decoded\n except json.decoder.JSONDecodeError as exc:\n err_msg = 'Could not decode {} because of {}.'.format(repr(json_str), exc)\n raise ValueError(err_msg)\n\n def json_encode(self, obj):\n try:\n return self._friendly_json_encode(obj)\n except TypeError as exc:\n raise TypeError(\"Could not encode to JSON: {}\".format(exc))\n\n\ndef to_4byte_hex(hex_or_str_or_bytes):\n size_of_4bytes = 4 * 8\n byte_str = hexstr_if_str(to_bytes, hex_or_str_or_bytes)\n if len(byte_str) > 4:\n raise ValueError(\n 'expected value of size 4 bytes. Got: %d bytes' % len(byte_str)\n )\n hex_str = encode_hex(byte_str)\n return pad_hex(hex_str, size_of_4bytes)\n", "path": "web3/utils/encoding.py"}], "after_files": [{"content": "# String encodings and numeric representations\nimport json\nimport re\n\nfrom eth_utils import (\n add_0x_prefix,\n big_endian_to_int,\n decode_hex,\n encode_hex,\n int_to_big_endian,\n is_boolean,\n is_bytes,\n is_hex,\n is_integer,\n is_list_like,\n remove_0x_prefix,\n to_hex,\n)\n\nfrom web3.utils.abi import (\n is_address_type,\n is_array_type,\n is_bool_type,\n is_bytes_type,\n is_int_type,\n is_string_type,\n is_uint_type,\n size_of_type,\n sub_type_of_array_type,\n)\nfrom web3.utils.toolz import (\n curry,\n)\nfrom web3.utils.validation import (\n assert_one_val,\n validate_abi_type,\n validate_abi_value,\n)\n\n\ndef hex_encode_abi_type(abi_type, value, force_size=None):\n \"\"\"\n Encodes value into a hex string in format of abi_type\n \"\"\"\n validate_abi_type(abi_type)\n validate_abi_value(abi_type, value)\n\n data_size = force_size or size_of_type(abi_type)\n if is_array_type(abi_type):\n sub_type = sub_type_of_array_type(abi_type)\n return \"\".join([remove_0x_prefix(hex_encode_abi_type(sub_type, v, 256)) for v in value])\n elif is_bool_type(abi_type):\n return to_hex_with_size(value, data_size)\n elif is_uint_type(abi_type):\n return to_hex_with_size(value, data_size)\n elif is_int_type(abi_type):\n return to_hex_twos_compliment(value, data_size)\n elif is_address_type(abi_type):\n return pad_hex(value, data_size)\n elif is_bytes_type(abi_type):\n if is_bytes(value):\n return encode_hex(value)\n else:\n return value\n elif is_string_type(abi_type):\n return to_hex(text=value)\n else:\n raise ValueError(\n \"Unsupported ABI type: {0}\".format(abi_type)\n )\n\n\ndef to_hex_twos_compliment(value, bit_size):\n \"\"\"\n Converts integer value to twos compliment hex representation with given bit_size\n \"\"\"\n if value >= 0:\n return to_hex_with_size(value, bit_size)\n\n value = (1 << bit_size) + value\n hex_value = hex(value)\n hex_value = hex_value.rstrip(\"L\")\n return hex_value\n\n\ndef to_hex_with_size(value, bit_size):\n \"\"\"\n Converts a value to hex with given bit_size:\n \"\"\"\n return pad_hex(to_hex(value), bit_size)\n\n\ndef pad_hex(value, bit_size):\n \"\"\"\n Pads a hex string up to the given bit_size\n \"\"\"\n value = remove_0x_prefix(value)\n return add_0x_prefix(value.zfill(int(bit_size / 4)))\n\n\ndef trim_hex(hexstr):\n if hexstr.startswith('0x0'):\n hexstr = re.sub('^0x0+', '0x', hexstr)\n if hexstr == '0x':\n hexstr = '0x0'\n return hexstr\n\n\ndef to_int(value=None, hexstr=None, text=None):\n \"\"\"\n Converts value to it's integer representation.\n\n Values are converted this way:\n\n * value:\n * bytes: big-endian integer\n * bool: True => 1, False => 0\n * hexstr: interpret hex as integer\n * text: interpret as string of digits, like '12' => 12\n \"\"\"\n assert_one_val(value, hexstr=hexstr, text=text)\n\n if hexstr is not None:\n return int(hexstr, 16)\n elif text is not None:\n return int(text)\n elif isinstance(value, bytes):\n return big_endian_to_int(value)\n elif isinstance(value, str):\n raise TypeError(\"Pass in strings with keyword hexstr or text\")\n else:\n return int(value)\n\n\n@curry\ndef pad_bytes(fill_with, num_bytes, unpadded):\n return unpadded.rjust(num_bytes, fill_with)\n\n\nzpad_bytes = pad_bytes(b'\\0')\n\n\ndef to_bytes(primitive=None, hexstr=None, text=None):\n assert_one_val(primitive, hexstr=hexstr, text=text)\n\n if is_boolean(primitive):\n return b'\\x01' if primitive else b'\\x00'\n elif isinstance(primitive, bytes):\n return primitive\n elif is_integer(primitive):\n return to_bytes(hexstr=to_hex(primitive))\n elif hexstr is not None:\n if len(hexstr) % 2:\n hexstr = '0x0' + remove_0x_prefix(hexstr)\n return decode_hex(hexstr)\n elif text is not None:\n return text.encode('utf-8')\n raise TypeError(\"expected an int in first arg, or keyword of hexstr or text\")\n\n\ndef to_text(primitive=None, hexstr=None, text=None):\n assert_one_val(primitive, hexstr=hexstr, text=text)\n\n if hexstr is not None:\n return to_bytes(hexstr=hexstr).decode('utf-8')\n elif text is not None:\n return text\n elif isinstance(primitive, str):\n return to_text(hexstr=primitive)\n elif isinstance(primitive, bytes):\n return primitive.decode('utf-8')\n elif is_integer(primitive):\n byte_encoding = int_to_big_endian(primitive)\n return to_text(byte_encoding)\n raise TypeError(\"Expected an int, bytes or hexstr.\")\n\n\n@curry\ndef text_if_str(to_type, text_or_primitive):\n '''\n Convert to a type, assuming that strings can be only unicode text (not a hexstr)\n\n @param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),\n eg~ to_bytes, to_text, to_hex, to_int, etc\n @param hexstr_or_primitive in bytes, str, or int.\n '''\n if isinstance(text_or_primitive, str):\n (primitive, text) = (None, text_or_primitive)\n else:\n (primitive, text) = (text_or_primitive, None)\n return to_type(primitive, text=text)\n\n\n@curry\ndef hexstr_if_str(to_type, hexstr_or_primitive):\n '''\n Convert to a type, assuming that strings can be only hexstr (not unicode text)\n\n @param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),\n eg~ to_bytes, to_text, to_hex, to_int, etc\n @param text_or_primitive in bytes, str, or int.\n '''\n if isinstance(hexstr_or_primitive, str):\n (primitive, hexstr) = (None, hexstr_or_primitive)\n if remove_0x_prefix(hexstr) and not is_hex(hexstr):\n raise ValueError(\n \"when sending a str, it must be a hex string. Got: {0!r}\".format(\n hexstr_or_primitive,\n )\n )\n else:\n (primitive, hexstr) = (hexstr_or_primitive, None)\n return to_type(primitive, hexstr=hexstr)\n\n\nclass FriendlyJsonSerde:\n '''\n Friendly JSON serializer & deserializer\n\n When encoding or decoding fails, this class collects\n information on which fields failed, to show more\n helpful information in the raised error messages.\n '''\n def _json_mapping_errors(self, mapping):\n for key, val in mapping.items():\n try:\n self._friendly_json_encode(val)\n except TypeError as exc:\n yield \"%r: because (%s)\" % (key, exc)\n\n def _json_list_errors(self, iterable):\n for index, element in enumerate(iterable):\n try:\n self._friendly_json_encode(element)\n except TypeError as exc:\n yield \"%d: because (%s)\" % (index, exc)\n\n def _friendly_json_encode(self, obj):\n try:\n encoded = json.dumps(obj)\n return encoded\n except TypeError as full_exception:\n if hasattr(obj, 'items'):\n item_errors = '; '.join(self._json_mapping_errors(obj))\n raise TypeError(\"dict had unencodable value at keys: {{{}}}\".format(item_errors))\n elif is_list_like(obj):\n element_errors = '; '.join(self._json_list_errors(obj))\n raise TypeError(\"list had unencodable value at index: [{}]\".format(element_errors))\n else:\n raise full_exception\n\n def json_decode(self, json_str):\n try:\n decoded = json.loads(json_str)\n return decoded\n except json.decoder.JSONDecodeError as exc:\n err_msg = 'Could not decode {} because of {}.'.format(repr(json_str), exc)\n # Calling code may rely on catching JSONDecodeError to recognize bad json\n # so we have to re-raise the same type.\n raise json.decoder.JSONDecodeError(err_msg, exc.doc, exc.pos)\n\n def json_encode(self, obj):\n try:\n return self._friendly_json_encode(obj)\n except TypeError as exc:\n raise TypeError(\"Could not encode to JSON: {}\".format(exc))\n\n\ndef to_4byte_hex(hex_or_str_or_bytes):\n size_of_4bytes = 4 * 8\n byte_str = hexstr_if_str(to_bytes, hex_or_str_or_bytes)\n if len(byte_str) > 4:\n raise ValueError(\n 'expected value of size 4 bytes. Got: %d bytes' % len(byte_str)\n )\n hex_str = encode_hex(byte_str)\n return pad_hex(hex_str, size_of_4bytes)\n", "path": "web3/utils/encoding.py"}]}
3,199
152
gh_patches_debug_9633
rasdani/github-patches
git_diff
acl-org__acl-anthology-433
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create shortened volume links @mjpost https://aclweb.org/anthology/volumes/sem-2012-the-first-joint-conference-on-lexical-and-computational-semantics-volume-1-proceedings-of-the-main-conference-and-the-shared-task-and-volume-2-proceedings-of-the-sixth-international-workshop-on-semantic-evaluation-semeval-2012/ The URL of the PDF is: https://www.aclweb.org/anthology/S12-1 For a paper: https://aclweb.org/anthology/papers/S/S12/S12-1000/ (the title is the same, but the content not). The PDF: https://www.aclweb.org/anthology/S12-1000 It would be much easier to reference the first link if it was https://aclweb.org/anthology/papers/S/S12/S12-1/. _Originally posted by @Aspie96 in https://github.com/acl-org/acl-anthology/issues/170#issuecomment-504191669_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bin/create_hugo_pages.py` Content: ``` 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2019 Marcel Bollmann <marcel@bollmann.me> 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 18 """Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug] 19 20 Creates page stubs for the full anthology based on the YAML data files. 21 22 This script can only be run after create_hugo_yaml.py! 23 24 Options: 25 --dir=DIR Hugo project directory. [default: {scriptdir}/../build/] 26 --debug Output debug-level log messages. 27 -c, --clean Delete existing files in target directory before generation. 28 -h, --help Display this helpful text. 29 """ 30 31 from docopt import docopt 32 from glob import glob 33 from slugify import slugify 34 from tqdm import tqdm 35 import logging as log 36 import os 37 import shutil 38 import yaml 39 40 try: 41 from yaml import CLoader as Loader 42 except ImportError: 43 from yaml import Loader 44 45 from anthology.utils import SeverityTracker 46 47 48 def check_directory(cdir, clean=False): 49 if not os.path.isdir(cdir) and not os.path.exists(cdir): 50 os.mkdir(cdir) 51 return True 52 entries = os.listdir(cdir) 53 if "_index.md" in entries: 54 entries.remove("_index.md") 55 if entries and not clean: 56 log.critical("Directory already exists and has content files: {}".format(cdir)) 57 log.info( 58 "Call this script with the -c/--clean flag to automatically DELETE existing files" 59 ) 60 return False 61 for entry in entries: 62 entry = "{}/{}".format(cdir, entry) 63 if os.path.isdir(entry): 64 shutil.rmtree(entry) 65 else: 66 os.remove(entry) 67 return True 68 69 70 def create_papers(srcdir, clean=False): 71 """Creates page stubs for all papers in the Anthology.""" 72 log.info("Creating stubs for papers...") 73 if not check_directory("{}/content/papers".format(srcdir), clean=clean): 74 return 75 76 # Go through all paper volumes 77 for yamlfile in tqdm(glob("{}/data/papers/*.yaml".format(srcdir))): 78 log.debug("Processing {}".format(yamlfile)) 79 with open(yamlfile, "r") as f: 80 data = yaml.load(f, Loader=Loader) 81 # Create a paper stub for each entry in the volume 82 for anthology_id, entry in data.items(): 83 paper_dir = "{}/content/papers/{}/{}".format( 84 srcdir, anthology_id[0], anthology_id[:3] 85 ) 86 if not os.path.exists(paper_dir): 87 os.makedirs(paper_dir) 88 with open("{}/{}.md".format(paper_dir, anthology_id), "w") as f: 89 print("---", file=f) 90 yaml.dump( 91 {"anthology_id": anthology_id, "title": entry["title"]}, 92 default_flow_style=False, 93 stream=f, 94 ) 95 print("---", file=f) 96 97 98 def create_volumes(srcdir, clean=False): 99 """Creates page stubs for all proceedings volumes in the Anthology.""" 100 log.info("Creating stubs for volumes...") 101 if not check_directory("{}/content/volumes".format(srcdir), clean=clean): 102 return 103 104 yamlfile = "{}/data/volumes.yaml".format(srcdir) 105 log.debug("Processing {}".format(yamlfile)) 106 with open(yamlfile, "r") as f: 107 data = yaml.load(f, Loader=Loader) 108 # Create a paper stub for each proceedings volume 109 for anthology_id, entry in data.items(): 110 with open("{}/content/volumes/{}.md".format(srcdir, anthology_id), "w") as f: 111 print("---", file=f) 112 yaml.dump( 113 { 114 "anthology_id": anthology_id, 115 "title": entry["title"], 116 "slug": slugify(entry["title"]), 117 }, 118 default_flow_style=False, 119 stream=f, 120 ) 121 print("---", file=f) 122 123 return data 124 125 126 def create_people(srcdir, clean=False): 127 """Creates page stubs for all authors/editors in the Anthology.""" 128 log.info("Creating stubs for people...") 129 if not check_directory("{}/content/people".format(srcdir), clean=clean): 130 return 131 132 for yamlfile in tqdm(glob("{}/data/people/*.yaml".format(srcdir))): 133 log.debug("Processing {}".format(yamlfile)) 134 with open(yamlfile, "r") as f: 135 data = yaml.load(f, Loader=Loader) 136 # Create a page stub for each person 137 for name, entry in data.items(): 138 person_dir = "{}/content/people/{}".format(srcdir, name[0]) 139 if not os.path.exists(person_dir): 140 os.makedirs(person_dir) 141 yaml_data = { 142 "name": name, 143 "title": entry["full"], 144 "lastname": entry["last"], 145 } 146 with open("{}/{}.md".format(person_dir, name), "w") as f: 147 print("---", file=f) 148 # "lastname" is dumped to allow sorting by it in Hugo 149 yaml.dump(yaml_data, default_flow_style=False, stream=f) 150 print("---", file=f) 151 152 return data 153 154 155 def create_venues_and_events(srcdir, clean=False): 156 """Creates page stubs for all venues and events in the Anthology.""" 157 yamlfile = "{}/data/venues.yaml".format(srcdir) 158 log.debug("Processing {}".format(yamlfile)) 159 with open(yamlfile, "r") as f: 160 data = yaml.load(f, Loader=Loader) 161 162 log.info("Creating stubs for venues...") 163 if not check_directory("{}/content/venues".format(srcdir), clean=clean): 164 return 165 # Create a paper stub for each venue (e.g. ACL) 166 for venue, venue_data in data.items(): 167 venue_str = venue_data["slug"] 168 with open("{}/content/venues/{}.md".format(srcdir, venue_str), "w") as f: 169 print("---", file=f) 170 yaml_data = {"venue": venue, "title": venue_data["name"]} 171 if venue_data["is_toplevel"]: 172 main_letter = venue_data["main_letter"] 173 yaml_data["aliases"] = ["/papers/{}/".format(main_letter)] 174 yaml.dump(yaml_data, default_flow_style=False, stream=f) 175 print("---", file=f) 176 177 log.info("Creating stubs for events...") 178 if not check_directory("{}/content/events".format(srcdir), clean=clean): 179 return 180 # Create a paper stub for each event (= venue + year, e.g. ACL 2018) 181 for venue, venue_data in data.items(): 182 venue_str = venue_data["slug"] 183 for year in venue_data["volumes_by_year"]: 184 with open( 185 "{}/content/events/{}-{}.md".format(srcdir, venue_str, year), "w" 186 ) as f: 187 print("---", file=f) 188 yaml_data = { 189 "venue": venue, 190 "year": year, 191 "title": "{} ({})".format(venue_data["name"], year), 192 } 193 if venue_data["is_toplevel"]: 194 main_letter = venue_data["main_letter"] 195 main_prefix = main_letter + year[-2:] # e.g., P05 196 yaml_data["aliases"] = [ 197 "/papers/{}/{}/".format(main_letter, main_prefix) 198 ] 199 yaml.dump(yaml_data, default_flow_style=False, stream=f) 200 print("---", file=f) 201 202 203 def create_sigs(srcdir, clean=False): 204 """Creates page stubs for all SIGs in the Anthology.""" 205 yamlfile = "{}/data/sigs.yaml".format(srcdir) 206 log.debug("Processing {}".format(yamlfile)) 207 with open(yamlfile, "r") as f: 208 data = yaml.load(f, Loader=Loader) 209 210 log.info("Creating stubs for SIGs...") 211 if not check_directory("{}/content/sigs".format(srcdir), clean=clean): 212 return 213 # Create a paper stub for each SIGS (e.g. SIGMORPHON) 214 for sig, sig_data in data.items(): 215 sig_str = sig_data["slug"] 216 with open("{}/content/sigs/{}.md".format(srcdir, sig_str), "w") as f: 217 print("---", file=f) 218 yaml.dump( 219 { 220 "acronym": sig, 221 "short_acronym": sig[3:] if sig.startswith("SIG") else sig, 222 "title": sig_data["name"], 223 }, 224 default_flow_style=False, 225 stream=f, 226 ) 227 print("---", file=f) 228 229 230 if __name__ == "__main__": 231 args = docopt(__doc__) 232 scriptdir = os.path.dirname(os.path.abspath(__file__)) 233 if "{scriptdir}" in args["--dir"]: 234 args["--dir"] = args["--dir"].format(scriptdir=scriptdir) 235 dir_ = os.path.abspath(args["--dir"]) 236 237 log_level = log.DEBUG if args["--debug"] else log.INFO 238 log.basicConfig(format="%(levelname)-8s %(message)s", level=log_level) 239 tracker = SeverityTracker() 240 log.getLogger().addHandler(tracker) 241 242 create_papers(dir_, clean=args["--clean"]) 243 create_volumes(dir_, clean=args["--clean"]) 244 create_people(dir_, clean=args["--clean"]) 245 create_venues_and_events(dir_, clean=args["--clean"]) 246 create_sigs(dir_, clean=args["--clean"]) 247 248 if tracker.highest >= log.ERROR: 249 exit(1) 250 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bin/create_hugo_pages.py b/bin/create_hugo_pages.py --- a/bin/create_hugo_pages.py +++ b/bin/create_hugo_pages.py @@ -113,7 +113,12 @@ { "anthology_id": anthology_id, "title": entry["title"], - "slug": slugify(entry["title"]), + "aliases": [ + slugify(entry["title"]), + "/papers/{}/{}/{}/".format( + anthology_id[0], anthology_id[:3], anthology_id + ), + ], }, default_flow_style=False, stream=f,
{"golden_diff": "diff --git a/bin/create_hugo_pages.py b/bin/create_hugo_pages.py\n--- a/bin/create_hugo_pages.py\n+++ b/bin/create_hugo_pages.py\n@@ -113,7 +113,12 @@\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n- \"slug\": slugify(entry[\"title\"]),\n+ \"aliases\": [\n+ slugify(entry[\"title\"]),\n+ \"/papers/{}/{}/{}/\".format(\n+ anthology_id[0], anthology_id[:3], anthology_id\n+ ),\n+ ],\n },\n default_flow_style=False,\n stream=f,\n", "issue": "Create shortened volume links\n@mjpost https://aclweb.org/anthology/volumes/sem-2012-the-first-joint-conference-on-lexical-and-computational-semantics-volume-1-proceedings-of-the-main-conference-and-the-shared-task-and-volume-2-proceedings-of-the-sixth-international-workshop-on-semantic-evaluation-semeval-2012/\r\n\r\nThe URL of the PDF is:\r\nhttps://www.aclweb.org/anthology/S12-1\r\n\r\nFor a paper:\r\nhttps://aclweb.org/anthology/papers/S/S12/S12-1000/\r\n(the title is the same, but the content not).\r\n\r\nThe PDF:\r\nhttps://www.aclweb.org/anthology/S12-1000\r\n\r\nIt would be much easier to reference the first link if it was https://aclweb.org/anthology/papers/S/S12/S12-1/.\r\n\r\n_Originally posted by @Aspie96 in https://github.com/acl-org/acl-anthology/issues/170#issuecomment-504191669_\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <marcel@bollmann.me>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]\n\nCreates page stubs for the full anthology based on the YAML data files.\n\nThis script can only be run after create_hugo_yaml.py!\n\nOptions:\n --dir=DIR Hugo project directory. [default: {scriptdir}/../build/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom glob import glob\nfrom slugify import slugify\nfrom tqdm import tqdm\nimport logging as log\nimport os\nimport shutil\nimport yaml\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nfrom anthology.utils import SeverityTracker\n\n\ndef check_directory(cdir, clean=False):\n if not os.path.isdir(cdir) and not os.path.exists(cdir):\n os.mkdir(cdir)\n return True\n entries = os.listdir(cdir)\n if \"_index.md\" in entries:\n entries.remove(\"_index.md\")\n if entries and not clean:\n log.critical(\"Directory already exists and has content files: {}\".format(cdir))\n log.info(\n \"Call this script with the -c/--clean flag to automatically DELETE existing files\"\n )\n return False\n for entry in entries:\n entry = \"{}/{}\".format(cdir, entry)\n if os.path.isdir(entry):\n shutil.rmtree(entry)\n else:\n os.remove(entry)\n return True\n\n\ndef create_papers(srcdir, clean=False):\n \"\"\"Creates page stubs for all papers in the Anthology.\"\"\"\n log.info(\"Creating stubs for papers...\")\n if not check_directory(\"{}/content/papers\".format(srcdir), clean=clean):\n return\n\n # Go through all paper volumes\n for yamlfile in tqdm(glob(\"{}/data/papers/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each entry in the volume\n for anthology_id, entry in data.items():\n paper_dir = \"{}/content/papers/{}/{}\".format(\n srcdir, anthology_id[0], anthology_id[:3]\n )\n if not os.path.exists(paper_dir):\n os.makedirs(paper_dir)\n with open(\"{}/{}.md\".format(paper_dir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\"anthology_id\": anthology_id, \"title\": entry[\"title\"]},\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\ndef create_volumes(srcdir, clean=False):\n \"\"\"Creates page stubs for all proceedings volumes in the Anthology.\"\"\"\n log.info(\"Creating stubs for volumes...\")\n if not check_directory(\"{}/content/volumes\".format(srcdir), clean=clean):\n return\n\n yamlfile = \"{}/data/volumes.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each proceedings volume\n for anthology_id, entry in data.items():\n with open(\"{}/content/volumes/{}.md\".format(srcdir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n \"slug\": slugify(entry[\"title\"]),\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n return data\n\n\ndef create_people(srcdir, clean=False):\n \"\"\"Creates page stubs for all authors/editors in the Anthology.\"\"\"\n log.info(\"Creating stubs for people...\")\n if not check_directory(\"{}/content/people\".format(srcdir), clean=clean):\n return\n\n for yamlfile in tqdm(glob(\"{}/data/people/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a page stub for each person\n for name, entry in data.items():\n person_dir = \"{}/content/people/{}\".format(srcdir, name[0])\n if not os.path.exists(person_dir):\n os.makedirs(person_dir)\n yaml_data = {\n \"name\": name,\n \"title\": entry[\"full\"],\n \"lastname\": entry[\"last\"],\n }\n with open(\"{}/{}.md\".format(person_dir, name), \"w\") as f:\n print(\"---\", file=f)\n # \"lastname\" is dumped to allow sorting by it in Hugo\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n return data\n\n\ndef create_venues_and_events(srcdir, clean=False):\n \"\"\"Creates page stubs for all venues and events in the Anthology.\"\"\"\n yamlfile = \"{}/data/venues.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for venues...\")\n if not check_directory(\"{}/content/venues\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each venue (e.g. ACL)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n with open(\"{}/content/venues/{}.md\".format(srcdir, venue_str), \"w\") as f:\n print(\"---\", file=f)\n yaml_data = {\"venue\": venue, \"title\": venue_data[\"name\"]}\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n yaml_data[\"aliases\"] = [\"/papers/{}/\".format(main_letter)]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n log.info(\"Creating stubs for events...\")\n if not check_directory(\"{}/content/events\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each event (= venue + year, e.g. ACL 2018)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n for year in venue_data[\"volumes_by_year\"]:\n with open(\n \"{}/content/events/{}-{}.md\".format(srcdir, venue_str, year), \"w\"\n ) as f:\n print(\"---\", file=f)\n yaml_data = {\n \"venue\": venue,\n \"year\": year,\n \"title\": \"{} ({})\".format(venue_data[\"name\"], year),\n }\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n main_prefix = main_letter + year[-2:] # e.g., P05\n yaml_data[\"aliases\"] = [\n \"/papers/{}/{}/\".format(main_letter, main_prefix)\n ]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n\ndef create_sigs(srcdir, clean=False):\n \"\"\"Creates page stubs for all SIGs in the Anthology.\"\"\"\n yamlfile = \"{}/data/sigs.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for SIGs...\")\n if not check_directory(\"{}/content/sigs\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each SIGS (e.g. SIGMORPHON)\n for sig, sig_data in data.items():\n sig_str = sig_data[\"slug\"]\n with open(\"{}/content/sigs/{}.md\".format(srcdir, sig_str), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"acronym\": sig,\n \"short_acronym\": sig[3:] if sig.startswith(\"SIG\") else sig,\n \"title\": sig_data[\"name\"],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--dir\"]:\n args[\"--dir\"] = args[\"--dir\"].format(scriptdir=scriptdir)\n dir_ = os.path.abspath(args[\"--dir\"])\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n create_papers(dir_, clean=args[\"--clean\"])\n create_volumes(dir_, clean=args[\"--clean\"])\n create_people(dir_, clean=args[\"--clean\"])\n create_venues_and_events(dir_, clean=args[\"--clean\"])\n create_sigs(dir_, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_hugo_pages.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <marcel@bollmann.me>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]\n\nCreates page stubs for the full anthology based on the YAML data files.\n\nThis script can only be run after create_hugo_yaml.py!\n\nOptions:\n --dir=DIR Hugo project directory. [default: {scriptdir}/../build/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom glob import glob\nfrom slugify import slugify\nfrom tqdm import tqdm\nimport logging as log\nimport os\nimport shutil\nimport yaml\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nfrom anthology.utils import SeverityTracker\n\n\ndef check_directory(cdir, clean=False):\n if not os.path.isdir(cdir) and not os.path.exists(cdir):\n os.mkdir(cdir)\n return True\n entries = os.listdir(cdir)\n if \"_index.md\" in entries:\n entries.remove(\"_index.md\")\n if entries and not clean:\n log.critical(\"Directory already exists and has content files: {}\".format(cdir))\n log.info(\n \"Call this script with the -c/--clean flag to automatically DELETE existing files\"\n )\n return False\n for entry in entries:\n entry = \"{}/{}\".format(cdir, entry)\n if os.path.isdir(entry):\n shutil.rmtree(entry)\n else:\n os.remove(entry)\n return True\n\n\ndef create_papers(srcdir, clean=False):\n \"\"\"Creates page stubs for all papers in the Anthology.\"\"\"\n log.info(\"Creating stubs for papers...\")\n if not check_directory(\"{}/content/papers\".format(srcdir), clean=clean):\n return\n\n # Go through all paper volumes\n for yamlfile in tqdm(glob(\"{}/data/papers/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each entry in the volume\n for anthology_id, entry in data.items():\n paper_dir = \"{}/content/papers/{}/{}\".format(\n srcdir, anthology_id[0], anthology_id[:3]\n )\n if not os.path.exists(paper_dir):\n os.makedirs(paper_dir)\n with open(\"{}/{}.md\".format(paper_dir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\"anthology_id\": anthology_id, \"title\": entry[\"title\"]},\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\ndef create_volumes(srcdir, clean=False):\n \"\"\"Creates page stubs for all proceedings volumes in the Anthology.\"\"\"\n log.info(\"Creating stubs for volumes...\")\n if not check_directory(\"{}/content/volumes\".format(srcdir), clean=clean):\n return\n\n yamlfile = \"{}/data/volumes.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each proceedings volume\n for anthology_id, entry in data.items():\n with open(\"{}/content/volumes/{}.md\".format(srcdir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n \"aliases\": [\n slugify(entry[\"title\"]),\n \"/papers/{}/{}/{}/\".format(\n anthology_id[0], anthology_id[:3], anthology_id\n ),\n ],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n return data\n\n\ndef create_people(srcdir, clean=False):\n \"\"\"Creates page stubs for all authors/editors in the Anthology.\"\"\"\n log.info(\"Creating stubs for people...\")\n if not check_directory(\"{}/content/people\".format(srcdir), clean=clean):\n return\n\n for yamlfile in tqdm(glob(\"{}/data/people/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a page stub for each person\n for name, entry in data.items():\n person_dir = \"{}/content/people/{}\".format(srcdir, name[0])\n if not os.path.exists(person_dir):\n os.makedirs(person_dir)\n yaml_data = {\n \"name\": name,\n \"title\": entry[\"full\"],\n \"lastname\": entry[\"last\"],\n }\n with open(\"{}/{}.md\".format(person_dir, name), \"w\") as f:\n print(\"---\", file=f)\n # \"lastname\" is dumped to allow sorting by it in Hugo\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n return data\n\n\ndef create_venues_and_events(srcdir, clean=False):\n \"\"\"Creates page stubs for all venues and events in the Anthology.\"\"\"\n yamlfile = \"{}/data/venues.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for venues...\")\n if not check_directory(\"{}/content/venues\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each venue (e.g. ACL)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n with open(\"{}/content/venues/{}.md\".format(srcdir, venue_str), \"w\") as f:\n print(\"---\", file=f)\n yaml_data = {\"venue\": venue, \"title\": venue_data[\"name\"]}\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n yaml_data[\"aliases\"] = [\"/papers/{}/\".format(main_letter)]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n log.info(\"Creating stubs for events...\")\n if not check_directory(\"{}/content/events\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each event (= venue + year, e.g. ACL 2018)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n for year in venue_data[\"volumes_by_year\"]:\n with open(\n \"{}/content/events/{}-{}.md\".format(srcdir, venue_str, year), \"w\"\n ) as f:\n print(\"---\", file=f)\n yaml_data = {\n \"venue\": venue,\n \"year\": year,\n \"title\": \"{} ({})\".format(venue_data[\"name\"], year),\n }\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n main_prefix = main_letter + year[-2:] # e.g., P05\n yaml_data[\"aliases\"] = [\n \"/papers/{}/{}/\".format(main_letter, main_prefix)\n ]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n\ndef create_sigs(srcdir, clean=False):\n \"\"\"Creates page stubs for all SIGs in the Anthology.\"\"\"\n yamlfile = \"{}/data/sigs.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for SIGs...\")\n if not check_directory(\"{}/content/sigs\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each SIGS (e.g. SIGMORPHON)\n for sig, sig_data in data.items():\n sig_str = sig_data[\"slug\"]\n with open(\"{}/content/sigs/{}.md\".format(srcdir, sig_str), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"acronym\": sig,\n \"short_acronym\": sig[3:] if sig.startswith(\"SIG\") else sig,\n \"title\": sig_data[\"name\"],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--dir\"]:\n args[\"--dir\"] = args[\"--dir\"].format(scriptdir=scriptdir)\n dir_ = os.path.abspath(args[\"--dir\"])\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n create_papers(dir_, clean=args[\"--clean\"])\n create_volumes(dir_, clean=args[\"--clean\"])\n create_people(dir_, clean=args[\"--clean\"])\n create_venues_and_events(dir_, clean=args[\"--clean\"])\n create_sigs(dir_, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_hugo_pages.py"}]}
3,330
139
gh_patches_debug_19359
rasdani/github-patches
git_diff
fedora-infra__bodhi-4115
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add the possibility to query updates by releases in graphql We currently have a getUpdates query in graphql that let us query updates using certain attributes, we should add the possibility to give a release name and get all the updates from a release. For example ```query {getUpdates(releaseName: "F33") {alias}}``` For this we will most likely need to use a join query in the graphlq resolve function (https://github.com/fedora-infra/bodhi/blob/develop/bodhi/server/services/graphql.py#L132) to exploit the relationship between Updates and release. Some hints https://stackoverflow.com/questions/8561470/sqlalchemy-filtering-by-relationship-attribute --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bodhi/server/services/graphql.py` Content: ``` 1 # Copyright © 2020 Red Hat Inc., and others. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program; if not, write to the Free Software 17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 """Defines API endpoints related to GraphQL objects.""" 19 import graphene 20 from cornice import Service 21 from webob_graphql import serve_graphql_request 22 23 from bodhi.server.config import config 24 from bodhi.server.graphql_schemas import Release, ReleaseModel, Update, UpdateModel 25 26 graphql = Service(name='graphql', path='/graphql', description='graphql service') 27 28 29 @graphql.get() 30 @graphql.post() 31 def graphql_get(request): 32 """ 33 Perform a GET request. 34 35 Args: 36 request (pyramid.Request): The current request. 37 Returns: 38 The GraphQL response to the request. 39 """ 40 context = {'session': request.session} 41 return serve_graphql_request( 42 request, schema, graphiql_enabled=config.get('graphiql_enabled'), 43 context_value=context) 44 45 46 class Query(graphene.ObjectType): 47 """Allow querying objects.""" 48 49 allReleases = graphene.List(Release) 50 getReleases = graphene.Field( 51 lambda: graphene.List(Release), name=graphene.String(), 52 id_prefix=graphene.String(), composed_by_bodhi=graphene.Boolean(), 53 state=graphene.String()) 54 55 getUpdates = graphene.Field( 56 lambda: graphene.List(Update), stable_karma=graphene.Int(), 57 stable_days=graphene.Int(), unstable_karma=graphene.Int(), 58 status=graphene.String(), request=graphene.String(), 59 pushed=graphene.Boolean(), critpath=graphene.Boolean(), 60 date_approved=graphene.String(), alias=graphene.String(), 61 user_id=graphene.Int()) 62 63 def resolve_allReleases(self, info): 64 """Answer Queries by fetching data from the Schema.""" 65 query = Release.get_query(info) # SQLAlchemy query 66 return query.all() 67 68 def resolve_getReleases(self, info, **args): 69 """Answer Release queries with a given argument.""" 70 query = Release.get_query(info) 71 72 id_prefix = args.get("id_prefix") 73 if id_prefix is not None: 74 query = query.filter(ReleaseModel.id_prefix == id_prefix) 75 76 name = args.get("name") 77 if name is not None: 78 query = query.filter(ReleaseModel.name == name) 79 80 composed_by_bodhi = args.get("composed_by_bodhi") 81 if composed_by_bodhi is not None: 82 query = query.filter(ReleaseModel.composed_by_bodhi == composed_by_bodhi) 83 84 state = args.get("state") 85 if state is not None: 86 query = query.filter(ReleaseModel.state == state) 87 88 return query.all() 89 90 def resolve_getUpdates(self, info, **args): 91 """Answer Release queries with a given argument.""" 92 query = Update.get_query(info) 93 94 stable_karma = args.get("stable_karma") 95 if stable_karma is not None: 96 query = query.filter(UpdateModel.stable_karma == stable_karma) 97 98 stable_days = args.get("stable_days") 99 if stable_days is not None: 100 query = query.filter(UpdateModel.stable_days == stable_days) 101 102 unstable_karma = args.get("unstable_karma") 103 if unstable_karma is not None: 104 query = query.filter(UpdateModel.unstable_karma == unstable_karma) 105 106 status = args.get("status") 107 if status is not None: 108 query = query.filter(UpdateModel.status == status) 109 110 request = args.get("request") 111 if request is not None: 112 query = query.filter(UpdateModel.request == request) 113 114 pushed = args.get("pushed") 115 if pushed is not None: 116 query = query.filter(UpdateModel.pushed == pushed) 117 118 critpath = args.get("critpath") 119 if critpath is not None: 120 query = query.filter(UpdateModel.critpath == critpath) 121 122 date_approved = args.get("date_approved") 123 if date_approved is not None: 124 query = query.filter(UpdateModel.date_approved == date_approved) 125 126 alias = args.get("alias") 127 if alias is not None: 128 query = query.filter(UpdateModel.alias == alias) 129 130 user_id = args.get("user_id") 131 if user_id is not None: 132 query = query.filter(UpdateModel.user_id == user_id) 133 134 return query.all() 135 136 137 schema = graphene.Schema(query=Query) 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bodhi/server/services/graphql.py b/bodhi/server/services/graphql.py --- a/bodhi/server/services/graphql.py +++ b/bodhi/server/services/graphql.py @@ -58,7 +58,7 @@ status=graphene.String(), request=graphene.String(), pushed=graphene.Boolean(), critpath=graphene.Boolean(), date_approved=graphene.String(), alias=graphene.String(), - user_id=graphene.Int()) + user_id=graphene.Int(), release_name=graphene.String()) def resolve_allReleases(self, info): """Answer Queries by fetching data from the Schema.""" @@ -131,6 +131,10 @@ if user_id is not None: query = query.filter(UpdateModel.user_id == user_id) + release_name = args.get("release_name") + if release_name is not None: + query = query.join(UpdateModel.release).filter(ReleaseModel.name == release_name) + return query.all()
{"golden_diff": "diff --git a/bodhi/server/services/graphql.py b/bodhi/server/services/graphql.py\n--- a/bodhi/server/services/graphql.py\n+++ b/bodhi/server/services/graphql.py\n@@ -58,7 +58,7 @@\n status=graphene.String(), request=graphene.String(),\n pushed=graphene.Boolean(), critpath=graphene.Boolean(),\n date_approved=graphene.String(), alias=graphene.String(),\n- user_id=graphene.Int())\n+ user_id=graphene.Int(), release_name=graphene.String())\n \n def resolve_allReleases(self, info):\n \"\"\"Answer Queries by fetching data from the Schema.\"\"\"\n@@ -131,6 +131,10 @@\n if user_id is not None:\n query = query.filter(UpdateModel.user_id == user_id)\n \n+ release_name = args.get(\"release_name\")\n+ if release_name is not None:\n+ query = query.join(UpdateModel.release).filter(ReleaseModel.name == release_name)\n+\n return query.all()\n", "issue": "Add the possibility to query updates by releases in graphql\nWe currently have a getUpdates query in graphql that let us query updates using certain attributes, we should add the possibility to give a release name and get all the updates from a release.\r\n\r\nFor example ```query {getUpdates(releaseName: \"F33\") {alias}}```\r\n\r\nFor this we will most likely need to use a join query in the graphlq resolve function (https://github.com/fedora-infra/bodhi/blob/develop/bodhi/server/services/graphql.py#L132) to exploit the relationship between Updates and release.\r\n\r\nSome hints https://stackoverflow.com/questions/8561470/sqlalchemy-filtering-by-relationship-attribute\n", "before_files": [{"content": "# Copyright \u00a9 2020 Red Hat Inc., and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Defines API endpoints related to GraphQL objects.\"\"\"\nimport graphene\nfrom cornice import Service\nfrom webob_graphql import serve_graphql_request\n\nfrom bodhi.server.config import config\nfrom bodhi.server.graphql_schemas import Release, ReleaseModel, Update, UpdateModel\n\ngraphql = Service(name='graphql', path='/graphql', description='graphql service')\n\n\n@graphql.get()\n@graphql.post()\ndef graphql_get(request):\n \"\"\"\n Perform a GET request.\n\n Args:\n request (pyramid.Request): The current request.\n Returns:\n The GraphQL response to the request.\n \"\"\"\n context = {'session': request.session}\n return serve_graphql_request(\n request, schema, graphiql_enabled=config.get('graphiql_enabled'),\n context_value=context)\n\n\nclass Query(graphene.ObjectType):\n \"\"\"Allow querying objects.\"\"\"\n\n allReleases = graphene.List(Release)\n getReleases = graphene.Field(\n lambda: graphene.List(Release), name=graphene.String(),\n id_prefix=graphene.String(), composed_by_bodhi=graphene.Boolean(),\n state=graphene.String())\n\n getUpdates = graphene.Field(\n lambda: graphene.List(Update), stable_karma=graphene.Int(),\n stable_days=graphene.Int(), unstable_karma=graphene.Int(),\n status=graphene.String(), request=graphene.String(),\n pushed=graphene.Boolean(), critpath=graphene.Boolean(),\n date_approved=graphene.String(), alias=graphene.String(),\n user_id=graphene.Int())\n\n def resolve_allReleases(self, info):\n \"\"\"Answer Queries by fetching data from the Schema.\"\"\"\n query = Release.get_query(info) # SQLAlchemy query\n return query.all()\n\n def resolve_getReleases(self, info, **args):\n \"\"\"Answer Release queries with a given argument.\"\"\"\n query = Release.get_query(info)\n\n id_prefix = args.get(\"id_prefix\")\n if id_prefix is not None:\n query = query.filter(ReleaseModel.id_prefix == id_prefix)\n\n name = args.get(\"name\")\n if name is not None:\n query = query.filter(ReleaseModel.name == name)\n\n composed_by_bodhi = args.get(\"composed_by_bodhi\")\n if composed_by_bodhi is not None:\n query = query.filter(ReleaseModel.composed_by_bodhi == composed_by_bodhi)\n\n state = args.get(\"state\")\n if state is not None:\n query = query.filter(ReleaseModel.state == state)\n\n return query.all()\n\n def resolve_getUpdates(self, info, **args):\n \"\"\"Answer Release queries with a given argument.\"\"\"\n query = Update.get_query(info)\n\n stable_karma = args.get(\"stable_karma\")\n if stable_karma is not None:\n query = query.filter(UpdateModel.stable_karma == stable_karma)\n\n stable_days = args.get(\"stable_days\")\n if stable_days is not None:\n query = query.filter(UpdateModel.stable_days == stable_days)\n\n unstable_karma = args.get(\"unstable_karma\")\n if unstable_karma is not None:\n query = query.filter(UpdateModel.unstable_karma == unstable_karma)\n\n status = args.get(\"status\")\n if status is not None:\n query = query.filter(UpdateModel.status == status)\n\n request = args.get(\"request\")\n if request is not None:\n query = query.filter(UpdateModel.request == request)\n\n pushed = args.get(\"pushed\")\n if pushed is not None:\n query = query.filter(UpdateModel.pushed == pushed)\n\n critpath = args.get(\"critpath\")\n if critpath is not None:\n query = query.filter(UpdateModel.critpath == critpath)\n\n date_approved = args.get(\"date_approved\")\n if date_approved is not None:\n query = query.filter(UpdateModel.date_approved == date_approved)\n\n alias = args.get(\"alias\")\n if alias is not None:\n query = query.filter(UpdateModel.alias == alias)\n\n user_id = args.get(\"user_id\")\n if user_id is not None:\n query = query.filter(UpdateModel.user_id == user_id)\n\n return query.all()\n\n\nschema = graphene.Schema(query=Query)\n", "path": "bodhi/server/services/graphql.py"}], "after_files": [{"content": "# Copyright \u00a9 2020 Red Hat Inc., and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Defines API endpoints related to GraphQL objects.\"\"\"\nimport graphene\nfrom cornice import Service\nfrom webob_graphql import serve_graphql_request\n\nfrom bodhi.server.config import config\nfrom bodhi.server.graphql_schemas import Release, ReleaseModel, Update, UpdateModel\n\ngraphql = Service(name='graphql', path='/graphql', description='graphql service')\n\n\n@graphql.get()\n@graphql.post()\ndef graphql_get(request):\n \"\"\"\n Perform a GET request.\n\n Args:\n request (pyramid.Request): The current request.\n Returns:\n The GraphQL response to the request.\n \"\"\"\n context = {'session': request.session}\n return serve_graphql_request(\n request, schema, graphiql_enabled=config.get('graphiql_enabled'),\n context_value=context)\n\n\nclass Query(graphene.ObjectType):\n \"\"\"Allow querying objects.\"\"\"\n\n allReleases = graphene.List(Release)\n getReleases = graphene.Field(\n lambda: graphene.List(Release), name=graphene.String(),\n id_prefix=graphene.String(), composed_by_bodhi=graphene.Boolean(),\n state=graphene.String())\n\n getUpdates = graphene.Field(\n lambda: graphene.List(Update), stable_karma=graphene.Int(),\n stable_days=graphene.Int(), unstable_karma=graphene.Int(),\n status=graphene.String(), request=graphene.String(),\n pushed=graphene.Boolean(), critpath=graphene.Boolean(),\n date_approved=graphene.String(), alias=graphene.String(),\n user_id=graphene.Int(), release_name=graphene.String())\n\n def resolve_allReleases(self, info):\n \"\"\"Answer Queries by fetching data from the Schema.\"\"\"\n query = Release.get_query(info) # SQLAlchemy query\n return query.all()\n\n def resolve_getReleases(self, info, **args):\n \"\"\"Answer Release queries with a given argument.\"\"\"\n query = Release.get_query(info)\n\n id_prefix = args.get(\"id_prefix\")\n if id_prefix is not None:\n query = query.filter(ReleaseModel.id_prefix == id_prefix)\n\n name = args.get(\"name\")\n if name is not None:\n query = query.filter(ReleaseModel.name == name)\n\n composed_by_bodhi = args.get(\"composed_by_bodhi\")\n if composed_by_bodhi is not None:\n query = query.filter(ReleaseModel.composed_by_bodhi == composed_by_bodhi)\n\n state = args.get(\"state\")\n if state is not None:\n query = query.filter(ReleaseModel.state == state)\n\n return query.all()\n\n def resolve_getUpdates(self, info, **args):\n \"\"\"Answer Release queries with a given argument.\"\"\"\n query = Update.get_query(info)\n\n stable_karma = args.get(\"stable_karma\")\n if stable_karma is not None:\n query = query.filter(UpdateModel.stable_karma == stable_karma)\n\n stable_days = args.get(\"stable_days\")\n if stable_days is not None:\n query = query.filter(UpdateModel.stable_days == stable_days)\n\n unstable_karma = args.get(\"unstable_karma\")\n if unstable_karma is not None:\n query = query.filter(UpdateModel.unstable_karma == unstable_karma)\n\n status = args.get(\"status\")\n if status is not None:\n query = query.filter(UpdateModel.status == status)\n\n request = args.get(\"request\")\n if request is not None:\n query = query.filter(UpdateModel.request == request)\n\n pushed = args.get(\"pushed\")\n if pushed is not None:\n query = query.filter(UpdateModel.pushed == pushed)\n\n critpath = args.get(\"critpath\")\n if critpath is not None:\n query = query.filter(UpdateModel.critpath == critpath)\n\n date_approved = args.get(\"date_approved\")\n if date_approved is not None:\n query = query.filter(UpdateModel.date_approved == date_approved)\n\n alias = args.get(\"alias\")\n if alias is not None:\n query = query.filter(UpdateModel.alias == alias)\n\n user_id = args.get(\"user_id\")\n if user_id is not None:\n query = query.filter(UpdateModel.user_id == user_id)\n\n release_name = args.get(\"release_name\")\n if release_name is not None:\n query = query.join(UpdateModel.release).filter(ReleaseModel.name == release_name)\n\n return query.all()\n\n\nschema = graphene.Schema(query=Query)\n", "path": "bodhi/server/services/graphql.py"}]}
1,827
224
gh_patches_debug_6998
rasdani/github-patches
git_diff
microsoft__hi-ml-80
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Handle the "v" in version numbering Our code in `setup.py` will trigger with new tags. `setuptools.setup` will reject tags that are not release versions but we could do more to make that explicit by checking for the leading "v". Also when we tag releases as, say, "v0.1.1" the leading "v" is carried through `setuptools.setup` so it becomes part of the pip test download > Successfully installed pip-21.2.4 > Collecting hi-ml==v0.1.0 > Downloading hi_ml-0.1.0-py3-none-any.whl (25 kB) (from [here](https://github.com/microsoft/hi-ml/runs/3362573497?check_suite_focus=true#step:6:29)) This works, but it would be cleaner to submit the version number using the public version identifier format mandated in [PEP 440](https://www.python.org/dev/peps/pep-0440/#public-version-identifiers), i.e. without the leading "v" --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # ------------------------------------------------------------------------------------------ 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. 4 # ------------------------------------------------------------------------------------------ 5 6 """A setuptools based setup module. 7 8 See: 9 https://packaging.python.org/guides/distributing-packages-using-setuptools/ 10 """ 11 12 import os 13 from math import floor 14 import pathlib 15 from random import random 16 from setuptools import setup, find_packages # type: ignore 17 18 19 here = pathlib.Path(__file__).parent.resolve() 20 21 # Get the long description from the README file 22 long_description = (here / 'README.md').read_text(encoding='utf-8') 23 24 version = '' 25 26 # If running from a GitHub Action then a standard set of environment variables will be 27 # populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables). 28 # In particular, GITHUB_REF is the branch or tag ref that triggered the workflow. 29 # If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'. 30 # Extract this tag and use it as a version string 31 # See also: 32 # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ 33 # https://github.com/pypa/gh-action-pypi-publish 34 GITHUB_REF_TAG_COMMIT = 'refs/tags/' 35 36 github_ref = os.getenv('GITHUB_REF') 37 if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT): 38 version = github_ref[len(GITHUB_REF_TAG_COMMIT):] 39 40 # Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated. 41 # Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be 42 # '0.1.2.post124'. Although this is discouraged, see: 43 # https://www.python.org/dev/peps/pep-0440/#post-releases 44 # it is necessary here to avoid duplicate packages in Test.PyPI. 45 if not version: 46 # TODO: Replace this with more principled package version management for the package wheels built during local test 47 # runs, one which circumvents AzureML's apparent package caching: 48 build_number = os.getenv('GITHUB_RUN_NUMBER') 49 if build_number: 50 version = '0.1.0.post' + build_number 51 else: 52 default_random_version_number = floor(random() * 10_000_000_000) 53 version = f'0.1.0.post{str(default_random_version_number)}' 54 55 (here / 'latest_version.txt').write_text(version) 56 57 # Read run_requirements.txt to get install_requires 58 install_requires = (here / 'run_requirements.txt').read_text().split("\n") 59 # Remove any whitespace and blank lines 60 install_requires = [line.strip() for line in install_requires if line.strip()] 61 62 description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace' 63 64 setup( 65 name='hi-ml', 66 version=version, 67 description=description, 68 long_description=long_description, 69 long_description_content_type='text/markdown', 70 url='https://github.com/microsoft/hi-ml', 71 author="Microsoft Research Cambridge InnerEye Team ", 72 author_email="innereyedev@microsoft.com", 73 classifiers=[ 74 'Development Status :: 3 - Alpha', 75 'Intended Audience :: Science/Research', 76 "Topic :: Scientific/Engineering :: Medical Science Apps.", 77 'License :: OSI Approved :: MIT License', 78 'Programming Language :: Python :: 3.7' 79 ], 80 keywords='InnerEye, HealthIntelligence, AzureML', 81 license='MIT License', 82 packages=find_packages(where="src"), 83 package_dir={"": "src"}, 84 include_package_data=True, 85 install_requires=install_requires, 86 scripts=['src/health/azure/run_tensorboard.py'] 87 ) 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ # See also: # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ # https://github.com/pypa/gh-action-pypi-publish -GITHUB_REF_TAG_COMMIT = 'refs/tags/' +GITHUB_REF_TAG_COMMIT = 'refs/tags/v' github_ref = os.getenv('GITHUB_REF') if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,7 +31,7 @@\n # See also:\n # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n # https://github.com/pypa/gh-action-pypi-publish\n-GITHUB_REF_TAG_COMMIT = 'refs/tags/'\n+GITHUB_REF_TAG_COMMIT = 'refs/tags/v'\n \n github_ref = os.getenv('GITHUB_REF')\n if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n", "issue": "Handle the \"v\" in version numbering \nOur code in `setup.py` will trigger with new tags. `setuptools.setup` will reject tags that are not release versions but we could do more to make that explicit by checking for the leading \"v\".\r\n\r\nAlso when we tag releases as, say, \"v0.1.1\" the leading \"v\" is carried through `setuptools.setup` so it becomes part of the pip test download\r\n\r\n> Successfully installed pip-21.2.4\r\n> Collecting hi-ml==v0.1.0\r\n> Downloading hi_ml-0.1.0-py3-none-any.whl (25 kB)\r\n\r\n(from [here](https://github.com/microsoft/hi-ml/runs/3362573497?check_suite_focus=true#step:6:29))\r\n\r\nThis works, but it would be cleaner to submit the version number using the public version identifier format mandated in [PEP 440](https://www.python.org/dev/peps/pep-0440/#public-version-identifiers), i.e. without the leading \"v\"\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\ndescription = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n\nsetup(\n name='hi-ml',\n version=version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"innereyedev@microsoft.com\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}], "after_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/v'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\ndescription = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n\nsetup(\n name='hi-ml',\n version=version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"innereyedev@microsoft.com\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}]}
1,509
125
gh_patches_debug_9036
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-9110
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- aws.secrets-manager:tag action tries to retag secret with existing tags as well ### Describe the bug I have a policy that checks all AWS Secret Manager secrets for the existence of two particular tags. If the secret is missing either of these tags, it tries to makr (i.e. tag) the secret for later action. However, Custodian fails to apply its marking tag with the following error: 2023-10-18 14:25:20,524 - custodian.policy - INFO - policy:secrets-manager-secret-mark-untagged resource:secrets-manager region:us-west-2 count:2 time:0.03 2023-10-18 14:25:20,525 - custodian.actions - INFO - Tagging 2 resources for notify on 2023/10/19 2023-10-18 14:25:21,519 - custodian.actions - ERROR - Exception with tags: [{'Key': 'c7n-secrets-manager-secret-untagged', 'Value': 'Resource does not meet policy: notify@2023/10/19'}] An error occurred (InvalidParameterException) when calling the TagResource operation: You can't modify system tags. Tag key: 'aws:cloudformation:stack-name'. It appears Custodian is trying to retag with all existing tags in addition to the new marking tag. AWS doesn't like you messing with the system tags? ### What did you expect to happen? I expected Custodian to only add the marking tag rather than try to apply all the existing tags as well. Is this possible? Is it a bug in Custodian or in the AWS Secrets Manager API? ### Cloud Provider Amazon Web Services (AWS) ### Cloud Custodian version and dependency information ```shell Custodian: 0.9.32 Python: 3.8.16 (default, Mar 10 2023, 03:07:48) [GCC 7.3.1 20180712 (Red Hat 7.3.1-15)] Platform: posix.uname_result(sysname='Linux', nodename='custodian', release='4.14.320-243.544.amzn2.x86_64', version='#1 SMP Tue Aug 1 21:03:08 UTC 2023', machine='x86_64') Using venv: False Docker: False Installed: argcomplete==3.1.2 attrs==23.1.0 boto3==1.28.58 botocore==1.31.58 docutils==0.18.1 importlib-metadata==5.2.0 importlib-resources==6.1.0 jmespath==1.0.1 jsonschema==4.19.1 jsonschema-specifications==2023.7.1 pkgutil-resolve-name==1.3.10 python-dateutil==2.8.2 pyyaml==6.0.1 referencing==0.30.2 rpds-py==0.10.3 s3transfer==0.7.0 six==1.16.0 tabulate==0.9.0 urllib3==1.26.17 zipp==3.17.0 ``` ### Policy ```shell Here is the action bit: actions: - type: mark-for-op tag: c7n-secrets-manager-secret-untagged op: notify days: 1 ``` ### Relevant log/traceback output ```shell 2023-10-18 14:25:20,524 - custodian.policy - INFO - policy:secrets-manager-secret-mark-untagged resource:secrets-manager region:us-west-2 count:2 time:0.03 2023-10-18 14:25:20,525 - custodian.actions - INFO - Tagging 2 resources for notify on 2023/10/19 2023-10-18 14:25:21,519 - custodian.actions - ERROR - Exception with tags: [{'Key': 'c7n-secrets-manager-secret-untagged', 'Value': 'Resource does not meet policy: notify@2023/10/19'}] An error occurred (InvalidParameterException) when calling the TagResource operation: You can't modify system tags. Tag key: 'aws:cloudformation:stack-name'. ``` ### Extra information or context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `c7n/resources/secretsmanager.py` Content: ``` 1 # Copyright The Cloud Custodian Authors. 2 # SPDX-License-Identifier: Apache-2.0 3 import json 4 from c7n.manager import resources 5 from c7n.actions import BaseAction, RemovePolicyBase 6 from c7n.exceptions import PolicyValidationError 7 from c7n.filters import iamaccess 8 from c7n.query import QueryResourceManager, TypeInfo 9 from c7n.filters.kms import KmsRelatedFilter 10 from c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction, Action 11 from c7n.utils import local_session, type_schema, jmespath_search 12 from c7n.filters.policystatement import HasStatementFilter 13 14 15 @resources.register('secrets-manager') 16 class SecretsManager(QueryResourceManager): 17 18 permissions = ('secretsmanager:ListSecretVersionIds',) 19 20 class resource_type(TypeInfo): 21 service = 'secretsmanager' 22 enum_spec = ('list_secrets', 'SecretList', None) 23 detail_spec = ('describe_secret', 'SecretId', 'Name', None) 24 config_type = cfn_type = 'AWS::SecretsManager::Secret' 25 name = id = 'Name' 26 arn = 'ARN' 27 28 29 SecretsManager.filter_registry.register('marked-for-op', TagActionFilter) 30 31 32 @SecretsManager.filter_registry.register('cross-account') 33 class CrossAccountAccessFilter(iamaccess.CrossAccountAccessFilter): 34 35 policy_annotation = "c7n:AccessPolicy" 36 permissions = ("secretsmanager:GetResourcePolicy",) 37 38 def process(self, resources, event=None): 39 self.client = local_session(self.manager.session_factory).client('secretsmanager') 40 return super(CrossAccountAccessFilter, self).process(resources) 41 42 def get_resource_policy(self, r): 43 if self.policy_annotation in r: 44 return r[self.policy_annotation] 45 r[self.policy_annotation] = p = self.client.get_resource_policy( 46 SecretId=r['Name']).get('ResourcePolicy', None) 47 return p 48 49 50 @SecretsManager.filter_registry.register('kms-key') 51 class KmsFilter(KmsRelatedFilter): 52 RelatedIdsExpression = 'KmsKeyId' 53 54 55 @SecretsManager.filter_registry.register('has-statement') 56 class HasStatementFilter(HasStatementFilter): 57 58 def get_std_format_args(self, secret): 59 return { 60 'secret_arn': secret['ARN'], 61 'account_id': self.manager.config.account_id, 62 'region': self.manager.config.region 63 } 64 65 def process(self, resources, event=None): 66 self.client = local_session(self.manager.session_factory).client('secretsmanager') 67 for r in resources: 68 try: 69 policy = self.client.get_resource_policy(SecretId=r['Name']) 70 if policy.get('ResourcePolicy'): 71 r['Policy'] = policy['ResourcePolicy'] 72 except self.client.exceptions.ResourceNotFoundException: 73 continue 74 75 return list(filter(None, map(self.process_resource, resources))) 76 77 78 @SecretsManager.action_registry.register('tag') 79 class TagSecretsManagerResource(Tag): 80 """Action to create tag(s) on a Secret resource 81 82 :example: 83 84 .. code-block:: yaml 85 86 policies: 87 - name: tag-secret 88 resource: secrets-manager 89 actions: 90 - type: tag 91 key: tag-key 92 value: tag-value 93 """ 94 95 permissions = ('secretsmanager:TagResource',) 96 97 def process_resource_set(self, client, resources, new_tags): 98 for r in resources: 99 tags = {t['Key']: t['Value'] for t in r.get('Tags', ())} 100 for t in new_tags: 101 tags[t['Key']] = t['Value'] 102 formatted_tags = [{'Key': k, 'Value': v} for k, v in tags.items()] 103 client.tag_resource(SecretId=r['ARN'], Tags=formatted_tags) 104 105 106 @SecretsManager.action_registry.register('remove-tag') 107 class RemoveTagSecretsManagerResource(RemoveTag): 108 """Action to remove tag(s) on a Secret resource 109 110 :example: 111 112 .. code-block:: yaml 113 114 policies: 115 - name: untag-secret 116 resource: secrets-manager 117 actions: 118 - type: remove-tag 119 tags: ['tag-to-be-removed'] 120 """ 121 122 permissions = ('secretsmanager:UntagResource',) 123 124 def process_resource_set(self, client, resources, keys): 125 for r in resources: 126 client.untag_resource(SecretId=r['ARN'], TagKeys=keys) 127 128 129 @SecretsManager.action_registry.register('mark-for-op') 130 class MarkSecretForOp(TagDelayedAction): 131 """Action to mark a Secret resource for deferred action :example: 132 133 .. code-block:: yaml 134 135 policies: 136 - name: mark-secret-for-delete 137 resource: secrets-manager 138 actions: 139 - type: mark-for-op 140 op: tag 141 days: 1 142 """ 143 144 145 @SecretsManager.action_registry.register('delete') 146 class DeleteSecretsManager(BaseAction): 147 """Delete a secret and all of its versions. 148 The recovery window is the number of days from 7 to 30 that 149 Secrets Manager waits before permanently deleting the secret 150 with default as 30 151 152 :example: 153 154 .. code-block:: yaml 155 156 policies: 157 - name: delete-cross-account-secrets 158 resource: aws.secrets-manager 159 filters: 160 - type: cross-account 161 actions: 162 - type: delete 163 recovery_window: 10 164 """ 165 166 schema = type_schema('delete', recovery_window={'type': 'integer'}) 167 permissions = ('secretsmanager:DeleteSecret',) 168 169 def process(self, resources): 170 client = local_session( 171 self.manager.session_factory).client('secretsmanager') 172 173 for r in resources: 174 if 'ReplicationStatus' in r: 175 rep_regions = jmespath_search('ReplicationStatus[*].Region', r) 176 self.manager.retry(client.remove_regions_from_replication, 177 SecretId=r['ARN'], RemoveReplicaRegions=rep_regions) 178 self.manager.retry(client.delete_secret, 179 SecretId=r['ARN'], RecoveryWindowInDays=self.data.get('recovery_window', 30)) 180 181 182 @SecretsManager.action_registry.register('remove-statements') 183 class SecretsManagerRemovePolicyStatement(RemovePolicyBase): 184 """ 185 Action to remove resource based policy statements from secrets manager 186 187 :example: 188 189 .. code-block:: yaml 190 191 policies: 192 - name: secrets-manager-cross-account 193 resource: aws.secrets-manager 194 filters: 195 - type: cross-account 196 actions: 197 - type: remove-statements 198 statement_ids: matched 199 """ 200 201 permissions = ("secretsmanager:DeleteResourcePolicy", "secretsmanager:PutResourcePolicy",) 202 203 def validate(self): 204 for f in self.manager.iter_filters(): 205 if isinstance(f, CrossAccountAccessFilter): 206 return self 207 raise PolicyValidationError( 208 '`remove-statements` may only be used in ' 209 'conjunction with `cross-account` filter on %s' % (self.manager.data,)) 210 211 def process(self, resources): 212 client = local_session(self.manager.session_factory).client('secretsmanager') 213 for r in resources: 214 try: 215 self.process_resource(client, r) 216 except Exception: 217 self.log.exception("Error processing secretsmanager:%s", r['ARN']) 218 219 def process_resource(self, client, resource): 220 p = json.loads(resource.get('c7n:AccessPolicy')) 221 if p is None: 222 return 223 224 statements, found = self.process_policy( 225 p, resource, CrossAccountAccessFilter.annotation_key) 226 227 if not found: 228 return 229 if statements: 230 client.put_resource_policy( 231 SecretId=resource['ARN'], 232 ResourcePolicy=json.dumps(p) 233 ) 234 else: 235 client.delete_resource_policy(SecretId=resource['ARN']) 236 237 238 @SecretsManager.action_registry.register('set-encryption') 239 class SetEncryptionAction(Action): 240 """ 241 Set kms encryption key for secrets, key supports ARN, ID, or alias 242 243 :example: 244 245 .. code-block:: yaml 246 247 policies: 248 - name: set-secret-encryption 249 resource: aws.secrets-manager 250 actions: 251 - type: set-encryption 252 key: alias/foo/bar 253 """ 254 255 schema = type_schema('set-encryption', key={'type': 'string'}, required=['key']) 256 permissions = ('secretsmanager:UpdateSecret', ) 257 258 def process(self, resources): 259 key = self.data['key'] 260 client = local_session(self.manager.session_factory).client('secretsmanager') 261 for r in resources: 262 client.update_secret( 263 SecretId=r['Name'], 264 KmsKeyId=key 265 ) 266 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/c7n/resources/secretsmanager.py b/c7n/resources/secretsmanager.py --- a/c7n/resources/secretsmanager.py +++ b/c7n/resources/secretsmanager.py @@ -96,7 +96,8 @@ def process_resource_set(self, client, resources, new_tags): for r in resources: - tags = {t['Key']: t['Value'] for t in r.get('Tags', ())} + tags = {t['Key']: t['Value'] for t in r.get('Tags', ()) + if not t['Key'].startswith('aws:')} for t in new_tags: tags[t['Key']] = t['Value'] formatted_tags = [{'Key': k, 'Value': v} for k, v in tags.items()]
{"golden_diff": "diff --git a/c7n/resources/secretsmanager.py b/c7n/resources/secretsmanager.py\n--- a/c7n/resources/secretsmanager.py\n+++ b/c7n/resources/secretsmanager.py\n@@ -96,7 +96,8 @@\n \n def process_resource_set(self, client, resources, new_tags):\n for r in resources:\n- tags = {t['Key']: t['Value'] for t in r.get('Tags', ())}\n+ tags = {t['Key']: t['Value'] for t in r.get('Tags', ())\n+ if not t['Key'].startswith('aws:')}\n for t in new_tags:\n tags[t['Key']] = t['Value']\n formatted_tags = [{'Key': k, 'Value': v} for k, v in tags.items()]\n", "issue": "aws.secrets-manager:tag action tries to retag secret with existing tags as well\n### Describe the bug\n\nI have a policy that checks all AWS Secret Manager secrets for the existence of two particular tags. If the secret is missing either of these tags, it tries to makr (i.e. tag) the secret for later action. However, Custodian fails to apply its marking tag with the following error:\r\n\r\n2023-10-18 14:25:20,524 - custodian.policy - INFO - policy:secrets-manager-secret-mark-untagged resource:secrets-manager region:us-west-2 count:2 time:0.03\r\n2023-10-18 14:25:20,525 - custodian.actions - INFO - Tagging 2 resources for notify on 2023/10/19\r\n2023-10-18 14:25:21,519 - custodian.actions - ERROR - Exception with tags: [{'Key': 'c7n-secrets-manager-secret-untagged', 'Value': 'Resource does not meet policy: notify@2023/10/19'}] An error occurred (InvalidParameterException) when calling the TagResource operation: You can't modify system tags. Tag key: 'aws:cloudformation:stack-name'.\r\n\r\nIt appears Custodian is trying to retag with all existing tags in addition to the new marking tag. AWS doesn't like you messing with the system tags?\n\n### What did you expect to happen?\n\nI expected Custodian to only add the marking tag rather than try to apply all the existing tags as well.\r\n\r\nIs this possible? Is it a bug in Custodian or in the AWS Secrets Manager API?\n\n### Cloud Provider\n\nAmazon Web Services (AWS)\n\n### Cloud Custodian version and dependency information\n\n```shell\nCustodian: 0.9.32\r\nPython: 3.8.16 (default, Mar 10 2023, 03:07:48) \r\n [GCC 7.3.1 20180712 (Red Hat 7.3.1-15)]\r\nPlatform: posix.uname_result(sysname='Linux', nodename='custodian', release='4.14.320-243.544.amzn2.x86_64', version='#1 SMP Tue Aug 1 21:03:08 UTC 2023', machine='x86_64')\r\nUsing venv: False\r\nDocker: False\r\nInstalled: \r\n\r\nargcomplete==3.1.2\r\nattrs==23.1.0\r\nboto3==1.28.58\r\nbotocore==1.31.58\r\ndocutils==0.18.1\r\nimportlib-metadata==5.2.0\r\nimportlib-resources==6.1.0\r\njmespath==1.0.1\r\njsonschema==4.19.1\r\njsonschema-specifications==2023.7.1\r\npkgutil-resolve-name==1.3.10\r\npython-dateutil==2.8.2\r\npyyaml==6.0.1\r\nreferencing==0.30.2\r\nrpds-py==0.10.3\r\ns3transfer==0.7.0\r\nsix==1.16.0\r\ntabulate==0.9.0\r\nurllib3==1.26.17\r\nzipp==3.17.0\n```\n\n\n### Policy\n\n```shell\nHere is the action bit:\r\n\r\nactions:\r\n - type: mark-for-op\r\n tag: c7n-secrets-manager-secret-untagged\r\n op: notify\r\n days: 1\n```\n\n\n### Relevant log/traceback output\n\n```shell\n2023-10-18 14:25:20,524 - custodian.policy - INFO - policy:secrets-manager-secret-mark-untagged resource:secrets-manager region:us-west-2 count:2 time:0.03\r\n2023-10-18 14:25:20,525 - custodian.actions - INFO - Tagging 2 resources for notify on 2023/10/19\r\n2023-10-18 14:25:21,519 - custodian.actions - ERROR - Exception with tags: [{'Key': 'c7n-secrets-manager-secret-untagged', 'Value': 'Resource does not meet policy: notify@2023/10/19'}] An error occurred (InvalidParameterException) when calling the TagResource operation: You can't modify system tags. Tag key: 'aws:cloudformation:stack-name'.\n```\n\n\n### Extra information or context\n\n_No response_\n", "before_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nimport json\nfrom c7n.manager import resources\nfrom c7n.actions import BaseAction, RemovePolicyBase\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import iamaccess\nfrom c7n.query import QueryResourceManager, TypeInfo\nfrom c7n.filters.kms import KmsRelatedFilter\nfrom c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction, Action\nfrom c7n.utils import local_session, type_schema, jmespath_search\nfrom c7n.filters.policystatement import HasStatementFilter\n\n\n@resources.register('secrets-manager')\nclass SecretsManager(QueryResourceManager):\n\n permissions = ('secretsmanager:ListSecretVersionIds',)\n\n class resource_type(TypeInfo):\n service = 'secretsmanager'\n enum_spec = ('list_secrets', 'SecretList', None)\n detail_spec = ('describe_secret', 'SecretId', 'Name', None)\n config_type = cfn_type = 'AWS::SecretsManager::Secret'\n name = id = 'Name'\n arn = 'ARN'\n\n\nSecretsManager.filter_registry.register('marked-for-op', TagActionFilter)\n\n\n@SecretsManager.filter_registry.register('cross-account')\nclass CrossAccountAccessFilter(iamaccess.CrossAccountAccessFilter):\n\n policy_annotation = \"c7n:AccessPolicy\"\n permissions = (\"secretsmanager:GetResourcePolicy\",)\n\n def process(self, resources, event=None):\n self.client = local_session(self.manager.session_factory).client('secretsmanager')\n return super(CrossAccountAccessFilter, self).process(resources)\n\n def get_resource_policy(self, r):\n if self.policy_annotation in r:\n return r[self.policy_annotation]\n r[self.policy_annotation] = p = self.client.get_resource_policy(\n SecretId=r['Name']).get('ResourcePolicy', None)\n return p\n\n\n@SecretsManager.filter_registry.register('kms-key')\nclass KmsFilter(KmsRelatedFilter):\n RelatedIdsExpression = 'KmsKeyId'\n\n\n@SecretsManager.filter_registry.register('has-statement')\nclass HasStatementFilter(HasStatementFilter):\n\n def get_std_format_args(self, secret):\n return {\n 'secret_arn': secret['ARN'],\n 'account_id': self.manager.config.account_id,\n 'region': self.manager.config.region\n }\n\n def process(self, resources, event=None):\n self.client = local_session(self.manager.session_factory).client('secretsmanager')\n for r in resources:\n try:\n policy = self.client.get_resource_policy(SecretId=r['Name'])\n if policy.get('ResourcePolicy'):\n r['Policy'] = policy['ResourcePolicy']\n except self.client.exceptions.ResourceNotFoundException:\n continue\n\n return list(filter(None, map(self.process_resource, resources)))\n\n\n@SecretsManager.action_registry.register('tag')\nclass TagSecretsManagerResource(Tag):\n \"\"\"Action to create tag(s) on a Secret resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: tag-secret\n resource: secrets-manager\n actions:\n - type: tag\n key: tag-key\n value: tag-value\n \"\"\"\n\n permissions = ('secretsmanager:TagResource',)\n\n def process_resource_set(self, client, resources, new_tags):\n for r in resources:\n tags = {t['Key']: t['Value'] for t in r.get('Tags', ())}\n for t in new_tags:\n tags[t['Key']] = t['Value']\n formatted_tags = [{'Key': k, 'Value': v} for k, v in tags.items()]\n client.tag_resource(SecretId=r['ARN'], Tags=formatted_tags)\n\n\n@SecretsManager.action_registry.register('remove-tag')\nclass RemoveTagSecretsManagerResource(RemoveTag):\n \"\"\"Action to remove tag(s) on a Secret resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: untag-secret\n resource: secrets-manager\n actions:\n - type: remove-tag\n tags: ['tag-to-be-removed']\n \"\"\"\n\n permissions = ('secretsmanager:UntagResource',)\n\n def process_resource_set(self, client, resources, keys):\n for r in resources:\n client.untag_resource(SecretId=r['ARN'], TagKeys=keys)\n\n\n@SecretsManager.action_registry.register('mark-for-op')\nclass MarkSecretForOp(TagDelayedAction):\n \"\"\"Action to mark a Secret resource for deferred action :example:\n\n .. code-block:: yaml\n\n policies:\n - name: mark-secret-for-delete\n resource: secrets-manager\n actions:\n - type: mark-for-op\n op: tag\n days: 1\n \"\"\"\n\n\n@SecretsManager.action_registry.register('delete')\nclass DeleteSecretsManager(BaseAction):\n \"\"\"Delete a secret and all of its versions.\n The recovery window is the number of days from 7 to 30 that\n Secrets Manager waits before permanently deleting the secret\n with default as 30\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: delete-cross-account-secrets\n resource: aws.secrets-manager\n filters:\n - type: cross-account\n actions:\n - type: delete\n recovery_window: 10\n \"\"\"\n\n schema = type_schema('delete', recovery_window={'type': 'integer'})\n permissions = ('secretsmanager:DeleteSecret',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('secretsmanager')\n\n for r in resources:\n if 'ReplicationStatus' in r:\n rep_regions = jmespath_search('ReplicationStatus[*].Region', r)\n self.manager.retry(client.remove_regions_from_replication,\n SecretId=r['ARN'], RemoveReplicaRegions=rep_regions)\n self.manager.retry(client.delete_secret,\n SecretId=r['ARN'], RecoveryWindowInDays=self.data.get('recovery_window', 30))\n\n\n@SecretsManager.action_registry.register('remove-statements')\nclass SecretsManagerRemovePolicyStatement(RemovePolicyBase):\n \"\"\"\n Action to remove resource based policy statements from secrets manager\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: secrets-manager-cross-account\n resource: aws.secrets-manager\n filters:\n - type: cross-account\n actions:\n - type: remove-statements\n statement_ids: matched\n \"\"\"\n\n permissions = (\"secretsmanager:DeleteResourcePolicy\", \"secretsmanager:PutResourcePolicy\",)\n\n def validate(self):\n for f in self.manager.iter_filters():\n if isinstance(f, CrossAccountAccessFilter):\n return self\n raise PolicyValidationError(\n '`remove-statements` may only be used in '\n 'conjunction with `cross-account` filter on %s' % (self.manager.data,))\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('secretsmanager')\n for r in resources:\n try:\n self.process_resource(client, r)\n except Exception:\n self.log.exception(\"Error processing secretsmanager:%s\", r['ARN'])\n\n def process_resource(self, client, resource):\n p = json.loads(resource.get('c7n:AccessPolicy'))\n if p is None:\n return\n\n statements, found = self.process_policy(\n p, resource, CrossAccountAccessFilter.annotation_key)\n\n if not found:\n return\n if statements:\n client.put_resource_policy(\n SecretId=resource['ARN'],\n ResourcePolicy=json.dumps(p)\n )\n else:\n client.delete_resource_policy(SecretId=resource['ARN'])\n\n\n@SecretsManager.action_registry.register('set-encryption')\nclass SetEncryptionAction(Action):\n \"\"\"\n Set kms encryption key for secrets, key supports ARN, ID, or alias\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: set-secret-encryption\n resource: aws.secrets-manager\n actions:\n - type: set-encryption\n key: alias/foo/bar\n \"\"\"\n\n schema = type_schema('set-encryption', key={'type': 'string'}, required=['key'])\n permissions = ('secretsmanager:UpdateSecret', )\n\n def process(self, resources):\n key = self.data['key']\n client = local_session(self.manager.session_factory).client('secretsmanager')\n for r in resources:\n client.update_secret(\n SecretId=r['Name'],\n KmsKeyId=key\n )\n", "path": "c7n/resources/secretsmanager.py"}], "after_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nimport json\nfrom c7n.manager import resources\nfrom c7n.actions import BaseAction, RemovePolicyBase\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import iamaccess\nfrom c7n.query import QueryResourceManager, TypeInfo\nfrom c7n.filters.kms import KmsRelatedFilter\nfrom c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction, Action\nfrom c7n.utils import local_session, type_schema, jmespath_search\nfrom c7n.filters.policystatement import HasStatementFilter\n\n\n@resources.register('secrets-manager')\nclass SecretsManager(QueryResourceManager):\n\n permissions = ('secretsmanager:ListSecretVersionIds',)\n\n class resource_type(TypeInfo):\n service = 'secretsmanager'\n enum_spec = ('list_secrets', 'SecretList', None)\n detail_spec = ('describe_secret', 'SecretId', 'Name', None)\n config_type = cfn_type = 'AWS::SecretsManager::Secret'\n name = id = 'Name'\n arn = 'ARN'\n\n\nSecretsManager.filter_registry.register('marked-for-op', TagActionFilter)\n\n\n@SecretsManager.filter_registry.register('cross-account')\nclass CrossAccountAccessFilter(iamaccess.CrossAccountAccessFilter):\n\n policy_annotation = \"c7n:AccessPolicy\"\n permissions = (\"secretsmanager:GetResourcePolicy\",)\n\n def process(self, resources, event=None):\n self.client = local_session(self.manager.session_factory).client('secretsmanager')\n return super(CrossAccountAccessFilter, self).process(resources)\n\n def get_resource_policy(self, r):\n if self.policy_annotation in r:\n return r[self.policy_annotation]\n r[self.policy_annotation] = p = self.client.get_resource_policy(\n SecretId=r['Name']).get('ResourcePolicy', None)\n return p\n\n\n@SecretsManager.filter_registry.register('kms-key')\nclass KmsFilter(KmsRelatedFilter):\n RelatedIdsExpression = 'KmsKeyId'\n\n\n@SecretsManager.filter_registry.register('has-statement')\nclass HasStatementFilter(HasStatementFilter):\n\n def get_std_format_args(self, secret):\n return {\n 'secret_arn': secret['ARN'],\n 'account_id': self.manager.config.account_id,\n 'region': self.manager.config.region\n }\n\n def process(self, resources, event=None):\n self.client = local_session(self.manager.session_factory).client('secretsmanager')\n for r in resources:\n try:\n policy = self.client.get_resource_policy(SecretId=r['Name'])\n if policy.get('ResourcePolicy'):\n r['Policy'] = policy['ResourcePolicy']\n except self.client.exceptions.ResourceNotFoundException:\n continue\n\n return list(filter(None, map(self.process_resource, resources)))\n\n\n@SecretsManager.action_registry.register('tag')\nclass TagSecretsManagerResource(Tag):\n \"\"\"Action to create tag(s) on a Secret resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: tag-secret\n resource: secrets-manager\n actions:\n - type: tag\n key: tag-key\n value: tag-value\n \"\"\"\n\n permissions = ('secretsmanager:TagResource',)\n\n def process_resource_set(self, client, resources, new_tags):\n for r in resources:\n tags = {t['Key']: t['Value'] for t in r.get('Tags', ())\n if not t['Key'].startswith('aws:')}\n for t in new_tags:\n tags[t['Key']] = t['Value']\n formatted_tags = [{'Key': k, 'Value': v} for k, v in tags.items()]\n client.tag_resource(SecretId=r['ARN'], Tags=formatted_tags)\n\n\n@SecretsManager.action_registry.register('remove-tag')\nclass RemoveTagSecretsManagerResource(RemoveTag):\n \"\"\"Action to remove tag(s) on a Secret resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: untag-secret\n resource: secrets-manager\n actions:\n - type: remove-tag\n tags: ['tag-to-be-removed']\n \"\"\"\n\n permissions = ('secretsmanager:UntagResource',)\n\n def process_resource_set(self, client, resources, keys):\n for r in resources:\n client.untag_resource(SecretId=r['ARN'], TagKeys=keys)\n\n\n@SecretsManager.action_registry.register('mark-for-op')\nclass MarkSecretForOp(TagDelayedAction):\n \"\"\"Action to mark a Secret resource for deferred action :example:\n\n .. code-block:: yaml\n\n policies:\n - name: mark-secret-for-delete\n resource: secrets-manager\n actions:\n - type: mark-for-op\n op: tag\n days: 1\n \"\"\"\n\n\n@SecretsManager.action_registry.register('delete')\nclass DeleteSecretsManager(BaseAction):\n \"\"\"Delete a secret and all of its versions.\n The recovery window is the number of days from 7 to 30 that\n Secrets Manager waits before permanently deleting the secret\n with default as 30\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: delete-cross-account-secrets\n resource: aws.secrets-manager\n filters:\n - type: cross-account\n actions:\n - type: delete\n recovery_window: 10\n \"\"\"\n\n schema = type_schema('delete', recovery_window={'type': 'integer'})\n permissions = ('secretsmanager:DeleteSecret',)\n\n def process(self, resources):\n client = local_session(\n self.manager.session_factory).client('secretsmanager')\n\n for r in resources:\n if 'ReplicationStatus' in r:\n rep_regions = jmespath_search('ReplicationStatus[*].Region', r)\n self.manager.retry(client.remove_regions_from_replication,\n SecretId=r['ARN'], RemoveReplicaRegions=rep_regions)\n self.manager.retry(client.delete_secret,\n SecretId=r['ARN'], RecoveryWindowInDays=self.data.get('recovery_window', 30))\n\n\n@SecretsManager.action_registry.register('remove-statements')\nclass SecretsManagerRemovePolicyStatement(RemovePolicyBase):\n \"\"\"\n Action to remove resource based policy statements from secrets manager\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: secrets-manager-cross-account\n resource: aws.secrets-manager\n filters:\n - type: cross-account\n actions:\n - type: remove-statements\n statement_ids: matched\n \"\"\"\n\n permissions = (\"secretsmanager:DeleteResourcePolicy\", \"secretsmanager:PutResourcePolicy\",)\n\n def validate(self):\n for f in self.manager.iter_filters():\n if isinstance(f, CrossAccountAccessFilter):\n return self\n raise PolicyValidationError(\n '`remove-statements` may only be used in '\n 'conjunction with `cross-account` filter on %s' % (self.manager.data,))\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('secretsmanager')\n for r in resources:\n try:\n self.process_resource(client, r)\n except Exception:\n self.log.exception(\"Error processing secretsmanager:%s\", r['ARN'])\n\n def process_resource(self, client, resource):\n p = json.loads(resource.get('c7n:AccessPolicy'))\n if p is None:\n return\n\n statements, found = self.process_policy(\n p, resource, CrossAccountAccessFilter.annotation_key)\n\n if not found:\n return\n if statements:\n client.put_resource_policy(\n SecretId=resource['ARN'],\n ResourcePolicy=json.dumps(p)\n )\n else:\n client.delete_resource_policy(SecretId=resource['ARN'])\n\n\n@SecretsManager.action_registry.register('set-encryption')\nclass SetEncryptionAction(Action):\n \"\"\"\n Set kms encryption key for secrets, key supports ARN, ID, or alias\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: set-secret-encryption\n resource: aws.secrets-manager\n actions:\n - type: set-encryption\n key: alias/foo/bar\n \"\"\"\n\n schema = type_schema('set-encryption', key={'type': 'string'}, required=['key'])\n permissions = ('secretsmanager:UpdateSecret', )\n\n def process(self, resources):\n key = self.data['key']\n client = local_session(self.manager.session_factory).client('secretsmanager')\n for r in resources:\n client.update_secret(\n SecretId=r['Name'],\n KmsKeyId=key\n )\n", "path": "c7n/resources/secretsmanager.py"}]}
3,897
175
gh_patches_debug_42542
rasdani/github-patches
git_diff
networkx__networkx-2532
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- maximal_independent_set does not work for DiGraph Currently [maximal_independent_set](https://github.com/networkx/networkx/blob/d7d906e1d16ef331da0bc1d149953e7532155acc/networkx/algorithms/mis.py#L70) returns the wrong results for a `DiGraph` because it uses the `G.neighbors` method which returns only the successor nodes in a `DiGraph`. I believe the [all_neighbors](https://github.com/networkx/networkx/blob/13b373bf6938c077d1e61adc60a48cb910a75755/networkx/classes/function.py#L540) function should be used instead to make `maximal_independent_set` work correctly for both graph types. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `networkx/algorithms/mis.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $ 3 """ 4 Algorithm to find a maximal (not maximum) independent set. 5 6 """ 7 # Leo Lopes <leo.lopes@monash.edu> 8 # Aric Hagberg <hagberg@lanl.gov> 9 # Dan Schult <dschult@colgate.edu> 10 # Pieter Swart <swart@lanl.gov> 11 # All rights reserved. 12 # BSD license. 13 14 __author__ = "\n".join(["Leo Lopes <leo.lopes@monash.edu>", 15 "Loïc Séguin-C. <loicseguin@gmail.com>"]) 16 17 __all__ = ['maximal_independent_set'] 18 19 import random 20 import networkx as nx 21 22 def maximal_independent_set(G, nodes=None): 23 """Return a random maximal independent set guaranteed to contain 24 a given set of nodes. 25 26 An independent set is a set of nodes such that the subgraph 27 of G induced by these nodes contains no edges. A maximal 28 independent set is an independent set such that it is not possible 29 to add a new node and still get an independent set. 30 31 Parameters 32 ---------- 33 G : NetworkX graph 34 35 nodes : list or iterable 36 Nodes that must be part of the independent set. This set of nodes 37 must be independent. 38 39 Returns 40 ------- 41 indep_nodes : list 42 List of nodes that are part of a maximal independent set. 43 44 Raises 45 ------ 46 NetworkXUnfeasible 47 If the nodes in the provided list are not part of the graph or 48 do not form an independent set, an exception is raised. 49 50 Examples 51 -------- 52 >>> G = nx.path_graph(5) 53 >>> nx.maximal_independent_set(G) # doctest: +SKIP 54 [4, 0, 2] 55 >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP 56 [1, 3] 57 58 Notes 59 ----- 60 This algorithm does not solve the maximum independent set problem. 61 62 """ 63 if not nodes: 64 nodes = set([random.choice(list(G))]) 65 else: 66 nodes = set(nodes) 67 if not nodes.issubset(G): 68 raise nx.NetworkXUnfeasible( 69 "%s is not a subset of the nodes of G" % nodes) 70 neighbors = set.union(*[set(G.neighbors(v)) for v in nodes]) 71 if set.intersection(neighbors, nodes): 72 raise nx.NetworkXUnfeasible( 73 "%s is not an independent set of G" % nodes) 74 indep_nodes = list(nodes) 75 available_nodes = set(G.nodes()).difference(neighbors.union(nodes)) 76 while available_nodes: 77 node = random.choice(list(available_nodes)) 78 indep_nodes.append(node) 79 available_nodes.difference_update(list(G.neighbors(node)) + [node]) 80 return indep_nodes 81 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/networkx/algorithms/mis.py b/networkx/algorithms/mis.py --- a/networkx/algorithms/mis.py +++ b/networkx/algorithms/mis.py @@ -1,24 +1,26 @@ # -*- coding: utf-8 -*- # $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $ -""" -Algorithm to find a maximal (not maximum) independent set. - -""" # Leo Lopes <leo.lopes@monash.edu> # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. +# +# Authors: Leo Lopes <leo.lopes@monash.edu> +# Loïc Séguin-C. <loicseguin@gmail.com> +""" +Algorithm to find a maximal (not maximum) independent set. -__author__ = "\n".join(["Leo Lopes <leo.lopes@monash.edu>", - "Loïc Séguin-C. <loicseguin@gmail.com>"]) +""" +import random +import networkx as nx +from networkx.utils import not_implemented_for __all__ = ['maximal_independent_set'] -import random -import networkx as nx +@not_implemented_for('directed') def maximal_independent_set(G, nodes=None): """Return a random maximal independent set guaranteed to contain a given set of nodes. @@ -27,10 +29,10 @@ of G induced by these nodes contains no edges. A maximal independent set is an independent set such that it is not possible to add a new node and still get an independent set. - + Parameters ---------- - G : NetworkX graph + G : NetworkX graph nodes : list or iterable Nodes that must be part of the independent set. This set of nodes @@ -38,7 +40,7 @@ Returns ------- - indep_nodes : list + indep_nodes : list List of nodes that are part of a maximal independent set. Raises @@ -47,6 +49,9 @@ If the nodes in the provided list are not part of the graph or do not form an independent set, an exception is raised. + NetworkXNotImplemented + If `G` is directed. + Examples -------- >>> G = nx.path_graph(5) @@ -54,7 +59,7 @@ [4, 0, 2] >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP [1, 3] - + Notes ----- This algorithm does not solve the maximum independent set problem. @@ -67,7 +72,7 @@ if not nodes.issubset(G): raise nx.NetworkXUnfeasible( "%s is not a subset of the nodes of G" % nodes) - neighbors = set.union(*[set(G.neighbors(v)) for v in nodes]) + neighbors = set.union(*[set(G.adj[v]) for v in nodes]) if set.intersection(neighbors, nodes): raise nx.NetworkXUnfeasible( "%s is not an independent set of G" % nodes) @@ -76,6 +81,5 @@ while available_nodes: node = random.choice(list(available_nodes)) indep_nodes.append(node) - available_nodes.difference_update(list(G.neighbors(node)) + [node]) + available_nodes.difference_update(list(G.adj[node]) + [node]) return indep_nodes -
{"golden_diff": "diff --git a/networkx/algorithms/mis.py b/networkx/algorithms/mis.py\n--- a/networkx/algorithms/mis.py\n+++ b/networkx/algorithms/mis.py\n@@ -1,24 +1,26 @@\n # -*- coding: utf-8 -*-\n # $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $\n-\"\"\"\n-Algorithm to find a maximal (not maximum) independent set.\n-\n-\"\"\"\n # Leo Lopes <leo.lopes@monash.edu>\n # Aric Hagberg <hagberg@lanl.gov>\n # Dan Schult <dschult@colgate.edu>\n # Pieter Swart <swart@lanl.gov>\n # All rights reserved.\n # BSD license.\n+#\n+# Authors: Leo Lopes <leo.lopes@monash.edu>\n+# Lo\u00efc S\u00e9guin-C. <loicseguin@gmail.com>\n+\"\"\"\n+Algorithm to find a maximal (not maximum) independent set.\n \n-__author__ = \"\\n\".join([\"Leo Lopes <leo.lopes@monash.edu>\",\n- \"Lo\u00efc S\u00e9guin-C. <loicseguin@gmail.com>\"])\n+\"\"\"\n+import random\n+import networkx as nx\n+from networkx.utils import not_implemented_for\n \n __all__ = ['maximal_independent_set']\n \n-import random\n-import networkx as nx\n \n+@not_implemented_for('directed')\n def maximal_independent_set(G, nodes=None):\n \"\"\"Return a random maximal independent set guaranteed to contain\n a given set of nodes.\n@@ -27,10 +29,10 @@\n of G induced by these nodes contains no edges. A maximal\n independent set is an independent set such that it is not possible\n to add a new node and still get an independent set.\n- \n+\n Parameters\n ----------\n- G : NetworkX graph \n+ G : NetworkX graph\n \n nodes : list or iterable\n Nodes that must be part of the independent set. This set of nodes\n@@ -38,7 +40,7 @@\n \n Returns\n -------\n- indep_nodes : list \n+ indep_nodes : list\n List of nodes that are part of a maximal independent set.\n \n Raises\n@@ -47,6 +49,9 @@\n If the nodes in the provided list are not part of the graph or\n do not form an independent set, an exception is raised.\n \n+ NetworkXNotImplemented\n+ If `G` is directed.\n+\n Examples\n --------\n >>> G = nx.path_graph(5)\n@@ -54,7 +59,7 @@\n [4, 0, 2]\n >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP\n [1, 3]\n- \n+\n Notes\n -----\n This algorithm does not solve the maximum independent set problem.\n@@ -67,7 +72,7 @@\n if not nodes.issubset(G):\n raise nx.NetworkXUnfeasible(\n \"%s is not a subset of the nodes of G\" % nodes)\n- neighbors = set.union(*[set(G.neighbors(v)) for v in nodes])\n+ neighbors = set.union(*[set(G.adj[v]) for v in nodes])\n if set.intersection(neighbors, nodes):\n raise nx.NetworkXUnfeasible(\n \"%s is not an independent set of G\" % nodes)\n@@ -76,6 +81,5 @@\n while available_nodes:\n node = random.choice(list(available_nodes))\n indep_nodes.append(node)\n- available_nodes.difference_update(list(G.neighbors(node)) + [node])\n+ available_nodes.difference_update(list(G.adj[node]) + [node])\n return indep_nodes\n-\n", "issue": "maximal_independent_set does not work for DiGraph\nCurrently [maximal_independent_set](https://github.com/networkx/networkx/blob/d7d906e1d16ef331da0bc1d149953e7532155acc/networkx/algorithms/mis.py#L70) returns the wrong results for a `DiGraph` because it uses the `G.neighbors` method which returns only the successor nodes in a `DiGraph`. I believe the [all_neighbors](https://github.com/networkx/networkx/blob/13b373bf6938c077d1e61adc60a48cb910a75755/networkx/classes/function.py#L540) function should be used instead to make `maximal_independent_set` work correctly for both graph types.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $\n\"\"\"\nAlgorithm to find a maximal (not maximum) independent set.\n\n\"\"\"\n# Leo Lopes <leo.lopes@monash.edu>\n# Aric Hagberg <hagberg@lanl.gov>\n# Dan Schult <dschult@colgate.edu>\n# Pieter Swart <swart@lanl.gov>\n# All rights reserved.\n# BSD license.\n\n__author__ = \"\\n\".join([\"Leo Lopes <leo.lopes@monash.edu>\",\n \"Lo\u00efc S\u00e9guin-C. <loicseguin@gmail.com>\"])\n\n__all__ = ['maximal_independent_set']\n\nimport random\nimport networkx as nx\n\ndef maximal_independent_set(G, nodes=None):\n \"\"\"Return a random maximal independent set guaranteed to contain\n a given set of nodes.\n\n An independent set is a set of nodes such that the subgraph\n of G induced by these nodes contains no edges. A maximal\n independent set is an independent set such that it is not possible\n to add a new node and still get an independent set.\n \n Parameters\n ----------\n G : NetworkX graph \n\n nodes : list or iterable\n Nodes that must be part of the independent set. This set of nodes\n must be independent.\n\n Returns\n -------\n indep_nodes : list \n List of nodes that are part of a maximal independent set.\n\n Raises\n ------\n NetworkXUnfeasible\n If the nodes in the provided list are not part of the graph or\n do not form an independent set, an exception is raised.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> nx.maximal_independent_set(G) # doctest: +SKIP\n [4, 0, 2]\n >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP\n [1, 3]\n \n Notes\n -----\n This algorithm does not solve the maximum independent set problem.\n\n \"\"\"\n if not nodes:\n nodes = set([random.choice(list(G))])\n else:\n nodes = set(nodes)\n if not nodes.issubset(G):\n raise nx.NetworkXUnfeasible(\n \"%s is not a subset of the nodes of G\" % nodes)\n neighbors = set.union(*[set(G.neighbors(v)) for v in nodes])\n if set.intersection(neighbors, nodes):\n raise nx.NetworkXUnfeasible(\n \"%s is not an independent set of G\" % nodes)\n indep_nodes = list(nodes)\n available_nodes = set(G.nodes()).difference(neighbors.union(nodes))\n while available_nodes:\n node = random.choice(list(available_nodes))\n indep_nodes.append(node)\n available_nodes.difference_update(list(G.neighbors(node)) + [node])\n return indep_nodes\n\n", "path": "networkx/algorithms/mis.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# $Id: maximalIndependentSet.py 576 2011-03-01 05:50:34Z lleeoo $\n# Leo Lopes <leo.lopes@monash.edu>\n# Aric Hagberg <hagberg@lanl.gov>\n# Dan Schult <dschult@colgate.edu>\n# Pieter Swart <swart@lanl.gov>\n# All rights reserved.\n# BSD license.\n#\n# Authors: Leo Lopes <leo.lopes@monash.edu>\n# Lo\u00efc S\u00e9guin-C. <loicseguin@gmail.com>\n\"\"\"\nAlgorithm to find a maximal (not maximum) independent set.\n\n\"\"\"\nimport random\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = ['maximal_independent_set']\n\n\n@not_implemented_for('directed')\ndef maximal_independent_set(G, nodes=None):\n \"\"\"Return a random maximal independent set guaranteed to contain\n a given set of nodes.\n\n An independent set is a set of nodes such that the subgraph\n of G induced by these nodes contains no edges. A maximal\n independent set is an independent set such that it is not possible\n to add a new node and still get an independent set.\n\n Parameters\n ----------\n G : NetworkX graph\n\n nodes : list or iterable\n Nodes that must be part of the independent set. This set of nodes\n must be independent.\n\n Returns\n -------\n indep_nodes : list\n List of nodes that are part of a maximal independent set.\n\n Raises\n ------\n NetworkXUnfeasible\n If the nodes in the provided list are not part of the graph or\n do not form an independent set, an exception is raised.\n\n NetworkXNotImplemented\n If `G` is directed.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> nx.maximal_independent_set(G) # doctest: +SKIP\n [4, 0, 2]\n >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP\n [1, 3]\n\n Notes\n -----\n This algorithm does not solve the maximum independent set problem.\n\n \"\"\"\n if not nodes:\n nodes = set([random.choice(list(G))])\n else:\n nodes = set(nodes)\n if not nodes.issubset(G):\n raise nx.NetworkXUnfeasible(\n \"%s is not a subset of the nodes of G\" % nodes)\n neighbors = set.union(*[set(G.adj[v]) for v in nodes])\n if set.intersection(neighbors, nodes):\n raise nx.NetworkXUnfeasible(\n \"%s is not an independent set of G\" % nodes)\n indep_nodes = list(nodes)\n available_nodes = set(G.nodes()).difference(neighbors.union(nodes))\n while available_nodes:\n node = random.choice(list(available_nodes))\n indep_nodes.append(node)\n available_nodes.difference_update(list(G.adj[node]) + [node])\n return indep_nodes\n", "path": "networkx/algorithms/mis.py"}]}
1,265
846
gh_patches_debug_27752
rasdani/github-patches
git_diff
pyload__pyload-52
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- FourChanOrg don't work When i try to download a thread (e.g. http://boards.4chan.org/wg/res/5176429) nothing happens, only BasePlugin will be used, --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `module/plugins/crypter/FourChanOrg.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import re 5 6 from module.plugins.Crypter import Crypter 7 8 class FourChanOrg(Crypter): 9 __name__ = "FourChanOrg" 10 __type__ = "container" 11 __pattern__ = r"http://(www\.)?(img\.)?(zip\.)?4chan.org/\w+/(res/|imgboard\.html)" 12 __version__ = "0.1" 13 __description__ = """4chan.org Thread Download Plugin""" 14 __author_name__ = ("Spoob") 15 __author_mail__ = ("Spoob@pyload.org") 16 17 def __init__(self, parent): 18 Crypter.__init__(self, parent) 19 self.parent = parent 20 self.html = None 21 22 def file_exists(self): 23 """ returns True or False 24 """ 25 return True 26 27 def proceed(self, url, location): 28 url = self.parent.url 29 html = self.req.load(url) 30 link_pattern = "" 31 temp_links = [] 32 if "imagebord.html" in url: 33 link_pattern = '[<a href="(res/\d*\.html)">Reply</a>]' 34 temp_links = re.findall(link_pattern, html) 35 for link in re.findall(link_pattern, html): 36 temp_links.append(link) 37 else: 38 temp_links = re.findall('File : <a href="(http://(?:img\.)?(?:zip\.)?4chan\.org/\w{,3}/src/\d*\..{3})"', html) 39 self.links = temp_links 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/module/plugins/crypter/FourChanOrg.py b/module/plugins/crypter/FourChanOrg.py --- a/module/plugins/crypter/FourChanOrg.py +++ b/module/plugins/crypter/FourChanOrg.py @@ -6,34 +6,20 @@ from module.plugins.Crypter import Crypter class FourChanOrg(Crypter): + # Based on 4chandl by Roland Beermann + # https://gist.github.com/enkore/3492599 __name__ = "FourChanOrg" __type__ = "container" - __pattern__ = r"http://(www\.)?(img\.)?(zip\.)?4chan.org/\w+/(res/|imgboard\.html)" - __version__ = "0.1" - __description__ = """4chan.org Thread Download Plugin""" - __author_name__ = ("Spoob") - __author_mail__ = ("Spoob@pyload.org") + __version__ = "0.3" + __pattern__ = r"http://boards\.4chan.org/\w+/res/(\d+)" + __description__ = "Downloader for entire 4chan threads" - def __init__(self, parent): - Crypter.__init__(self, parent) - self.parent = parent - self.html = None + def decrypt(self, pyfile): + pagehtml = self.load(pyfile.url) - def file_exists(self): - """ returns True or False - """ - return True + images = set(re.findall(r'(images\.4chan\.org/[^/]*/src/[^"<]*)', pagehtml)) + urls = [] + for image in images: + urls.append("http://" + image) - def proceed(self, url, location): - url = self.parent.url - html = self.req.load(url) - link_pattern = "" - temp_links = [] - if "imagebord.html" in url: - link_pattern = '[<a href="(res/\d*\.html)">Reply</a>]' - temp_links = re.findall(link_pattern, html) - for link in re.findall(link_pattern, html): - temp_links.append(link) - else: - temp_links = re.findall('File : <a href="(http://(?:img\.)?(?:zip\.)?4chan\.org/\w{,3}/src/\d*\..{3})"', html) - self.links = temp_links + self.core.files.addLinks(urls, self.pyfile.package().id)
{"golden_diff": "diff --git a/module/plugins/crypter/FourChanOrg.py b/module/plugins/crypter/FourChanOrg.py\n--- a/module/plugins/crypter/FourChanOrg.py\n+++ b/module/plugins/crypter/FourChanOrg.py\n@@ -6,34 +6,20 @@\n from module.plugins.Crypter import Crypter\n \n class FourChanOrg(Crypter):\n+ # Based on 4chandl by Roland Beermann\n+ # https://gist.github.com/enkore/3492599\n __name__ = \"FourChanOrg\"\n __type__ = \"container\"\n- __pattern__ = r\"http://(www\\.)?(img\\.)?(zip\\.)?4chan.org/\\w+/(res/|imgboard\\.html)\"\n- __version__ = \"0.1\"\n- __description__ = \"\"\"4chan.org Thread Download Plugin\"\"\"\n- __author_name__ = (\"Spoob\")\n- __author_mail__ = (\"Spoob@pyload.org\")\n+ __version__ = \"0.3\"\n+ __pattern__ = r\"http://boards\\.4chan.org/\\w+/res/(\\d+)\"\n+ __description__ = \"Downloader for entire 4chan threads\"\n \n- def __init__(self, parent):\n- Crypter.__init__(self, parent)\n- self.parent = parent\n- self.html = None\n+ def decrypt(self, pyfile):\n+ pagehtml = self.load(pyfile.url)\n \n- def file_exists(self):\n- \"\"\" returns True or False\n- \"\"\"\n- return True\n+ images = set(re.findall(r'(images\\.4chan\\.org/[^/]*/src/[^\"<]*)', pagehtml))\n+ urls = []\n+ for image in images:\n+ urls.append(\"http://\" + image)\n \n- def proceed(self, url, location):\n- url = self.parent.url\n- html = self.req.load(url)\n- link_pattern = \"\"\n- temp_links = []\n- if \"imagebord.html\" in url:\n- link_pattern = '[<a href=\"(res/\\d*\\.html)\">Reply</a>]'\n- temp_links = re.findall(link_pattern, html)\n- for link in re.findall(link_pattern, html):\n- temp_links.append(link)\n- else:\n- temp_links = re.findall('File : <a href=\"(http://(?:img\\.)?(?:zip\\.)?4chan\\.org/\\w{,3}/src/\\d*\\..{3})\"', html)\n- self.links = temp_links\n+ self.core.files.addLinks(urls, self.pyfile.package().id)\n", "issue": "FourChanOrg don't work\nWhen i try to download a thread (e.g. http://boards.4chan.org/wg/res/5176429) nothing happens, only BasePlugin will be used,\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\n\nfrom module.plugins.Crypter import Crypter\n\nclass FourChanOrg(Crypter):\n __name__ = \"FourChanOrg\"\n __type__ = \"container\"\n __pattern__ = r\"http://(www\\.)?(img\\.)?(zip\\.)?4chan.org/\\w+/(res/|imgboard\\.html)\"\n __version__ = \"0.1\"\n __description__ = \"\"\"4chan.org Thread Download Plugin\"\"\"\n __author_name__ = (\"Spoob\")\n __author_mail__ = (\"Spoob@pyload.org\")\n\n def __init__(self, parent):\n Crypter.__init__(self, parent)\n self.parent = parent\n self.html = None\n\n def file_exists(self):\n \"\"\" returns True or False\n \"\"\"\n return True\n\n def proceed(self, url, location):\n url = self.parent.url\n html = self.req.load(url)\n link_pattern = \"\"\n temp_links = []\n if \"imagebord.html\" in url:\n link_pattern = '[<a href=\"(res/\\d*\\.html)\">Reply</a>]'\n temp_links = re.findall(link_pattern, html)\n for link in re.findall(link_pattern, html):\n temp_links.append(link)\n else:\n temp_links = re.findall('File : <a href=\"(http://(?:img\\.)?(?:zip\\.)?4chan\\.org/\\w{,3}/src/\\d*\\..{3})\"', html)\n self.links = temp_links\n", "path": "module/plugins/crypter/FourChanOrg.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\n\nfrom module.plugins.Crypter import Crypter\n\nclass FourChanOrg(Crypter):\n # Based on 4chandl by Roland Beermann\n # https://gist.github.com/enkore/3492599\n __name__ = \"FourChanOrg\"\n __type__ = \"container\"\n __version__ = \"0.3\"\n __pattern__ = r\"http://boards\\.4chan.org/\\w+/res/(\\d+)\"\n __description__ = \"Downloader for entire 4chan threads\"\n\n def decrypt(self, pyfile):\n pagehtml = self.load(pyfile.url)\n\n images = set(re.findall(r'(images\\.4chan\\.org/[^/]*/src/[^\"<]*)', pagehtml))\n urls = []\n for image in images:\n urls.append(\"http://\" + image)\n\n self.core.files.addLinks(urls, self.pyfile.package().id)\n", "path": "module/plugins/crypter/FourChanOrg.py"}]}
725
582
gh_patches_debug_30209
rasdani/github-patches
git_diff
pyodide__pyodide-77
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update to Python 3.7 Python 3.7 is out. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/common.py` Content: ``` 1 from pathlib import Path 2 3 4 ROOTDIR = Path(__file__).parent.resolve() 5 HOSTPYTHON = ROOTDIR / '..' / 'cpython' / 'build' / '3.6.4' / 'host' 6 TARGETPYTHON = ROOTDIR / '..' / 'cpython' / 'installs' / 'python-3.6.4' 7 DEFAULTCFLAGS = '' 8 DEFAULTLDFLAGS = ' '.join([ 9 '-O3', 10 '-s', "BINARYEN_METHOD='native-wasm'", 11 '-Werror', 12 '-s', 'EMULATED_FUNCTION_POINTERS=1', 13 '-s', 'EMULATE_FUNCTION_POINTER_CASTS=1', 14 '-s', 'SIDE_MODULE=1', 15 '-s', 'WASM=1', 16 '--memory-init-file', '0' 17 ]) 18 19 20 def parse_package(package): 21 # Import yaml here because pywasmcross needs to run in the built native 22 # Python, which won't have PyYAML 23 import yaml 24 # TODO: Validate against a schema 25 with open(package) as fd: 26 return yaml.load(fd) 27 ``` Path: `tools/buildpkg.py` Content: ``` 1 #!/usr/bin/env python3 2 3 """ 4 Builds a Pyodide package. 5 """ 6 7 import argparse 8 import hashlib 9 import os 10 from pathlib import Path 11 import shutil 12 import subprocess 13 14 15 import common 16 17 18 ROOTDIR = Path(__file__).parent.resolve() 19 20 21 def check_checksum(path, pkg): 22 """ 23 Checks that a tarball matches the checksum in the package metadata. 24 """ 25 checksum_keys = {'md5', 'sha256'}.intersection(pkg['source']) 26 if not checksum_keys: 27 return 28 elif len(checksum_keys) != 1: 29 raise ValueError('Only one checksum should be included in a package ' 30 'setup; found {}.'.format(checksum_keys)) 31 checksum_algorithm = checksum_keys.pop() 32 checksum = pkg['source'][checksum_algorithm] 33 CHUNK_SIZE = 1 << 16 34 h = getattr(hashlib, checksum_algorithm)() 35 with open(path, 'rb') as fd: 36 while True: 37 chunk = fd.read(CHUNK_SIZE) 38 h.update(chunk) 39 if len(chunk) < CHUNK_SIZE: 40 break 41 if h.hexdigest() != checksum: 42 raise ValueError("Invalid {} checksum".format(checksum_algorithm)) 43 44 45 def download_and_extract(buildpath, packagedir, pkg, args): 46 tarballpath = buildpath / Path(pkg['source']['url']).name 47 if not tarballpath.is_file(): 48 subprocess.run([ 49 'wget', '-q', '-O', str(tarballpath), pkg['source']['url'] 50 ], check=True) 51 check_checksum(tarballpath, pkg) 52 srcpath = buildpath / packagedir 53 if not srcpath.is_dir(): 54 shutil.unpack_archive(str(tarballpath), str(buildpath)) 55 return srcpath 56 57 58 def patch(path, srcpath, pkg, args): 59 if (srcpath / '.patched').is_file(): 60 return 61 62 # Apply all of the patches 63 orig_dir = Path.cwd() 64 pkgdir = path.parent.resolve() 65 os.chdir(srcpath) 66 try: 67 for patch in pkg['source'].get('patches', []): 68 subprocess.run([ 69 'patch', '-p1', '--binary', '-i', pkgdir / patch 70 ], check=True) 71 finally: 72 os.chdir(orig_dir) 73 74 # Add any extra files 75 for src, dst in pkg['source'].get('extras', []): 76 shutil.copyfile(pkgdir / src, srcpath / dst) 77 78 with open(srcpath / '.patched', 'wb') as fd: 79 fd.write(b'\n') 80 81 82 def get_libdir(srcpath, args): 83 # Get the name of the build/lib.XXX directory that distutils wrote its 84 # output to 85 slug = subprocess.check_output([ 86 str(Path(args.host) / 'bin' / 'python3'), 87 '-c', 88 'import sysconfig, sys; ' 89 'print("{}-{}.{}".format(' 90 'sysconfig.get_platform(), ' 91 'sys.version_info[0], ' 92 'sys.version_info[1]))']).decode('ascii').strip() 93 purelib = srcpath / 'build' / 'lib' 94 if purelib.is_dir(): 95 libdir = purelib 96 else: 97 libdir = srcpath / 'build' / ('lib.' + slug) 98 return libdir 99 100 101 def compile(path, srcpath, pkg, args): 102 if (srcpath / '.built').is_file(): 103 return 104 105 orig_dir = Path.cwd() 106 os.chdir(srcpath) 107 try: 108 subprocess.run([ 109 str(Path(args.host) / 'bin' / 'python3'), 110 str(ROOTDIR / 'pywasmcross'), 111 '--cflags', 112 args.cflags + ' ' + 113 pkg.get('build', {}).get('cflags', ''), 114 '--ldflags', 115 args.ldflags + ' ' + 116 pkg.get('build', {}).get('ldflags', ''), 117 '--host', args.host, 118 '--target', args.target], check=True) 119 finally: 120 os.chdir(orig_dir) 121 122 post = pkg.get('build', {}).get('post') 123 if post is not None: 124 libdir = get_libdir(srcpath, args) 125 pkgdir = path.parent.resolve() 126 env = { 127 'BUILD': libdir, 128 'PKGDIR': pkgdir 129 } 130 subprocess.run([ 131 'bash', '-c', post], env=env, check=True) 132 133 with open(srcpath / '.built', 'wb') as fd: 134 fd.write(b'\n') 135 136 137 def package_files(buildpath, srcpath, pkg, args): 138 if (buildpath / '.packaged').is_file(): 139 return 140 141 name = pkg['package']['name'] 142 install_prefix = (srcpath / 'install').resolve() 143 subprocess.run([ 144 'python', 145 Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py', 146 name + '.data', 147 '--preload', 148 '{}@/'.format(install_prefix), 149 '--js-output={}'.format(name + '.js'), 150 '--export-name=pyodide', 151 '--exclude', '*.wasm.pre', 152 '--exclude', '__pycache__', 153 '--use-preload-plugins'], 154 cwd=buildpath, check=True) 155 subprocess.run([ 156 'uglifyjs', 157 buildpath / (name + '.js'), 158 '-o', 159 buildpath / (name + '.js')], check=True) 160 161 with open(buildpath / '.packaged', 'wb') as fd: 162 fd.write(b'\n') 163 164 165 def build_package(path, args): 166 pkg = common.parse_package(path) 167 packagedir = pkg['package']['name'] + '-' + pkg['package']['version'] 168 dirpath = path.parent 169 orig_path = Path.cwd() 170 os.chdir(dirpath) 171 try: 172 buildpath = dirpath / 'build' 173 if not buildpath.is_dir(): 174 os.makedirs(buildpath) 175 srcpath = download_and_extract(buildpath, packagedir, pkg, args) 176 patch(path, srcpath, pkg, args) 177 compile(path, srcpath, pkg, args) 178 package_files(buildpath, srcpath, pkg, args) 179 finally: 180 os.chdir(orig_path) 181 182 183 def parse_args(): 184 parser = argparse.ArgumentParser('Build a pyodide package.') 185 parser.add_argument( 186 'package', type=str, nargs=1, 187 help="Path to meta.yaml package description") 188 parser.add_argument( 189 '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS, 190 help='Extra compiling flags') 191 parser.add_argument( 192 '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS, 193 help='Extra linking flags') 194 parser.add_argument( 195 '--host', type=str, nargs='?', default=common.HOSTPYTHON, 196 help='The path to the host Python installation') 197 parser.add_argument( 198 '--target', type=str, nargs='?', default=common.TARGETPYTHON, 199 help='The path to the target Python installation') 200 return parser.parse_args() 201 202 203 def main(args): 204 path = Path(args.package[0]).resolve() 205 build_package(path, args) 206 207 208 if __name__ == '__main__': 209 args = parse_args() 210 main(args) 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/buildpkg.py b/tools/buildpkg.py --- a/tools/buildpkg.py +++ b/tools/buildpkg.py @@ -79,25 +79,6 @@ fd.write(b'\n') -def get_libdir(srcpath, args): - # Get the name of the build/lib.XXX directory that distutils wrote its - # output to - slug = subprocess.check_output([ - str(Path(args.host) / 'bin' / 'python3'), - '-c', - 'import sysconfig, sys; ' - 'print("{}-{}.{}".format(' - 'sysconfig.get_platform(), ' - 'sys.version_info[0], ' - 'sys.version_info[1]))']).decode('ascii').strip() - purelib = srcpath / 'build' / 'lib' - if purelib.is_dir(): - libdir = purelib - else: - libdir = srcpath / 'build' / ('lib.' + slug) - return libdir - - def compile(path, srcpath, pkg, args): if (srcpath / '.built').is_file(): return @@ -121,10 +102,11 @@ post = pkg.get('build', {}).get('post') if post is not None: - libdir = get_libdir(srcpath, args) + site_packages_dir = ( + srcpath / 'install' / 'lib' / 'python3.7' / 'site-packages') pkgdir = path.parent.resolve() env = { - 'BUILD': libdir, + 'SITEPACKAGES': site_packages_dir, 'PKGDIR': pkgdir } subprocess.run([ diff --git a/tools/common.py b/tools/common.py --- a/tools/common.py +++ b/tools/common.py @@ -2,8 +2,8 @@ ROOTDIR = Path(__file__).parent.resolve() -HOSTPYTHON = ROOTDIR / '..' / 'cpython' / 'build' / '3.6.4' / 'host' -TARGETPYTHON = ROOTDIR / '..' / 'cpython' / 'installs' / 'python-3.6.4' +HOSTPYTHON = ROOTDIR / '..' / 'cpython' / 'build' / '3.7.0' / 'host' +TARGETPYTHON = ROOTDIR / '..' / 'cpython' / 'installs' / 'python-3.7.0' DEFAULTCFLAGS = '' DEFAULTLDFLAGS = ' '.join([ '-O3',
{"golden_diff": "diff --git a/tools/buildpkg.py b/tools/buildpkg.py\n--- a/tools/buildpkg.py\n+++ b/tools/buildpkg.py\n@@ -79,25 +79,6 @@\n fd.write(b'\\n')\n \n \n-def get_libdir(srcpath, args):\n- # Get the name of the build/lib.XXX directory that distutils wrote its\n- # output to\n- slug = subprocess.check_output([\n- str(Path(args.host) / 'bin' / 'python3'),\n- '-c',\n- 'import sysconfig, sys; '\n- 'print(\"{}-{}.{}\".format('\n- 'sysconfig.get_platform(), '\n- 'sys.version_info[0], '\n- 'sys.version_info[1]))']).decode('ascii').strip()\n- purelib = srcpath / 'build' / 'lib'\n- if purelib.is_dir():\n- libdir = purelib\n- else:\n- libdir = srcpath / 'build' / ('lib.' + slug)\n- return libdir\n-\n-\n def compile(path, srcpath, pkg, args):\n if (srcpath / '.built').is_file():\n return\n@@ -121,10 +102,11 @@\n \n post = pkg.get('build', {}).get('post')\n if post is not None:\n- libdir = get_libdir(srcpath, args)\n+ site_packages_dir = (\n+ srcpath / 'install' / 'lib' / 'python3.7' / 'site-packages')\n pkgdir = path.parent.resolve()\n env = {\n- 'BUILD': libdir,\n+ 'SITEPACKAGES': site_packages_dir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\ndiff --git a/tools/common.py b/tools/common.py\n--- a/tools/common.py\n+++ b/tools/common.py\n@@ -2,8 +2,8 @@\n \n \n ROOTDIR = Path(__file__).parent.resolve()\n-HOSTPYTHON = ROOTDIR / '..' / 'cpython' / 'build' / '3.6.4' / 'host'\n-TARGETPYTHON = ROOTDIR / '..' / 'cpython' / 'installs' / 'python-3.6.4'\n+HOSTPYTHON = ROOTDIR / '..' / 'cpython' / 'build' / '3.7.0' / 'host'\n+TARGETPYTHON = ROOTDIR / '..' / 'cpython' / 'installs' / 'python-3.7.0'\n DEFAULTCFLAGS = ''\n DEFAULTLDFLAGS = ' '.join([\n '-O3',\n", "issue": "Update to Python 3.7\nPython 3.7 is out.\n", "before_files": [{"content": "from pathlib import Path\n\n\nROOTDIR = Path(__file__).parent.resolve()\nHOSTPYTHON = ROOTDIR / '..' / 'cpython' / 'build' / '3.6.4' / 'host'\nTARGETPYTHON = ROOTDIR / '..' / 'cpython' / 'installs' / 'python-3.6.4'\nDEFAULTCFLAGS = ''\nDEFAULTLDFLAGS = ' '.join([\n '-O3',\n '-s', \"BINARYEN_METHOD='native-wasm'\",\n '-Werror',\n '-s', 'EMULATED_FUNCTION_POINTERS=1',\n '-s', 'EMULATE_FUNCTION_POINTER_CASTS=1',\n '-s', 'SIDE_MODULE=1',\n '-s', 'WASM=1',\n '--memory-init-file', '0'\n ])\n\n\ndef parse_package(package):\n # Import yaml here because pywasmcross needs to run in the built native\n # Python, which won't have PyYAML\n import yaml\n # TODO: Validate against a schema\n with open(package) as fd:\n return yaml.load(fd)\n", "path": "tools/common.py"}, {"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nfrom pathlib import Path\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = Path(__file__).parent.resolve()\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n checksum_keys = {'md5', 'sha256'}.intersection(pkg['source'])\n if not checksum_keys:\n return\n elif len(checksum_keys) != 1:\n raise ValueError('Only one checksum should be included in a package '\n 'setup; found {}.'.format(checksum_keys))\n checksum_algorithm = checksum_keys.pop()\n checksum = pkg['source'][checksum_algorithm]\n CHUNK_SIZE = 1 << 16\n h = getattr(hashlib, checksum_algorithm)()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid {} checksum\".format(checksum_algorithm))\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = buildpath / Path(pkg['source']['url']).name\n if not tarballpath.is_file():\n subprocess.run([\n 'wget', '-q', '-O', str(tarballpath), pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = buildpath / packagedir\n if not srcpath.is_dir():\n shutil.unpack_archive(str(tarballpath), str(buildpath))\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if (srcpath / '.patched').is_file():\n return\n\n # Apply all of the patches\n orig_dir = Path.cwd()\n pkgdir = path.parent.resolve()\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', pkgdir / patch\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(pkgdir / src, srcpath / dst)\n\n with open(srcpath / '.patched', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n str(Path(args.host) / 'bin' / 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = srcpath / 'build' / 'lib'\n if purelib.is_dir():\n libdir = purelib\n else:\n libdir = srcpath / 'build' / ('lib.' + slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if (srcpath / '.built').is_file():\n return\n\n orig_dir = Path.cwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n str(Path(args.host) / 'bin' / 'python3'),\n str(ROOTDIR / 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = path.parent.resolve()\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(srcpath / '.built', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if (buildpath / '.packaged').is_file():\n return\n\n name = pkg['package']['name']\n install_prefix = (srcpath / 'install').resolve()\n subprocess.run([\n 'python',\n Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py',\n name + '.data',\n '--preload',\n '{}@/'.format(install_prefix),\n '--js-output={}'.format(name + '.js'),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n '--use-preload-plugins'],\n cwd=buildpath, check=True)\n subprocess.run([\n 'uglifyjs',\n buildpath / (name + '.js'),\n '-o',\n buildpath / (name + '.js')], check=True)\n\n with open(buildpath / '.packaged', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = path.parent\n orig_path = Path.cwd()\n os.chdir(dirpath)\n try:\n buildpath = dirpath / 'build'\n if not buildpath.is_dir():\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = Path(args.package[0]).resolve()\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py"}], "after_files": [{"content": "from pathlib import Path\n\n\nROOTDIR = Path(__file__).parent.resolve()\nHOSTPYTHON = ROOTDIR / '..' / 'cpython' / 'build' / '3.7.0' / 'host'\nTARGETPYTHON = ROOTDIR / '..' / 'cpython' / 'installs' / 'python-3.7.0'\nDEFAULTCFLAGS = ''\nDEFAULTLDFLAGS = ' '.join([\n '-O3',\n '-s', \"BINARYEN_METHOD='native-wasm'\",\n '-Werror',\n '-s', 'EMULATED_FUNCTION_POINTERS=1',\n '-s', 'EMULATE_FUNCTION_POINTER_CASTS=1',\n '-s', 'SIDE_MODULE=1',\n '-s', 'WASM=1',\n '--memory-init-file', '0'\n ])\n\n\ndef parse_package(package):\n # Import yaml here because pywasmcross needs to run in the built native\n # Python, which won't have PyYAML\n import yaml\n # TODO: Validate against a schema\n with open(package) as fd:\n return yaml.load(fd)\n", "path": "tools/common.py"}, {"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nfrom pathlib import Path\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = Path(__file__).parent.resolve()\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n checksum_keys = {'md5', 'sha256'}.intersection(pkg['source'])\n if not checksum_keys:\n return\n elif len(checksum_keys) != 1:\n raise ValueError('Only one checksum should be included in a package '\n 'setup; found {}.'.format(checksum_keys))\n checksum_algorithm = checksum_keys.pop()\n checksum = pkg['source'][checksum_algorithm]\n CHUNK_SIZE = 1 << 16\n h = getattr(hashlib, checksum_algorithm)()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid {} checksum\".format(checksum_algorithm))\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = buildpath / Path(pkg['source']['url']).name\n if not tarballpath.is_file():\n subprocess.run([\n 'wget', '-q', '-O', str(tarballpath), pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = buildpath / packagedir\n if not srcpath.is_dir():\n shutil.unpack_archive(str(tarballpath), str(buildpath))\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if (srcpath / '.patched').is_file():\n return\n\n # Apply all of the patches\n orig_dir = Path.cwd()\n pkgdir = path.parent.resolve()\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', pkgdir / patch\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(pkgdir / src, srcpath / dst)\n\n with open(srcpath / '.patched', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef compile(path, srcpath, pkg, args):\n if (srcpath / '.built').is_file():\n return\n\n orig_dir = Path.cwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n str(Path(args.host) / 'bin' / 'python3'),\n str(ROOTDIR / 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n site_packages_dir = (\n srcpath / 'install' / 'lib' / 'python3.7' / 'site-packages')\n pkgdir = path.parent.resolve()\n env = {\n 'SITEPACKAGES': site_packages_dir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(srcpath / '.built', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if (buildpath / '.packaged').is_file():\n return\n\n name = pkg['package']['name']\n install_prefix = (srcpath / 'install').resolve()\n subprocess.run([\n 'python',\n Path(os.environ['EMSCRIPTEN']) / 'tools' / 'file_packager.py',\n name + '.data',\n '--preload',\n '{}@/'.format(install_prefix),\n '--js-output={}'.format(name + '.js'),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n '--use-preload-plugins'],\n cwd=buildpath, check=True)\n subprocess.run([\n 'uglifyjs',\n buildpath / (name + '.js'),\n '-o',\n buildpath / (name + '.js')], check=True)\n\n with open(buildpath / '.packaged', 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = path.parent\n orig_path = Path.cwd()\n os.chdir(dirpath)\n try:\n buildpath = dirpath / 'build'\n if not buildpath.is_dir():\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = Path(args.package[0]).resolve()\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py"}]}
2,647
564
gh_patches_debug_31362
rasdani/github-patches
git_diff
pytorch__ignite-69
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Start current_epoch + current_iteration from 1 instead of 0. Also increment these counters *after* the `ITERATION_COMPLETE`/`EPOCH_COMPLETE` events. See discussion in #59 for more details --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/engine.py` Content: ``` 1 import logging 2 from abc import ABCMeta, abstractmethod 3 from enum import Enum 4 import time 5 6 from ignite.history import History 7 from ignite._utils import _to_hours_mins_secs 8 9 10 class Events(Enum): 11 EPOCH_STARTED = "epoch_started" 12 EPOCH_COMPLETED = "epoch_completed" 13 STARTED = "started" 14 COMPLETED = "completed" 15 ITERATION_STARTED = "iteration_started" 16 ITERATION_COMPLETED = "iteration_completed" 17 EXCEPTION_RAISED = "exception_raised" 18 19 20 class Engine(object): 21 __metaclass__ = ABCMeta 22 23 """ 24 Abstract Engine class that is the super class of the Trainer and Evaluator engines. 25 26 Parameters 27 ---------- 28 process_function : callable 29 A function receiving the current training batch in each iteration, outputing data to be stored in the history 30 31 """ 32 def __init__(self, process_function): 33 self._event_handlers = {} 34 self._logger = logging.getLogger(__name__ + "." + self.__class__.__name__) 35 self._logger.addHandler(logging.NullHandler()) 36 self._process_function = process_function 37 self.current_iteration = 0 38 self.history = History() 39 self.should_terminate = False 40 41 if self._process_function is None: 42 raise ValueError("Engine must be given a processing function in order to run") 43 44 def add_event_handler(self, event_name, handler, *args, **kwargs): 45 """ 46 Add an event handler to be executed when the specified event is fired 47 48 Parameters 49 ---------- 50 event_name: enum 51 event from ignite.trainer.TrainingEvents to attach the 52 handler to 53 handler: Callable 54 the callable event handler that should be invoked 55 args: 56 optional args to be passed to `handler` 57 kwargs: 58 optional keyword args to be passed to `handler` 59 60 Returns 61 ------- 62 None 63 """ 64 if event_name not in Events.__members__.values(): 65 self._logger.error("attempt to add event handler to an invalid event %s ", event_name) 66 raise ValueError("Event {} is not a valid event for this Engine".format(event_name)) 67 68 if event_name not in self._event_handlers: 69 self._event_handlers[event_name] = [] 70 71 self._event_handlers[event_name].append((handler, args, kwargs)) 72 self._logger.debug("added handler for event % ", event_name) 73 74 def on(self, event_name, *args, **kwargs): 75 """ 76 Decorator shortcut for add_event_handler 77 78 Parameters 79 ---------- 80 event_name: enum 81 event to attach the handler to 82 args: 83 optional args to be passed to `handler` 84 kwargs: 85 optional keyword args to be passed to `handler` 86 87 Returns 88 ------- 89 None 90 """ 91 def decorator(f): 92 self.add_event_handler(event_name, f, *args, **kwargs) 93 return f 94 return decorator 95 96 def _fire_event(self, event_name): 97 if event_name in self._event_handlers.keys(): 98 self._logger.debug("firing handlers for event %s ", event_name) 99 for func, args, kwargs in self._event_handlers[event_name]: 100 func(self, *args, **kwargs) 101 102 def terminate(self): 103 """ 104 Sends terminate signal to the engine, so that it terminates after the current iteration 105 """ 106 self._logger.info("Terminate signaled. Engine will stop after current iteration is finished") 107 self.should_terminate = True 108 109 def _run_once_on_dataset(self, dataset): 110 self.dataset = dataset 111 try: 112 start_time = time.time() 113 for batch in dataset: 114 self._fire_event(Events.ITERATION_STARTED) 115 step_result = self._process_function(batch) 116 if step_result is not None: 117 self.history.append(step_result) 118 119 self.current_iteration += 1 120 self._fire_event(Events.ITERATION_COMPLETED) 121 if self.should_terminate: 122 break 123 time_taken = time.time() - start_time 124 hours, mins, secs = _to_hours_mins_secs(time_taken) 125 return hours, mins, secs 126 except BaseException as e: 127 self._logger.error("Current run is terminating due to exception: %s", str(e)) 128 self._fire_event(Events.EXCEPTION_RAISED) 129 raise e 130 131 @abstractmethod 132 def run(self, data, **kwargs): 133 """ 134 Train the model, evaluate the validation set and update best parameters if the validation loss 135 improves. 136 In the event that the validation set is not run (or doesn't exist), the training loss is used 137 to update the best parameters. 138 139 Parameters 140 ---------- 141 data : Iterable 142 Collection of batches allowing for the engine to iterate over(e.g., list or DataLoader) 143 **kwargs: optional 144 Any additional kwargs 145 146 Returns 147 ------- 148 None 149 """ 150 raise NotImplementedError("This method should be implemented by a subclass") 151 ``` Path: `ignite/trainer.py` Content: ``` 1 from __future__ import division 2 3 import time 4 from collections import Iterable 5 from torch.autograd import Variable 6 7 from ignite.engine import Engine, Events 8 from ignite._utils import _to_hours_mins_secs, to_variable 9 10 __all__ = ["Trainer", "create_supervised_trainer"] 11 12 13 class Trainer(Engine): 14 """ 15 Generic trainer class. 16 17 Training update and validation functions receive batches of data and return values which will 18 be stored in the `training_history` and `validation_history`. The trainer defines multiple 19 events in `TrainingEvents` for which the user can attach event handlers to. The events get 20 passed the trainer, so they can access the training/validation history 21 22 23 Parameters 24 ---------- 25 training_update_function : callable 26 Update function receiving the current training batch in each iteration 27 """ 28 29 def __init__(self, training_update_function): 30 super(Trainer, self).__init__(training_update_function) 31 self.current_epoch = 0 32 self.max_epochs = 0 33 34 def _train_one_epoch(self, training_data): 35 hours, mins, secs = self._run_once_on_dataset(training_data) 36 self._logger.info("Epoch[%s] Complete. Time taken: %02d:%02d:%02d", self.current_epoch, hours, 37 mins, secs) 38 39 def run(self, training_data, max_epochs=1): 40 """ 41 Train the model, evaluate the validation set and update best parameters if the validation loss 42 improves. 43 In the event that the validation set is not run (or doesn't exist), the training loss is used 44 to update the best parameters. 45 46 Parameters 47 ---------- 48 training_data : Iterable 49 Collection of training batches allowing repeated iteration (e.g., list or DataLoader) 50 max_epochs: int, optional 51 max epochs to train for [default=1] 52 53 Returns 54 ------- 55 None 56 """ 57 self.dataset = training_data 58 self.current_iteration = 0 59 self.current_epoch = 0 60 61 try: 62 self._logger.info("Training starting with max_epochs={}".format(max_epochs)) 63 64 self.max_epochs = max_epochs 65 66 start_time = time.time() 67 68 self._fire_event(Events.STARTED) 69 while self.current_epoch < max_epochs and not self.should_terminate: 70 self._fire_event(Events.EPOCH_STARTED) 71 self._train_one_epoch(training_data) 72 if self.should_terminate: 73 break 74 self._fire_event(Events.EPOCH_COMPLETED) 75 self.current_epoch += 1 76 77 self._fire_event(Events.COMPLETED) 78 time_taken = time.time() - start_time 79 hours, mins, secs = _to_hours_mins_secs(time_taken) 80 self._logger.info("Training complete. Time taken %02d:%02d:%02d" % (hours, mins, secs)) 81 82 except BaseException as e: 83 self._logger.error("Training is terminating due to exception: %s", str(e)) 84 self._fire_event(Events.EXCEPTION_RAISED) 85 raise e 86 87 88 def create_supervised_trainer(model, optimizer, loss_fn, cuda=False): 89 """ 90 Factory function for creating a trainer for supervised models 91 92 Args: 93 model (torch.nn.Module): the model to train 94 optimizer (torch.optim.Optimizer): the optimizer to use 95 loss_fn (torch.nn loss function): the loss function to use 96 cuda (bool, optional): whether or not to transfer batch to GPU (default: False) 97 98 Returns: 99 Trainer: a trainer instance with supervised update function 100 """ 101 102 def _prepare_batch(batch): 103 x, y = batch 104 x = to_variable(x, cuda=cuda) 105 y = to_variable(y, cuda=cuda) 106 return x, y 107 108 def _update(batch): 109 model.train() 110 optimizer.zero_grad() 111 x, y = _prepare_batch(batch) 112 y_pred = model(x) 113 loss = loss_fn(y_pred, y) 114 loss.backward() 115 optimizer.step() 116 return loss.data.cpu()[0] 117 118 return Trainer(_update) 119 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ignite/engine.py b/ignite/engine.py --- a/ignite/engine.py +++ b/ignite/engine.py @@ -111,15 +111,16 @@ try: start_time = time.time() for batch in dataset: + self.current_iteration += 1 self._fire_event(Events.ITERATION_STARTED) step_result = self._process_function(batch) if step_result is not None: self.history.append(step_result) - self.current_iteration += 1 self._fire_event(Events.ITERATION_COMPLETED) if self.should_terminate: break + time_taken = time.time() - start_time hours, mins, secs = _to_hours_mins_secs(time_taken) return hours, mins, secs diff --git a/ignite/trainer.py b/ignite/trainer.py --- a/ignite/trainer.py +++ b/ignite/trainer.py @@ -67,12 +67,12 @@ self._fire_event(Events.STARTED) while self.current_epoch < max_epochs and not self.should_terminate: + self.current_epoch += 1 self._fire_event(Events.EPOCH_STARTED) self._train_one_epoch(training_data) if self.should_terminate: break self._fire_event(Events.EPOCH_COMPLETED) - self.current_epoch += 1 self._fire_event(Events.COMPLETED) time_taken = time.time() - start_time
{"golden_diff": "diff --git a/ignite/engine.py b/ignite/engine.py\n--- a/ignite/engine.py\n+++ b/ignite/engine.py\n@@ -111,15 +111,16 @@\n try:\n start_time = time.time()\n for batch in dataset:\n+ self.current_iteration += 1\n self._fire_event(Events.ITERATION_STARTED)\n step_result = self._process_function(batch)\n if step_result is not None:\n self.history.append(step_result)\n \n- self.current_iteration += 1\n self._fire_event(Events.ITERATION_COMPLETED)\n if self.should_terminate:\n break\n+\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n return hours, mins, secs\ndiff --git a/ignite/trainer.py b/ignite/trainer.py\n--- a/ignite/trainer.py\n+++ b/ignite/trainer.py\n@@ -67,12 +67,12 @@\n \n self._fire_event(Events.STARTED)\n while self.current_epoch < max_epochs and not self.should_terminate:\n+ self.current_epoch += 1\n self._fire_event(Events.EPOCH_STARTED)\n self._train_one_epoch(training_data)\n if self.should_terminate:\n break\n self._fire_event(Events.EPOCH_COMPLETED)\n- self.current_epoch += 1\n \n self._fire_event(Events.COMPLETED)\n time_taken = time.time() - start_time\n", "issue": "Start current_epoch + current_iteration from 1 instead of 0.\nAlso increment these counters *after* the `ITERATION_COMPLETE`/`EPOCH_COMPLETE` events.\r\n\r\nSee discussion in #59 for more details\n", "before_files": [{"content": "import logging\nfrom abc import ABCMeta, abstractmethod\nfrom enum import Enum\nimport time\n\nfrom ignite.history import History\nfrom ignite._utils import _to_hours_mins_secs\n\n\nclass Events(Enum):\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n STARTED = \"started\"\n COMPLETED = \"completed\"\n ITERATION_STARTED = \"iteration_started\"\n ITERATION_COMPLETED = \"iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n\nclass Engine(object):\n __metaclass__ = ABCMeta\n\n \"\"\"\n Abstract Engine class that is the super class of the Trainer and Evaluator engines.\n\n Parameters\n ----------\n process_function : callable\n A function receiving the current training batch in each iteration, outputing data to be stored in the history\n\n \"\"\"\n def __init__(self, process_function):\n self._event_handlers = {}\n self._logger = logging.getLogger(__name__ + \".\" + self.__class__.__name__)\n self._logger.addHandler(logging.NullHandler())\n self._process_function = process_function\n self.current_iteration = 0\n self.history = History()\n self.should_terminate = False\n\n if self._process_function is None:\n raise ValueError(\"Engine must be given a processing function in order to run\")\n\n def add_event_handler(self, event_name, handler, *args, **kwargs):\n \"\"\"\n Add an event handler to be executed when the specified event is fired\n\n Parameters\n ----------\n event_name: enum\n event from ignite.trainer.TrainingEvents to attach the\n handler to\n handler: Callable\n the callable event handler that should be invoked\n args:\n optional args to be passed to `handler`\n kwargs:\n optional keyword args to be passed to `handler`\n\n Returns\n -------\n None\n \"\"\"\n if event_name not in Events.__members__.values():\n self._logger.error(\"attempt to add event handler to an invalid event %s \", event_name)\n raise ValueError(\"Event {} is not a valid event for this Engine\".format(event_name))\n\n if event_name not in self._event_handlers:\n self._event_handlers[event_name] = []\n\n self._event_handlers[event_name].append((handler, args, kwargs))\n self._logger.debug(\"added handler for event % \", event_name)\n\n def on(self, event_name, *args, **kwargs):\n \"\"\"\n Decorator shortcut for add_event_handler\n\n Parameters\n ----------\n event_name: enum\n event to attach the handler to\n args:\n optional args to be passed to `handler`\n kwargs:\n optional keyword args to be passed to `handler`\n\n Returns\n -------\n None\n \"\"\"\n def decorator(f):\n self.add_event_handler(event_name, f, *args, **kwargs)\n return f\n return decorator\n\n def _fire_event(self, event_name):\n if event_name in self._event_handlers.keys():\n self._logger.debug(\"firing handlers for event %s \", event_name)\n for func, args, kwargs in self._event_handlers[event_name]:\n func(self, *args, **kwargs)\n\n def terminate(self):\n \"\"\"\n Sends terminate signal to the engine, so that it terminates after the current iteration\n \"\"\"\n self._logger.info(\"Terminate signaled. Engine will stop after current iteration is finished\")\n self.should_terminate = True\n\n def _run_once_on_dataset(self, dataset):\n self.dataset = dataset\n try:\n start_time = time.time()\n for batch in dataset:\n self._fire_event(Events.ITERATION_STARTED)\n step_result = self._process_function(batch)\n if step_result is not None:\n self.history.append(step_result)\n\n self.current_iteration += 1\n self._fire_event(Events.ITERATION_COMPLETED)\n if self.should_terminate:\n break\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n return hours, mins, secs\n except BaseException as e:\n self._logger.error(\"Current run is terminating due to exception: %s\", str(e))\n self._fire_event(Events.EXCEPTION_RAISED)\n raise e\n\n @abstractmethod\n def run(self, data, **kwargs):\n \"\"\"\n Train the model, evaluate the validation set and update best parameters if the validation loss\n improves.\n In the event that the validation set is not run (or doesn't exist), the training loss is used\n to update the best parameters.\n\n Parameters\n ----------\n data : Iterable\n Collection of batches allowing for the engine to iterate over(e.g., list or DataLoader)\n **kwargs: optional\n Any additional kwargs\n\n Returns\n -------\n None\n \"\"\"\n raise NotImplementedError(\"This method should be implemented by a subclass\")\n", "path": "ignite/engine.py"}, {"content": "from __future__ import division\n\nimport time\nfrom collections import Iterable\nfrom torch.autograd import Variable\n\nfrom ignite.engine import Engine, Events\nfrom ignite._utils import _to_hours_mins_secs, to_variable\n\n__all__ = [\"Trainer\", \"create_supervised_trainer\"]\n\n\nclass Trainer(Engine):\n \"\"\"\n Generic trainer class.\n\n Training update and validation functions receive batches of data and return values which will\n be stored in the `training_history` and `validation_history`. The trainer defines multiple\n events in `TrainingEvents` for which the user can attach event handlers to. The events get\n passed the trainer, so they can access the training/validation history\n\n\n Parameters\n ----------\n training_update_function : callable\n Update function receiving the current training batch in each iteration\n \"\"\"\n\n def __init__(self, training_update_function):\n super(Trainer, self).__init__(training_update_function)\n self.current_epoch = 0\n self.max_epochs = 0\n\n def _train_one_epoch(self, training_data):\n hours, mins, secs = self._run_once_on_dataset(training_data)\n self._logger.info(\"Epoch[%s] Complete. Time taken: %02d:%02d:%02d\", self.current_epoch, hours,\n mins, secs)\n\n def run(self, training_data, max_epochs=1):\n \"\"\"\n Train the model, evaluate the validation set and update best parameters if the validation loss\n improves.\n In the event that the validation set is not run (or doesn't exist), the training loss is used\n to update the best parameters.\n\n Parameters\n ----------\n training_data : Iterable\n Collection of training batches allowing repeated iteration (e.g., list or DataLoader)\n max_epochs: int, optional\n max epochs to train for [default=1]\n\n Returns\n -------\n None\n \"\"\"\n self.dataset = training_data\n self.current_iteration = 0\n self.current_epoch = 0\n\n try:\n self._logger.info(\"Training starting with max_epochs={}\".format(max_epochs))\n\n self.max_epochs = max_epochs\n\n start_time = time.time()\n\n self._fire_event(Events.STARTED)\n while self.current_epoch < max_epochs and not self.should_terminate:\n self._fire_event(Events.EPOCH_STARTED)\n self._train_one_epoch(training_data)\n if self.should_terminate:\n break\n self._fire_event(Events.EPOCH_COMPLETED)\n self.current_epoch += 1\n\n self._fire_event(Events.COMPLETED)\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Training complete. Time taken %02d:%02d:%02d\" % (hours, mins, secs))\n\n except BaseException as e:\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._fire_event(Events.EXCEPTION_RAISED)\n raise e\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn, cuda=False):\n \"\"\"\n Factory function for creating a trainer for supervised models\n\n Args:\n model (torch.nn.Module): the model to train\n optimizer (torch.optim.Optimizer): the optimizer to use\n loss_fn (torch.nn loss function): the loss function to use\n cuda (bool, optional): whether or not to transfer batch to GPU (default: False)\n\n Returns:\n Trainer: a trainer instance with supervised update function\n \"\"\"\n\n def _prepare_batch(batch):\n x, y = batch\n x = to_variable(x, cuda=cuda)\n y = to_variable(y, cuda=cuda)\n return x, y\n\n def _update(batch):\n model.train()\n optimizer.zero_grad()\n x, y = _prepare_batch(batch)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.data.cpu()[0]\n\n return Trainer(_update)\n", "path": "ignite/trainer.py"}], "after_files": [{"content": "import logging\nfrom abc import ABCMeta, abstractmethod\nfrom enum import Enum\nimport time\n\nfrom ignite.history import History\nfrom ignite._utils import _to_hours_mins_secs\n\n\nclass Events(Enum):\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n STARTED = \"started\"\n COMPLETED = \"completed\"\n ITERATION_STARTED = \"iteration_started\"\n ITERATION_COMPLETED = \"iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n\nclass Engine(object):\n __metaclass__ = ABCMeta\n\n \"\"\"\n Abstract Engine class that is the super class of the Trainer and Evaluator engines.\n\n Parameters\n ----------\n process_function : callable\n A function receiving the current training batch in each iteration, outputing data to be stored in the history\n\n \"\"\"\n def __init__(self, process_function):\n self._event_handlers = {}\n self._logger = logging.getLogger(__name__ + \".\" + self.__class__.__name__)\n self._logger.addHandler(logging.NullHandler())\n self._process_function = process_function\n self.current_iteration = 0\n self.history = History()\n self.should_terminate = False\n\n if self._process_function is None:\n raise ValueError(\"Engine must be given a processing function in order to run\")\n\n def add_event_handler(self, event_name, handler, *args, **kwargs):\n \"\"\"\n Add an event handler to be executed when the specified event is fired\n\n Parameters\n ----------\n event_name: enum\n event from ignite.trainer.TrainingEvents to attach the\n handler to\n handler: Callable\n the callable event handler that should be invoked\n args:\n optional args to be passed to `handler`\n kwargs:\n optional keyword args to be passed to `handler`\n\n Returns\n -------\n None\n \"\"\"\n if event_name not in Events.__members__.values():\n self._logger.error(\"attempt to add event handler to an invalid event %s \", event_name)\n raise ValueError(\"Event {} is not a valid event for this Engine\".format(event_name))\n\n if event_name not in self._event_handlers:\n self._event_handlers[event_name] = []\n\n self._event_handlers[event_name].append((handler, args, kwargs))\n self._logger.debug(\"added handler for event % \", event_name)\n\n def on(self, event_name, *args, **kwargs):\n \"\"\"\n Decorator shortcut for add_event_handler\n\n Parameters\n ----------\n event_name: enum\n event to attach the handler to\n args:\n optional args to be passed to `handler`\n kwargs:\n optional keyword args to be passed to `handler`\n\n Returns\n -------\n None\n \"\"\"\n def decorator(f):\n self.add_event_handler(event_name, f, *args, **kwargs)\n return f\n return decorator\n\n def _fire_event(self, event_name):\n if event_name in self._event_handlers.keys():\n self._logger.debug(\"firing handlers for event %s \", event_name)\n for func, args, kwargs in self._event_handlers[event_name]:\n func(self, *args, **kwargs)\n\n def terminate(self):\n \"\"\"\n Sends terminate signal to the engine, so that it terminates after the current iteration\n \"\"\"\n self._logger.info(\"Terminate signaled. Engine will stop after current iteration is finished\")\n self.should_terminate = True\n\n def _run_once_on_dataset(self, dataset):\n self.dataset = dataset\n try:\n start_time = time.time()\n for batch in dataset:\n self.current_iteration += 1\n self._fire_event(Events.ITERATION_STARTED)\n step_result = self._process_function(batch)\n if step_result is not None:\n self.history.append(step_result)\n\n self._fire_event(Events.ITERATION_COMPLETED)\n if self.should_terminate:\n break\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n return hours, mins, secs\n except BaseException as e:\n self._logger.error(\"Current run is terminating due to exception: %s\", str(e))\n self._fire_event(Events.EXCEPTION_RAISED)\n raise e\n\n @abstractmethod\n def run(self, data, **kwargs):\n \"\"\"\n Train the model, evaluate the validation set and update best parameters if the validation loss\n improves.\n In the event that the validation set is not run (or doesn't exist), the training loss is used\n to update the best parameters.\n\n Parameters\n ----------\n data : Iterable\n Collection of batches allowing for the engine to iterate over(e.g., list or DataLoader)\n **kwargs: optional\n Any additional kwargs\n\n Returns\n -------\n None\n \"\"\"\n raise NotImplementedError(\"This method should be implemented by a subclass\")\n", "path": "ignite/engine.py"}, {"content": "from __future__ import division\n\nimport time\nfrom collections import Iterable\nfrom torch.autograd import Variable\n\nfrom ignite.engine import Engine, Events\nfrom ignite._utils import _to_hours_mins_secs, to_variable\n\n__all__ = [\"Trainer\", \"create_supervised_trainer\"]\n\n\nclass Trainer(Engine):\n \"\"\"\n Generic trainer class.\n\n Training update and validation functions receive batches of data and return values which will\n be stored in the `training_history` and `validation_history`. The trainer defines multiple\n events in `TrainingEvents` for which the user can attach event handlers to. The events get\n passed the trainer, so they can access the training/validation history\n\n\n Parameters\n ----------\n training_update_function : callable\n Update function receiving the current training batch in each iteration\n \"\"\"\n\n def __init__(self, training_update_function):\n super(Trainer, self).__init__(training_update_function)\n self.current_epoch = 0\n self.max_epochs = 0\n\n def _train_one_epoch(self, training_data):\n hours, mins, secs = self._run_once_on_dataset(training_data)\n self._logger.info(\"Epoch[%s] Complete. Time taken: %02d:%02d:%02d\", self.current_epoch, hours,\n mins, secs)\n\n def run(self, training_data, max_epochs=1):\n \"\"\"\n Train the model, evaluate the validation set and update best parameters if the validation loss\n improves.\n In the event that the validation set is not run (or doesn't exist), the training loss is used\n to update the best parameters.\n\n Parameters\n ----------\n training_data : Iterable\n Collection of training batches allowing repeated iteration (e.g., list or DataLoader)\n max_epochs: int, optional\n max epochs to train for [default=1]\n\n Returns\n -------\n None\n \"\"\"\n self.dataset = training_data\n self.current_iteration = 0\n self.current_epoch = 0\n\n try:\n self._logger.info(\"Training starting with max_epochs={}\".format(max_epochs))\n\n self.max_epochs = max_epochs\n\n start_time = time.time()\n\n self._fire_event(Events.STARTED)\n while self.current_epoch < max_epochs and not self.should_terminate:\n self.current_epoch += 1\n self._fire_event(Events.EPOCH_STARTED)\n self._train_one_epoch(training_data)\n if self.should_terminate:\n break\n self._fire_event(Events.EPOCH_COMPLETED)\n\n self._fire_event(Events.COMPLETED)\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Training complete. Time taken %02d:%02d:%02d\" % (hours, mins, secs))\n\n except BaseException as e:\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._fire_event(Events.EXCEPTION_RAISED)\n raise e\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn, cuda=False):\n \"\"\"\n Factory function for creating a trainer for supervised models\n\n Args:\n model (torch.nn.Module): the model to train\n optimizer (torch.optim.Optimizer): the optimizer to use\n loss_fn (torch.nn loss function): the loss function to use\n cuda (bool, optional): whether or not to transfer batch to GPU (default: False)\n\n Returns:\n Trainer: a trainer instance with supervised update function\n \"\"\"\n\n def _prepare_batch(batch):\n x, y = batch\n x = to_variable(x, cuda=cuda)\n y = to_variable(y, cuda=cuda)\n return x, y\n\n def _update(batch):\n model.train()\n optimizer.zero_grad()\n x, y = _prepare_batch(batch)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.data.cpu()[0]\n\n return Trainer(_update)\n", "path": "ignite/trainer.py"}]}
2,837
326
gh_patches_debug_11575
rasdani/github-patches
git_diff
mindsdb__lightwood-968
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve runtime of `LightGBMArray` for long-horizon forecasting Two main approaches: - Disable optuna hyperparam search past some threshold. - Opt for a recursive strategy instead of direct (i.e. same regressor trained for all timesteps v/s one for each step). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lightwood/mixer/lightgbm_array.py` Content: ``` 1 from copy import deepcopy 2 from typing import Dict, List, Union 3 4 import numpy as np 5 import pandas as pd 6 7 from lightwood.helpers.log import log 8 from lightwood.mixer.helpers.ts import _apply_stl_on_training, _stl_transform, _stl_inverse_transform 9 from lightwood.encoder.base import BaseEncoder 10 from lightwood.mixer.base import BaseMixer 11 from lightwood.mixer.lightgbm import LightGBM 12 from lightwood.api.types import PredictionArguments, TimeseriesSettings 13 from lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs 14 15 16 class LightGBMArray(BaseMixer): 17 """LightGBM-based model, intended for usage in time series tasks.""" 18 models: List[LightGBM] 19 submodel_stop_after: float 20 target: str 21 supports_proba: bool 22 ts_analysis: Dict 23 tss: TimeseriesSettings 24 25 def __init__( 26 self, 27 stop_after: float, 28 target: str, 29 dtype_dict: Dict[str, str], 30 input_cols: List[str], 31 fit_on_dev: bool, 32 target_encoder: BaseEncoder, 33 ts_analysis: Dict[str, object], 34 use_stl: bool, 35 tss: TimeseriesSettings 36 ): 37 super().__init__(stop_after) 38 self.tss = tss 39 self.horizon = tss.horizon 40 self.submodel_stop_after = stop_after / self.horizon 41 self.target = target 42 self.offset_pred_cols = [f'{self.target}_timestep_{i}' for i in range(1, self.horizon)] 43 if set(input_cols) != {self.tss.order_by}: 44 input_cols.remove(self.tss.order_by) 45 for col in self.offset_pred_cols: 46 dtype_dict[col] = dtype_dict[self.target] 47 self.models = [LightGBM(self.submodel_stop_after, 48 target_col, 49 dtype_dict, 50 input_cols, 51 False, # fit_on_dev, 52 True, # use_optuna 53 target_encoder) 54 for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)] 55 self.ts_analysis = ts_analysis 56 self.supports_proba = False 57 self.use_stl = False 58 self.stable = True 59 60 def _fit(self, train_data: EncodedDs, dev_data: EncodedDs, submodel_method='fit') -> None: 61 original_train = deepcopy(train_data.data_frame) 62 original_dev = deepcopy(dev_data.data_frame) 63 64 if self.use_stl and self.ts_analysis.get('stl_transforms', False): 65 _apply_stl_on_training(train_data, dev_data, self.target, self.tss, self.ts_analysis) 66 67 for timestep in range(self.horizon): 68 getattr(self.models[timestep], submodel_method)(train_data, dev_data) 69 70 # restore dfs 71 train_data.data_frame = original_train 72 dev_data.data_frame = original_dev 73 74 def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None: 75 log.info('Started fitting LGBM models for array prediction') 76 self._fit(train_data, dev_data, submodel_method='fit') 77 78 def partial_fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None: 79 log.info('Updating array of LGBM models...') 80 self._fit(train_data, dev_data, submodel_method='partial_fit') 81 82 def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs], 83 args: PredictionArguments = PredictionArguments()) -> pd.DataFrame: 84 if args.predict_proba: 85 log.warning('This model does not output probability estimates') 86 87 original_df = deepcopy(ds.data_frame) 88 length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds) 89 ydf = pd.DataFrame(0, # zero-filled 90 index=np.arange(length), 91 columns=[f'prediction_{i}' for i in range(self.horizon)]) 92 93 if self.use_stl and self.ts_analysis.get('stl_transforms', False): 94 ds.data_frame = _stl_transform(ydf, ds, self.target, self.tss, self.ts_analysis) 95 96 for timestep in range(self.horizon): 97 ydf[f'prediction_{timestep}'] = self.models[timestep](ds, args)['prediction'].values 98 99 if self.use_stl and self.ts_analysis.get('stl_transforms', False): 100 ydf = _stl_inverse_transform(ydf, ds, self.tss, self.ts_analysis) 101 102 if self.models[0].positive_domain: 103 ydf = ydf.clip(0) 104 105 ydf['prediction'] = ydf.values.tolist() 106 ds.data_frame = original_df 107 return ydf[['prediction']] 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lightwood/mixer/lightgbm_array.py b/lightwood/mixer/lightgbm_array.py --- a/lightwood/mixer/lightgbm_array.py +++ b/lightwood/mixer/lightgbm_array.py @@ -49,7 +49,7 @@ dtype_dict, input_cols, False, # fit_on_dev, - True, # use_optuna + True if tss.horizon < 10 else False, # use_optuna target_encoder) for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)] self.ts_analysis = ts_analysis
{"golden_diff": "diff --git a/lightwood/mixer/lightgbm_array.py b/lightwood/mixer/lightgbm_array.py\n--- a/lightwood/mixer/lightgbm_array.py\n+++ b/lightwood/mixer/lightgbm_array.py\n@@ -49,7 +49,7 @@\n dtype_dict,\n input_cols,\n False, # fit_on_dev,\n- True, # use_optuna\n+ True if tss.horizon < 10 else False, # use_optuna\n target_encoder)\n for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)]\n self.ts_analysis = ts_analysis\n", "issue": "Improve runtime of `LightGBMArray` for long-horizon forecasting\nTwo main approaches:\r\n\r\n- Disable optuna hyperparam search past some threshold.\r\n- Opt for a recursive strategy instead of direct (i.e. same regressor trained for all timesteps v/s one for each step).\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import Dict, List, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom lightwood.helpers.log import log\nfrom lightwood.mixer.helpers.ts import _apply_stl_on_training, _stl_transform, _stl_inverse_transform\nfrom lightwood.encoder.base import BaseEncoder\nfrom lightwood.mixer.base import BaseMixer\nfrom lightwood.mixer.lightgbm import LightGBM\nfrom lightwood.api.types import PredictionArguments, TimeseriesSettings\nfrom lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs\n\n\nclass LightGBMArray(BaseMixer):\n \"\"\"LightGBM-based model, intended for usage in time series tasks.\"\"\"\n models: List[LightGBM]\n submodel_stop_after: float\n target: str\n supports_proba: bool\n ts_analysis: Dict\n tss: TimeseriesSettings\n\n def __init__(\n self,\n stop_after: float,\n target: str,\n dtype_dict: Dict[str, str],\n input_cols: List[str],\n fit_on_dev: bool,\n target_encoder: BaseEncoder,\n ts_analysis: Dict[str, object],\n use_stl: bool,\n tss: TimeseriesSettings\n ):\n super().__init__(stop_after)\n self.tss = tss\n self.horizon = tss.horizon\n self.submodel_stop_after = stop_after / self.horizon\n self.target = target\n self.offset_pred_cols = [f'{self.target}_timestep_{i}' for i in range(1, self.horizon)]\n if set(input_cols) != {self.tss.order_by}:\n input_cols.remove(self.tss.order_by)\n for col in self.offset_pred_cols:\n dtype_dict[col] = dtype_dict[self.target]\n self.models = [LightGBM(self.submodel_stop_after,\n target_col,\n dtype_dict,\n input_cols,\n False, # fit_on_dev,\n True, # use_optuna\n target_encoder)\n for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)]\n self.ts_analysis = ts_analysis\n self.supports_proba = False\n self.use_stl = False\n self.stable = True\n\n def _fit(self, train_data: EncodedDs, dev_data: EncodedDs, submodel_method='fit') -> None:\n original_train = deepcopy(train_data.data_frame)\n original_dev = deepcopy(dev_data.data_frame)\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n _apply_stl_on_training(train_data, dev_data, self.target, self.tss, self.ts_analysis)\n\n for timestep in range(self.horizon):\n getattr(self.models[timestep], submodel_method)(train_data, dev_data)\n\n # restore dfs\n train_data.data_frame = original_train\n dev_data.data_frame = original_dev\n\n def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n log.info('Started fitting LGBM models for array prediction')\n self._fit(train_data, dev_data, submodel_method='fit')\n\n def partial_fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n log.info('Updating array of LGBM models...')\n self._fit(train_data, dev_data, submodel_method='partial_fit')\n\n def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs],\n args: PredictionArguments = PredictionArguments()) -> pd.DataFrame:\n if args.predict_proba:\n log.warning('This model does not output probability estimates')\n\n original_df = deepcopy(ds.data_frame)\n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n index=np.arange(length),\n columns=[f'prediction_{i}' for i in range(self.horizon)])\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n ds.data_frame = _stl_transform(ydf, ds, self.target, self.tss, self.ts_analysis)\n\n for timestep in range(self.horizon):\n ydf[f'prediction_{timestep}'] = self.models[timestep](ds, args)['prediction'].values\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n ydf = _stl_inverse_transform(ydf, ds, self.tss, self.ts_analysis)\n\n if self.models[0].positive_domain:\n ydf = ydf.clip(0)\n\n ydf['prediction'] = ydf.values.tolist()\n ds.data_frame = original_df\n return ydf[['prediction']]\n", "path": "lightwood/mixer/lightgbm_array.py"}], "after_files": [{"content": "from copy import deepcopy\nfrom typing import Dict, List, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom lightwood.helpers.log import log\nfrom lightwood.mixer.helpers.ts import _apply_stl_on_training, _stl_transform, _stl_inverse_transform\nfrom lightwood.encoder.base import BaseEncoder\nfrom lightwood.mixer.base import BaseMixer\nfrom lightwood.mixer.lightgbm import LightGBM\nfrom lightwood.api.types import PredictionArguments, TimeseriesSettings\nfrom lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs\n\n\nclass LightGBMArray(BaseMixer):\n \"\"\"LightGBM-based model, intended for usage in time series tasks.\"\"\"\n models: List[LightGBM]\n submodel_stop_after: float\n target: str\n supports_proba: bool\n ts_analysis: Dict\n tss: TimeseriesSettings\n\n def __init__(\n self,\n stop_after: float,\n target: str,\n dtype_dict: Dict[str, str],\n input_cols: List[str],\n fit_on_dev: bool,\n target_encoder: BaseEncoder,\n ts_analysis: Dict[str, object],\n use_stl: bool,\n tss: TimeseriesSettings\n ):\n super().__init__(stop_after)\n self.tss = tss\n self.horizon = tss.horizon\n self.submodel_stop_after = stop_after / self.horizon\n self.target = target\n self.offset_pred_cols = [f'{self.target}_timestep_{i}' for i in range(1, self.horizon)]\n if set(input_cols) != {self.tss.order_by}:\n input_cols.remove(self.tss.order_by)\n for col in self.offset_pred_cols:\n dtype_dict[col] = dtype_dict[self.target]\n self.models = [LightGBM(self.submodel_stop_after,\n target_col,\n dtype_dict,\n input_cols,\n False, # fit_on_dev,\n True if tss.horizon < 10 else False, # use_optuna\n target_encoder)\n for _, target_col in zip(range(self.horizon), [target] + self.offset_pred_cols)]\n self.ts_analysis = ts_analysis\n self.supports_proba = False\n self.use_stl = False\n self.stable = True\n\n def _fit(self, train_data: EncodedDs, dev_data: EncodedDs, submodel_method='fit') -> None:\n original_train = deepcopy(train_data.data_frame)\n original_dev = deepcopy(dev_data.data_frame)\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n _apply_stl_on_training(train_data, dev_data, self.target, self.tss, self.ts_analysis)\n\n for timestep in range(self.horizon):\n getattr(self.models[timestep], submodel_method)(train_data, dev_data)\n\n # restore dfs\n train_data.data_frame = original_train\n dev_data.data_frame = original_dev\n\n def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n log.info('Started fitting LGBM models for array prediction')\n self._fit(train_data, dev_data, submodel_method='fit')\n\n def partial_fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n log.info('Updating array of LGBM models...')\n self._fit(train_data, dev_data, submodel_method='partial_fit')\n\n def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs],\n args: PredictionArguments = PredictionArguments()) -> pd.DataFrame:\n if args.predict_proba:\n log.warning('This model does not output probability estimates')\n\n original_df = deepcopy(ds.data_frame)\n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n index=np.arange(length),\n columns=[f'prediction_{i}' for i in range(self.horizon)])\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n ds.data_frame = _stl_transform(ydf, ds, self.target, self.tss, self.ts_analysis)\n\n for timestep in range(self.horizon):\n ydf[f'prediction_{timestep}'] = self.models[timestep](ds, args)['prediction'].values\n\n if self.use_stl and self.ts_analysis.get('stl_transforms', False):\n ydf = _stl_inverse_transform(ydf, ds, self.tss, self.ts_analysis)\n\n if self.models[0].positive_domain:\n ydf = ydf.clip(0)\n\n ydf['prediction'] = ydf.values.tolist()\n ds.data_frame = original_df\n return ydf[['prediction']]\n", "path": "lightwood/mixer/lightgbm_array.py"}]}
1,576
143
gh_patches_debug_16279
rasdani/github-patches
git_diff
scikit-image__scikit-image-1367
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Invalid deprecation of canny, perhaps others ``` $ python -c "from skimage import filters as F; F.canny(0)" Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/stefan/src/scikit-image/skimage/_shared/utils.py", line 46, in __call__ msg = 'Call to deprecated function ``%s``.' % func.__name__ AttributeError: 'int' object has no attribute '__name__' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `skimage/filters/__init__.py` Content: ``` 1 from .lpi_filter import inverse, wiener, LPIFilter2D 2 from ._gaussian import gaussian_filter 3 from .edges import (sobel, hsobel, vsobel, sobel_h, sobel_v, 4 scharr, hscharr, vscharr, scharr_h, scharr_v, 5 prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v, 6 roberts, roberts_positive_diagonal, 7 roberts_negative_diagonal, roberts_pos_diag, 8 roberts_neg_diag) 9 from ._rank_order import rank_order 10 from ._gabor import gabor_kernel, gabor_filter 11 from .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen, 12 threshold_isodata) 13 from . import rank 14 from .rank import median 15 16 from .._shared.utils import deprecated 17 from .. import restoration 18 denoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\ 19 (restoration.denoise_bilateral) 20 denoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\ 21 (restoration.denoise_tv_bregman) 22 denoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\ 23 (restoration.denoise_tv_chambolle) 24 25 # Backward compatibility v<0.11 26 @deprecated 27 def canny(*args, **kwargs): 28 # Hack to avoid circular import 29 from ..feature._canny import canny as canny_ 30 return canny_(*args, **kwargs) 31 32 33 __all__ = ['inverse', 34 'wiener', 35 'LPIFilter2D', 36 'gaussian_filter', 37 'median', 38 'canny', 39 'sobel', 40 'hsobel', 41 'vsobel', 42 'sobel_h', 43 'sobel_v', 44 'scharr', 45 'hscharr', 46 'vscharr', 47 'scharr_h', 48 'scharr_v', 49 'prewitt', 50 'hprewitt', 51 'vprewitt', 52 'prewitt_h', 53 'prewitt_v', 54 'roberts', 55 'roberts_positive_diagonal', 56 'roberts_negative_diagonal', 57 'roberts_pos_diag', 58 'roberts_neg_diag', 59 'denoise_tv_chambolle', 60 'denoise_bilateral', 61 'denoise_tv_bregman', 62 'rank_order', 63 'gabor_kernel', 64 'gabor_filter', 65 'threshold_adaptive', 66 'threshold_otsu', 67 'threshold_yen', 68 'threshold_isodata', 69 'rank'] 70 ``` Path: `skimage/filter/__init__.py` Content: ``` 1 from .._shared.utils import skimage_deprecation 2 from warnings import warn 3 4 global _import_warned 5 6 warn(skimage_deprecation('The `skimage.filter` module has been renamed ' 7 'to `skimage.filters`. This placeholder module ' 8 'will be removed in v0.13.')) 9 _import_warned = True 10 11 del warn 12 del skimage_deprecation 13 14 from ..filters.lpi_filter import inverse, wiener, LPIFilter2D 15 from ..filters._gaussian import gaussian_filter 16 from ..filters.edges import (sobel, hsobel, vsobel, sobel_h, sobel_v, 17 scharr, hscharr, vscharr, scharr_h, scharr_v, 18 prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v, 19 roberts, roberts_positive_diagonal, 20 roberts_negative_diagonal, roberts_pos_diag, 21 roberts_neg_diag) 22 from ..filters._rank_order import rank_order 23 from ..filters._gabor import gabor_kernel, gabor_filter 24 from ..filters.thresholding import (threshold_adaptive, threshold_otsu, threshold_yen, 25 threshold_isodata) 26 from ..filters import rank 27 from ..filters.rank import median 28 29 from skimage._shared.utils import deprecated 30 from skimage import restoration 31 denoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\ 32 (restoration.denoise_bilateral) 33 denoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\ 34 (restoration.denoise_tv_bregman) 35 denoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\ 36 (restoration.denoise_tv_chambolle) 37 38 # Backward compatibility v<0.11 39 @deprecated 40 def canny(*args, **kwargs): 41 # Hack to avoid circular import 42 from skimage.feature._canny import canny as canny_ 43 return canny_(*args, **kwargs) 44 45 46 __all__ = ['inverse', 47 'wiener', 48 'LPIFilter2D', 49 'gaussian_filter', 50 'median', 51 'canny', 52 'sobel', 53 'hsobel', 54 'vsobel', 55 'sobel_h', 56 'sobel_v', 57 'scharr', 58 'hscharr', 59 'vscharr', 60 'scharr_h', 61 'scharr_v', 62 'prewitt', 63 'hprewitt', 64 'vprewitt', 65 'prewitt_h', 66 'prewitt_v', 67 'roberts', 68 'roberts_positive_diagonal', 69 'roberts_negative_diagonal', 70 'roberts_pos_diag', 71 'roberts_neg_diag', 72 'denoise_tv_chambolle', 73 'denoise_bilateral', 74 'denoise_tv_bregman', 75 'rank_order', 76 'gabor_kernel', 77 'gabor_filter', 78 'threshold_adaptive', 79 'threshold_otsu', 80 'threshold_yen', 81 'threshold_isodata', 82 'rank'] 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/skimage/filter/__init__.py b/skimage/filter/__init__.py --- a/skimage/filter/__init__.py +++ b/skimage/filter/__init__.py @@ -36,7 +36,7 @@ (restoration.denoise_tv_chambolle) # Backward compatibility v<0.11 -@deprecated +@deprecated('skimage.feature.canny') def canny(*args, **kwargs): # Hack to avoid circular import from skimage.feature._canny import canny as canny_ diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py --- a/skimage/filters/__init__.py +++ b/skimage/filters/__init__.py @@ -23,7 +23,7 @@ (restoration.denoise_tv_chambolle) # Backward compatibility v<0.11 -@deprecated +@deprecated('skimage.feature.canny') def canny(*args, **kwargs): # Hack to avoid circular import from ..feature._canny import canny as canny_
{"golden_diff": "diff --git a/skimage/filter/__init__.py b/skimage/filter/__init__.py\n--- a/skimage/filter/__init__.py\n+++ b/skimage/filter/__init__.py\n@@ -36,7 +36,7 @@\n (restoration.denoise_tv_chambolle)\n \n # Backward compatibility v<0.11\n-@deprecated\n+@deprecated('skimage.feature.canny')\n def canny(*args, **kwargs):\n # Hack to avoid circular import\n from skimage.feature._canny import canny as canny_\ndiff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py\n--- a/skimage/filters/__init__.py\n+++ b/skimage/filters/__init__.py\n@@ -23,7 +23,7 @@\n (restoration.denoise_tv_chambolle)\n \n # Backward compatibility v<0.11\n-@deprecated\n+@deprecated('skimage.feature.canny')\n def canny(*args, **kwargs):\n # Hack to avoid circular import\n from ..feature._canny import canny as canny_\n", "issue": "Invalid deprecation of canny, perhaps others\n```\n$ python -c \"from skimage import filters as F; F.canny(0)\"\nTraceback (most recent call last):\n File \"<string>\", line 1, in <module>\n File \"/home/stefan/src/scikit-image/skimage/_shared/utils.py\", line 46, in __call__\n msg = 'Call to deprecated function ``%s``.' % func.__name__\nAttributeError: 'int' object has no attribute '__name__'\n```\n\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian_filter\nfrom .edges import (sobel, hsobel, vsobel, sobel_h, sobel_v,\n scharr, hscharr, vscharr, scharr_h, scharr_v,\n prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v,\n roberts, roberts_positive_diagonal,\n roberts_negative_diagonal, roberts_pos_diag,\n roberts_neg_diag)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor_filter\nfrom .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata)\nfrom . import rank\nfrom .rank import median\n\nfrom .._shared.utils import deprecated\nfrom .. import restoration\ndenoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\\\n (restoration.denoise_bilateral)\ndenoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\\\n (restoration.denoise_tv_bregman)\ndenoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\\\n (restoration.denoise_tv_chambolle)\n\n# Backward compatibility v<0.11\n@deprecated\ndef canny(*args, **kwargs):\n # Hack to avoid circular import\n from ..feature._canny import canny as canny_\n return canny_(*args, **kwargs)\n\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian_filter',\n 'median',\n 'canny',\n 'sobel',\n 'hsobel',\n 'vsobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'hscharr',\n 'vscharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'hprewitt',\n 'vprewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_positive_diagonal',\n 'roberts_negative_diagonal',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor_filter',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'rank']\n", "path": "skimage/filters/__init__.py"}, {"content": "from .._shared.utils import skimage_deprecation\nfrom warnings import warn\n\nglobal _import_warned\n\nwarn(skimage_deprecation('The `skimage.filter` module has been renamed '\n 'to `skimage.filters`. This placeholder module '\n 'will be removed in v0.13.'))\n_import_warned = True\n\ndel warn\ndel skimage_deprecation\n\nfrom ..filters.lpi_filter import inverse, wiener, LPIFilter2D\nfrom ..filters._gaussian import gaussian_filter\nfrom ..filters.edges import (sobel, hsobel, vsobel, sobel_h, sobel_v,\n scharr, hscharr, vscharr, scharr_h, scharr_v,\n prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v,\n roberts, roberts_positive_diagonal,\n roberts_negative_diagonal, roberts_pos_diag,\n roberts_neg_diag)\nfrom ..filters._rank_order import rank_order\nfrom ..filters._gabor import gabor_kernel, gabor_filter\nfrom ..filters.thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata)\nfrom ..filters import rank\nfrom ..filters.rank import median\n\nfrom skimage._shared.utils import deprecated\nfrom skimage import restoration\ndenoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\\\n (restoration.denoise_bilateral)\ndenoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\\\n (restoration.denoise_tv_bregman)\ndenoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\\\n (restoration.denoise_tv_chambolle)\n\n# Backward compatibility v<0.11\n@deprecated\ndef canny(*args, **kwargs):\n # Hack to avoid circular import\n from skimage.feature._canny import canny as canny_\n return canny_(*args, **kwargs)\n\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian_filter',\n 'median',\n 'canny',\n 'sobel',\n 'hsobel',\n 'vsobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'hscharr',\n 'vscharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'hprewitt',\n 'vprewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_positive_diagonal',\n 'roberts_negative_diagonal',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor_filter',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'rank']\n", "path": "skimage/filter/__init__.py"}], "after_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian_filter\nfrom .edges import (sobel, hsobel, vsobel, sobel_h, sobel_v,\n scharr, hscharr, vscharr, scharr_h, scharr_v,\n prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v,\n roberts, roberts_positive_diagonal,\n roberts_negative_diagonal, roberts_pos_diag,\n roberts_neg_diag)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor_filter\nfrom .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata)\nfrom . import rank\nfrom .rank import median\n\nfrom .._shared.utils import deprecated\nfrom .. import restoration\ndenoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\\\n (restoration.denoise_bilateral)\ndenoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\\\n (restoration.denoise_tv_bregman)\ndenoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\\\n (restoration.denoise_tv_chambolle)\n\n# Backward compatibility v<0.11\n@deprecated('skimage.feature.canny')\ndef canny(*args, **kwargs):\n # Hack to avoid circular import\n from ..feature._canny import canny as canny_\n return canny_(*args, **kwargs)\n\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian_filter',\n 'median',\n 'canny',\n 'sobel',\n 'hsobel',\n 'vsobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'hscharr',\n 'vscharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'hprewitt',\n 'vprewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_positive_diagonal',\n 'roberts_negative_diagonal',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor_filter',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'rank']\n", "path": "skimage/filters/__init__.py"}, {"content": "from .._shared.utils import skimage_deprecation\nfrom warnings import warn\n\nglobal _import_warned\n\nwarn(skimage_deprecation('The `skimage.filter` module has been renamed '\n 'to `skimage.filters`. This placeholder module '\n 'will be removed in v0.13.'))\n_import_warned = True\n\ndel warn\ndel skimage_deprecation\n\nfrom ..filters.lpi_filter import inverse, wiener, LPIFilter2D\nfrom ..filters._gaussian import gaussian_filter\nfrom ..filters.edges import (sobel, hsobel, vsobel, sobel_h, sobel_v,\n scharr, hscharr, vscharr, scharr_h, scharr_v,\n prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v,\n roberts, roberts_positive_diagonal,\n roberts_negative_diagonal, roberts_pos_diag,\n roberts_neg_diag)\nfrom ..filters._rank_order import rank_order\nfrom ..filters._gabor import gabor_kernel, gabor_filter\nfrom ..filters.thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata)\nfrom ..filters import rank\nfrom ..filters.rank import median\n\nfrom skimage._shared.utils import deprecated\nfrom skimage import restoration\ndenoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\\\n (restoration.denoise_bilateral)\ndenoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\\\n (restoration.denoise_tv_bregman)\ndenoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\\\n (restoration.denoise_tv_chambolle)\n\n# Backward compatibility v<0.11\n@deprecated('skimage.feature.canny')\ndef canny(*args, **kwargs):\n # Hack to avoid circular import\n from skimage.feature._canny import canny as canny_\n return canny_(*args, **kwargs)\n\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian_filter',\n 'median',\n 'canny',\n 'sobel',\n 'hsobel',\n 'vsobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'hscharr',\n 'vscharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'hprewitt',\n 'vprewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_positive_diagonal',\n 'roberts_negative_diagonal',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor_filter',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'rank']\n", "path": "skimage/filter/__init__.py"}]}
1,937
254
gh_patches_debug_34159
rasdani/github-patches
git_diff
ethereum__web3.py-983
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Crash in time based gas price strategies * Version: 4.5.0 * Python: 3.6 * OS: osx ### What was wrong? My script to check the gas price calculation crashes after the update to web3 4.5: ``` python test_gas_price.py 0:13:21 RPC: 252000000 Traceback (most recent call last): File "test_gas_price.py", line 14, in <module> print('Slow', w3.eth.generateGasPrice()) File "/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/eth.py", line 385, in generateGasPrice return self.gasPriceStrategy(self.web3, transaction_params) File "/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/gas_strategies/time_based.py", line 160, in time_based_gas_price_strategy sample_size=sample_size, File "/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/eth_utils/functional.py", line 22, in inner return callback(fn(*args, **kwargs)) File "/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/gas_strategies/time_based.py", line 79, in _compute_probabilities reverse=True, File "/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/gas_strategies/time_based.py", line 67, in _aggregate_miner_data percentile(gas_prices, percentile=15)) File "/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/utils/math.py", line 5, in percentile raise ValueError("Expected a sequence of at least 3 integers, got {0}".format(values)) ValueError: Expected a sequence of at least 3 integers, got {41000000000, 26000000000} ``` The code is the following: ```py from web3.auto import w3 from web3 import middleware from web3.gas_strategies.time_based import medium_gas_price_strategy, fast_gas_price_strategy, slow_gas_price_strategy from web3.gas_strategies.rpc import rpc_gas_price_strategy w3.middleware_stack.add(middleware.time_based_cache_middleware) w3.middleware_stack.add(middleware.latest_block_based_cache_middleware) w3.middleware_stack.add(middleware.simple_cache_middleware) w3.eth.setGasPriceStrategy(rpc_gas_price_strategy) print('RPC:', w3.eth.generateGasPrice()) w3.eth.setGasPriceStrategy(slow_gas_price_strategy) print('Slow', w3.eth.generateGasPrice()) w3.eth.setGasPriceStrategy(medium_gas_price_strategy) print('Medium:', w3.eth.generateGasPrice()) w3.eth.setGasPriceStrategy(fast_gas_price_strategy) print('Fast:', w3.eth.generateGasPrice()) ``` ### How can it be fixed? It should crash and return a proper gas price. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `web3/gas_strategies/time_based.py` Content: ``` 1 import collections 2 import math 3 import operator 4 5 from eth_utils import ( 6 to_tuple, 7 ) 8 9 from web3.exceptions import ( 10 ValidationError, 11 ) 12 from web3.utils.math import ( 13 percentile, 14 ) 15 from web3.utils.toolz import ( 16 curry, 17 groupby, 18 sliding_window, 19 ) 20 21 MinerData = collections.namedtuple( 22 'MinerData', 23 ['miner', 'num_blocks', 'min_gas_price', 'low_percentile_gas_price']) 24 25 Probability = collections.namedtuple('Probability', ['gas_price', 'prob']) 26 27 28 def _get_avg_block_time(w3, sample_size): 29 latest = w3.eth.getBlock('latest') 30 31 constrained_sample_size = min(sample_size, latest['number']) 32 if constrained_sample_size == 0: 33 raise ValidationError('Constrained sample size is 0') 34 35 oldest = w3.eth.getBlock(latest['number'] - constrained_sample_size) 36 return (latest['timestamp'] - oldest['timestamp']) / constrained_sample_size 37 38 39 def _get_raw_miner_data(w3, sample_size): 40 latest = w3.eth.getBlock('latest', full_transactions=True) 41 42 for transaction in latest['transactions']: 43 yield (latest['miner'], latest['hash'], transaction['gasPrice']) 44 45 block = latest 46 47 for _ in range(sample_size - 1): 48 if block['number'] == 0: 49 break 50 51 # we intentionally trace backwards using parent hashes rather than 52 # block numbers to make caching the data easier to implement. 53 block = w3.eth.getBlock(block['parentHash'], full_transactions=True) 54 for transaction in block['transactions']: 55 yield (block['miner'], block['hash'], transaction['gasPrice']) 56 57 58 def _aggregate_miner_data(raw_data): 59 data_by_miner = groupby(0, raw_data) 60 61 for miner, miner_data in data_by_miner.items(): 62 _, block_hashes, gas_prices = map(set, zip(*miner_data)) 63 yield MinerData( 64 miner, 65 len(set(block_hashes)), 66 min(gas_prices), 67 percentile(gas_prices, percentile=15)) 68 69 70 @to_tuple 71 def _compute_probabilities(miner_data, wait_blocks, sample_size): 72 """ 73 Computes the probabilities that a txn will be accepted at each of the gas 74 prices accepted by the miners. 75 """ 76 miner_data_by_price = tuple(sorted( 77 miner_data, 78 key=operator.attrgetter('low_percentile_gas_price'), 79 reverse=True, 80 )) 81 for idx in range(len(miner_data_by_price)): 82 low_percentile_gas_price = miner_data_by_price[idx].low_percentile_gas_price 83 num_blocks_accepting_price = sum(m.num_blocks for m in miner_data_by_price[idx:]) 84 inv_prob_per_block = (sample_size - num_blocks_accepting_price) / sample_size 85 probability_accepted = 1 - inv_prob_per_block ** wait_blocks 86 yield Probability(low_percentile_gas_price, probability_accepted) 87 88 89 def _compute_gas_price(probabilities, desired_probability): 90 """ 91 Given a sorted range of ``Probability`` named-tuples returns a gas price 92 computed based on where the ``desired_probability`` would fall within the 93 range. 94 95 :param probabilities: An iterable of `Probability` named-tuples sorted in reverse order. 96 :param desired_probability: An floating point representation of the desired 97 probability. (e.g. ``85% -> 0.85``) 98 """ 99 first = probabilities[0] 100 last = probabilities[-1] 101 102 if desired_probability >= first.prob: 103 return first.gas_price 104 elif desired_probability <= last.prob: 105 return last.gas_price 106 107 for left, right in sliding_window(2, probabilities): 108 if desired_probability < right.prob: 109 continue 110 elif desired_probability > left.prob: 111 # This code block should never be reachable as it would indicate 112 # that we already passed by the probability window in which our 113 # `desired_probability` is located. 114 raise Exception('Invariant') 115 116 adj_prob = desired_probability - right.prob 117 window_size = left.prob - right.prob 118 position = adj_prob / window_size 119 gas_window_size = left.gas_price - right.gas_price 120 gas_price = int(math.ceil(right.gas_price + gas_window_size * position)) 121 return gas_price 122 else: 123 # The initial `if/else` clause in this function handles the case where 124 # the `desired_probability` is either above or below the min/max 125 # probability found in the `probabilities`. 126 # 127 # With these two cases handled, the only way this code block should be 128 # reachable would be if the `probabilities` were not sorted correctly. 129 # Otherwise, the `desired_probability` **must** fall between two of the 130 # values in the `probabilities``. 131 raise Exception('Invariant') 132 133 134 @curry 135 def construct_time_based_gas_price_strategy(max_wait_seconds, 136 sample_size=120, 137 probability=98): 138 """ 139 A gas pricing strategy that uses recently mined block data to derive a gas 140 price for which a transaction is likely to be mined within X seconds with 141 probability P. 142 143 :param max_wait_seconds: The desired maxiumum number of seconds the 144 transaction should take to mine. 145 :param sample_size: The number of recent blocks to sample 146 :param probability: An integer representation of the desired probability 147 that the transaction will be mined within ``max_wait_seconds``. 0 means 0% 148 and 100 means 100%. 149 """ 150 def time_based_gas_price_strategy(web3, transaction_params): 151 avg_block_time = _get_avg_block_time(web3, sample_size=sample_size) 152 wait_blocks = int(math.ceil(max_wait_seconds / avg_block_time)) 153 154 raw_miner_data = _get_raw_miner_data(web3, sample_size=sample_size) 155 miner_data = _aggregate_miner_data(raw_miner_data) 156 157 probabilities = _compute_probabilities( 158 miner_data, 159 wait_blocks=wait_blocks, 160 sample_size=sample_size, 161 ) 162 163 gas_price = _compute_gas_price(probabilities, probability / 100) 164 return gas_price 165 return time_based_gas_price_strategy 166 167 168 # fast: mine within 1 minute 169 fast_gas_price_strategy = construct_time_based_gas_price_strategy( 170 max_wait_seconds=60, 171 sample_size=120, 172 ) 173 # medium: mine within 10 minutes 174 medium_gas_price_strategy = construct_time_based_gas_price_strategy( 175 max_wait_seconds=600, 176 sample_size=120, 177 ) 178 # slow: mine within 1 hour (60 minutes) 179 slow_gas_price_strategy = construct_time_based_gas_price_strategy( 180 max_wait_seconds=60 * 60, 181 sample_size=120, 182 ) 183 # glacial: mine within the next 24 hours. 184 glacial_gas_price_strategy = construct_time_based_gas_price_strategy( 185 max_wait_seconds=24 * 60 * 60, 186 sample_size=720, 187 ) 188 ``` Path: `web3/utils/math.py` Content: ``` 1 def percentile(values=None, percentile=None): 2 """Calculates a simplified weighted average percentile 3 """ 4 if values in [None, tuple(), []] or len(values) < 3: 5 raise ValueError("Expected a sequence of at least 3 integers, got {0}".format(values)) 6 if percentile is None: 7 raise ValueError("Expected a percentile choice, got {0}".format(percentile)) 8 9 sorted_values = sorted(values) 10 11 rank = len(values) * percentile / 100 12 if rank > 0: 13 index = rank - 1 14 else: 15 index = rank 16 17 if index % 1 == 0: 18 return sorted_values[int(index)] 19 else: 20 fractional = index % 1 21 integer = int(index - fractional) 22 lower = sorted_values[integer] 23 higher = sorted_values[integer + 1] 24 return lower + fractional * (higher - lower) 25 ``` Path: `web3/exceptions.py` Content: ``` 1 import datetime 2 import time 3 4 5 class BadFunctionCallOutput(Exception): 6 """ 7 We failed to decode ABI output. 8 9 Most likely ABI mismatch. 10 """ 11 pass 12 13 14 class BlockNumberOutofRange(Exception): 15 ''' 16 block_identifier passed does not match known block. 17 ''' 18 pass 19 20 21 class CannotHandleRequest(Exception): 22 """ 23 Raised by a provider to signal that it cannot handle an RPC request and 24 that the manager should proceed to the next provider. 25 """ 26 pass 27 28 29 class InvalidAddress(ValueError): 30 """ 31 The supplied address does not have a valid checksum, as defined in EIP-55 32 """ 33 pass 34 35 36 class NameNotFound(ValueError): 37 """ 38 Raised when a caller provides an Ethereum Name Service name that 39 does not resolve to an address. 40 """ 41 pass 42 43 44 class StaleBlockchain(Exception): 45 """ 46 Raised by the stalecheck_middleware when the latest block is too old. 47 """ 48 def __init__(self, block, allowable_delay): 49 last_block_date = datetime.datetime.fromtimestamp(block.timestamp).strftime('%c') 50 message = ( 51 "The latest block, #%d, is %d seconds old, but is only allowed to be %d s old. " 52 "The date of the most recent block is %s. Continue syncing and try again..." % 53 (block.number, time.time() - block.timestamp, allowable_delay, last_block_date) 54 ) 55 super().__init__(message, block, allowable_delay) 56 57 def __str__(self): 58 return self.args[0] 59 60 61 class UnhandledRequest(Exception): 62 """ 63 Raised by the manager when none of it's providers responds to a request. 64 """ 65 pass 66 67 68 class MismatchedABI(Exception): 69 """ 70 Raised when an ABI does not match with supplied parameters, or when an 71 attempt is made to access a function/event that does not exist in the ABI. 72 """ 73 pass 74 75 76 class FallbackNotFound(Exception): 77 """ 78 Raised when fallback function doesn't exist in contract. 79 """ 80 pass 81 82 83 class ValidationError(Exception): 84 """ 85 Raised when a supplied value is invalid. 86 """ 87 pass 88 89 90 class NoABIFunctionsFound(AttributeError): 91 """ 92 Raised when an ABI doesn't contain any functions. 93 """ 94 pass 95 96 97 class NoABIEventsFound(AttributeError): 98 """ 99 Raised when an ABI doesn't contain any events. 100 """ 101 pass 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/web3/exceptions.py b/web3/exceptions.py --- a/web3/exceptions.py +++ b/web3/exceptions.py @@ -99,3 +99,11 @@ Raised when an ABI doesn't contain any events. """ pass + + +class InsufficientData(Exception): + """ + Raised when there are insufficient data points to + complete a calculation + """ + pass diff --git a/web3/gas_strategies/time_based.py b/web3/gas_strategies/time_based.py --- a/web3/gas_strategies/time_based.py +++ b/web3/gas_strategies/time_based.py @@ -7,6 +7,7 @@ ) from web3.exceptions import ( + InsufficientData, ValidationError, ) from web3.utils.math import ( @@ -60,11 +61,15 @@ for miner, miner_data in data_by_miner.items(): _, block_hashes, gas_prices = map(set, zip(*miner_data)) + try: + price_percentile = percentile(gas_prices, percentile=20) + except InsufficientData: + price_percentile = min(gas_prices) yield MinerData( miner, len(set(block_hashes)), min(gas_prices), - percentile(gas_prices, percentile=15)) + price_percentile) @to_tuple diff --git a/web3/utils/math.py b/web3/utils/math.py --- a/web3/utils/math.py +++ b/web3/utils/math.py @@ -1,8 +1,14 @@ +from web3.exceptions import ( + InsufficientData, +) + + def percentile(values=None, percentile=None): """Calculates a simplified weighted average percentile """ - if values in [None, tuple(), []] or len(values) < 3: - raise ValueError("Expected a sequence of at least 3 integers, got {0}".format(values)) + if values in [None, tuple(), []] or len(values) < 1: + raise InsufficientData( + "Expected a sequence of at least 1 integers, got {0!r}".format(values)) if percentile is None: raise ValueError("Expected a percentile choice, got {0}".format(percentile)) @@ -11,6 +17,8 @@ rank = len(values) * percentile / 100 if rank > 0: index = rank - 1 + if index < 0: + return sorted_values[0] else: index = rank
{"golden_diff": "diff --git a/web3/exceptions.py b/web3/exceptions.py\n--- a/web3/exceptions.py\n+++ b/web3/exceptions.py\n@@ -99,3 +99,11 @@\n Raised when an ABI doesn't contain any events.\n \"\"\"\n pass\n+\n+\n+class InsufficientData(Exception):\n+ \"\"\"\n+ Raised when there are insufficient data points to\n+ complete a calculation\n+ \"\"\"\n+ pass\ndiff --git a/web3/gas_strategies/time_based.py b/web3/gas_strategies/time_based.py\n--- a/web3/gas_strategies/time_based.py\n+++ b/web3/gas_strategies/time_based.py\n@@ -7,6 +7,7 @@\n )\n \n from web3.exceptions import (\n+ InsufficientData,\n ValidationError,\n )\n from web3.utils.math import (\n@@ -60,11 +61,15 @@\n \n for miner, miner_data in data_by_miner.items():\n _, block_hashes, gas_prices = map(set, zip(*miner_data))\n+ try:\n+ price_percentile = percentile(gas_prices, percentile=20)\n+ except InsufficientData:\n+ price_percentile = min(gas_prices)\n yield MinerData(\n miner,\n len(set(block_hashes)),\n min(gas_prices),\n- percentile(gas_prices, percentile=15))\n+ price_percentile)\n \n \n @to_tuple\ndiff --git a/web3/utils/math.py b/web3/utils/math.py\n--- a/web3/utils/math.py\n+++ b/web3/utils/math.py\n@@ -1,8 +1,14 @@\n+from web3.exceptions import (\n+ InsufficientData,\n+)\n+\n+\n def percentile(values=None, percentile=None):\n \"\"\"Calculates a simplified weighted average percentile\n \"\"\"\n- if values in [None, tuple(), []] or len(values) < 3:\n- raise ValueError(\"Expected a sequence of at least 3 integers, got {0}\".format(values))\n+ if values in [None, tuple(), []] or len(values) < 1:\n+ raise InsufficientData(\n+ \"Expected a sequence of at least 1 integers, got {0!r}\".format(values))\n if percentile is None:\n raise ValueError(\"Expected a percentile choice, got {0}\".format(percentile))\n \n@@ -11,6 +17,8 @@\n rank = len(values) * percentile / 100\n if rank > 0:\n index = rank - 1\n+ if index < 0:\n+ return sorted_values[0]\n else:\n index = rank\n", "issue": "Crash in time based gas price strategies\n* Version: 4.5.0\r\n* Python: 3.6\r\n* OS: osx\r\n\r\n\r\n### What was wrong?\r\n\r\nMy script to check the gas price calculation crashes after the update to web3 4.5:\r\n```\r\npython test_gas_price.py 0:13:21\r\nRPC: 252000000\r\nTraceback (most recent call last):\r\n File \"test_gas_price.py\", line 14, in <module>\r\n print('Slow', w3.eth.generateGasPrice())\r\n File \"/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/eth.py\", line 385, in generateGasPrice\r\n return self.gasPriceStrategy(self.web3, transaction_params)\r\n File \"/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/gas_strategies/time_based.py\", line 160, in time_based_gas_price_strategy\r\n sample_size=sample_size,\r\n File \"/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/eth_utils/functional.py\", line 22, in inner\r\n return callback(fn(*args, **kwargs))\r\n File \"/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/gas_strategies/time_based.py\", line 79, in _compute_probabilities\r\n reverse=True,\r\n File \"/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/gas_strategies/time_based.py\", line 67, in _aggregate_miner_data\r\n percentile(gas_prices, percentile=15))\r\n File \"/Users/paul/Work/venv-raiden/lib/python3.6/site-packages/web3/utils/math.py\", line 5, in percentile\r\n raise ValueError(\"Expected a sequence of at least 3 integers, got {0}\".format(values))\r\nValueError: Expected a sequence of at least 3 integers, got {41000000000, 26000000000}\r\n```\r\n\r\nThe code is the following:\r\n```py\r\nfrom web3.auto import w3\r\n\r\nfrom web3 import middleware\r\nfrom web3.gas_strategies.time_based import medium_gas_price_strategy, fast_gas_price_strategy, slow_gas_price_strategy\r\nfrom web3.gas_strategies.rpc import rpc_gas_price_strategy\r\n\r\nw3.middleware_stack.add(middleware.time_based_cache_middleware)\r\nw3.middleware_stack.add(middleware.latest_block_based_cache_middleware)\r\nw3.middleware_stack.add(middleware.simple_cache_middleware)\r\n\r\nw3.eth.setGasPriceStrategy(rpc_gas_price_strategy)\r\nprint('RPC:', w3.eth.generateGasPrice())\r\nw3.eth.setGasPriceStrategy(slow_gas_price_strategy)\r\nprint('Slow', w3.eth.generateGasPrice())\r\nw3.eth.setGasPriceStrategy(medium_gas_price_strategy)\r\nprint('Medium:', w3.eth.generateGasPrice())\r\nw3.eth.setGasPriceStrategy(fast_gas_price_strategy)\r\nprint('Fast:', w3.eth.generateGasPrice())\r\n```\r\n\r\n\r\n### How can it be fixed?\r\n\r\nIt should crash and return a proper gas price.\r\n\n", "before_files": [{"content": "import collections\nimport math\nimport operator\n\nfrom eth_utils import (\n to_tuple,\n)\n\nfrom web3.exceptions import (\n ValidationError,\n)\nfrom web3.utils.math import (\n percentile,\n)\nfrom web3.utils.toolz import (\n curry,\n groupby,\n sliding_window,\n)\n\nMinerData = collections.namedtuple(\n 'MinerData',\n ['miner', 'num_blocks', 'min_gas_price', 'low_percentile_gas_price'])\n\nProbability = collections.namedtuple('Probability', ['gas_price', 'prob'])\n\n\ndef _get_avg_block_time(w3, sample_size):\n latest = w3.eth.getBlock('latest')\n\n constrained_sample_size = min(sample_size, latest['number'])\n if constrained_sample_size == 0:\n raise ValidationError('Constrained sample size is 0')\n\n oldest = w3.eth.getBlock(latest['number'] - constrained_sample_size)\n return (latest['timestamp'] - oldest['timestamp']) / constrained_sample_size\n\n\ndef _get_raw_miner_data(w3, sample_size):\n latest = w3.eth.getBlock('latest', full_transactions=True)\n\n for transaction in latest['transactions']:\n yield (latest['miner'], latest['hash'], transaction['gasPrice'])\n\n block = latest\n\n for _ in range(sample_size - 1):\n if block['number'] == 0:\n break\n\n # we intentionally trace backwards using parent hashes rather than\n # block numbers to make caching the data easier to implement.\n block = w3.eth.getBlock(block['parentHash'], full_transactions=True)\n for transaction in block['transactions']:\n yield (block['miner'], block['hash'], transaction['gasPrice'])\n\n\ndef _aggregate_miner_data(raw_data):\n data_by_miner = groupby(0, raw_data)\n\n for miner, miner_data in data_by_miner.items():\n _, block_hashes, gas_prices = map(set, zip(*miner_data))\n yield MinerData(\n miner,\n len(set(block_hashes)),\n min(gas_prices),\n percentile(gas_prices, percentile=15))\n\n\n@to_tuple\ndef _compute_probabilities(miner_data, wait_blocks, sample_size):\n \"\"\"\n Computes the probabilities that a txn will be accepted at each of the gas\n prices accepted by the miners.\n \"\"\"\n miner_data_by_price = tuple(sorted(\n miner_data,\n key=operator.attrgetter('low_percentile_gas_price'),\n reverse=True,\n ))\n for idx in range(len(miner_data_by_price)):\n low_percentile_gas_price = miner_data_by_price[idx].low_percentile_gas_price\n num_blocks_accepting_price = sum(m.num_blocks for m in miner_data_by_price[idx:])\n inv_prob_per_block = (sample_size - num_blocks_accepting_price) / sample_size\n probability_accepted = 1 - inv_prob_per_block ** wait_blocks\n yield Probability(low_percentile_gas_price, probability_accepted)\n\n\ndef _compute_gas_price(probabilities, desired_probability):\n \"\"\"\n Given a sorted range of ``Probability`` named-tuples returns a gas price\n computed based on where the ``desired_probability`` would fall within the\n range.\n\n :param probabilities: An iterable of `Probability` named-tuples sorted in reverse order.\n :param desired_probability: An floating point representation of the desired\n probability. (e.g. ``85% -> 0.85``)\n \"\"\"\n first = probabilities[0]\n last = probabilities[-1]\n\n if desired_probability >= first.prob:\n return first.gas_price\n elif desired_probability <= last.prob:\n return last.gas_price\n\n for left, right in sliding_window(2, probabilities):\n if desired_probability < right.prob:\n continue\n elif desired_probability > left.prob:\n # This code block should never be reachable as it would indicate\n # that we already passed by the probability window in which our\n # `desired_probability` is located.\n raise Exception('Invariant')\n\n adj_prob = desired_probability - right.prob\n window_size = left.prob - right.prob\n position = adj_prob / window_size\n gas_window_size = left.gas_price - right.gas_price\n gas_price = int(math.ceil(right.gas_price + gas_window_size * position))\n return gas_price\n else:\n # The initial `if/else` clause in this function handles the case where\n # the `desired_probability` is either above or below the min/max\n # probability found in the `probabilities`.\n #\n # With these two cases handled, the only way this code block should be\n # reachable would be if the `probabilities` were not sorted correctly.\n # Otherwise, the `desired_probability` **must** fall between two of the\n # values in the `probabilities``.\n raise Exception('Invariant')\n\n\n@curry\ndef construct_time_based_gas_price_strategy(max_wait_seconds,\n sample_size=120,\n probability=98):\n \"\"\"\n A gas pricing strategy that uses recently mined block data to derive a gas\n price for which a transaction is likely to be mined within X seconds with\n probability P.\n\n :param max_wait_seconds: The desired maxiumum number of seconds the\n transaction should take to mine.\n :param sample_size: The number of recent blocks to sample\n :param probability: An integer representation of the desired probability\n that the transaction will be mined within ``max_wait_seconds``. 0 means 0%\n and 100 means 100%.\n \"\"\"\n def time_based_gas_price_strategy(web3, transaction_params):\n avg_block_time = _get_avg_block_time(web3, sample_size=sample_size)\n wait_blocks = int(math.ceil(max_wait_seconds / avg_block_time))\n\n raw_miner_data = _get_raw_miner_data(web3, sample_size=sample_size)\n miner_data = _aggregate_miner_data(raw_miner_data)\n\n probabilities = _compute_probabilities(\n miner_data,\n wait_blocks=wait_blocks,\n sample_size=sample_size,\n )\n\n gas_price = _compute_gas_price(probabilities, probability / 100)\n return gas_price\n return time_based_gas_price_strategy\n\n\n# fast: mine within 1 minute\nfast_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=60,\n sample_size=120,\n)\n# medium: mine within 10 minutes\nmedium_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=600,\n sample_size=120,\n)\n# slow: mine within 1 hour (60 minutes)\nslow_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=60 * 60,\n sample_size=120,\n)\n# glacial: mine within the next 24 hours.\nglacial_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=24 * 60 * 60,\n sample_size=720,\n)\n", "path": "web3/gas_strategies/time_based.py"}, {"content": "def percentile(values=None, percentile=None):\n \"\"\"Calculates a simplified weighted average percentile\n \"\"\"\n if values in [None, tuple(), []] or len(values) < 3:\n raise ValueError(\"Expected a sequence of at least 3 integers, got {0}\".format(values))\n if percentile is None:\n raise ValueError(\"Expected a percentile choice, got {0}\".format(percentile))\n\n sorted_values = sorted(values)\n\n rank = len(values) * percentile / 100\n if rank > 0:\n index = rank - 1\n else:\n index = rank\n\n if index % 1 == 0:\n return sorted_values[int(index)]\n else:\n fractional = index % 1\n integer = int(index - fractional)\n lower = sorted_values[integer]\n higher = sorted_values[integer + 1]\n return lower + fractional * (higher - lower)\n", "path": "web3/utils/math.py"}, {"content": "import datetime\nimport time\n\n\nclass BadFunctionCallOutput(Exception):\n \"\"\"\n We failed to decode ABI output.\n\n Most likely ABI mismatch.\n \"\"\"\n pass\n\n\nclass BlockNumberOutofRange(Exception):\n '''\n block_identifier passed does not match known block.\n '''\n pass\n\n\nclass CannotHandleRequest(Exception):\n \"\"\"\n Raised by a provider to signal that it cannot handle an RPC request and\n that the manager should proceed to the next provider.\n \"\"\"\n pass\n\n\nclass InvalidAddress(ValueError):\n \"\"\"\n The supplied address does not have a valid checksum, as defined in EIP-55\n \"\"\"\n pass\n\n\nclass NameNotFound(ValueError):\n \"\"\"\n Raised when a caller provides an Ethereum Name Service name that\n does not resolve to an address.\n \"\"\"\n pass\n\n\nclass StaleBlockchain(Exception):\n \"\"\"\n Raised by the stalecheck_middleware when the latest block is too old.\n \"\"\"\n def __init__(self, block, allowable_delay):\n last_block_date = datetime.datetime.fromtimestamp(block.timestamp).strftime('%c')\n message = (\n \"The latest block, #%d, is %d seconds old, but is only allowed to be %d s old. \"\n \"The date of the most recent block is %s. Continue syncing and try again...\" %\n (block.number, time.time() - block.timestamp, allowable_delay, last_block_date)\n )\n super().__init__(message, block, allowable_delay)\n\n def __str__(self):\n return self.args[0]\n\n\nclass UnhandledRequest(Exception):\n \"\"\"\n Raised by the manager when none of it's providers responds to a request.\n \"\"\"\n pass\n\n\nclass MismatchedABI(Exception):\n \"\"\"\n Raised when an ABI does not match with supplied parameters, or when an\n attempt is made to access a function/event that does not exist in the ABI.\n \"\"\"\n pass\n\n\nclass FallbackNotFound(Exception):\n \"\"\"\n Raised when fallback function doesn't exist in contract.\n \"\"\"\n pass\n\n\nclass ValidationError(Exception):\n \"\"\"\n Raised when a supplied value is invalid.\n \"\"\"\n pass\n\n\nclass NoABIFunctionsFound(AttributeError):\n \"\"\"\n Raised when an ABI doesn't contain any functions.\n \"\"\"\n pass\n\n\nclass NoABIEventsFound(AttributeError):\n \"\"\"\n Raised when an ABI doesn't contain any events.\n \"\"\"\n pass\n", "path": "web3/exceptions.py"}], "after_files": [{"content": "import collections\nimport math\nimport operator\n\nfrom eth_utils import (\n to_tuple,\n)\n\nfrom web3.exceptions import (\n InsufficientData,\n ValidationError,\n)\nfrom web3.utils.math import (\n percentile,\n)\nfrom web3.utils.toolz import (\n curry,\n groupby,\n sliding_window,\n)\n\nMinerData = collections.namedtuple(\n 'MinerData',\n ['miner', 'num_blocks', 'min_gas_price', 'low_percentile_gas_price'])\n\nProbability = collections.namedtuple('Probability', ['gas_price', 'prob'])\n\n\ndef _get_avg_block_time(w3, sample_size):\n latest = w3.eth.getBlock('latest')\n\n constrained_sample_size = min(sample_size, latest['number'])\n if constrained_sample_size == 0:\n raise ValidationError('Constrained sample size is 0')\n\n oldest = w3.eth.getBlock(latest['number'] - constrained_sample_size)\n return (latest['timestamp'] - oldest['timestamp']) / constrained_sample_size\n\n\ndef _get_raw_miner_data(w3, sample_size):\n latest = w3.eth.getBlock('latest', full_transactions=True)\n\n for transaction in latest['transactions']:\n yield (latest['miner'], latest['hash'], transaction['gasPrice'])\n\n block = latest\n\n for _ in range(sample_size - 1):\n if block['number'] == 0:\n break\n\n # we intentionally trace backwards using parent hashes rather than\n # block numbers to make caching the data easier to implement.\n block = w3.eth.getBlock(block['parentHash'], full_transactions=True)\n for transaction in block['transactions']:\n yield (block['miner'], block['hash'], transaction['gasPrice'])\n\n\ndef _aggregate_miner_data(raw_data):\n data_by_miner = groupby(0, raw_data)\n\n for miner, miner_data in data_by_miner.items():\n _, block_hashes, gas_prices = map(set, zip(*miner_data))\n try:\n price_percentile = percentile(gas_prices, percentile=20)\n except InsufficientData:\n price_percentile = min(gas_prices)\n yield MinerData(\n miner,\n len(set(block_hashes)),\n min(gas_prices),\n price_percentile)\n\n\n@to_tuple\ndef _compute_probabilities(miner_data, wait_blocks, sample_size):\n \"\"\"\n Computes the probabilities that a txn will be accepted at each of the gas\n prices accepted by the miners.\n \"\"\"\n miner_data_by_price = tuple(sorted(\n miner_data,\n key=operator.attrgetter('low_percentile_gas_price'),\n reverse=True,\n ))\n for idx in range(len(miner_data_by_price)):\n low_percentile_gas_price = miner_data_by_price[idx].low_percentile_gas_price\n num_blocks_accepting_price = sum(m.num_blocks for m in miner_data_by_price[idx:])\n inv_prob_per_block = (sample_size - num_blocks_accepting_price) / sample_size\n probability_accepted = 1 - inv_prob_per_block ** wait_blocks\n yield Probability(low_percentile_gas_price, probability_accepted)\n\n\ndef _compute_gas_price(probabilities, desired_probability):\n \"\"\"\n Given a sorted range of ``Probability`` named-tuples returns a gas price\n computed based on where the ``desired_probability`` would fall within the\n range.\n\n :param probabilities: An iterable of `Probability` named-tuples sorted in reverse order.\n :param desired_probability: An floating point representation of the desired\n probability. (e.g. ``85% -> 0.85``)\n \"\"\"\n first = probabilities[0]\n last = probabilities[-1]\n\n if desired_probability >= first.prob:\n return first.gas_price\n elif desired_probability <= last.prob:\n return last.gas_price\n\n for left, right in sliding_window(2, probabilities):\n if desired_probability < right.prob:\n continue\n elif desired_probability > left.prob:\n # This code block should never be reachable as it would indicate\n # that we already passed by the probability window in which our\n # `desired_probability` is located.\n raise Exception('Invariant')\n\n adj_prob = desired_probability - right.prob\n window_size = left.prob - right.prob\n position = adj_prob / window_size\n gas_window_size = left.gas_price - right.gas_price\n gas_price = int(math.ceil(right.gas_price + gas_window_size * position))\n return gas_price\n else:\n # The initial `if/else` clause in this function handles the case where\n # the `desired_probability` is either above or below the min/max\n # probability found in the `probabilities`.\n #\n # With these two cases handled, the only way this code block should be\n # reachable would be if the `probabilities` were not sorted correctly.\n # Otherwise, the `desired_probability` **must** fall between two of the\n # values in the `probabilities``.\n raise Exception('Invariant')\n\n\n@curry\ndef construct_time_based_gas_price_strategy(max_wait_seconds,\n sample_size=120,\n probability=98):\n \"\"\"\n A gas pricing strategy that uses recently mined block data to derive a gas\n price for which a transaction is likely to be mined within X seconds with\n probability P.\n\n :param max_wait_seconds: The desired maxiumum number of seconds the\n transaction should take to mine.\n :param sample_size: The number of recent blocks to sample\n :param probability: An integer representation of the desired probability\n that the transaction will be mined within ``max_wait_seconds``. 0 means 0%\n and 100 means 100%.\n \"\"\"\n def time_based_gas_price_strategy(web3, transaction_params):\n avg_block_time = _get_avg_block_time(web3, sample_size=sample_size)\n wait_blocks = int(math.ceil(max_wait_seconds / avg_block_time))\n\n raw_miner_data = _get_raw_miner_data(web3, sample_size=sample_size)\n miner_data = _aggregate_miner_data(raw_miner_data)\n\n probabilities = _compute_probabilities(\n miner_data,\n wait_blocks=wait_blocks,\n sample_size=sample_size,\n )\n\n gas_price = _compute_gas_price(probabilities, probability / 100)\n return gas_price\n return time_based_gas_price_strategy\n\n\n# fast: mine within 1 minute\nfast_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=60,\n sample_size=120,\n)\n# medium: mine within 10 minutes\nmedium_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=600,\n sample_size=120,\n)\n# slow: mine within 1 hour (60 minutes)\nslow_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=60 * 60,\n sample_size=120,\n)\n# glacial: mine within the next 24 hours.\nglacial_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=24 * 60 * 60,\n sample_size=720,\n)\n", "path": "web3/gas_strategies/time_based.py"}, {"content": "from web3.exceptions import (\n InsufficientData,\n)\n\n\ndef percentile(values=None, percentile=None):\n \"\"\"Calculates a simplified weighted average percentile\n \"\"\"\n if values in [None, tuple(), []] or len(values) < 1:\n raise InsufficientData(\n \"Expected a sequence of at least 1 integers, got {0!r}\".format(values))\n if percentile is None:\n raise ValueError(\"Expected a percentile choice, got {0}\".format(percentile))\n\n sorted_values = sorted(values)\n\n rank = len(values) * percentile / 100\n if rank > 0:\n index = rank - 1\n if index < 0:\n return sorted_values[0]\n else:\n index = rank\n\n if index % 1 == 0:\n return sorted_values[int(index)]\n else:\n fractional = index % 1\n integer = int(index - fractional)\n lower = sorted_values[integer]\n higher = sorted_values[integer + 1]\n return lower + fractional * (higher - lower)\n", "path": "web3/utils/math.py"}, {"content": "import datetime\nimport time\n\n\nclass BadFunctionCallOutput(Exception):\n \"\"\"\n We failed to decode ABI output.\n\n Most likely ABI mismatch.\n \"\"\"\n pass\n\n\nclass BlockNumberOutofRange(Exception):\n '''\n block_identifier passed does not match known block.\n '''\n pass\n\n\nclass CannotHandleRequest(Exception):\n \"\"\"\n Raised by a provider to signal that it cannot handle an RPC request and\n that the manager should proceed to the next provider.\n \"\"\"\n pass\n\n\nclass InvalidAddress(ValueError):\n \"\"\"\n The supplied address does not have a valid checksum, as defined in EIP-55\n \"\"\"\n pass\n\n\nclass NameNotFound(ValueError):\n \"\"\"\n Raised when a caller provides an Ethereum Name Service name that\n does not resolve to an address.\n \"\"\"\n pass\n\n\nclass StaleBlockchain(Exception):\n \"\"\"\n Raised by the stalecheck_middleware when the latest block is too old.\n \"\"\"\n def __init__(self, block, allowable_delay):\n last_block_date = datetime.datetime.fromtimestamp(block.timestamp).strftime('%c')\n message = (\n \"The latest block, #%d, is %d seconds old, but is only allowed to be %d s old. \"\n \"The date of the most recent block is %s. Continue syncing and try again...\" %\n (block.number, time.time() - block.timestamp, allowable_delay, last_block_date)\n )\n super().__init__(message, block, allowable_delay)\n\n def __str__(self):\n return self.args[0]\n\n\nclass UnhandledRequest(Exception):\n \"\"\"\n Raised by the manager when none of it's providers responds to a request.\n \"\"\"\n pass\n\n\nclass MismatchedABI(Exception):\n \"\"\"\n Raised when an ABI does not match with supplied parameters, or when an\n attempt is made to access a function/event that does not exist in the ABI.\n \"\"\"\n pass\n\n\nclass FallbackNotFound(Exception):\n \"\"\"\n Raised when fallback function doesn't exist in contract.\n \"\"\"\n pass\n\n\nclass ValidationError(Exception):\n \"\"\"\n Raised when a supplied value is invalid.\n \"\"\"\n pass\n\n\nclass NoABIFunctionsFound(AttributeError):\n \"\"\"\n Raised when an ABI doesn't contain any functions.\n \"\"\"\n pass\n\n\nclass NoABIEventsFound(AttributeError):\n \"\"\"\n Raised when an ABI doesn't contain any events.\n \"\"\"\n pass\n\n\nclass InsufficientData(Exception):\n \"\"\"\n Raised when there are insufficient data points to\n complete a calculation\n \"\"\"\n pass\n", "path": "web3/exceptions.py"}]}
3,896
565
gh_patches_debug_23827
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-709
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Celery exceptions cause error in capture_stacktrace The latest version of celery, 5.2.1, raises some exceptions in an unexpected way. For this particular case, `tb` was a string of value: ``` 'Traceback (most recent call last):\n File "/app/.heroku/python/lib/python3.9/site-packages/billiard/pool.py", line 366, in workloop\n put((READY, (job, i, result, inqW_fd)))\n File "/app/.heroku/python/lib/python3.9/site-packages/billiard/queues.py", line 366, in put\n self.send_payload(ForkingPickler.dumps(obj))\n File "/app/.heroku/python/lib/python3.9/site-packages/billiard/reduction.py", line 56, in dumps\n cls(buf, protocol).dump(obj)\nbilliard.pool.MaybeEncodingError: Error sending res...' ``` Stacktrace of error ``` AttributeError: 'str' object has no attribute 'tb_frame' File "celery/utils/dispatch/signal.py", line 276, in send response = receiver(signal=self, sender=sender, **named) File "scout_apm/celery.py", line 114, in task_failure_callback ErrorMonitor.send( File "scout_apm/core/error.py", line 69, in send for frame in capture_stacktrace(traceback) File "scout_apm/core/backtrace.py", line 132, in capture_stacktrace return list(reversed(list(itertools.islice(walker, LIMIT)))) File "scout_apm/core/backtrace.py", line 75, in stacktrace_walker for frame, lineno in traceback.walk_tb(tb): File "traceback.py", line 312, in walk_tb yield tb.tb_frame, tb.tb_lineno ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/scout_apm/celery.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import datetime as dt 5 import logging 6 7 from celery.signals import before_task_publish, task_failure, task_postrun, task_prerun 8 9 try: 10 import django 11 12 if django.VERSION < (3, 1): 13 from django.views.debug import get_safe_settings 14 else: 15 from django.views.debug import SafeExceptionReporterFilter 16 17 def get_safe_settings(): 18 return SafeExceptionReporterFilter().get_safe_settings() 19 20 21 except ImportError: 22 # Django not installed 23 get_safe_settings = None 24 25 import scout_apm.core 26 from scout_apm.compat import datetime_to_timestamp 27 from scout_apm.core.config import scout_config 28 from scout_apm.core.error import ErrorMonitor 29 from scout_apm.core.tracked_request import TrackedRequest 30 31 logger = logging.getLogger(__name__) 32 33 34 def before_task_publish_callback(headers=None, properties=None, **kwargs): 35 if "scout_task_start" not in headers: 36 headers["scout_task_start"] = datetime_to_timestamp(dt.datetime.utcnow()) 37 38 39 def task_prerun_callback(task=None, **kwargs): 40 tracked_request = TrackedRequest.instance() 41 tracked_request.is_real_request = True 42 43 start = getattr(task.request, "scout_task_start", None) 44 if start is not None: 45 now = datetime_to_timestamp(dt.datetime.utcnow()) 46 try: 47 queue_time = now - start 48 except TypeError: 49 pass 50 else: 51 tracked_request.tag("queue_time", queue_time) 52 53 task_id = getattr(task.request, "id", None) 54 if task_id: 55 tracked_request.tag("task_id", task_id) 56 parent_task_id = getattr(task.request, "parent_id", None) 57 if parent_task_id: 58 tracked_request.tag("parent_task_id", parent_task_id) 59 60 delivery_info = task.request.delivery_info 61 tracked_request.tag("is_eager", delivery_info.get("is_eager", False)) 62 tracked_request.tag("exchange", delivery_info.get("exchange", "unknown")) 63 tracked_request.tag("priority", delivery_info.get("priority", "unknown")) 64 tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown")) 65 tracked_request.tag("queue", delivery_info.get("queue", "unknown")) 66 67 tracked_request.start_span(operation=("Job/" + task.name)) 68 69 70 def task_postrun_callback(task=None, **kwargs): 71 tracked_request = TrackedRequest.instance() 72 tracked_request.stop_span() 73 74 75 def task_failure_callback( 76 sender, 77 task_id=None, 78 exception=None, 79 args=None, 80 kwargs=None, 81 traceback=None, 82 **remaining 83 ): 84 tracked_request = TrackedRequest.instance() 85 tracked_request.tag("error", "true") 86 87 custom_controller = sender.name 88 custom_params = { 89 "celery": { 90 "task_id": task_id, 91 "args": args, 92 "kwargs": kwargs, 93 } 94 } 95 96 # Look up the django settings if populated. 97 environment = None 98 if get_safe_settings: 99 try: 100 environment = get_safe_settings() 101 except django.core.exceptions.ImproperlyConfigured as exc: 102 # Django not setup correctly 103 logger.debug( 104 "Celery integration does not have django configured properly: %r", exc 105 ) 106 pass 107 except Exception as exc: 108 logger.debug( 109 "Celery task_failure callback exception: %r", exc, exc_info=exc 110 ) 111 pass 112 113 exc_info = (exception.__class__, exception, traceback) 114 ErrorMonitor.send( 115 exc_info, 116 environment=environment, 117 custom_params=custom_params, 118 custom_controller=custom_controller, 119 ) 120 121 122 def install(app=None): 123 if app is not None: 124 copy_configuration(app) 125 126 installed = scout_apm.core.install() 127 if not installed: 128 return 129 130 before_task_publish.connect(before_task_publish_callback) 131 task_prerun.connect(task_prerun_callback) 132 task_failure.connect(task_failure_callback) 133 task_postrun.connect(task_postrun_callback) 134 135 136 def copy_configuration(app): 137 prefix = "scout_" 138 prefix_len = len(prefix) 139 140 to_set = {} 141 for key, value in app.conf.items(): 142 key_lower = key.lower() 143 if key_lower.startswith(prefix) and len(key_lower) > prefix_len: 144 scout_key = key_lower[prefix_len:] 145 to_set[scout_key] = value 146 147 scout_config.set(**to_set) 148 149 150 def uninstall(): 151 before_task_publish.disconnect(before_task_publish_callback) 152 task_prerun.disconnect(task_prerun_callback) 153 task_postrun.disconnect(task_postrun_callback) 154 task_failure.disconnect(task_failure_callback) 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py --- a/src/scout_apm/celery.py +++ b/src/scout_apm/celery.py @@ -23,7 +23,7 @@ get_safe_settings = None import scout_apm.core -from scout_apm.compat import datetime_to_timestamp +from scout_apm.compat import datetime_to_timestamp, string_type from scout_apm.core.config import scout_config from scout_apm.core.error import ErrorMonitor from scout_apm.core.tracked_request import TrackedRequest @@ -79,6 +79,7 @@ args=None, kwargs=None, traceback=None, + einfo=None, **remaining ): tracked_request = TrackedRequest.instance() @@ -110,6 +111,12 @@ ) pass + # Celery occassionally will send the traceback as a string rather + # than a Stack trace object as the docs indicate. In that case, + # fall back to the billiard ExceptionInfo instance + traceback = ( + traceback if traceback and not isinstance(traceback, string_type) else einfo.tb + ) exc_info = (exception.__class__, exception, traceback) ErrorMonitor.send( exc_info,
{"golden_diff": "diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py\n--- a/src/scout_apm/celery.py\n+++ b/src/scout_apm/celery.py\n@@ -23,7 +23,7 @@\n get_safe_settings = None\n \n import scout_apm.core\n-from scout_apm.compat import datetime_to_timestamp\n+from scout_apm.compat import datetime_to_timestamp, string_type\n from scout_apm.core.config import scout_config\n from scout_apm.core.error import ErrorMonitor\n from scout_apm.core.tracked_request import TrackedRequest\n@@ -79,6 +79,7 @@\n args=None,\n kwargs=None,\n traceback=None,\n+ einfo=None,\n **remaining\n ):\n tracked_request = TrackedRequest.instance()\n@@ -110,6 +111,12 @@\n )\n pass\n \n+ # Celery occassionally will send the traceback as a string rather\n+ # than a Stack trace object as the docs indicate. In that case,\n+ # fall back to the billiard ExceptionInfo instance\n+ traceback = (\n+ traceback if traceback and not isinstance(traceback, string_type) else einfo.tb\n+ )\n exc_info = (exception.__class__, exception, traceback)\n ErrorMonitor.send(\n exc_info,\n", "issue": "Celery exceptions cause error in capture_stacktrace\nThe latest version of celery, 5.2.1, raises some exceptions in an unexpected way.\r\n\r\nFor this particular case, `tb` was a string of value:\r\n\r\n```\r\n'Traceback (most recent call last):\\n File \"/app/.heroku/python/lib/python3.9/site-packages/billiard/pool.py\", line 366, in workloop\\n put((READY, (job, i, result, inqW_fd)))\\n File \"/app/.heroku/python/lib/python3.9/site-packages/billiard/queues.py\", line 366, in put\\n self.send_payload(ForkingPickler.dumps(obj))\\n File \"/app/.heroku/python/lib/python3.9/site-packages/billiard/reduction.py\", line 56, in dumps\\n cls(buf, protocol).dump(obj)\\nbilliard.pool.MaybeEncodingError: Error sending res...'\r\n```\r\n\r\nStacktrace of error\r\n```\r\nAttributeError: 'str' object has no attribute 'tb_frame'\r\n File \"celery/utils/dispatch/signal.py\", line 276, in send\r\n response = receiver(signal=self, sender=sender, **named)\r\n File \"scout_apm/celery.py\", line 114, in task_failure_callback\r\n ErrorMonitor.send(\r\n File \"scout_apm/core/error.py\", line 69, in send\r\n for frame in capture_stacktrace(traceback)\r\n File \"scout_apm/core/backtrace.py\", line 132, in capture_stacktrace\r\n return list(reversed(list(itertools.islice(walker, LIMIT))))\r\n File \"scout_apm/core/backtrace.py\", line 75, in stacktrace_walker\r\n for frame, lineno in traceback.walk_tb(tb):\r\n File \"traceback.py\", line 312, in walk_tb\r\n yield tb.tb_frame, tb.tb_lineno\r\n```\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport logging\n\nfrom celery.signals import before_task_publish, task_failure, task_postrun, task_prerun\n\ntry:\n import django\n\n if django.VERSION < (3, 1):\n from django.views.debug import get_safe_settings\n else:\n from django.views.debug import SafeExceptionReporterFilter\n\n def get_safe_settings():\n return SafeExceptionReporterFilter().get_safe_settings()\n\n\nexcept ImportError:\n # Django not installed\n get_safe_settings = None\n\nimport scout_apm.core\nfrom scout_apm.compat import datetime_to_timestamp\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.error import ErrorMonitor\nfrom scout_apm.core.tracked_request import TrackedRequest\n\nlogger = logging.getLogger(__name__)\n\n\ndef before_task_publish_callback(headers=None, properties=None, **kwargs):\n if \"scout_task_start\" not in headers:\n headers[\"scout_task_start\"] = datetime_to_timestamp(dt.datetime.utcnow())\n\n\ndef task_prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.is_real_request = True\n\n start = getattr(task.request, \"scout_task_start\", None)\n if start is not None:\n now = datetime_to_timestamp(dt.datetime.utcnow())\n try:\n queue_time = now - start\n except TypeError:\n pass\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n\n task_id = getattr(task.request, \"id\", None)\n if task_id:\n tracked_request.tag(\"task_id\", task_id)\n parent_task_id = getattr(task.request, \"parent_id\", None)\n if parent_task_id:\n tracked_request.tag(\"parent_task_id\", parent_task_id)\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"priority\", delivery_info.get(\"priority\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef task_postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef task_failure_callback(\n sender,\n task_id=None,\n exception=None,\n args=None,\n kwargs=None,\n traceback=None,\n **remaining\n):\n tracked_request = TrackedRequest.instance()\n tracked_request.tag(\"error\", \"true\")\n\n custom_controller = sender.name\n custom_params = {\n \"celery\": {\n \"task_id\": task_id,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n }\n\n # Look up the django settings if populated.\n environment = None\n if get_safe_settings:\n try:\n environment = get_safe_settings()\n except django.core.exceptions.ImproperlyConfigured as exc:\n # Django not setup correctly\n logger.debug(\n \"Celery integration does not have django configured properly: %r\", exc\n )\n pass\n except Exception as exc:\n logger.debug(\n \"Celery task_failure callback exception: %r\", exc, exc_info=exc\n )\n pass\n\n exc_info = (exception.__class__, exception, traceback)\n ErrorMonitor.send(\n exc_info,\n environment=environment,\n custom_params=custom_params,\n custom_controller=custom_controller,\n )\n\n\ndef install(app=None):\n if app is not None:\n copy_configuration(app)\n\n installed = scout_apm.core.install()\n if not installed:\n return\n\n before_task_publish.connect(before_task_publish_callback)\n task_prerun.connect(task_prerun_callback)\n task_failure.connect(task_failure_callback)\n task_postrun.connect(task_postrun_callback)\n\n\ndef copy_configuration(app):\n prefix = \"scout_\"\n prefix_len = len(prefix)\n\n to_set = {}\n for key, value in app.conf.items():\n key_lower = key.lower()\n if key_lower.startswith(prefix) and len(key_lower) > prefix_len:\n scout_key = key_lower[prefix_len:]\n to_set[scout_key] = value\n\n scout_config.set(**to_set)\n\n\ndef uninstall():\n before_task_publish.disconnect(before_task_publish_callback)\n task_prerun.disconnect(task_prerun_callback)\n task_postrun.disconnect(task_postrun_callback)\n task_failure.disconnect(task_failure_callback)\n", "path": "src/scout_apm/celery.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport logging\n\nfrom celery.signals import before_task_publish, task_failure, task_postrun, task_prerun\n\ntry:\n import django\n\n if django.VERSION < (3, 1):\n from django.views.debug import get_safe_settings\n else:\n from django.views.debug import SafeExceptionReporterFilter\n\n def get_safe_settings():\n return SafeExceptionReporterFilter().get_safe_settings()\n\n\nexcept ImportError:\n # Django not installed\n get_safe_settings = None\n\nimport scout_apm.core\nfrom scout_apm.compat import datetime_to_timestamp, string_type\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.error import ErrorMonitor\nfrom scout_apm.core.tracked_request import TrackedRequest\n\nlogger = logging.getLogger(__name__)\n\n\ndef before_task_publish_callback(headers=None, properties=None, **kwargs):\n if \"scout_task_start\" not in headers:\n headers[\"scout_task_start\"] = datetime_to_timestamp(dt.datetime.utcnow())\n\n\ndef task_prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.is_real_request = True\n\n start = getattr(task.request, \"scout_task_start\", None)\n if start is not None:\n now = datetime_to_timestamp(dt.datetime.utcnow())\n try:\n queue_time = now - start\n except TypeError:\n pass\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n\n task_id = getattr(task.request, \"id\", None)\n if task_id:\n tracked_request.tag(\"task_id\", task_id)\n parent_task_id = getattr(task.request, \"parent_id\", None)\n if parent_task_id:\n tracked_request.tag(\"parent_task_id\", parent_task_id)\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"priority\", delivery_info.get(\"priority\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef task_postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef task_failure_callback(\n sender,\n task_id=None,\n exception=None,\n args=None,\n kwargs=None,\n traceback=None,\n einfo=None,\n **remaining\n):\n tracked_request = TrackedRequest.instance()\n tracked_request.tag(\"error\", \"true\")\n\n custom_controller = sender.name\n custom_params = {\n \"celery\": {\n \"task_id\": task_id,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n }\n\n # Look up the django settings if populated.\n environment = None\n if get_safe_settings:\n try:\n environment = get_safe_settings()\n except django.core.exceptions.ImproperlyConfigured as exc:\n # Django not setup correctly\n logger.debug(\n \"Celery integration does not have django configured properly: %r\", exc\n )\n pass\n except Exception as exc:\n logger.debug(\n \"Celery task_failure callback exception: %r\", exc, exc_info=exc\n )\n pass\n\n # Celery occassionally will send the traceback as a string rather\n # than a Stack trace object as the docs indicate. In that case,\n # fall back to the billiard ExceptionInfo instance\n traceback = (\n traceback if traceback and not isinstance(traceback, string_type) else einfo.tb\n )\n exc_info = (exception.__class__, exception, traceback)\n ErrorMonitor.send(\n exc_info,\n environment=environment,\n custom_params=custom_params,\n custom_controller=custom_controller,\n )\n\n\ndef install(app=None):\n if app is not None:\n copy_configuration(app)\n\n installed = scout_apm.core.install()\n if not installed:\n return\n\n before_task_publish.connect(before_task_publish_callback)\n task_prerun.connect(task_prerun_callback)\n task_failure.connect(task_failure_callback)\n task_postrun.connect(task_postrun_callback)\n\n\ndef copy_configuration(app):\n prefix = \"scout_\"\n prefix_len = len(prefix)\n\n to_set = {}\n for key, value in app.conf.items():\n key_lower = key.lower()\n if key_lower.startswith(prefix) and len(key_lower) > prefix_len:\n scout_key = key_lower[prefix_len:]\n to_set[scout_key] = value\n\n scout_config.set(**to_set)\n\n\ndef uninstall():\n before_task_publish.disconnect(before_task_publish_callback)\n task_prerun.disconnect(task_prerun_callback)\n task_postrun.disconnect(task_postrun_callback)\n task_failure.disconnect(task_failure_callback)\n", "path": "src/scout_apm/celery.py"}]}
2,047
291
gh_patches_debug_21243
rasdani/github-patches
git_diff
pypa__pip-9207
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Revert "Redact auth from URL in UpdatingDefaultsHelpFormatter" Reverts pypa/pip#9163 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pip/_internal/cli/parser.py` Content: ``` 1 """Base option parser setup""" 2 3 # The following comment should be removed at some point in the future. 4 # mypy: disallow-untyped-defs=False 5 6 from __future__ import absolute_import 7 8 import logging 9 import optparse 10 import sys 11 import textwrap 12 from distutils.util import strtobool 13 14 from pip._vendor.contextlib2 import suppress 15 from pip._vendor.six import string_types 16 17 from pip._internal.cli.status_codes import UNKNOWN_ERROR 18 from pip._internal.configuration import Configuration, ConfigurationError 19 from pip._internal.utils.compat import get_terminal_size 20 from pip._internal.utils.misc import redact_auth_from_url 21 22 logger = logging.getLogger(__name__) 23 24 25 class PrettyHelpFormatter(optparse.IndentedHelpFormatter): 26 """A prettier/less verbose help formatter for optparse.""" 27 28 def __init__(self, *args, **kwargs): 29 # help position must be aligned with __init__.parseopts.description 30 kwargs['max_help_position'] = 30 31 kwargs['indent_increment'] = 1 32 kwargs['width'] = get_terminal_size()[0] - 2 33 optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs) 34 35 def format_option_strings(self, option): 36 return self._format_option_strings(option) 37 38 def _format_option_strings(self, option, mvarfmt=' <{}>', optsep=', '): 39 """ 40 Return a comma-separated list of option strings and metavars. 41 42 :param option: tuple of (short opt, long opt), e.g: ('-f', '--format') 43 :param mvarfmt: metavar format string 44 :param optsep: separator 45 """ 46 opts = [] 47 48 if option._short_opts: 49 opts.append(option._short_opts[0]) 50 if option._long_opts: 51 opts.append(option._long_opts[0]) 52 if len(opts) > 1: 53 opts.insert(1, optsep) 54 55 if option.takes_value(): 56 metavar = option.metavar or option.dest.lower() 57 opts.append(mvarfmt.format(metavar.lower())) 58 59 return ''.join(opts) 60 61 def format_heading(self, heading): 62 if heading == 'Options': 63 return '' 64 return heading + ':\n' 65 66 def format_usage(self, usage): 67 """ 68 Ensure there is only one newline between usage and the first heading 69 if there is no description. 70 """ 71 msg = '\nUsage: {}\n'.format( 72 self.indent_lines(textwrap.dedent(usage), " ")) 73 return msg 74 75 def format_description(self, description): 76 # leave full control over description to us 77 if description: 78 if hasattr(self.parser, 'main'): 79 label = 'Commands' 80 else: 81 label = 'Description' 82 # some doc strings have initial newlines, some don't 83 description = description.lstrip('\n') 84 # some doc strings have final newlines and spaces, some don't 85 description = description.rstrip() 86 # dedent, then reindent 87 description = self.indent_lines(textwrap.dedent(description), " ") 88 description = '{}:\n{}\n'.format(label, description) 89 return description 90 else: 91 return '' 92 93 def format_epilog(self, epilog): 94 # leave full control over epilog to us 95 if epilog: 96 return epilog 97 else: 98 return '' 99 100 def indent_lines(self, text, indent): 101 new_lines = [indent + line for line in text.split('\n')] 102 return "\n".join(new_lines) 103 104 105 class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): 106 """Custom help formatter for use in ConfigOptionParser. 107 108 This is updates the defaults before expanding them, allowing 109 them to show up correctly in the help listing. 110 111 Also redact auth from url type options 112 """ 113 114 def expand_default(self, option): 115 default_value = None 116 if self.parser is not None: 117 self.parser._update_defaults(self.parser.defaults) 118 default_value = self.parser.defaults.get(option.dest) 119 help_text = optparse.IndentedHelpFormatter.expand_default(self, option) 120 121 if default_value and option.metavar == 'URL': 122 help_text = help_text.replace( 123 default_value, redact_auth_from_url(default_value)) 124 125 return help_text 126 127 128 class CustomOptionParser(optparse.OptionParser): 129 130 def insert_option_group(self, idx, *args, **kwargs): 131 """Insert an OptionGroup at a given position.""" 132 group = self.add_option_group(*args, **kwargs) 133 134 self.option_groups.pop() 135 self.option_groups.insert(idx, group) 136 137 return group 138 139 @property 140 def option_list_all(self): 141 """Get a list of all options, including those in option groups.""" 142 res = self.option_list[:] 143 for i in self.option_groups: 144 res.extend(i.option_list) 145 146 return res 147 148 149 class ConfigOptionParser(CustomOptionParser): 150 """Custom option parser which updates its defaults by checking the 151 configuration files and environmental variables""" 152 153 def __init__(self, *args, **kwargs): 154 self.name = kwargs.pop('name') 155 156 isolated = kwargs.pop("isolated", False) 157 self.config = Configuration(isolated) 158 159 assert self.name 160 optparse.OptionParser.__init__(self, *args, **kwargs) 161 162 def check_default(self, option, key, val): 163 try: 164 return option.check_value(key, val) 165 except optparse.OptionValueError as exc: 166 print("An error occurred during configuration: {}".format(exc)) 167 sys.exit(3) 168 169 def _get_ordered_configuration_items(self): 170 # Configuration gives keys in an unordered manner. Order them. 171 override_order = ["global", self.name, ":env:"] 172 173 # Pool the options into different groups 174 section_items = {name: [] for name in override_order} 175 for section_key, val in self.config.items(): 176 # ignore empty values 177 if not val: 178 logger.debug( 179 "Ignoring configuration key '%s' as it's value is empty.", 180 section_key 181 ) 182 continue 183 184 section, key = section_key.split(".", 1) 185 if section in override_order: 186 section_items[section].append((key, val)) 187 188 # Yield each group in their override order 189 for section in override_order: 190 for key, val in section_items[section]: 191 yield key, val 192 193 def _update_defaults(self, defaults): 194 """Updates the given defaults with values from the config files and 195 the environ. Does a little special handling for certain types of 196 options (lists).""" 197 198 # Accumulate complex default state. 199 self.values = optparse.Values(self.defaults) 200 late_eval = set() 201 # Then set the options with those values 202 for key, val in self._get_ordered_configuration_items(): 203 # '--' because configuration supports only long names 204 option = self.get_option('--' + key) 205 206 # Ignore options not present in this parser. E.g. non-globals put 207 # in [global] by users that want them to apply to all applicable 208 # commands. 209 if option is None: 210 continue 211 212 if option.action in ('store_true', 'store_false'): 213 try: 214 val = strtobool(val) 215 except ValueError: 216 self.error( 217 '{} is not a valid value for {} option, ' # noqa 218 'please specify a boolean value like yes/no, ' 219 'true/false or 1/0 instead.'.format(val, key) 220 ) 221 elif option.action == 'count': 222 with suppress(ValueError): 223 val = strtobool(val) 224 with suppress(ValueError): 225 val = int(val) 226 if not isinstance(val, int) or val < 0: 227 self.error( 228 '{} is not a valid value for {} option, ' # noqa 229 'please instead specify either a non-negative integer ' 230 'or a boolean value like yes/no or false/true ' 231 'which is equivalent to 1/0.'.format(val, key) 232 ) 233 elif option.action == 'append': 234 val = val.split() 235 val = [self.check_default(option, key, v) for v in val] 236 elif option.action == 'callback': 237 late_eval.add(option.dest) 238 opt_str = option.get_opt_string() 239 val = option.convert_value(opt_str, val) 240 # From take_action 241 args = option.callback_args or () 242 kwargs = option.callback_kwargs or {} 243 option.callback(option, opt_str, val, self, *args, **kwargs) 244 else: 245 val = self.check_default(option, key, val) 246 247 defaults[option.dest] = val 248 249 for key in late_eval: 250 defaults[key] = getattr(self.values, key) 251 self.values = None 252 return defaults 253 254 def get_default_values(self): 255 """Overriding to make updating the defaults after instantiation of 256 the option parser possible, _update_defaults() does the dirty work.""" 257 if not self.process_default_values: 258 # Old, pre-Optik 1.5 behaviour. 259 return optparse.Values(self.defaults) 260 261 # Load the configuration, or error out in case of an error 262 try: 263 self.config.load() 264 except ConfigurationError as err: 265 self.exit(UNKNOWN_ERROR, str(err)) 266 267 defaults = self._update_defaults(self.defaults.copy()) # ours 268 for option in self._get_all_options(): 269 default = defaults.get(option.dest) 270 if isinstance(default, string_types): 271 opt_str = option.get_opt_string() 272 defaults[option.dest] = option.check_value(opt_str, default) 273 return optparse.Values(defaults) 274 275 def error(self, msg): 276 self.print_usage(sys.stderr) 277 self.exit(UNKNOWN_ERROR, "{}\n".format(msg)) 278 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pip/_internal/cli/parser.py b/src/pip/_internal/cli/parser.py --- a/src/pip/_internal/cli/parser.py +++ b/src/pip/_internal/cli/parser.py @@ -112,15 +112,23 @@ """ def expand_default(self, option): - default_value = None + default_values = None if self.parser is not None: self.parser._update_defaults(self.parser.defaults) - default_value = self.parser.defaults.get(option.dest) + default_values = self.parser.defaults.get(option.dest) help_text = optparse.IndentedHelpFormatter.expand_default(self, option) - if default_value and option.metavar == 'URL': - help_text = help_text.replace( - default_value, redact_auth_from_url(default_value)) + if default_values and option.metavar == 'URL': + if isinstance(default_values, string_types): + default_values = [default_values] + + # If its not a list, we should abort and just return the help text + if not isinstance(default_values, list): + default_values = [] + + for val in default_values: + help_text = help_text.replace( + val, redact_auth_from_url(val)) return help_text
{"golden_diff": "diff --git a/src/pip/_internal/cli/parser.py b/src/pip/_internal/cli/parser.py\n--- a/src/pip/_internal/cli/parser.py\n+++ b/src/pip/_internal/cli/parser.py\n@@ -112,15 +112,23 @@\n \"\"\"\n \n def expand_default(self, option):\n- default_value = None\n+ default_values = None\n if self.parser is not None:\n self.parser._update_defaults(self.parser.defaults)\n- default_value = self.parser.defaults.get(option.dest)\n+ default_values = self.parser.defaults.get(option.dest)\n help_text = optparse.IndentedHelpFormatter.expand_default(self, option)\n \n- if default_value and option.metavar == 'URL':\n- help_text = help_text.replace(\n- default_value, redact_auth_from_url(default_value))\n+ if default_values and option.metavar == 'URL':\n+ if isinstance(default_values, string_types):\n+ default_values = [default_values]\n+\n+ # If its not a list, we should abort and just return the help text\n+ if not isinstance(default_values, list):\n+ default_values = []\n+\n+ for val in default_values:\n+ help_text = help_text.replace(\n+ val, redact_auth_from_url(val))\n \n return help_text\n", "issue": "Revert \"Redact auth from URL in UpdatingDefaultsHelpFormatter\"\nReverts pypa/pip#9163\n", "before_files": [{"content": "\"\"\"Base option parser setup\"\"\"\n\n# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nfrom __future__ import absolute_import\n\nimport logging\nimport optparse\nimport sys\nimport textwrap\nfrom distutils.util import strtobool\n\nfrom pip._vendor.contextlib2 import suppress\nfrom pip._vendor.six import string_types\n\nfrom pip._internal.cli.status_codes import UNKNOWN_ERROR\nfrom pip._internal.configuration import Configuration, ConfigurationError\nfrom pip._internal.utils.compat import get_terminal_size\nfrom pip._internal.utils.misc import redact_auth_from_url\n\nlogger = logging.getLogger(__name__)\n\n\nclass PrettyHelpFormatter(optparse.IndentedHelpFormatter):\n \"\"\"A prettier/less verbose help formatter for optparse.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # help position must be aligned with __init__.parseopts.description\n kwargs['max_help_position'] = 30\n kwargs['indent_increment'] = 1\n kwargs['width'] = get_terminal_size()[0] - 2\n optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)\n\n def format_option_strings(self, option):\n return self._format_option_strings(option)\n\n def _format_option_strings(self, option, mvarfmt=' <{}>', optsep=', '):\n \"\"\"\n Return a comma-separated list of option strings and metavars.\n\n :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')\n :param mvarfmt: metavar format string\n :param optsep: separator\n \"\"\"\n opts = []\n\n if option._short_opts:\n opts.append(option._short_opts[0])\n if option._long_opts:\n opts.append(option._long_opts[0])\n if len(opts) > 1:\n opts.insert(1, optsep)\n\n if option.takes_value():\n metavar = option.metavar or option.dest.lower()\n opts.append(mvarfmt.format(metavar.lower()))\n\n return ''.join(opts)\n\n def format_heading(self, heading):\n if heading == 'Options':\n return ''\n return heading + ':\\n'\n\n def format_usage(self, usage):\n \"\"\"\n Ensure there is only one newline between usage and the first heading\n if there is no description.\n \"\"\"\n msg = '\\nUsage: {}\\n'.format(\n self.indent_lines(textwrap.dedent(usage), \" \"))\n return msg\n\n def format_description(self, description):\n # leave full control over description to us\n if description:\n if hasattr(self.parser, 'main'):\n label = 'Commands'\n else:\n label = 'Description'\n # some doc strings have initial newlines, some don't\n description = description.lstrip('\\n')\n # some doc strings have final newlines and spaces, some don't\n description = description.rstrip()\n # dedent, then reindent\n description = self.indent_lines(textwrap.dedent(description), \" \")\n description = '{}:\\n{}\\n'.format(label, description)\n return description\n else:\n return ''\n\n def format_epilog(self, epilog):\n # leave full control over epilog to us\n if epilog:\n return epilog\n else:\n return ''\n\n def indent_lines(self, text, indent):\n new_lines = [indent + line for line in text.split('\\n')]\n return \"\\n\".join(new_lines)\n\n\nclass UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):\n \"\"\"Custom help formatter for use in ConfigOptionParser.\n\n This is updates the defaults before expanding them, allowing\n them to show up correctly in the help listing.\n\n Also redact auth from url type options\n \"\"\"\n\n def expand_default(self, option):\n default_value = None\n if self.parser is not None:\n self.parser._update_defaults(self.parser.defaults)\n default_value = self.parser.defaults.get(option.dest)\n help_text = optparse.IndentedHelpFormatter.expand_default(self, option)\n\n if default_value and option.metavar == 'URL':\n help_text = help_text.replace(\n default_value, redact_auth_from_url(default_value))\n\n return help_text\n\n\nclass CustomOptionParser(optparse.OptionParser):\n\n def insert_option_group(self, idx, *args, **kwargs):\n \"\"\"Insert an OptionGroup at a given position.\"\"\"\n group = self.add_option_group(*args, **kwargs)\n\n self.option_groups.pop()\n self.option_groups.insert(idx, group)\n\n return group\n\n @property\n def option_list_all(self):\n \"\"\"Get a list of all options, including those in option groups.\"\"\"\n res = self.option_list[:]\n for i in self.option_groups:\n res.extend(i.option_list)\n\n return res\n\n\nclass ConfigOptionParser(CustomOptionParser):\n \"\"\"Custom option parser which updates its defaults by checking the\n configuration files and environmental variables\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.name = kwargs.pop('name')\n\n isolated = kwargs.pop(\"isolated\", False)\n self.config = Configuration(isolated)\n\n assert self.name\n optparse.OptionParser.__init__(self, *args, **kwargs)\n\n def check_default(self, option, key, val):\n try:\n return option.check_value(key, val)\n except optparse.OptionValueError as exc:\n print(\"An error occurred during configuration: {}\".format(exc))\n sys.exit(3)\n\n def _get_ordered_configuration_items(self):\n # Configuration gives keys in an unordered manner. Order them.\n override_order = [\"global\", self.name, \":env:\"]\n\n # Pool the options into different groups\n section_items = {name: [] for name in override_order}\n for section_key, val in self.config.items():\n # ignore empty values\n if not val:\n logger.debug(\n \"Ignoring configuration key '%s' as it's value is empty.\",\n section_key\n )\n continue\n\n section, key = section_key.split(\".\", 1)\n if section in override_order:\n section_items[section].append((key, val))\n\n # Yield each group in their override order\n for section in override_order:\n for key, val in section_items[section]:\n yield key, val\n\n def _update_defaults(self, defaults):\n \"\"\"Updates the given defaults with values from the config files and\n the environ. Does a little special handling for certain types of\n options (lists).\"\"\"\n\n # Accumulate complex default state.\n self.values = optparse.Values(self.defaults)\n late_eval = set()\n # Then set the options with those values\n for key, val in self._get_ordered_configuration_items():\n # '--' because configuration supports only long names\n option = self.get_option('--' + key)\n\n # Ignore options not present in this parser. E.g. non-globals put\n # in [global] by users that want them to apply to all applicable\n # commands.\n if option is None:\n continue\n\n if option.action in ('store_true', 'store_false'):\n try:\n val = strtobool(val)\n except ValueError:\n self.error(\n '{} is not a valid value for {} option, ' # noqa\n 'please specify a boolean value like yes/no, '\n 'true/false or 1/0 instead.'.format(val, key)\n )\n elif option.action == 'count':\n with suppress(ValueError):\n val = strtobool(val)\n with suppress(ValueError):\n val = int(val)\n if not isinstance(val, int) or val < 0:\n self.error(\n '{} is not a valid value for {} option, ' # noqa\n 'please instead specify either a non-negative integer '\n 'or a boolean value like yes/no or false/true '\n 'which is equivalent to 1/0.'.format(val, key)\n )\n elif option.action == 'append':\n val = val.split()\n val = [self.check_default(option, key, v) for v in val]\n elif option.action == 'callback':\n late_eval.add(option.dest)\n opt_str = option.get_opt_string()\n val = option.convert_value(opt_str, val)\n # From take_action\n args = option.callback_args or ()\n kwargs = option.callback_kwargs or {}\n option.callback(option, opt_str, val, self, *args, **kwargs)\n else:\n val = self.check_default(option, key, val)\n\n defaults[option.dest] = val\n\n for key in late_eval:\n defaults[key] = getattr(self.values, key)\n self.values = None\n return defaults\n\n def get_default_values(self):\n \"\"\"Overriding to make updating the defaults after instantiation of\n the option parser possible, _update_defaults() does the dirty work.\"\"\"\n if not self.process_default_values:\n # Old, pre-Optik 1.5 behaviour.\n return optparse.Values(self.defaults)\n\n # Load the configuration, or error out in case of an error\n try:\n self.config.load()\n except ConfigurationError as err:\n self.exit(UNKNOWN_ERROR, str(err))\n\n defaults = self._update_defaults(self.defaults.copy()) # ours\n for option in self._get_all_options():\n default = defaults.get(option.dest)\n if isinstance(default, string_types):\n opt_str = option.get_opt_string()\n defaults[option.dest] = option.check_value(opt_str, default)\n return optparse.Values(defaults)\n\n def error(self, msg):\n self.print_usage(sys.stderr)\n self.exit(UNKNOWN_ERROR, \"{}\\n\".format(msg))\n", "path": "src/pip/_internal/cli/parser.py"}], "after_files": [{"content": "\"\"\"Base option parser setup\"\"\"\n\n# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nfrom __future__ import absolute_import\n\nimport logging\nimport optparse\nimport sys\nimport textwrap\nfrom distutils.util import strtobool\n\nfrom pip._vendor.contextlib2 import suppress\nfrom pip._vendor.six import string_types\n\nfrom pip._internal.cli.status_codes import UNKNOWN_ERROR\nfrom pip._internal.configuration import Configuration, ConfigurationError\nfrom pip._internal.utils.compat import get_terminal_size\nfrom pip._internal.utils.misc import redact_auth_from_url\n\nlogger = logging.getLogger(__name__)\n\n\nclass PrettyHelpFormatter(optparse.IndentedHelpFormatter):\n \"\"\"A prettier/less verbose help formatter for optparse.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # help position must be aligned with __init__.parseopts.description\n kwargs['max_help_position'] = 30\n kwargs['indent_increment'] = 1\n kwargs['width'] = get_terminal_size()[0] - 2\n optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)\n\n def format_option_strings(self, option):\n return self._format_option_strings(option)\n\n def _format_option_strings(self, option, mvarfmt=' <{}>', optsep=', '):\n \"\"\"\n Return a comma-separated list of option strings and metavars.\n\n :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')\n :param mvarfmt: metavar format string\n :param optsep: separator\n \"\"\"\n opts = []\n\n if option._short_opts:\n opts.append(option._short_opts[0])\n if option._long_opts:\n opts.append(option._long_opts[0])\n if len(opts) > 1:\n opts.insert(1, optsep)\n\n if option.takes_value():\n metavar = option.metavar or option.dest.lower()\n opts.append(mvarfmt.format(metavar.lower()))\n\n return ''.join(opts)\n\n def format_heading(self, heading):\n if heading == 'Options':\n return ''\n return heading + ':\\n'\n\n def format_usage(self, usage):\n \"\"\"\n Ensure there is only one newline between usage and the first heading\n if there is no description.\n \"\"\"\n msg = '\\nUsage: {}\\n'.format(\n self.indent_lines(textwrap.dedent(usage), \" \"))\n return msg\n\n def format_description(self, description):\n # leave full control over description to us\n if description:\n if hasattr(self.parser, 'main'):\n label = 'Commands'\n else:\n label = 'Description'\n # some doc strings have initial newlines, some don't\n description = description.lstrip('\\n')\n # some doc strings have final newlines and spaces, some don't\n description = description.rstrip()\n # dedent, then reindent\n description = self.indent_lines(textwrap.dedent(description), \" \")\n description = '{}:\\n{}\\n'.format(label, description)\n return description\n else:\n return ''\n\n def format_epilog(self, epilog):\n # leave full control over epilog to us\n if epilog:\n return epilog\n else:\n return ''\n\n def indent_lines(self, text, indent):\n new_lines = [indent + line for line in text.split('\\n')]\n return \"\\n\".join(new_lines)\n\n\nclass UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):\n \"\"\"Custom help formatter for use in ConfigOptionParser.\n\n This is updates the defaults before expanding them, allowing\n them to show up correctly in the help listing.\n\n Also redact auth from url type options\n \"\"\"\n\n def expand_default(self, option):\n default_values = None\n if self.parser is not None:\n self.parser._update_defaults(self.parser.defaults)\n default_values = self.parser.defaults.get(option.dest)\n help_text = optparse.IndentedHelpFormatter.expand_default(self, option)\n\n if default_values and option.metavar == 'URL':\n if isinstance(default_values, string_types):\n default_values = [default_values]\n\n # If its not a list, we should abort and just return the help text\n if not isinstance(default_values, list):\n default_values = []\n\n for val in default_values:\n help_text = help_text.replace(\n val, redact_auth_from_url(val))\n\n return help_text\n\n\nclass CustomOptionParser(optparse.OptionParser):\n\n def insert_option_group(self, idx, *args, **kwargs):\n \"\"\"Insert an OptionGroup at a given position.\"\"\"\n group = self.add_option_group(*args, **kwargs)\n\n self.option_groups.pop()\n self.option_groups.insert(idx, group)\n\n return group\n\n @property\n def option_list_all(self):\n \"\"\"Get a list of all options, including those in option groups.\"\"\"\n res = self.option_list[:]\n for i in self.option_groups:\n res.extend(i.option_list)\n\n return res\n\n\nclass ConfigOptionParser(CustomOptionParser):\n \"\"\"Custom option parser which updates its defaults by checking the\n configuration files and environmental variables\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.name = kwargs.pop('name')\n\n isolated = kwargs.pop(\"isolated\", False)\n self.config = Configuration(isolated)\n\n assert self.name\n optparse.OptionParser.__init__(self, *args, **kwargs)\n\n def check_default(self, option, key, val):\n try:\n return option.check_value(key, val)\n except optparse.OptionValueError as exc:\n print(\"An error occurred during configuration: {}\".format(exc))\n sys.exit(3)\n\n def _get_ordered_configuration_items(self):\n # Configuration gives keys in an unordered manner. Order them.\n override_order = [\"global\", self.name, \":env:\"]\n\n # Pool the options into different groups\n section_items = {name: [] for name in override_order}\n for section_key, val in self.config.items():\n # ignore empty values\n if not val:\n logger.debug(\n \"Ignoring configuration key '%s' as it's value is empty.\",\n section_key\n )\n continue\n\n section, key = section_key.split(\".\", 1)\n if section in override_order:\n section_items[section].append((key, val))\n\n # Yield each group in their override order\n for section in override_order:\n for key, val in section_items[section]:\n yield key, val\n\n def _update_defaults(self, defaults):\n \"\"\"Updates the given defaults with values from the config files and\n the environ. Does a little special handling for certain types of\n options (lists).\"\"\"\n\n # Accumulate complex default state.\n self.values = optparse.Values(self.defaults)\n late_eval = set()\n # Then set the options with those values\n for key, val in self._get_ordered_configuration_items():\n # '--' because configuration supports only long names\n option = self.get_option('--' + key)\n\n # Ignore options not present in this parser. E.g. non-globals put\n # in [global] by users that want them to apply to all applicable\n # commands.\n if option is None:\n continue\n\n if option.action in ('store_true', 'store_false'):\n try:\n val = strtobool(val)\n except ValueError:\n self.error(\n '{} is not a valid value for {} option, ' # noqa\n 'please specify a boolean value like yes/no, '\n 'true/false or 1/0 instead.'.format(val, key)\n )\n elif option.action == 'count':\n with suppress(ValueError):\n val = strtobool(val)\n with suppress(ValueError):\n val = int(val)\n if not isinstance(val, int) or val < 0:\n self.error(\n '{} is not a valid value for {} option, ' # noqa\n 'please instead specify either a non-negative integer '\n 'or a boolean value like yes/no or false/true '\n 'which is equivalent to 1/0.'.format(val, key)\n )\n elif option.action == 'append':\n val = val.split()\n val = [self.check_default(option, key, v) for v in val]\n elif option.action == 'callback':\n late_eval.add(option.dest)\n opt_str = option.get_opt_string()\n val = option.convert_value(opt_str, val)\n # From take_action\n args = option.callback_args or ()\n kwargs = option.callback_kwargs or {}\n option.callback(option, opt_str, val, self, *args, **kwargs)\n else:\n val = self.check_default(option, key, val)\n\n defaults[option.dest] = val\n\n for key in late_eval:\n defaults[key] = getattr(self.values, key)\n self.values = None\n return defaults\n\n def get_default_values(self):\n \"\"\"Overriding to make updating the defaults after instantiation of\n the option parser possible, _update_defaults() does the dirty work.\"\"\"\n if not self.process_default_values:\n # Old, pre-Optik 1.5 behaviour.\n return optparse.Values(self.defaults)\n\n # Load the configuration, or error out in case of an error\n try:\n self.config.load()\n except ConfigurationError as err:\n self.exit(UNKNOWN_ERROR, str(err))\n\n defaults = self._update_defaults(self.defaults.copy()) # ours\n for option in self._get_all_options():\n default = defaults.get(option.dest)\n if isinstance(default, string_types):\n opt_str = option.get_opt_string()\n defaults[option.dest] = option.check_value(opt_str, default)\n return optparse.Values(defaults)\n\n def error(self, msg):\n self.print_usage(sys.stderr)\n self.exit(UNKNOWN_ERROR, \"{}\\n\".format(msg))\n", "path": "src/pip/_internal/cli/parser.py"}]}
3,110
282
gh_patches_debug_14153
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-797
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Mitmweb can't run When I run mitmweb,then open 121.0.0.1:8081, the error happened. ``` Traceback (most recent call last): File "/Users/venv.mitmproxy/lib/python2.7/site-packages/tornado/ioloop.py", line 1020, in _run return self.callback() File "/Users/mitmproxy/libmproxy/web/__init__.py", line 141, in tick flow.FlowMaster.tick(self, self.masterq, timeout=0) File "/Users/mitmproxy/libmproxy/flow.py", line 812, in tick return super(FlowMaster, self).tick(q, timeout) File "/Users/mitmproxy/libmproxy/controller.py", line 108, in tick self.handle(*msg) File "/Users/mitmproxy/libmproxy/controller.py", line 129, in handle m(obj) File "/Users/mitmproxy/libmproxy/web/__init__.py", line 167, in handle_request super(WebMaster, self).handle_request(f) File "/Users/mitmproxy/libmproxy/flow.py", line 990, in handle_request self.state.add_flow(f) File "/Users/mitmproxy/libmproxy/flow.py", line 561, in add_flow self.flows._add(f) File "/Users/mitmproxy/libmproxy/flow.py", line 469, in _add view._add(f) File "/Users/mitmproxy/libmproxy/web/__init__.py", line 23, in _add data=f.get_state(short=True) File "/Users/mitmproxy/libmproxy/models/flow.py", line 88, in get_state d = super(Flow, self).get_state(short) File "/Users/mitmproxy/libmproxy/stateobject.py", line 32, in get_state state[attr] = val.get_state(short) File "/Users/mitmproxy/libmproxy/models/connections.py", line 138, in get_state address={"address": self.address(), TypeError: 'NoneType' object is not callable ERROR:tornado.application:Exception in callback <bound method WebMaster.tick of <libmproxy.web.WebMaster object at 0x10cfe5a90>> ``` Mitmweb can't run When I run mitmweb,then open 121.0.0.1:8081, the error happened. ``` Traceback (most recent call last): File "/Users/venv.mitmproxy/lib/python2.7/site-packages/tornado/ioloop.py", line 1020, in _run return self.callback() File "/Users/mitmproxy/libmproxy/web/__init__.py", line 141, in tick flow.FlowMaster.tick(self, self.masterq, timeout=0) File "/Users/mitmproxy/libmproxy/flow.py", line 812, in tick return super(FlowMaster, self).tick(q, timeout) File "/Users/mitmproxy/libmproxy/controller.py", line 108, in tick self.handle(*msg) File "/Users/mitmproxy/libmproxy/controller.py", line 129, in handle m(obj) File "/Users/mitmproxy/libmproxy/web/__init__.py", line 167, in handle_request super(WebMaster, self).handle_request(f) File "/Users/mitmproxy/libmproxy/flow.py", line 990, in handle_request self.state.add_flow(f) File "/Users/mitmproxy/libmproxy/flow.py", line 561, in add_flow self.flows._add(f) File "/Users/mitmproxy/libmproxy/flow.py", line 469, in _add view._add(f) File "/Users/mitmproxy/libmproxy/web/__init__.py", line 23, in _add data=f.get_state(short=True) File "/Users/mitmproxy/libmproxy/models/flow.py", line 88, in get_state d = super(Flow, self).get_state(short) File "/Users/mitmproxy/libmproxy/stateobject.py", line 32, in get_state state[attr] = val.get_state(short) File "/Users/mitmproxy/libmproxy/models/connections.py", line 138, in get_state address={"address": self.address(), TypeError: 'NoneType' object is not callable ERROR:tornado.application:Exception in callback <bound method WebMaster.tick of <libmproxy.web.WebMaster object at 0x10cfe5a90>> ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libmproxy/models/connections.py` Content: ``` 1 from __future__ import (absolute_import, print_function, division) 2 3 import copy 4 import os 5 6 from netlib import tcp, certutils 7 from .. import stateobject, utils 8 9 10 class ClientConnection(tcp.BaseHandler, stateobject.StateObject): 11 def __init__(self, client_connection, address, server): 12 # Eventually, this object is restored from state. We don't have a 13 # connection then. 14 if client_connection: 15 super(ClientConnection, self).__init__(client_connection, address, server) 16 else: 17 self.connection = None 18 self.server = None 19 self.wfile = None 20 self.rfile = None 21 self.address = None 22 self.clientcert = None 23 self.ssl_established = None 24 25 self.timestamp_start = utils.timestamp() 26 self.timestamp_end = None 27 self.timestamp_ssl_setup = None 28 self.protocol = None 29 30 def __nonzero__(self): 31 return bool(self.connection) and not self.finished 32 33 def __repr__(self): 34 return "<ClientConnection: {ssl}{host}:{port}>".format( 35 ssl="[ssl] " if self.ssl_established else "", 36 host=self.address.host, 37 port=self.address.port 38 ) 39 40 @property 41 def tls_established(self): 42 return self.ssl_established 43 44 _stateobject_attributes = dict( 45 ssl_established=bool, 46 timestamp_start=float, 47 timestamp_end=float, 48 timestamp_ssl_setup=float 49 ) 50 51 def get_state(self, short=False): 52 d = super(ClientConnection, self).get_state(short) 53 d.update( 54 address={ 55 "address": self.address(), 56 "use_ipv6": self.address.use_ipv6}, 57 clientcert=self.cert.to_pem() if self.clientcert else None) 58 return d 59 60 def load_state(self, state): 61 super(ClientConnection, self).load_state(state) 62 self.address = tcp.Address( 63 **state["address"]) if state["address"] else None 64 self.clientcert = certutils.SSLCert.from_pem( 65 state["clientcert"]) if state["clientcert"] else None 66 67 def copy(self): 68 return copy.copy(self) 69 70 def send(self, message): 71 if isinstance(message, list): 72 message = b''.join(message) 73 self.wfile.write(message) 74 self.wfile.flush() 75 76 @classmethod 77 def from_state(cls, state): 78 f = cls(None, tuple(), None) 79 f.load_state(state) 80 return f 81 82 def convert_to_ssl(self, *args, **kwargs): 83 super(ClientConnection, self).convert_to_ssl(*args, **kwargs) 84 self.timestamp_ssl_setup = utils.timestamp() 85 86 def finish(self): 87 super(ClientConnection, self).finish() 88 self.timestamp_end = utils.timestamp() 89 90 91 class ServerConnection(tcp.TCPClient, stateobject.StateObject): 92 def __init__(self, address): 93 tcp.TCPClient.__init__(self, address) 94 95 self.via = None 96 self.timestamp_start = None 97 self.timestamp_end = None 98 self.timestamp_tcp_setup = None 99 self.timestamp_ssl_setup = None 100 self.protocol = None 101 102 def __nonzero__(self): 103 return bool(self.connection) and not self.finished 104 105 def __repr__(self): 106 if self.ssl_established and self.sni: 107 ssl = "[ssl: {0}] ".format(self.sni) 108 elif self.ssl_established: 109 ssl = "[ssl] " 110 else: 111 ssl = "" 112 return "<ServerConnection: {ssl}{host}:{port}>".format( 113 ssl=ssl, 114 host=self.address.host, 115 port=self.address.port 116 ) 117 118 @property 119 def tls_established(self): 120 return self.ssl_established 121 122 _stateobject_attributes = dict( 123 timestamp_start=float, 124 timestamp_end=float, 125 timestamp_tcp_setup=float, 126 timestamp_ssl_setup=float, 127 address=tcp.Address, 128 source_address=tcp.Address, 129 cert=certutils.SSLCert, 130 ssl_established=bool, 131 sni=str 132 ) 133 _stateobject_long_attributes = {"cert"} 134 135 def get_state(self, short=False): 136 d = super(ServerConnection, self).get_state(short) 137 d.update( 138 address={"address": self.address(), 139 "use_ipv6": self.address.use_ipv6}, 140 source_address=({"address": self.source_address(), 141 "use_ipv6": self.source_address.use_ipv6} if self.source_address else None), 142 cert=self.cert.to_pem() if self.cert else None 143 ) 144 return d 145 146 def load_state(self, state): 147 super(ServerConnection, self).load_state(state) 148 149 self.address = tcp.Address( 150 **state["address"]) if state["address"] else None 151 self.source_address = tcp.Address( 152 **state["source_address"]) if state["source_address"] else None 153 self.cert = certutils.SSLCert.from_pem( 154 state["cert"]) if state["cert"] else None 155 156 @classmethod 157 def from_state(cls, state): 158 f = cls(tuple()) 159 f.load_state(state) 160 return f 161 162 def copy(self): 163 return copy.copy(self) 164 165 def connect(self): 166 self.timestamp_start = utils.timestamp() 167 tcp.TCPClient.connect(self) 168 self.timestamp_tcp_setup = utils.timestamp() 169 170 def send(self, message): 171 if isinstance(message, list): 172 message = b''.join(message) 173 self.wfile.write(message) 174 self.wfile.flush() 175 176 def establish_ssl(self, clientcerts, sni, **kwargs): 177 clientcert = None 178 if clientcerts: 179 path = os.path.join( 180 clientcerts, 181 self.address.host.encode("idna")) + ".pem" 182 if os.path.exists(path): 183 clientcert = path 184 185 self.convert_to_ssl(cert=clientcert, sni=sni, **kwargs) 186 self.sni = sni 187 self.timestamp_ssl_setup = utils.timestamp() 188 189 def finish(self): 190 tcp.TCPClient.finish(self) 191 self.timestamp_end = utils.timestamp() 192 193 194 ServerConnection._stateobject_attributes["via"] = ServerConnection 195 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libmproxy/models/connections.py b/libmproxy/models/connections.py --- a/libmproxy/models/connections.py +++ b/libmproxy/models/connections.py @@ -135,8 +135,8 @@ def get_state(self, short=False): d = super(ServerConnection, self).get_state(short) d.update( - address={"address": self.address(), - "use_ipv6": self.address.use_ipv6}, + address=({"address": self.address(), + "use_ipv6": self.address.use_ipv6} if self.address else {}), source_address=({"address": self.source_address(), "use_ipv6": self.source_address.use_ipv6} if self.source_address else None), cert=self.cert.to_pem() if self.cert else None
{"golden_diff": "diff --git a/libmproxy/models/connections.py b/libmproxy/models/connections.py\n--- a/libmproxy/models/connections.py\n+++ b/libmproxy/models/connections.py\n@@ -135,8 +135,8 @@\n def get_state(self, short=False):\n d = super(ServerConnection, self).get_state(short)\n d.update(\n- address={\"address\": self.address(),\n- \"use_ipv6\": self.address.use_ipv6},\n+ address=({\"address\": self.address(),\n+ \"use_ipv6\": self.address.use_ipv6} if self.address else {}), \n source_address=({\"address\": self.source_address(),\n \"use_ipv6\": self.source_address.use_ipv6} if self.source_address else None),\n cert=self.cert.to_pem() if self.cert else None\n", "issue": "Mitmweb can't run\nWhen I run mitmweb,then open 121.0.0.1:8081, the error happened.\n\n```\nTraceback (most recent call last):\n File \"/Users/venv.mitmproxy/lib/python2.7/site-packages/tornado/ioloop.py\", line 1020, in _run\n return self.callback()\n File \"/Users/mitmproxy/libmproxy/web/__init__.py\", line 141, in tick\n flow.FlowMaster.tick(self, self.masterq, timeout=0)\n File \"/Users/mitmproxy/libmproxy/flow.py\", line 812, in tick\n return super(FlowMaster, self).tick(q, timeout)\n File \"/Users/mitmproxy/libmproxy/controller.py\", line 108, in tick\n self.handle(*msg)\n File \"/Users/mitmproxy/libmproxy/controller.py\", line 129, in handle\n m(obj)\n File \"/Users/mitmproxy/libmproxy/web/__init__.py\", line 167, in handle_request\n super(WebMaster, self).handle_request(f)\n File \"/Users/mitmproxy/libmproxy/flow.py\", line 990, in handle_request\n self.state.add_flow(f)\n File \"/Users/mitmproxy/libmproxy/flow.py\", line 561, in add_flow\n self.flows._add(f)\n File \"/Users/mitmproxy/libmproxy/flow.py\", line 469, in _add\n view._add(f)\n File \"/Users/mitmproxy/libmproxy/web/__init__.py\", line 23, in _add\n data=f.get_state(short=True)\n File \"/Users/mitmproxy/libmproxy/models/flow.py\", line 88, in get_state\n d = super(Flow, self).get_state(short)\n File \"/Users/mitmproxy/libmproxy/stateobject.py\", line 32, in get_state\n state[attr] = val.get_state(short)\n File \"/Users/mitmproxy/libmproxy/models/connections.py\", line 138, in get_state\n address={\"address\": self.address(),\nTypeError: 'NoneType' object is not callable\nERROR:tornado.application:Exception in callback <bound method WebMaster.tick of <libmproxy.web.WebMaster object at 0x10cfe5a90>>\n```\n\nMitmweb can't run\nWhen I run mitmweb,then open 121.0.0.1:8081, the error happened.\n\n```\nTraceback (most recent call last):\n File \"/Users/venv.mitmproxy/lib/python2.7/site-packages/tornado/ioloop.py\", line 1020, in _run\n return self.callback()\n File \"/Users/mitmproxy/libmproxy/web/__init__.py\", line 141, in tick\n flow.FlowMaster.tick(self, self.masterq, timeout=0)\n File \"/Users/mitmproxy/libmproxy/flow.py\", line 812, in tick\n return super(FlowMaster, self).tick(q, timeout)\n File \"/Users/mitmproxy/libmproxy/controller.py\", line 108, in tick\n self.handle(*msg)\n File \"/Users/mitmproxy/libmproxy/controller.py\", line 129, in handle\n m(obj)\n File \"/Users/mitmproxy/libmproxy/web/__init__.py\", line 167, in handle_request\n super(WebMaster, self).handle_request(f)\n File \"/Users/mitmproxy/libmproxy/flow.py\", line 990, in handle_request\n self.state.add_flow(f)\n File \"/Users/mitmproxy/libmproxy/flow.py\", line 561, in add_flow\n self.flows._add(f)\n File \"/Users/mitmproxy/libmproxy/flow.py\", line 469, in _add\n view._add(f)\n File \"/Users/mitmproxy/libmproxy/web/__init__.py\", line 23, in _add\n data=f.get_state(short=True)\n File \"/Users/mitmproxy/libmproxy/models/flow.py\", line 88, in get_state\n d = super(Flow, self).get_state(short)\n File \"/Users/mitmproxy/libmproxy/stateobject.py\", line 32, in get_state\n state[attr] = val.get_state(short)\n File \"/Users/mitmproxy/libmproxy/models/connections.py\", line 138, in get_state\n address={\"address\": self.address(),\nTypeError: 'NoneType' object is not callable\nERROR:tornado.application:Exception in callback <bound method WebMaster.tick of <libmproxy.web.WebMaster object at 0x10cfe5a90>>\n```\n\n", "before_files": [{"content": "from __future__ import (absolute_import, print_function, division)\n\nimport copy\nimport os\n\nfrom netlib import tcp, certutils\nfrom .. import stateobject, utils\n\n\nclass ClientConnection(tcp.BaseHandler, stateobject.StateObject):\n def __init__(self, client_connection, address, server):\n # Eventually, this object is restored from state. We don't have a\n # connection then.\n if client_connection:\n super(ClientConnection, self).__init__(client_connection, address, server)\n else:\n self.connection = None\n self.server = None\n self.wfile = None\n self.rfile = None\n self.address = None\n self.clientcert = None\n self.ssl_established = None\n\n self.timestamp_start = utils.timestamp()\n self.timestamp_end = None\n self.timestamp_ssl_setup = None\n self.protocol = None\n\n def __nonzero__(self):\n return bool(self.connection) and not self.finished\n\n def __repr__(self):\n return \"<ClientConnection: {ssl}{host}:{port}>\".format(\n ssl=\"[ssl] \" if self.ssl_established else \"\",\n host=self.address.host,\n port=self.address.port\n )\n\n @property\n def tls_established(self):\n return self.ssl_established\n\n _stateobject_attributes = dict(\n ssl_established=bool,\n timestamp_start=float,\n timestamp_end=float,\n timestamp_ssl_setup=float\n )\n\n def get_state(self, short=False):\n d = super(ClientConnection, self).get_state(short)\n d.update(\n address={\n \"address\": self.address(),\n \"use_ipv6\": self.address.use_ipv6},\n clientcert=self.cert.to_pem() if self.clientcert else None)\n return d\n\n def load_state(self, state):\n super(ClientConnection, self).load_state(state)\n self.address = tcp.Address(\n **state[\"address\"]) if state[\"address\"] else None\n self.clientcert = certutils.SSLCert.from_pem(\n state[\"clientcert\"]) if state[\"clientcert\"] else None\n\n def copy(self):\n return copy.copy(self)\n\n def send(self, message):\n if isinstance(message, list):\n message = b''.join(message)\n self.wfile.write(message)\n self.wfile.flush()\n\n @classmethod\n def from_state(cls, state):\n f = cls(None, tuple(), None)\n f.load_state(state)\n return f\n\n def convert_to_ssl(self, *args, **kwargs):\n super(ClientConnection, self).convert_to_ssl(*args, **kwargs)\n self.timestamp_ssl_setup = utils.timestamp()\n\n def finish(self):\n super(ClientConnection, self).finish()\n self.timestamp_end = utils.timestamp()\n\n\nclass ServerConnection(tcp.TCPClient, stateobject.StateObject):\n def __init__(self, address):\n tcp.TCPClient.__init__(self, address)\n\n self.via = None\n self.timestamp_start = None\n self.timestamp_end = None\n self.timestamp_tcp_setup = None\n self.timestamp_ssl_setup = None\n self.protocol = None\n\n def __nonzero__(self):\n return bool(self.connection) and not self.finished\n\n def __repr__(self):\n if self.ssl_established and self.sni:\n ssl = \"[ssl: {0}] \".format(self.sni)\n elif self.ssl_established:\n ssl = \"[ssl] \"\n else:\n ssl = \"\"\n return \"<ServerConnection: {ssl}{host}:{port}>\".format(\n ssl=ssl,\n host=self.address.host,\n port=self.address.port\n )\n\n @property\n def tls_established(self):\n return self.ssl_established\n\n _stateobject_attributes = dict(\n timestamp_start=float,\n timestamp_end=float,\n timestamp_tcp_setup=float,\n timestamp_ssl_setup=float,\n address=tcp.Address,\n source_address=tcp.Address,\n cert=certutils.SSLCert,\n ssl_established=bool,\n sni=str\n )\n _stateobject_long_attributes = {\"cert\"}\n\n def get_state(self, short=False):\n d = super(ServerConnection, self).get_state(short)\n d.update(\n address={\"address\": self.address(),\n \"use_ipv6\": self.address.use_ipv6},\n source_address=({\"address\": self.source_address(),\n \"use_ipv6\": self.source_address.use_ipv6} if self.source_address else None),\n cert=self.cert.to_pem() if self.cert else None\n )\n return d\n\n def load_state(self, state):\n super(ServerConnection, self).load_state(state)\n\n self.address = tcp.Address(\n **state[\"address\"]) if state[\"address\"] else None\n self.source_address = tcp.Address(\n **state[\"source_address\"]) if state[\"source_address\"] else None\n self.cert = certutils.SSLCert.from_pem(\n state[\"cert\"]) if state[\"cert\"] else None\n\n @classmethod\n def from_state(cls, state):\n f = cls(tuple())\n f.load_state(state)\n return f\n\n def copy(self):\n return copy.copy(self)\n\n def connect(self):\n self.timestamp_start = utils.timestamp()\n tcp.TCPClient.connect(self)\n self.timestamp_tcp_setup = utils.timestamp()\n\n def send(self, message):\n if isinstance(message, list):\n message = b''.join(message)\n self.wfile.write(message)\n self.wfile.flush()\n\n def establish_ssl(self, clientcerts, sni, **kwargs):\n clientcert = None\n if clientcerts:\n path = os.path.join(\n clientcerts,\n self.address.host.encode(\"idna\")) + \".pem\"\n if os.path.exists(path):\n clientcert = path\n\n self.convert_to_ssl(cert=clientcert, sni=sni, **kwargs)\n self.sni = sni\n self.timestamp_ssl_setup = utils.timestamp()\n\n def finish(self):\n tcp.TCPClient.finish(self)\n self.timestamp_end = utils.timestamp()\n\n\nServerConnection._stateobject_attributes[\"via\"] = ServerConnection\n", "path": "libmproxy/models/connections.py"}], "after_files": [{"content": "from __future__ import (absolute_import, print_function, division)\n\nimport copy\nimport os\n\nfrom netlib import tcp, certutils\nfrom .. import stateobject, utils\n\n\nclass ClientConnection(tcp.BaseHandler, stateobject.StateObject):\n def __init__(self, client_connection, address, server):\n # Eventually, this object is restored from state. We don't have a\n # connection then.\n if client_connection:\n super(ClientConnection, self).__init__(client_connection, address, server)\n else:\n self.connection = None\n self.server = None\n self.wfile = None\n self.rfile = None\n self.address = None\n self.clientcert = None\n self.ssl_established = None\n\n self.timestamp_start = utils.timestamp()\n self.timestamp_end = None\n self.timestamp_ssl_setup = None\n self.protocol = None\n\n def __nonzero__(self):\n return bool(self.connection) and not self.finished\n\n def __repr__(self):\n return \"<ClientConnection: {ssl}{host}:{port}>\".format(\n ssl=\"[ssl] \" if self.ssl_established else \"\",\n host=self.address.host,\n port=self.address.port\n )\n\n @property\n def tls_established(self):\n return self.ssl_established\n\n _stateobject_attributes = dict(\n ssl_established=bool,\n timestamp_start=float,\n timestamp_end=float,\n timestamp_ssl_setup=float\n )\n\n def get_state(self, short=False):\n d = super(ClientConnection, self).get_state(short)\n d.update(\n address={\n \"address\": self.address(),\n \"use_ipv6\": self.address.use_ipv6},\n clientcert=self.cert.to_pem() if self.clientcert else None)\n return d\n\n def load_state(self, state):\n super(ClientConnection, self).load_state(state)\n self.address = tcp.Address(\n **state[\"address\"]) if state[\"address\"] else None\n self.clientcert = certutils.SSLCert.from_pem(\n state[\"clientcert\"]) if state[\"clientcert\"] else None\n\n def copy(self):\n return copy.copy(self)\n\n def send(self, message):\n if isinstance(message, list):\n message = b''.join(message)\n self.wfile.write(message)\n self.wfile.flush()\n\n @classmethod\n def from_state(cls, state):\n f = cls(None, tuple(), None)\n f.load_state(state)\n return f\n\n def convert_to_ssl(self, *args, **kwargs):\n super(ClientConnection, self).convert_to_ssl(*args, **kwargs)\n self.timestamp_ssl_setup = utils.timestamp()\n\n def finish(self):\n super(ClientConnection, self).finish()\n self.timestamp_end = utils.timestamp()\n\n\nclass ServerConnection(tcp.TCPClient, stateobject.StateObject):\n def __init__(self, address):\n tcp.TCPClient.__init__(self, address)\n\n self.via = None\n self.timestamp_start = None\n self.timestamp_end = None\n self.timestamp_tcp_setup = None\n self.timestamp_ssl_setup = None\n self.protocol = None\n\n def __nonzero__(self):\n return bool(self.connection) and not self.finished\n\n def __repr__(self):\n if self.ssl_established and self.sni:\n ssl = \"[ssl: {0}] \".format(self.sni)\n elif self.ssl_established:\n ssl = \"[ssl] \"\n else:\n ssl = \"\"\n return \"<ServerConnection: {ssl}{host}:{port}>\".format(\n ssl=ssl,\n host=self.address.host,\n port=self.address.port\n )\n\n @property\n def tls_established(self):\n return self.ssl_established\n\n _stateobject_attributes = dict(\n timestamp_start=float,\n timestamp_end=float,\n timestamp_tcp_setup=float,\n timestamp_ssl_setup=float,\n address=tcp.Address,\n source_address=tcp.Address,\n cert=certutils.SSLCert,\n ssl_established=bool,\n sni=str\n )\n _stateobject_long_attributes = {\"cert\"}\n\n def get_state(self, short=False):\n d = super(ServerConnection, self).get_state(short)\n d.update(\n address=({\"address\": self.address(),\n \"use_ipv6\": self.address.use_ipv6} if self.address else {}), \n source_address=({\"address\": self.source_address(),\n \"use_ipv6\": self.source_address.use_ipv6} if self.source_address else None),\n cert=self.cert.to_pem() if self.cert else None\n )\n return d\n\n def load_state(self, state):\n super(ServerConnection, self).load_state(state)\n\n self.address = tcp.Address(\n **state[\"address\"]) if state[\"address\"] else None\n self.source_address = tcp.Address(\n **state[\"source_address\"]) if state[\"source_address\"] else None\n self.cert = certutils.SSLCert.from_pem(\n state[\"cert\"]) if state[\"cert\"] else None\n\n @classmethod\n def from_state(cls, state):\n f = cls(tuple())\n f.load_state(state)\n return f\n\n def copy(self):\n return copy.copy(self)\n\n def connect(self):\n self.timestamp_start = utils.timestamp()\n tcp.TCPClient.connect(self)\n self.timestamp_tcp_setup = utils.timestamp()\n\n def send(self, message):\n if isinstance(message, list):\n message = b''.join(message)\n self.wfile.write(message)\n self.wfile.flush()\n\n def establish_ssl(self, clientcerts, sni, **kwargs):\n clientcert = None\n if clientcerts:\n path = os.path.join(\n clientcerts,\n self.address.host.encode(\"idna\")) + \".pem\"\n if os.path.exists(path):\n clientcert = path\n\n self.convert_to_ssl(cert=clientcert, sni=sni, **kwargs)\n self.sni = sni\n self.timestamp_ssl_setup = utils.timestamp()\n\n def finish(self):\n tcp.TCPClient.finish(self)\n self.timestamp_end = utils.timestamp()\n\n\nServerConnection._stateobject_attributes[\"via\"] = ServerConnection\n", "path": "libmproxy/models/connections.py"}]}
3,131
177
gh_patches_debug_17763
rasdani/github-patches
git_diff
Mailu__Mailu-2794
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dovecot: support for zstd/lz4 compression is not compiled in ## Environment & Versions ### Environment - [x] docker-compose - [ ] kubernetes - [ ] docker swarm ### Versions 1.9 ## Description When `COMPRESSION` in `mailu.env` is set to `zstd` or `lz4` and a new mail arrives, the imap container logs something like this: ``` Jan 04 23:32:46 lmtp(mymailaddress@mydomain.com)<32616><kRbzEw7L1GFofwAADGH2HQ>: Error: zlib_save: Support not compiled in for handler: zstd Jan 04 23:32:46 indexer-worker(mymailaddress@mydomain.com)<32619><D5ZaIcXUkKDAqMsD:Z6zPGQ7L1GFrfwAADGH2HQ>: Error: zlib_save: Support not compiled in for handler: zstd ``` ``` Jan 04 23:43:03 imap(mymailaddress@mydomain.com)<178><2CwwXMnUwMXAqMsE>: Error: zlib_save: Support not compiled in for handler: lz4 Jan 04 23:43:03 indexer-worker(mymailaddress@mydomain.com)<180><B4qhJXfN1GGvAAAAflj66Q:/8frM3fN1GG0AAAAflj66Q>: Error: zlib_save: Support not compiled in for handler: lz4 ``` As far as I can tell the Dovecot package from Alpine is probably not compiled with zstd/lz4 support, but I'm not sure since the build log of the last Alpine package returns 404 :roll_eyes: This is __not__ breaking anything, mails will be received, but they won't be compressed. ## Replication Steps 1. in `mailu.env` set `COMPRESSION=zstd` or `COMPRESSION=lz4` 2. `docker-compose down && docker-compose up` 3. send yourself a mail 4. watch the logs of the imap container ## Expected behaviour With `COMPRESSION=zstd`/`COMPRESSION=lz4` set, mails should be compressed with zstd/lz4 and then stored. ## Workaround Set `COMPRESSION=gz` if you need compressed mail storage. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/postfix/start.py` Content: ``` 1 #!/usr/bin/env python3 2 3 import os 4 import glob 5 import shutil 6 import multiprocessing 7 import sys 8 import re 9 10 from podop import run_server 11 from socrate import system, conf 12 13 system.set_env(log_filters=[ 14 r'the Postfix mail system is running\: \d+$', 15 r'(dis)?connect from localhost\[(\:\:1|127\.0\.0\.1)\]( quit=1 commands=1)?$', 16 r'haproxy read\: short protocol header\: QUIT$', 17 r'discarding EHLO keywords\: PIPELINING$', 18 ], log_file=os.environ.get('POSTFIX_LOG_FILE')) 19 20 os.system("flock -n /queue/pid/master.pid rm /queue/pid/master.pid") 21 22 def start_podop(): 23 system.drop_privs_to('postfix') 24 os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True) 25 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/" 26 # TODO: Remove verbosity setting from Podop? 27 run_server(0, "postfix", "/tmp/podop.socket", [ 28 ("transport", "url", url + "transport/§"), 29 ("alias", "url", url + "alias/§"), 30 ("dane", "url", url + "dane/§"), 31 ("domain", "url", url + "domain/§"), 32 ("mailbox", "url", url + "mailbox/§"), 33 ("recipientmap", "url", url + "recipient/map/§"), 34 ("sendermap", "url", url + "sender/map/§"), 35 ("senderlogin", "url", url + "sender/login/§"), 36 ("senderrate", "url", url + "sender/rate/§") 37 ]) 38 39 def start_mta_sts_daemon(): 40 os.chmod("/root/", 0o755) # read access to /root/.netrc required 41 system.drop_privs_to('postfix') 42 from postfix_mta_sts_resolver import daemon 43 daemon.main() 44 45 def is_valid_postconf_line(line): 46 return not line.startswith("#") \ 47 and not line == '' 48 49 # Actual startup script 50 os.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True' 51 52 # Postfix requires IPv6 addresses to be wrapped in square brackets 53 if 'RELAYNETS' in os.environ: 54 os.environ["RELAYNETS"] = re.sub(r'([0-9a-fA-F]+:[0-9a-fA-F:]+)/', '[\\1]/', os.environ["RELAYNETS"]) 55 56 for postfix_file in glob.glob("/conf/*.cf"): 57 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file))) 58 59 if os.path.exists("/overrides/postfix.cf"): 60 for line in open("/overrides/postfix.cf").read().strip().split("\n"): 61 if is_valid_postconf_line(line): 62 os.system('postconf -e "{}"'.format(line)) 63 64 if os.path.exists("/overrides/postfix.master"): 65 for line in open("/overrides/postfix.master").read().strip().split("\n"): 66 if is_valid_postconf_line(line): 67 os.system('postconf -Me "{}"'.format(line)) 68 69 for map_file in glob.glob("/overrides/*.map"): 70 destination = os.path.join("/etc/postfix", os.path.basename(map_file)) 71 shutil.copyfile(map_file, destination) 72 os.system("postmap {}".format(destination)) 73 os.remove(destination) 74 75 if os.path.exists("/overrides/mta-sts-daemon.yml"): 76 shutil.copyfile("/overrides/mta-sts-daemon.yml", "/etc/mta-sts-daemon.yml") 77 else: 78 conf.jinja("/conf/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml") 79 80 for policy in ['tls_policy', 'transport']: 81 if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'): 82 open(f'/etc/postfix/{policy}.map', 'a').close() 83 os.system(f'postmap /etc/postfix/{policy}.map') 84 85 if "RELAYUSER" in os.environ: 86 path = "/etc/postfix/sasl_passwd" 87 conf.jinja("/conf/sasl_passwd", os.environ, path) 88 os.system("postmap {}".format(path)) 89 90 # Configure logrotate and start crond 91 if os.environ.get('POSTFIX_LOG_FILE'): 92 conf.jinja("/conf/logrotate.conf", os.environ, "/etc/logrotate.d/postfix.conf") 93 os.system("/usr/sbin/crond") 94 if os.path.exists("/overrides/logrotate.conf"): 95 shutil.copyfile("/overrides/logrotate.conf", "/etc/logrotate.d/postfix.conf") 96 97 # Run Podop and Postfix 98 multiprocessing.Process(target=start_podop).start() 99 multiprocessing.Process(target=start_mta_sts_daemon).start() 100 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing") 101 # Before starting postfix, we need to check permissions on /queue 102 # in the event that postfix,postdrop id have changed 103 os.system("postfix set-permissions") 104 os.system("postfix start-fg") 105 ``` Path: `core/dovecot/start.py` Content: ``` 1 #!/usr/bin/env python3 2 3 import os 4 import glob 5 import multiprocessing 6 import logging as log 7 import sys 8 9 from podop import run_server 10 from socrate import system, conf 11 12 system.set_env(log_filters=r'waitpid\(\) returned unknown PID \d+$') 13 14 def start_podop(): 15 system.drop_privs_to('mail') 16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§" 17 run_server(0, "dovecot", "/tmp/podop.socket", [ 18 ("quota", "url", url ), 19 ("auth", "url", url), 20 ("sieve", "url", url), 21 ]) 22 23 # Actual startup script 24 for dovecot_file in glob.glob("/conf/*.conf"): 25 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file))) 26 27 os.makedirs("/conf/bin", exist_ok=True) 28 for script_file in glob.glob("/conf/*.script"): 29 out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script','')) 30 conf.jinja(script_file, os.environ, out_file) 31 os.chmod(out_file, 0o555) 32 33 # Run Podop, then postfix 34 os.system("chown mail:mail /mail") 35 os.system("chown -R mail:mail /var/lib/dovecot /conf") 36 37 multiprocessing.Process(target=start_podop).start() 38 os.system("dovecot -c /etc/dovecot/dovecot.conf -F") 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/dovecot/start.py b/core/dovecot/start.py --- a/core/dovecot/start.py +++ b/core/dovecot/start.py @@ -9,7 +9,7 @@ from podop import run_server from socrate import system, conf -system.set_env(log_filters=r'waitpid\(\) returned unknown PID \d+$') +system.set_env(log_filters=r'Error\: SSL context initialization failed, disabling SSL\: Can\'t load SSL certificate \(ssl_cert setting\)\: The certificate is empty$') def start_podop(): system.drop_privs_to('mail') diff --git a/core/postfix/start.py b/core/postfix/start.py --- a/core/postfix/start.py +++ b/core/postfix/start.py @@ -11,7 +11,6 @@ from socrate import system, conf system.set_env(log_filters=[ - r'the Postfix mail system is running\: \d+$', r'(dis)?connect from localhost\[(\:\:1|127\.0\.0\.1)\]( quit=1 commands=1)?$', r'haproxy read\: short protocol header\: QUIT$', r'discarding EHLO keywords\: PIPELINING$',
{"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -9,7 +9,7 @@\n from podop import run_server\n from socrate import system, conf\n \n-system.set_env(log_filters=r'waitpid\\(\\) returned unknown PID \\d+$')\n+system.set_env(log_filters=r'Error\\: SSL context initialization failed, disabling SSL\\: Can\\'t load SSL certificate \\(ssl_cert setting\\)\\: The certificate is empty$')\n \n def start_podop():\n system.drop_privs_to('mail')\ndiff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -11,7 +11,6 @@\n from socrate import system, conf\n \n system.set_env(log_filters=[\n- r'the Postfix mail system is running\\: \\d+$',\n r'(dis)?connect from localhost\\[(\\:\\:1|127\\.0\\.0\\.1)\\]( quit=1 commands=1)?$',\n r'haproxy read\\: short protocol header\\: QUIT$',\n r'discarding EHLO keywords\\: PIPELINING$',\n", "issue": "dovecot: support for zstd/lz4 compression is not compiled in\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [ ] docker swarm\r\n\r\n### Versions\r\n1.9\r\n\r\n## Description\r\nWhen `COMPRESSION` in `mailu.env` is set to `zstd` or `lz4` and a new mail arrives, the imap container logs something like this:\r\n```\r\nJan 04 23:32:46 lmtp(mymailaddress@mydomain.com)<32616><kRbzEw7L1GFofwAADGH2HQ>: Error: zlib_save: Support not compiled in for handler: zstd\r\nJan 04 23:32:46 indexer-worker(mymailaddress@mydomain.com)<32619><D5ZaIcXUkKDAqMsD:Z6zPGQ7L1GFrfwAADGH2HQ>: Error: zlib_save: Support not compiled in for handler: zstd\r\n```\r\n\r\n```\r\nJan 04 23:43:03 imap(mymailaddress@mydomain.com)<178><2CwwXMnUwMXAqMsE>: Error: zlib_save: Support not compiled in for handler: lz4\r\nJan 04 23:43:03 indexer-worker(mymailaddress@mydomain.com)<180><B4qhJXfN1GGvAAAAflj66Q:/8frM3fN1GG0AAAAflj66Q>: Error: zlib_save: Support not compiled in for handler: lz4\r\n```\r\n\r\nAs far as I can tell the Dovecot package from Alpine is probably not compiled with zstd/lz4 support, but I'm not sure since the build log of the last Alpine package returns 404 :roll_eyes:\r\n\r\nThis is __not__ breaking anything, mails will be received, but they won't be compressed.\r\n\r\n## Replication Steps\r\n1. in `mailu.env` set `COMPRESSION=zstd` or `COMPRESSION=lz4`\r\n2. `docker-compose down && docker-compose up`\r\n3. send yourself a mail\r\n4. watch the logs of the imap container\r\n\r\n## Expected behaviour\r\nWith `COMPRESSION=zstd`/`COMPRESSION=lz4` set, mails should be compressed with zstd/lz4 and then stored.\r\n\r\n## Workaround\r\nSet `COMPRESSION=gz` if you need compressed mail storage.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport sys\nimport re\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nsystem.set_env(log_filters=[\n r'the Postfix mail system is running\\: \\d+$',\n r'(dis)?connect from localhost\\[(\\:\\:1|127\\.0\\.0\\.1)\\]( quit=1 commands=1)?$',\n r'haproxy read\\: short protocol header\\: QUIT$',\n r'discarding EHLO keywords\\: PIPELINING$',\n ], log_file=os.environ.get('POSTFIX_LOG_FILE'))\n\nos.system(\"flock -n /queue/pid/master.pid rm /queue/pid/master.pid\")\n\ndef start_podop():\n system.drop_privs_to('postfix')\n os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n (\"transport\", \"url\", url + \"transport/\u00a7\"),\n (\"alias\", \"url\", url + \"alias/\u00a7\"),\n (\"dane\", \"url\", url + \"dane/\u00a7\"),\n (\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\"),\n (\"senderrate\", \"url\", url + \"sender/rate/\u00a7\")\n ])\n\ndef start_mta_sts_daemon():\n os.chmod(\"/root/\", 0o755) # read access to /root/.netrc required\n system.drop_privs_to('postfix')\n from postfix_mta_sts_resolver import daemon\n daemon.main()\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True'\n\n# Postfix requires IPv6 addresses to be wrapped in square brackets\nif 'RELAYNETS' in os.environ:\n os.environ[\"RELAYNETS\"] = re.sub(r'([0-9a-fA-F]+:[0-9a-fA-F:]+)/', '[\\\\1]/', os.environ[\"RELAYNETS\"])\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\nelse:\n conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n\nfor policy in ['tls_policy', 'transport']:\n if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):\n open(f'/etc/postfix/{policy}.map', 'a').close()\n os.system(f'postmap /etc/postfix/{policy}.map')\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Configure logrotate and start crond\nif os.environ.get('POSTFIX_LOG_FILE'):\n conf.jinja(\"/conf/logrotate.conf\", os.environ, \"/etc/logrotate.d/postfix.conf\")\n os.system(\"/usr/sbin/crond\")\n if os.path.exists(\"/overrides/logrotate.conf\"):\n shutil.copyfile(\"/overrides/logrotate.conf\", \"/etc/logrotate.d/postfix.conf\")\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nmultiprocessing.Process(target=start_mta_sts_daemon).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}, {"content": "#!/usr/bin/env python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nsystem.set_env(log_filters=r'waitpid\\(\\) returned unknown PID \\d+$')\n\ndef start_podop():\n system.drop_privs_to('mail')\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\nos.makedirs(\"/conf/bin\", exist_ok=True)\nfor script_file in glob.glob(\"/conf/*.script\"):\n out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n conf.jinja(script_file, os.environ, out_file)\n os.chmod(out_file, 0o555)\n\n# Run Podop, then postfix\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\n\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"dovecot -c /etc/dovecot/dovecot.conf -F\")\n", "path": "core/dovecot/start.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport sys\nimport re\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nsystem.set_env(log_filters=[\n r'(dis)?connect from localhost\\[(\\:\\:1|127\\.0\\.0\\.1)\\]( quit=1 commands=1)?$',\n r'haproxy read\\: short protocol header\\: QUIT$',\n r'discarding EHLO keywords\\: PIPELINING$',\n ], log_file=os.environ.get('POSTFIX_LOG_FILE'))\n\nos.system(\"flock -n /queue/pid/master.pid rm /queue/pid/master.pid\")\n\ndef start_podop():\n system.drop_privs_to('postfix')\n os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n (\"transport\", \"url\", url + \"transport/\u00a7\"),\n (\"alias\", \"url\", url + \"alias/\u00a7\"),\n (\"dane\", \"url\", url + \"dane/\u00a7\"),\n (\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\"),\n (\"senderrate\", \"url\", url + \"sender/rate/\u00a7\")\n ])\n\ndef start_mta_sts_daemon():\n os.chmod(\"/root/\", 0o755) # read access to /root/.netrc required\n system.drop_privs_to('postfix')\n from postfix_mta_sts_resolver import daemon\n daemon.main()\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True'\n\n# Postfix requires IPv6 addresses to be wrapped in square brackets\nif 'RELAYNETS' in os.environ:\n os.environ[\"RELAYNETS\"] = re.sub(r'([0-9a-fA-F]+:[0-9a-fA-F:]+)/', '[\\\\1]/', os.environ[\"RELAYNETS\"])\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\nelse:\n conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n\nfor policy in ['tls_policy', 'transport']:\n if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):\n open(f'/etc/postfix/{policy}.map', 'a').close()\n os.system(f'postmap /etc/postfix/{policy}.map')\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Configure logrotate and start crond\nif os.environ.get('POSTFIX_LOG_FILE'):\n conf.jinja(\"/conf/logrotate.conf\", os.environ, \"/etc/logrotate.d/postfix.conf\")\n os.system(\"/usr/sbin/crond\")\n if os.path.exists(\"/overrides/logrotate.conf\"):\n shutil.copyfile(\"/overrides/logrotate.conf\", \"/etc/logrotate.d/postfix.conf\")\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nmultiprocessing.Process(target=start_mta_sts_daemon).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}, {"content": "#!/usr/bin/env python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nsystem.set_env(log_filters=r'Error\\: SSL context initialization failed, disabling SSL\\: Can\\'t load SSL certificate \\(ssl_cert setting\\)\\: The certificate is empty$')\n\ndef start_podop():\n system.drop_privs_to('mail')\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\nos.makedirs(\"/conf/bin\", exist_ok=True)\nfor script_file in glob.glob(\"/conf/*.script\"):\n out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n conf.jinja(script_file, os.environ, out_file)\n os.chmod(out_file, 0o555)\n\n# Run Podop, then postfix\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\n\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"dovecot -c /etc/dovecot/dovecot.conf -F\")\n", "path": "core/dovecot/start.py"}]}
2,579
272
gh_patches_debug_56612
rasdani/github-patches
git_diff
spacetelescope__jwql-677
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update Bokeh to latest version I remember there was some reason that we were holding off on upgrading Bokeh from 1.3.4. However, Bokeh is now up to version 2.2.1 I believe. We should look into upgrading the version used for JWQL in order to take advantage of new features and so that we minimize the number of plots created under 1.3.4 which may need to be tweaked to work under the new version. For example, one difference I ran into today was that the keyword "legend", which is used in 1.3.4 to denote the string printed in the legend for a particular element, has been changed to "legend_label" in version 2.2.1. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import numpy as np 2 from setuptools import setup 3 from setuptools import find_packages 4 5 VERSION = '0.24.0' 6 7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, ' 8 AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist' 9 10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project' 11 12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles'] 13 14 REQUIRES = [ 15 'asdf>=2.3.3', 16 'astropy>=3.2.1', 17 'astroquery>=0.3.9', 18 'authlib', 19 'bokeh>=1.0,<1.4', 20 'codecov', 21 'crds', 22 'cryptography', 23 'django', 24 'flake8', 25 'inflection', 26 'ipython', 27 'jinja2', 28 'jsonschema', 29 'jwedb>=0.0.3', 30 'jwst', 31 'matplotlib', 32 'nodejs', 33 'numpy', 34 'numpydoc', 35 'pandas', 36 'psycopg2', 37 'pysiaf', 38 'pytest', 39 'pytest-cov', 40 'scipy', 41 'sphinx', 42 'sqlalchemy', 43 'stsci_rtd_theme', 44 'twine', 45 'wtforms' 46 ] 47 48 setup( 49 name='jwql', 50 version=VERSION, 51 description=DESCRIPTION, 52 url='https://github.com/spacetelescope/jwql.git', 53 author=AUTHORS, 54 author_email='jwql@stsci.edu', 55 license='BSD', 56 keywords=['astronomy', 'python'], 57 classifiers=['Programming Language :: Python'], 58 packages=find_packages(), 59 install_requires=REQUIRES, 60 dependency_links=DEPENDENCY_LINKS, 61 include_package_data=True, 62 include_dirs=[np.get_include()], 63 ) 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ 'astropy>=3.2.1', 'astroquery>=0.3.9', 'authlib', - 'bokeh>=1.0,<1.4', + 'bokeh', 'codecov', 'crds', 'cryptography',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n- 'bokeh>=1.0,<1.4',\n+ 'bokeh',\n 'codecov',\n 'crds',\n 'cryptography',\n", "issue": "Update Bokeh to latest version\nI remember there was some reason that we were holding off on upgrading Bokeh from 1.3.4. However, Bokeh is now up to version 2.2.1 I believe. We should look into upgrading the version used for JWQL in order to take advantage of new features and so that we minimize the number of plots created under 1.3.4 which may need to be tweaked to work under the new version.\r\n\r\nFor example, one difference I ran into today was that the keyword \"legend\", which is used in 1.3.4 to denote the string printed in the legend for a particular element, has been changed to \"legend_label\" in version 2.2.1.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='jwql@stsci.edu',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='jwql@stsci.edu',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]}
981
94
gh_patches_debug_826
rasdani/github-patches
git_diff
internetarchive__openlibrary-4557
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- /openlibrary/openlibrary/templates/lists/widget.html: error in processing template: TypeError: Object of type Nothing is not JSON serializable (falling back to default template) Patron is reporting the following error: `/openlibrary/openlibrary/templates/lists/widget.html: error in processing template: TypeError: Object of type Nothing is not JSON serializable (falling back to default template)` ### Evidence / Screenshot (if possible) ### Relevant url? https://openlibrary.org/works/OL24171550W/Kelebihan_Amalan_Bulan_Rejab_Sya%E2%80%99ban_Ramadhan ### Steps to Reproduce <!-- What steps caused you to find the bug? --> 1. Go to ... https://openlibrary.org/works/OL24171550W/Kelebihan_Amalan_Bulan_Rejab_Sya%E2%80%99ban_Ramadhan 2. Do ... view error. <!-- What actually happened after these steps? What did you expect to happen? --> * Actual: * Expected: ### Details - **Logged in (Y/N)?** - **Browser type/version?** - **Operating system?** - **Environment (prod/dev/local)?** prod <!-- If not sure, put prod --> ### Proposal & Constraints <!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? --> ### Related files <!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. --> ### Stakeholders <!-- @ tag stakeholders of this bug --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openlibrary/core/helpers.py` Content: ``` 1 """Generic helper functions to use in the templates and the webapp. 2 """ 3 import web 4 from datetime import datetime 5 import re 6 7 import six 8 from six.moves.urllib.parse import urlsplit 9 10 if six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2 11 import simplejson as json 12 else: 13 import json 14 15 import babel 16 import babel.core 17 import babel.dates 18 import babel.numbers 19 20 try: 21 import genshi 22 import genshi.filters 23 except ImportError: 24 genshi = None 25 26 try: 27 from bs4 import BeautifulSoup 28 except ImportError: 29 BeautifulSoup = None 30 31 from infogami import config 32 33 # handy utility to parse ISO date strings 34 from infogami.infobase.utils import parse_datetime 35 from infogami.utils.view import safeint 36 37 # TODO: i18n should be moved to core or infogami 38 from openlibrary.i18n import gettext as _ # noqa: F401 39 40 __all__ = [ 41 "sanitize", 42 "json_encode", 43 "safesort", 44 "days_since", "datestr", "format_date", 45 "sprintf", "cond", "commify", "truncate", "datetimestr_utc", 46 "urlsafe", "texsafe", 47 "percentage", "affiliate_id", "bookreader_host", 48 "private_collections", "private_collection_in", 49 50 # functions imported from elsewhere 51 "parse_datetime", "safeint" 52 ] 53 __docformat__ = "restructuredtext en" 54 55 def sanitize(html, encoding='utf8'): 56 """Removes unsafe tags and attributes from html and adds 57 ``rel="nofollow"`` attribute to all external links. 58 Using encoding=None if passing unicode strings e.g. for Python 3. 59 encoding="utf8" matches default format for earlier versions of Genshi 60 https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version 61 """ 62 63 # Can't sanitize unless genshi module is available 64 if genshi is None: 65 return html 66 67 def get_nofollow(name, event): 68 attrs = event[1][1] 69 href = attrs.get('href', '') 70 71 if href: 72 # add rel=nofollow to all absolute links 73 _, host, _, _, _ = urlsplit(href) 74 if host: 75 return 'nofollow' 76 77 try: 78 html = genshi.HTML(html, encoding=encoding) 79 80 # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e: 81 # don't catch Unicode errors so we can tell if we're getting bytes 82 except genshi.ParseError: 83 if BeautifulSoup: 84 # Bad html. Tidy it up using BeautifulSoup 85 html = str(BeautifulSoup(html, "lxml")) 86 try: 87 html = genshi.HTML(html) 88 except Exception: 89 # Failed to sanitize. 90 # We can't do any better than returning the original HTML, without sanitizing. 91 return html 92 else: 93 raise 94 95 stream = html \ 96 | genshi.filters.HTMLSanitizer() \ 97 | genshi.filters.Transformer("//a").attr("rel", get_nofollow) 98 return stream.render() 99 100 101 def json_encode(d, **kw): 102 """Same as json.dumps. 103 """ 104 return json.dumps(d, **kw) 105 106 107 def safesort(iterable, key=None, reverse=False): 108 """Sorts heterogeneous of objects without raising errors. 109 110 Sorting heterogeneous objects sometimes causes error. For example, 111 datetime and Nones don't go well together. This function takes special 112 care to make that work. 113 """ 114 key = key or (lambda x: x) 115 def safekey(x): 116 k = key(x) 117 return (k.__class__.__name__, k) 118 return sorted(iterable, key=safekey, reverse=reverse) 119 120 121 def days_since(then, now=None): 122 delta = then - (now or datetime.now()) 123 return abs(delta.days) 124 125 126 def datestr(then, now=None, lang=None, relative=True): 127 """Internationalized version of web.datestr.""" 128 lang = lang or web.ctx.get('lang') or "en" 129 if relative: 130 if now is None: 131 now = datetime.now() 132 delta = then - now 133 if abs(delta.days) < 4: # Threshold from web.py 134 return babel.dates.format_timedelta(delta, 135 add_direction=True, 136 locale=_get_babel_locale(lang)) 137 return format_date(then, lang=lang) 138 139 140 def datetimestr_utc(then): 141 return then.strftime("%Y-%m-%dT%H:%M:%SZ") 142 143 def format_date(date, lang=None): 144 lang = lang or web.ctx.get('lang') or "en" 145 locale = _get_babel_locale(lang) 146 return babel.dates.format_date(date, format="long", locale=locale) 147 148 def _get_babel_locale(lang): 149 try: 150 return babel.Locale(lang) 151 except babel.core.UnknownLocaleError: 152 return babel.Locale("en") 153 154 155 def sprintf(s, *a, **kw): 156 """Handy utility for string replacements. 157 158 >>> sprintf('hello %s', 'python') 159 'hello python' 160 >>> sprintf('hello %(name)s', name='python') 161 'hello python' 162 """ 163 args = kw or a 164 if args: 165 return s % args 166 else: 167 return s 168 169 170 def cond(pred, true_value, false_value=""): 171 """Lisp style cond function. 172 173 Hanly to use instead of if-else expression. 174 """ 175 if pred: 176 return true_value 177 else: 178 return false_value 179 180 181 def commify(number, lang=None): 182 """localized version of web.commify""" 183 try: 184 lang = lang or web.ctx.get("lang") or "en" 185 return babel.numbers.format_number(int(number), lang) 186 except: 187 return six.text_type(number) 188 189 190 def truncate(text, limit): 191 """Truncate text and add ellipses if it longer than specified limit.""" 192 if not text: 193 return '' 194 if len(text) <= limit: 195 return text 196 return text[:limit] + "..." 197 198 199 def urlsafe(path): 200 """Replaces the unsafe chars from path with underscores. 201 """ 202 return _get_safepath_re().sub('_', path).strip('_')[:100] 203 204 @web.memoize 205 def _get_safepath_re(): 206 """Make regular expression that matches all unsafe chars.""" 207 # unsafe chars according to RFC 2396 208 reserved = ";/?:@&=+$," 209 delims = '<>#%"' 210 unwise = "{}|\\^[]`" 211 space = ' \n\r' 212 213 unsafe = reserved + delims + unwise + space 214 pattern = '[%s]+' % "".join(re.escape(c) for c in unsafe) 215 return re.compile(pattern) 216 217 218 def get_coverstore_url(): 219 """Returns the base url of coverstore by looking at the config.""" 220 return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/') 221 222 223 _texsafe_map = { 224 '"': r'\textquotedbl{}', 225 '#': r'\#', 226 '$': r'\$', 227 '%': r'\%', 228 '&': r'\&', 229 '<': r'\textless{}', 230 '>': r'\textgreater{}', 231 '\\': r'\textbackslash{}', 232 '^': r'\^{}', 233 '_': r'\_{}', 234 '{': r'\{', 235 '}': r'\}', 236 '|': r'\textbar{}', 237 '~': r'\~{}', 238 } 239 240 _texsafe_re = None 241 242 def texsafe(text): 243 """Escapes the special characters in the given text for using it in tex type setting. 244 245 Tex (or Latex) uses some characters in the ascii character range for 246 special notations. These characters must be escaped when occur in the 247 regular text. This function escapes those special characters. 248 249 The list of special characters and the latex command to typeset them can 250 be found in `The Comprehensive LaTeX Symbol List`_. 251 252 .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf 253 """ 254 global _texsafe_re 255 if _texsafe_re is None: 256 pattern = "[%s]" % re.escape("".join(list(_texsafe_map))) 257 _texsafe_re = re.compile(pattern) 258 259 return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text) 260 261 def percentage(value, total): 262 """Computes percentage. 263 264 >>> percentage(1, 10) 265 10.0 266 >>> percentage(0, 0) 267 0.0 268 """ 269 return (value * 100.0) / total if total else 0.0 270 271 def uniq(values, key=None): 272 """Returns the unique entries from the given values in the original order. 273 274 The value of the optional `key` parameter should be a function that takes 275 a single argument and returns a key to test the uniqueness. 276 """ 277 key = key or (lambda x: x) 278 s = set() 279 result = [] 280 for v in values: 281 k = key(v) 282 if k not in s: 283 s.add(k) 284 result.append(v) 285 return result 286 287 def affiliate_id(affiliate): 288 return config.get('affiliate_ids', {}).get(affiliate, '') 289 290 def bookreader_host(): 291 return config.get('bookreader_host', '') 292 293 def private_collections(): 294 """Collections which are lendable but should not be linked from OL 295 TODO: Remove when we can handle institutional books""" 296 return ['georgetown-university-law-library-rr'] 297 298 def private_collection_in(collections): 299 return any(x in private_collections() for x in collections) 300 301 def _get_helpers(): 302 _globals = globals() 303 return web.storage((k, _globals[k]) for k in __all__) 304 305 306 ## This must be at the end of this module 307 helpers = _get_helpers() 308 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py --- a/openlibrary/core/helpers.py +++ b/openlibrary/core/helpers.py @@ -101,7 +101,7 @@ def json_encode(d, **kw): """Same as json.dumps. """ - return json.dumps(d, **kw) + return json.dumps(d or {}, **kw) def safesort(iterable, key=None, reverse=False):
{"golden_diff": "diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py\n--- a/openlibrary/core/helpers.py\n+++ b/openlibrary/core/helpers.py\n@@ -101,7 +101,7 @@\n def json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n- return json.dumps(d, **kw)\n+ return json.dumps(d or {}, **kw)\n \n \n def safesort(iterable, key=None, reverse=False):\n", "issue": "/openlibrary/openlibrary/templates/lists/widget.html: error in processing template: TypeError: Object of type Nothing is not JSON serializable (falling back to default template)\nPatron is reporting the following error:\r\n`/openlibrary/openlibrary/templates/lists/widget.html: error in processing template: TypeError: Object of type Nothing is not JSON serializable (falling back to default template)`\r\n\r\n### Evidence / Screenshot (if possible)\r\n\r\n### Relevant url?\r\nhttps://openlibrary.org/works/OL24171550W/Kelebihan_Amalan_Bulan_Rejab_Sya%E2%80%99ban_Ramadhan\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to ... https://openlibrary.org/works/OL24171550W/Kelebihan_Amalan_Bulan_Rejab_Sya%E2%80%99ban_Ramadhan\r\n2. Do ... view error.\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: \r\n* Expected: \r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** \r\n- **Browser type/version?** \r\n- **Operating system?** \r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\n", "before_files": [{"content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\n import json\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n return json.dumps(d, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\n\ndef days_since(then, now=None):\n delta = then - (now or datetime.now())\n return abs(delta.days)\n\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\n@web.memoize\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py"}], "after_files": [{"content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\n import json\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n return json.dumps(d or {}, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\n\ndef days_since(then, now=None):\n delta = then - (now or datetime.now())\n return abs(delta.days)\n\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\n@web.memoize\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py"}]}
3,639
98
gh_patches_debug_23985
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-6539
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pyi_rth_pkgutil.py hook might not work under MacOS in onefile mode ## Description of the issue I'm using `iter_modules` in our cross-platform OpenSource Project [Specter-Desktop](https://github.com/cryptoadvance/specter-desktop) ([spec-file](https://github.com/cryptoadvance/specter-desktop/blob/master/pyinstaller/specterd.spec)). For that, the [pyi_rth_pkgutil.py-hook](https://github.com/pyinstaller/pyinstaller/blob/v4.8/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py) is quite useful and solved my issue on the linux-platform. However, i have an issue with the MacOS-build at runtime: ``` $ ./dist/specterd server --config=DevelopmentConfig Initializing HWI... [2022-01-24 11:49:07,827] INFO in server: Configuration: cryptoadvance.specter.config.DevelopmentConfig [2022-01-24 11:49:07,828] INFO in specter_migrator: Initiated MigDataManager(/Users/thomasmuller/.specter_dev/migration_data.json events:3 execs:2 ) Traceback (most recent call last): File "specterd.py", line 4, in <module> entry_point() File "click/core.py", line 829, in __call__ File "click/core.py", line 782, in main File "click/core.py", line 1259, in invoke File "click/core.py", line 1066, in invoke File "click/core.py", line 610, in invoke File "cryptoadvance/specter/cli/cli_server.py", line 114, in server File "cryptoadvance/specter/server.py", line 104, in init_app File "cryptoadvance/specter/util/specter_migrator.py", line 125, in execute_migrations File "cryptoadvance/specter/util/specter_migrator.py", line 107, in plan_migration File "cryptoadvance/specter/util/specter_migrator.py", line 161, in get_migration_classes File "PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py", line 59, in _pyi_pkgutil_iter_modules AssertionError [20771] Failed to execute script 'specterd' due to unhandled exception! (.env) thomasmuller @ Thomass-Air.fritz.box ➜ pyinstaller git:(service_swan) ✗ ``` So the relevant code is this: ``` SYS_PREFIX = sys._MEIPASS + os.path.sep SYS_PREFIXLEN = len(SYS_PREFIX) # Only single path is supported, and it must start with sys._MEIPASS. pkg_path = os.path.normpath(path[0]) assert pkg_path.startswith(SYS_PREFIX) ``` So i inserted some debugging code and figured out that: ``` SYS_PREFIX: /var/folders/bd/k__jfmz112sb1_dyz4rxwfzh0000gn/T/ pkg_path: /private/var/folders/bd/k__jfmz112sb1_dyz4rxwfzh0000gn/T/_MEIBqFxGW/cryptoadvance/specter/util/migrations ``` So in combination with the comment above, it seems that MacOS is using "multiple path" and so MacOS is not supported here? On the other hand, this [link](https://difyel.com/apple/macos/macos-directory-structure/#Other_directories) is claiming that `private/var` is only a symlink to `/var`. So i added this and this seem to have solved it? ``` if pkg_path.startswith("/private"): pkg_path = pkg_path[8:] assert pkg_path.startswith(SYS_PREFIX) ``` Is this a valid fix? I might make a PR if someone can give me some guidance on this. ### Context information (for bug reports) * Output of `pyinstaller --version`: ```4.8``` * Version of Python: <!-- e.g. 3.9.7 --> * Platform: OS X * How you installed Python: brew * Did you also try this on another platform? Does it work there? --> Yes, works on Linux --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2021, PyInstaller Development Team. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: Apache-2.0 10 #----------------------------------------------------------------------------- 11 # 12 # This rthook overrides pkgutil.iter_modules with custom implementation that uses PyInstaller's FrozenImporter to list 13 # sub-modules embedded in the PYZ archive. The non-embedded modules (binary extensions, or .pyc modules in noarchive 14 # build) are handled by original pkgutil iter_modules implementation (and consequently, python's FileFinder). 15 # 16 # The preferred way of adding support for iter_modules would be adding non-standard iter_modules() method to 17 # FrozenImporter itself. However, that seems to work only for path entry finders (for use with sys.path_hooks), while 18 # PyInstaller's FrozenImporter is registered as meta path finders (for use with sys.meta_path). Turning FrozenImporter 19 # into path entry finder, would seemingly require the latter to support on-filesystem resources (e.g., extension 20 # modules) in addition to PYZ-embedded ones. 21 # 22 # Therefore, we instead opt for overriding pkgutil.iter_modules with custom implementation that augments the output of 23 # original implementation with contents of PYZ archive from FrozenImporter's TOC. 24 25 import os 26 import pkgutil 27 import sys 28 29 from pyimod03_importers import FrozenImporter 30 31 _orig_pkgutil_iter_modules = pkgutil.iter_modules 32 33 34 def _pyi_pkgutil_iter_modules(path=None, prefix=''): 35 # Use original implementation to discover on-filesystem modules (binary extensions in regular builds, or both binary 36 # extensions and compiled pyc modules in noarchive debug builds). 37 yield from _orig_pkgutil_iter_modules(path, prefix) 38 39 # Find the instance of PyInstaller's FrozenImporter. 40 for importer in pkgutil.iter_importers(): 41 if isinstance(importer, FrozenImporter): 42 break 43 else: 44 return 45 46 if path is None: 47 # Search for all top-level packages/modules. These will have no dots in their entry names. 48 for entry in importer.toc: 49 if entry.count('.') != 0: 50 continue 51 is_pkg = importer.is_package(entry) 52 yield pkgutil.ModuleInfo(importer, prefix + entry, is_pkg) 53 else: 54 # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook. 55 SYS_PREFIX = sys._MEIPASS + os.path.sep 56 SYS_PREFIXLEN = len(SYS_PREFIX) 57 58 for pkg_path in path: 59 pkg_path = os.path.normpath(pkg_path) 60 if not pkg_path.startswith(SYS_PREFIX): 61 # if the path does not start with sys._MEIPASS then it cannot be a bundled package. 62 continue 63 # Construct package prefix from path... 64 pkg_prefix = pkg_path[SYS_PREFIXLEN:] 65 pkg_prefix = pkg_prefix.replace(os.path.sep, '.') 66 # ... and ensure it ends with a dot (so we can directly filter out the package itself). 67 if not pkg_prefix.endswith('.'): 68 pkg_prefix += '.' 69 pkg_prefix_len = len(pkg_prefix) 70 71 for entry in importer.toc: 72 if not entry.startswith(pkg_prefix): 73 continue 74 name = entry[pkg_prefix_len:] 75 if name.count('.') != 0: 76 continue 77 is_pkg = importer.is_package(entry) 78 yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg) 79 80 81 pkgutil.iter_modules = _pyi_pkgutil_iter_modules 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py b/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py --- a/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py +++ b/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py @@ -52,11 +52,16 @@ yield pkgutil.ModuleInfo(importer, prefix + entry, is_pkg) else: # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook. - SYS_PREFIX = sys._MEIPASS + os.path.sep + # + # Use os.path.realpath() to fully resolve any symbolic links in sys._MEIPASS, in order to avoid path mis-matches + # when the given search paths also contain symbolic links and are already fully resolved. See #6537 for an + # example of such a problem with onefile build on macOS, where the temporary directory is placed under /var, + # which is actually a symbolic link to /private/var. + SYS_PREFIX = os.path.realpath(sys._MEIPASS) + os.path.sep SYS_PREFIXLEN = len(SYS_PREFIX) for pkg_path in path: - pkg_path = os.path.normpath(pkg_path) + pkg_path = os.path.realpath(pkg_path) # Fully resolve the given path, in case it contains symbolic links. if not pkg_path.startswith(SYS_PREFIX): # if the path does not start with sys._MEIPASS then it cannot be a bundled package. continue
{"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py b/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py\n--- a/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py\n+++ b/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py\n@@ -52,11 +52,16 @@\n yield pkgutil.ModuleInfo(importer, prefix + entry, is_pkg)\n else:\n # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook.\n- SYS_PREFIX = sys._MEIPASS + os.path.sep\n+ #\n+ # Use os.path.realpath() to fully resolve any symbolic links in sys._MEIPASS, in order to avoid path mis-matches\n+ # when the given search paths also contain symbolic links and are already fully resolved. See #6537 for an\n+ # example of such a problem with onefile build on macOS, where the temporary directory is placed under /var,\n+ # which is actually a symbolic link to /private/var.\n+ SYS_PREFIX = os.path.realpath(sys._MEIPASS) + os.path.sep\n SYS_PREFIXLEN = len(SYS_PREFIX)\n \n for pkg_path in path:\n- pkg_path = os.path.normpath(pkg_path)\n+ pkg_path = os.path.realpath(pkg_path) # Fully resolve the given path, in case it contains symbolic links.\n if not pkg_path.startswith(SYS_PREFIX):\n # if the path does not start with sys._MEIPASS then it cannot be a bundled package.\n continue\n", "issue": "pyi_rth_pkgutil.py hook might not work under MacOS in onefile mode\n## Description of the issue\r\nI'm using `iter_modules` in our cross-platform OpenSource Project [Specter-Desktop](https://github.com/cryptoadvance/specter-desktop) ([spec-file](https://github.com/cryptoadvance/specter-desktop/blob/master/pyinstaller/specterd.spec)).\r\n\r\nFor that, the [pyi_rth_pkgutil.py-hook](https://github.com/pyinstaller/pyinstaller/blob/v4.8/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py) is quite useful and solved my issue on the linux-platform. However, i have an issue with the MacOS-build at runtime:\r\n```\r\n$ ./dist/specterd server --config=DevelopmentConfig\r\nInitializing HWI...\r\n[2022-01-24 11:49:07,827] INFO in server: Configuration: cryptoadvance.specter.config.DevelopmentConfig\r\n[2022-01-24 11:49:07,828] INFO in specter_migrator: Initiated MigDataManager(/Users/thomasmuller/.specter_dev/migration_data.json events:3 execs:2 )\r\nTraceback (most recent call last):\r\n File \"specterd.py\", line 4, in <module>\r\n entry_point()\r\n File \"click/core.py\", line 829, in __call__\r\n File \"click/core.py\", line 782, in main\r\n File \"click/core.py\", line 1259, in invoke\r\n File \"click/core.py\", line 1066, in invoke\r\n File \"click/core.py\", line 610, in invoke\r\n File \"cryptoadvance/specter/cli/cli_server.py\", line 114, in server\r\n File \"cryptoadvance/specter/server.py\", line 104, in init_app\r\n File \"cryptoadvance/specter/util/specter_migrator.py\", line 125, in execute_migrations\r\n File \"cryptoadvance/specter/util/specter_migrator.py\", line 107, in plan_migration\r\n File \"cryptoadvance/specter/util/specter_migrator.py\", line 161, in get_migration_classes\r\n File \"PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py\", line 59, in _pyi_pkgutil_iter_modules\r\nAssertionError\r\n[20771] Failed to execute script 'specterd' due to unhandled exception!\r\n(.env) thomasmuller @ Thomass-Air.fritz.box \u279c pyinstaller git:(service_swan) \u2717\r\n```\r\n\r\nSo the relevant code is this:\r\n```\r\n SYS_PREFIX = sys._MEIPASS + os.path.sep\r\n SYS_PREFIXLEN = len(SYS_PREFIX)\r\n # Only single path is supported, and it must start with sys._MEIPASS.\r\n pkg_path = os.path.normpath(path[0])\r\n assert pkg_path.startswith(SYS_PREFIX)\r\n```\r\nSo i inserted some debugging code and figured out that:\r\n\r\n```\r\nSYS_PREFIX: /var/folders/bd/k__jfmz112sb1_dyz4rxwfzh0000gn/T/\r\npkg_path: /private/var/folders/bd/k__jfmz112sb1_dyz4rxwfzh0000gn/T/_MEIBqFxGW/cryptoadvance/specter/util/migrations\r\n```\r\nSo in combination with the comment above, it seems that MacOS is using \"multiple path\" and so MacOS is not supported here?\r\nOn the other hand, this [link](https://difyel.com/apple/macos/macos-directory-structure/#Other_directories) is claiming that `private/var` is only a symlink to `/var`. So i added this and this seem to have solved it?\r\n```\r\n if pkg_path.startswith(\"/private\"):\r\n pkg_path = pkg_path[8:]\r\n assert pkg_path.startswith(SYS_PREFIX)\r\n```\r\nIs this a valid fix? I might make a PR if someone can give me some guidance on this.\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.8```\r\n* Version of Python: <!-- e.g. 3.9.7 -->\r\n* Platform: OS X\r\n* How you installed Python: brew\r\n* Did you also try this on another platform? Does it work there? --> Yes, works on Linux\r\n\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2021, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n#\n# This rthook overrides pkgutil.iter_modules with custom implementation that uses PyInstaller's FrozenImporter to list\n# sub-modules embedded in the PYZ archive. The non-embedded modules (binary extensions, or .pyc modules in noarchive\n# build) are handled by original pkgutil iter_modules implementation (and consequently, python's FileFinder).\n#\n# The preferred way of adding support for iter_modules would be adding non-standard iter_modules() method to\n# FrozenImporter itself. However, that seems to work only for path entry finders (for use with sys.path_hooks), while\n# PyInstaller's FrozenImporter is registered as meta path finders (for use with sys.meta_path). Turning FrozenImporter\n# into path entry finder, would seemingly require the latter to support on-filesystem resources (e.g., extension\n# modules) in addition to PYZ-embedded ones.\n#\n# Therefore, we instead opt for overriding pkgutil.iter_modules with custom implementation that augments the output of\n# original implementation with contents of PYZ archive from FrozenImporter's TOC.\n\nimport os\nimport pkgutil\nimport sys\n\nfrom pyimod03_importers import FrozenImporter\n\n_orig_pkgutil_iter_modules = pkgutil.iter_modules\n\n\ndef _pyi_pkgutil_iter_modules(path=None, prefix=''):\n # Use original implementation to discover on-filesystem modules (binary extensions in regular builds, or both binary\n # extensions and compiled pyc modules in noarchive debug builds).\n yield from _orig_pkgutil_iter_modules(path, prefix)\n\n # Find the instance of PyInstaller's FrozenImporter.\n for importer in pkgutil.iter_importers():\n if isinstance(importer, FrozenImporter):\n break\n else:\n return\n\n if path is None:\n # Search for all top-level packages/modules. These will have no dots in their entry names.\n for entry in importer.toc:\n if entry.count('.') != 0:\n continue\n is_pkg = importer.is_package(entry)\n yield pkgutil.ModuleInfo(importer, prefix + entry, is_pkg)\n else:\n # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook.\n SYS_PREFIX = sys._MEIPASS + os.path.sep\n SYS_PREFIXLEN = len(SYS_PREFIX)\n\n for pkg_path in path:\n pkg_path = os.path.normpath(pkg_path)\n if not pkg_path.startswith(SYS_PREFIX):\n # if the path does not start with sys._MEIPASS then it cannot be a bundled package.\n continue\n # Construct package prefix from path...\n pkg_prefix = pkg_path[SYS_PREFIXLEN:]\n pkg_prefix = pkg_prefix.replace(os.path.sep, '.')\n # ... and ensure it ends with a dot (so we can directly filter out the package itself).\n if not pkg_prefix.endswith('.'):\n pkg_prefix += '.'\n pkg_prefix_len = len(pkg_prefix)\n\n for entry in importer.toc:\n if not entry.startswith(pkg_prefix):\n continue\n name = entry[pkg_prefix_len:]\n if name.count('.') != 0:\n continue\n is_pkg = importer.is_package(entry)\n yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)\n\n\npkgutil.iter_modules = _pyi_pkgutil_iter_modules\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2021, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n#\n# This rthook overrides pkgutil.iter_modules with custom implementation that uses PyInstaller's FrozenImporter to list\n# sub-modules embedded in the PYZ archive. The non-embedded modules (binary extensions, or .pyc modules in noarchive\n# build) are handled by original pkgutil iter_modules implementation (and consequently, python's FileFinder).\n#\n# The preferred way of adding support for iter_modules would be adding non-standard iter_modules() method to\n# FrozenImporter itself. However, that seems to work only for path entry finders (for use with sys.path_hooks), while\n# PyInstaller's FrozenImporter is registered as meta path finders (for use with sys.meta_path). Turning FrozenImporter\n# into path entry finder, would seemingly require the latter to support on-filesystem resources (e.g., extension\n# modules) in addition to PYZ-embedded ones.\n#\n# Therefore, we instead opt for overriding pkgutil.iter_modules with custom implementation that augments the output of\n# original implementation with contents of PYZ archive from FrozenImporter's TOC.\n\nimport os\nimport pkgutil\nimport sys\n\nfrom pyimod03_importers import FrozenImporter\n\n_orig_pkgutil_iter_modules = pkgutil.iter_modules\n\n\ndef _pyi_pkgutil_iter_modules(path=None, prefix=''):\n # Use original implementation to discover on-filesystem modules (binary extensions in regular builds, or both binary\n # extensions and compiled pyc modules in noarchive debug builds).\n yield from _orig_pkgutil_iter_modules(path, prefix)\n\n # Find the instance of PyInstaller's FrozenImporter.\n for importer in pkgutil.iter_importers():\n if isinstance(importer, FrozenImporter):\n break\n else:\n return\n\n if path is None:\n # Search for all top-level packages/modules. These will have no dots in their entry names.\n for entry in importer.toc:\n if entry.count('.') != 0:\n continue\n is_pkg = importer.is_package(entry)\n yield pkgutil.ModuleInfo(importer, prefix + entry, is_pkg)\n else:\n # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook.\n #\n # Use os.path.realpath() to fully resolve any symbolic links in sys._MEIPASS, in order to avoid path mis-matches\n # when the given search paths also contain symbolic links and are already fully resolved. See #6537 for an\n # example of such a problem with onefile build on macOS, where the temporary directory is placed under /var,\n # which is actually a symbolic link to /private/var.\n SYS_PREFIX = os.path.realpath(sys._MEIPASS) + os.path.sep\n SYS_PREFIXLEN = len(SYS_PREFIX)\n\n for pkg_path in path:\n pkg_path = os.path.realpath(pkg_path) # Fully resolve the given path, in case it contains symbolic links.\n if not pkg_path.startswith(SYS_PREFIX):\n # if the path does not start with sys._MEIPASS then it cannot be a bundled package.\n continue\n # Construct package prefix from path...\n pkg_prefix = pkg_path[SYS_PREFIXLEN:]\n pkg_prefix = pkg_prefix.replace(os.path.sep, '.')\n # ... and ensure it ends with a dot (so we can directly filter out the package itself).\n if not pkg_prefix.endswith('.'):\n pkg_prefix += '.'\n pkg_prefix_len = len(pkg_prefix)\n\n for entry in importer.toc:\n if not entry.startswith(pkg_prefix):\n continue\n name = entry[pkg_prefix_len:]\n if name.count('.') != 0:\n continue\n is_pkg = importer.is_package(entry)\n yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)\n\n\npkgutil.iter_modules = _pyi_pkgutil_iter_modules\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py"}]}
2,191
349
gh_patches_debug_26224
rasdani/github-patches
git_diff
mirumee__ariadne-24
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `add_resolve_functions_to_schema` should support Scalars parse_value and parse_literal Currently Ariadne Scalar support is limited to serializing python types to JSON before returning them to client, but we also want to support using custom scalars for input. Our `add_resolve_functions_to_scalar` utility could support following use-cases: Code below results in one-way only scalar: - `type_defs = {'Scalar': {'serialize': callable}}` And this code results in two-way scalar: - `type_defs = {'Scalar': {'serialize': callable, 'parse_value': callable, 'parse_literal': callable}}` - explicit syntax for two-directional scalar. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ariadne/resolvers.py` Content: ``` 1 from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema 2 from graphql.execution.base import ResolveInfo 3 4 5 def resolve_parent_field(parent, name: str): 6 if isinstance(parent, dict): 7 return parent.get(name) 8 return getattr(parent, name, None) 9 10 11 def default_resolver(parent, info: ResolveInfo): 12 return resolve_parent_field(parent, info.field_name) 13 14 15 def resolve_to(name: str): 16 def resolver(parent, *_): 17 return resolve_parent_field(parent, name) 18 19 return resolver 20 21 22 def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict): 23 for type_name, type_object in schema.get_type_map().items(): 24 if isinstance(type_object, GraphQLObjectType): 25 add_resolve_functions_to_object(type_name, type_object, resolvers) 26 if isinstance(type_object, GraphQLScalarType): 27 add_resolve_function_to_scalar(type_name, type_object, resolvers) 28 29 30 def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict): 31 type_resolver = resolvers.get(name, {}) 32 for field_name, field_object in obj.fields.items(): 33 field_resolver = type_resolver.get(field_name, default_resolver) 34 field_object.resolver = field_resolver 35 36 37 def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict): 38 serializer = resolvers.get(name, obj.serialize) 39 obj.serialize = serializer 40 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py --- a/ariadne/resolvers.py +++ b/ariadne/resolvers.py @@ -24,16 +24,24 @@ if isinstance(type_object, GraphQLObjectType): add_resolve_functions_to_object(type_name, type_object, resolvers) if isinstance(type_object, GraphQLScalarType): - add_resolve_function_to_scalar(type_name, type_object, resolvers) + add_resolve_functions_to_scalar(type_name, type_object, resolvers) def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict): - type_resolver = resolvers.get(name, {}) + type_resolvers = resolvers.get(name, {}) for field_name, field_object in obj.fields.items(): - field_resolver = type_resolver.get(field_name, default_resolver) + field_resolver = type_resolvers.get(field_name, default_resolver) field_object.resolver = field_resolver -def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict): - serializer = resolvers.get(name, obj.serialize) - obj.serialize = serializer +def add_resolve_functions_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict): + scalar_resolvers = resolvers.get(name, {}) + + serialize = scalar_resolvers.get("serialize", obj.serialize) + obj.serialize = serialize + + parse_literal = scalar_resolvers.get("parse_literal", obj.parse_literal) + obj.parse_literal = parse_literal + + parse_value = scalar_resolvers.get("parse_value", obj.parse_value) + obj.parse_value = parse_value
{"golden_diff": "diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py\n--- a/ariadne/resolvers.py\n+++ b/ariadne/resolvers.py\n@@ -24,16 +24,24 @@\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n- add_resolve_function_to_scalar(type_name, type_object, resolvers)\n+ add_resolve_functions_to_scalar(type_name, type_object, resolvers)\n \n \n def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n- type_resolver = resolvers.get(name, {})\n+ type_resolvers = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n- field_resolver = type_resolver.get(field_name, default_resolver)\n+ field_resolver = type_resolvers.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n \n \n-def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n- serializer = resolvers.get(name, obj.serialize)\n- obj.serialize = serializer\n+def add_resolve_functions_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n+ scalar_resolvers = resolvers.get(name, {})\n+\n+ serialize = scalar_resolvers.get(\"serialize\", obj.serialize)\n+ obj.serialize = serialize\n+\n+ parse_literal = scalar_resolvers.get(\"parse_literal\", obj.parse_literal)\n+ obj.parse_literal = parse_literal\n+\n+ parse_value = scalar_resolvers.get(\"parse_value\", obj.parse_value)\n+ obj.parse_value = parse_value\n", "issue": "`add_resolve_functions_to_schema` should support Scalars parse_value and parse_literal\nCurrently Ariadne Scalar support is limited to serializing python types to JSON before returning them to client, but we also want to support using custom scalars for input.\r\n\r\nOur `add_resolve_functions_to_scalar` utility could support following use-cases:\r\n\r\nCode below results in one-way only scalar:\r\n\r\n- `type_defs = {'Scalar': {'serialize': callable}}`\r\n\r\nAnd this code results in two-way scalar:\r\n\r\n- `type_defs = {'Scalar': {'serialize': callable, 'parse_value': callable, 'parse_literal': callable}}` - explicit syntax for two-directional scalar.\r\n\n", "before_files": [{"content": "from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema\nfrom graphql.execution.base import ResolveInfo\n\n\ndef resolve_parent_field(parent, name: str):\n if isinstance(parent, dict):\n return parent.get(name)\n return getattr(parent, name, None)\n\n\ndef default_resolver(parent, info: ResolveInfo):\n return resolve_parent_field(parent, info.field_name)\n\n\ndef resolve_to(name: str):\n def resolver(parent, *_):\n return resolve_parent_field(parent, name)\n\n return resolver\n\n\ndef add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n for type_name, type_object in schema.get_type_map().items():\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n add_resolve_function_to_scalar(type_name, type_object, resolvers)\n\n\ndef add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n type_resolver = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n field_resolver = type_resolver.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n\n\ndef add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n serializer = resolvers.get(name, obj.serialize)\n obj.serialize = serializer\n", "path": "ariadne/resolvers.py"}], "after_files": [{"content": "from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema\nfrom graphql.execution.base import ResolveInfo\n\n\ndef resolve_parent_field(parent, name: str):\n if isinstance(parent, dict):\n return parent.get(name)\n return getattr(parent, name, None)\n\n\ndef default_resolver(parent, info: ResolveInfo):\n return resolve_parent_field(parent, info.field_name)\n\n\ndef resolve_to(name: str):\n def resolver(parent, *_):\n return resolve_parent_field(parent, name)\n\n return resolver\n\n\ndef add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n for type_name, type_object in schema.get_type_map().items():\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n add_resolve_functions_to_scalar(type_name, type_object, resolvers)\n\n\ndef add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n type_resolvers = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n field_resolver = type_resolvers.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n\n\ndef add_resolve_functions_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n scalar_resolvers = resolvers.get(name, {})\n\n serialize = scalar_resolvers.get(\"serialize\", obj.serialize)\n obj.serialize = serialize\n\n parse_literal = scalar_resolvers.get(\"parse_literal\", obj.parse_literal)\n obj.parse_literal = parse_literal\n\n parse_value = scalar_resolvers.get(\"parse_value\", obj.parse_value)\n obj.parse_value = parse_value\n", "path": "ariadne/resolvers.py"}]}
762
363
gh_patches_debug_25909
rasdani/github-patches
git_diff
pypi__warehouse-13144
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OIDC: Create an audience route Creating this as a reminder for myself. The OIDC minting routes should include an `/_/oidc/audience` endpoint that just returns the OIDC audience expected by any particular Warehouse instance. xref #12465 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `warehouse/oidc/views.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import time 14 15 from pydantic import BaseModel, StrictStr, ValidationError 16 from pyramid.view import view_config 17 from sqlalchemy import func 18 19 from warehouse.admin.flags import AdminFlagValue 20 from warehouse.email import send_pending_oidc_publisher_invalidated_email 21 from warehouse.events.tags import EventTag 22 from warehouse.macaroons import caveats 23 from warehouse.macaroons.interfaces import IMacaroonService 24 from warehouse.oidc.interfaces import IOIDCPublisherService 25 from warehouse.oidc.models import PendingOIDCPublisher 26 from warehouse.packaging.interfaces import IProjectService 27 from warehouse.packaging.models import ProjectFactory 28 from warehouse.rate_limiting.interfaces import IRateLimiter 29 30 31 class TokenPayload(BaseModel): 32 token: StrictStr 33 34 35 def _ratelimiters(request): 36 return { 37 "user.oidc": request.find_service( 38 IRateLimiter, name="user_oidc.publisher.register" 39 ), 40 "ip.oidc": request.find_service( 41 IRateLimiter, name="ip_oidc.publisher.register" 42 ), 43 } 44 45 46 @view_config( 47 route_name="oidc.mint_token", 48 require_methods=["POST"], 49 renderer="json", 50 require_csrf=False, 51 has_translations=False, 52 ) 53 def mint_token_from_oidc(request): 54 def _invalid(errors): 55 request.response.status = 422 56 return {"message": "Token request failed", "errors": errors} 57 58 oidc_enabled = request.registry.settings[ 59 "warehouse.oidc.enabled" 60 ] and not request.flags.enabled(AdminFlagValue.DISALLOW_OIDC) 61 if not oidc_enabled: 62 return _invalid( 63 errors=[ 64 { 65 "code": "not-enabled", 66 "description": "OIDC functionality not enabled", 67 } 68 ] 69 ) 70 71 try: 72 payload = TokenPayload.parse_raw(request.body) 73 unverified_jwt = payload.token 74 except ValidationError as exc: 75 return _invalid(errors=[{"code": "invalid-payload", "description": str(exc)}]) 76 77 # For the time being, GitHub is our only OIDC publisher. 78 # In the future, this should locate the correct service based on an 79 # identifier in the request body. 80 oidc_service = request.find_service(IOIDCPublisherService, name="github") 81 claims = oidc_service.verify_jwt_signature(unverified_jwt) 82 if not claims: 83 return _invalid( 84 errors=[ 85 {"code": "invalid-token", "description": "malformed or invalid token"} 86 ] 87 ) 88 89 # First, try to find a pending publisher. 90 pending_publisher = oidc_service.find_publisher(claims, pending=True) 91 if pending_publisher is not None: 92 factory = ProjectFactory(request) 93 94 # If the project already exists, this pending publisher is no longer 95 # valid and needs to be removed. 96 # NOTE: This is mostly a sanity check, since we dispose of invalidated 97 # pending publishers below. 98 if pending_publisher.project_name in factory: 99 request.db.delete(pending_publisher) 100 return _invalid( 101 errors=[ 102 { 103 "code": "invalid-pending-publisher", 104 "description": "valid token, but project already exists", 105 } 106 ] 107 ) 108 109 # Create the new project, and reify the pending publisher against it. 110 project_service = request.find_service(IProjectService) 111 new_project = project_service.create_project( 112 pending_publisher.project_name, 113 pending_publisher.added_by, 114 ratelimited=False, 115 ) 116 oidc_service.reify_pending_publisher(pending_publisher, new_project) 117 118 # Successfully converting a pending publisher into a normal publisher 119 # is a positive signal, so we reset the associated ratelimits. 120 ratelimiters = _ratelimiters(request) 121 ratelimiters["user.oidc"].clear(pending_publisher.added_by.id) 122 ratelimiters["ip.oidc"].clear(request.remote_addr) 123 124 # There might be other pending publishers for the same project name, 125 # which we've now invalidated by creating the project. These would 126 # be disposed of on use, but we explicitly dispose of them here while 127 # also sending emails to their owners. 128 stale_pending_publishers = ( 129 request.db.query(PendingOIDCPublisher) 130 .filter( 131 func.normalize_pep426_name(PendingOIDCPublisher.project_name) 132 == func.normalize_pep426_name(pending_publisher.project_name) 133 ) 134 .all() 135 ) 136 for stale_publisher in stale_pending_publishers: 137 send_pending_oidc_publisher_invalidated_email( 138 request, 139 stale_publisher.added_by, 140 project_name=stale_publisher.project_name, 141 ) 142 request.db.delete(stale_publisher) 143 144 # We either don't have a pending OIDC publisher, or we *did* 145 # have one and we've just converted it. Either way, look for a full publisher 146 # to actually do the macaroon minting with. 147 publisher = oidc_service.find_publisher(claims, pending=False) 148 if not publisher: 149 return _invalid( 150 errors=[ 151 { 152 "code": "invalid-publisher", 153 "description": "valid token, but no corresponding publisher", 154 } 155 ] 156 ) 157 158 # At this point, we've verified that the given JWT is valid for the given 159 # project. All we need to do is mint a new token. 160 # NOTE: For OIDC-minted API tokens, the Macaroon's description string 161 # is purely an implementation detail and is not displayed to the user. 162 macaroon_service = request.find_service(IMacaroonService, context=None) 163 not_before = int(time.time()) 164 expires_at = not_before + 900 165 serialized, dm = macaroon_service.create_macaroon( 166 request.domain, 167 f"OpenID token: {publisher.publisher_url} ({not_before})", 168 [ 169 caveats.OIDCPublisher(oidc_publisher_id=str(publisher.id)), 170 caveats.ProjectID(project_ids=[str(p.id) for p in publisher.projects]), 171 caveats.Expiration(expires_at=expires_at, not_before=not_before), 172 ], 173 oidc_publisher_id=publisher.id, 174 ) 175 for project in publisher.projects: 176 project.record_event( 177 tag=EventTag.Project.ShortLivedAPITokenAdded, 178 ip_address=request.remote_addr, 179 additional={ 180 "expires": expires_at, 181 "publisher_name": publisher.publisher_name, 182 "publisher_url": publisher.publisher_url, 183 }, 184 ) 185 return {"success": True, "token": serialized} 186 ``` Path: `warehouse/oidc/__init__.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 from warehouse.oidc.interfaces import IOIDCPublisherService 14 from warehouse.oidc.services import OIDCPublisherServiceFactory 15 from warehouse.oidc.utils import GITHUB_OIDC_ISSUER_URL 16 17 18 def includeme(config): 19 oidc_publisher_service_class = config.maybe_dotted( 20 config.registry.settings["oidc.backend"] 21 ) 22 23 config.register_service_factory( 24 OIDCPublisherServiceFactory( 25 publisher="github", 26 issuer_url=GITHUB_OIDC_ISSUER_URL, 27 service_class=oidc_publisher_service_class, 28 ), 29 IOIDCPublisherService, 30 name="github", 31 ) 32 33 # During deployments, we separate auth routes into their own subdomain 34 # to simplify caching exclusion. 35 auth = config.get_settings().get("auth.domain") 36 37 config.add_route("oidc.mint_token", "/_/oidc/github/mint-token", domain=auth) 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/warehouse/oidc/__init__.py b/warehouse/oidc/__init__.py --- a/warehouse/oidc/__init__.py +++ b/warehouse/oidc/__init__.py @@ -34,4 +34,5 @@ # to simplify caching exclusion. auth = config.get_settings().get("auth.domain") - config.add_route("oidc.mint_token", "/_/oidc/github/mint-token", domain=auth) + config.add_route("oidc.audience", "/_/oidc/audience", domain=auth) + config.add_route("oidc.github.mint_token", "/_/oidc/github/mint-token", domain=auth) diff --git a/warehouse/oidc/views.py b/warehouse/oidc/views.py --- a/warehouse/oidc/views.py +++ b/warehouse/oidc/views.py @@ -13,6 +13,7 @@ import time from pydantic import BaseModel, StrictStr, ValidationError +from pyramid.response import Response from pyramid.view import view_config from sqlalchemy import func @@ -44,7 +45,26 @@ @view_config( - route_name="oidc.mint_token", + route_name="oidc.audience", + require_methods=["GET"], + renderer="json", + require_csrf=False, + has_translations=False, +) +def oidc_audience(request): + oidc_enabled = request.registry.settings[ + "warehouse.oidc.enabled" + ] and not request.flags.enabled(AdminFlagValue.DISALLOW_OIDC) + + if not oidc_enabled: + return Response(status=403, json={"message": "OIDC functionality not enabled"}) + + audience = request.registry.settings["warehouse.oidc.audience"] + return {"audience": audience} + + +@view_config( + route_name="oidc.github.mint_token", require_methods=["POST"], renderer="json", require_csrf=False,
{"golden_diff": "diff --git a/warehouse/oidc/__init__.py b/warehouse/oidc/__init__.py\n--- a/warehouse/oidc/__init__.py\n+++ b/warehouse/oidc/__init__.py\n@@ -34,4 +34,5 @@\n # to simplify caching exclusion.\n auth = config.get_settings().get(\"auth.domain\")\n \n- config.add_route(\"oidc.mint_token\", \"/_/oidc/github/mint-token\", domain=auth)\n+ config.add_route(\"oidc.audience\", \"/_/oidc/audience\", domain=auth)\n+ config.add_route(\"oidc.github.mint_token\", \"/_/oidc/github/mint-token\", domain=auth)\ndiff --git a/warehouse/oidc/views.py b/warehouse/oidc/views.py\n--- a/warehouse/oidc/views.py\n+++ b/warehouse/oidc/views.py\n@@ -13,6 +13,7 @@\n import time\n \n from pydantic import BaseModel, StrictStr, ValidationError\n+from pyramid.response import Response\n from pyramid.view import view_config\n from sqlalchemy import func\n \n@@ -44,7 +45,26 @@\n \n \n @view_config(\n- route_name=\"oidc.mint_token\",\n+ route_name=\"oidc.audience\",\n+ require_methods=[\"GET\"],\n+ renderer=\"json\",\n+ require_csrf=False,\n+ has_translations=False,\n+)\n+def oidc_audience(request):\n+ oidc_enabled = request.registry.settings[\n+ \"warehouse.oidc.enabled\"\n+ ] and not request.flags.enabled(AdminFlagValue.DISALLOW_OIDC)\n+\n+ if not oidc_enabled:\n+ return Response(status=403, json={\"message\": \"OIDC functionality not enabled\"})\n+\n+ audience = request.registry.settings[\"warehouse.oidc.audience\"]\n+ return {\"audience\": audience}\n+\n+\n+@view_config(\n+ route_name=\"oidc.github.mint_token\",\n require_methods=[\"POST\"],\n renderer=\"json\",\n require_csrf=False,\n", "issue": "OIDC: Create an audience route\nCreating this as a reminder for myself.\r\n\r\nThe OIDC minting routes should include an `/_/oidc/audience` endpoint that just returns the OIDC audience expected by any particular Warehouse instance.\r\n\r\nxref #12465 \n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\n\nfrom pydantic import BaseModel, StrictStr, ValidationError\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\n\nfrom warehouse.admin.flags import AdminFlagValue\nfrom warehouse.email import send_pending_oidc_publisher_invalidated_email\nfrom warehouse.events.tags import EventTag\nfrom warehouse.macaroons import caveats\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.oidc.interfaces import IOIDCPublisherService\nfrom warehouse.oidc.models import PendingOIDCPublisher\nfrom warehouse.packaging.interfaces import IProjectService\nfrom warehouse.packaging.models import ProjectFactory\nfrom warehouse.rate_limiting.interfaces import IRateLimiter\n\n\nclass TokenPayload(BaseModel):\n token: StrictStr\n\n\ndef _ratelimiters(request):\n return {\n \"user.oidc\": request.find_service(\n IRateLimiter, name=\"user_oidc.publisher.register\"\n ),\n \"ip.oidc\": request.find_service(\n IRateLimiter, name=\"ip_oidc.publisher.register\"\n ),\n }\n\n\n@view_config(\n route_name=\"oidc.mint_token\",\n require_methods=[\"POST\"],\n renderer=\"json\",\n require_csrf=False,\n has_translations=False,\n)\ndef mint_token_from_oidc(request):\n def _invalid(errors):\n request.response.status = 422\n return {\"message\": \"Token request failed\", \"errors\": errors}\n\n oidc_enabled = request.registry.settings[\n \"warehouse.oidc.enabled\"\n ] and not request.flags.enabled(AdminFlagValue.DISALLOW_OIDC)\n if not oidc_enabled:\n return _invalid(\n errors=[\n {\n \"code\": \"not-enabled\",\n \"description\": \"OIDC functionality not enabled\",\n }\n ]\n )\n\n try:\n payload = TokenPayload.parse_raw(request.body)\n unverified_jwt = payload.token\n except ValidationError as exc:\n return _invalid(errors=[{\"code\": \"invalid-payload\", \"description\": str(exc)}])\n\n # For the time being, GitHub is our only OIDC publisher.\n # In the future, this should locate the correct service based on an\n # identifier in the request body.\n oidc_service = request.find_service(IOIDCPublisherService, name=\"github\")\n claims = oidc_service.verify_jwt_signature(unverified_jwt)\n if not claims:\n return _invalid(\n errors=[\n {\"code\": \"invalid-token\", \"description\": \"malformed or invalid token\"}\n ]\n )\n\n # First, try to find a pending publisher.\n pending_publisher = oidc_service.find_publisher(claims, pending=True)\n if pending_publisher is not None:\n factory = ProjectFactory(request)\n\n # If the project already exists, this pending publisher is no longer\n # valid and needs to be removed.\n # NOTE: This is mostly a sanity check, since we dispose of invalidated\n # pending publishers below.\n if pending_publisher.project_name in factory:\n request.db.delete(pending_publisher)\n return _invalid(\n errors=[\n {\n \"code\": \"invalid-pending-publisher\",\n \"description\": \"valid token, but project already exists\",\n }\n ]\n )\n\n # Create the new project, and reify the pending publisher against it.\n project_service = request.find_service(IProjectService)\n new_project = project_service.create_project(\n pending_publisher.project_name,\n pending_publisher.added_by,\n ratelimited=False,\n )\n oidc_service.reify_pending_publisher(pending_publisher, new_project)\n\n # Successfully converting a pending publisher into a normal publisher\n # is a positive signal, so we reset the associated ratelimits.\n ratelimiters = _ratelimiters(request)\n ratelimiters[\"user.oidc\"].clear(pending_publisher.added_by.id)\n ratelimiters[\"ip.oidc\"].clear(request.remote_addr)\n\n # There might be other pending publishers for the same project name,\n # which we've now invalidated by creating the project. These would\n # be disposed of on use, but we explicitly dispose of them here while\n # also sending emails to their owners.\n stale_pending_publishers = (\n request.db.query(PendingOIDCPublisher)\n .filter(\n func.normalize_pep426_name(PendingOIDCPublisher.project_name)\n == func.normalize_pep426_name(pending_publisher.project_name)\n )\n .all()\n )\n for stale_publisher in stale_pending_publishers:\n send_pending_oidc_publisher_invalidated_email(\n request,\n stale_publisher.added_by,\n project_name=stale_publisher.project_name,\n )\n request.db.delete(stale_publisher)\n\n # We either don't have a pending OIDC publisher, or we *did*\n # have one and we've just converted it. Either way, look for a full publisher\n # to actually do the macaroon minting with.\n publisher = oidc_service.find_publisher(claims, pending=False)\n if not publisher:\n return _invalid(\n errors=[\n {\n \"code\": \"invalid-publisher\",\n \"description\": \"valid token, but no corresponding publisher\",\n }\n ]\n )\n\n # At this point, we've verified that the given JWT is valid for the given\n # project. All we need to do is mint a new token.\n # NOTE: For OIDC-minted API tokens, the Macaroon's description string\n # is purely an implementation detail and is not displayed to the user.\n macaroon_service = request.find_service(IMacaroonService, context=None)\n not_before = int(time.time())\n expires_at = not_before + 900\n serialized, dm = macaroon_service.create_macaroon(\n request.domain,\n f\"OpenID token: {publisher.publisher_url} ({not_before})\",\n [\n caveats.OIDCPublisher(oidc_publisher_id=str(publisher.id)),\n caveats.ProjectID(project_ids=[str(p.id) for p in publisher.projects]),\n caveats.Expiration(expires_at=expires_at, not_before=not_before),\n ],\n oidc_publisher_id=publisher.id,\n )\n for project in publisher.projects:\n project.record_event(\n tag=EventTag.Project.ShortLivedAPITokenAdded,\n ip_address=request.remote_addr,\n additional={\n \"expires\": expires_at,\n \"publisher_name\": publisher.publisher_name,\n \"publisher_url\": publisher.publisher_url,\n },\n )\n return {\"success\": True, \"token\": serialized}\n", "path": "warehouse/oidc/views.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom warehouse.oidc.interfaces import IOIDCPublisherService\nfrom warehouse.oidc.services import OIDCPublisherServiceFactory\nfrom warehouse.oidc.utils import GITHUB_OIDC_ISSUER_URL\n\n\ndef includeme(config):\n oidc_publisher_service_class = config.maybe_dotted(\n config.registry.settings[\"oidc.backend\"]\n )\n\n config.register_service_factory(\n OIDCPublisherServiceFactory(\n publisher=\"github\",\n issuer_url=GITHUB_OIDC_ISSUER_URL,\n service_class=oidc_publisher_service_class,\n ),\n IOIDCPublisherService,\n name=\"github\",\n )\n\n # During deployments, we separate auth routes into their own subdomain\n # to simplify caching exclusion.\n auth = config.get_settings().get(\"auth.domain\")\n\n config.add_route(\"oidc.mint_token\", \"/_/oidc/github/mint-token\", domain=auth)\n", "path": "warehouse/oidc/__init__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\n\nfrom pydantic import BaseModel, StrictStr, ValidationError\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\n\nfrom warehouse.admin.flags import AdminFlagValue\nfrom warehouse.email import send_pending_oidc_publisher_invalidated_email\nfrom warehouse.events.tags import EventTag\nfrom warehouse.macaroons import caveats\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.oidc.interfaces import IOIDCPublisherService\nfrom warehouse.oidc.models import PendingOIDCPublisher\nfrom warehouse.packaging.interfaces import IProjectService\nfrom warehouse.packaging.models import ProjectFactory\nfrom warehouse.rate_limiting.interfaces import IRateLimiter\n\n\nclass TokenPayload(BaseModel):\n token: StrictStr\n\n\ndef _ratelimiters(request):\n return {\n \"user.oidc\": request.find_service(\n IRateLimiter, name=\"user_oidc.publisher.register\"\n ),\n \"ip.oidc\": request.find_service(\n IRateLimiter, name=\"ip_oidc.publisher.register\"\n ),\n }\n\n\n@view_config(\n route_name=\"oidc.audience\",\n require_methods=[\"GET\"],\n renderer=\"json\",\n require_csrf=False,\n has_translations=False,\n)\ndef oidc_audience(request):\n oidc_enabled = request.registry.settings[\n \"warehouse.oidc.enabled\"\n ] and not request.flags.enabled(AdminFlagValue.DISALLOW_OIDC)\n\n if not oidc_enabled:\n return Response(status=403, json={\"message\": \"OIDC functionality not enabled\"})\n\n audience = request.registry.settings[\"warehouse.oidc.audience\"]\n return {\"audience\": audience}\n\n\n@view_config(\n route_name=\"oidc.github.mint_token\",\n require_methods=[\"POST\"],\n renderer=\"json\",\n require_csrf=False,\n has_translations=False,\n)\ndef mint_token_from_oidc(request):\n def _invalid(errors):\n request.response.status = 422\n return {\"message\": \"Token request failed\", \"errors\": errors}\n\n oidc_enabled = request.registry.settings[\n \"warehouse.oidc.enabled\"\n ] and not request.flags.enabled(AdminFlagValue.DISALLOW_OIDC)\n if not oidc_enabled:\n return _invalid(\n errors=[\n {\n \"code\": \"not-enabled\",\n \"description\": \"OIDC functionality not enabled\",\n }\n ]\n )\n\n try:\n payload = TokenPayload.parse_raw(request.body)\n unverified_jwt = payload.token\n except ValidationError as exc:\n return _invalid(errors=[{\"code\": \"invalid-payload\", \"description\": str(exc)}])\n\n # For the time being, GitHub is our only OIDC publisher.\n # In the future, this should locate the correct service based on an\n # identifier in the request body.\n oidc_service = request.find_service(IOIDCPublisherService, name=\"github\")\n claims = oidc_service.verify_jwt_signature(unverified_jwt)\n if not claims:\n return _invalid(\n errors=[\n {\"code\": \"invalid-token\", \"description\": \"malformed or invalid token\"}\n ]\n )\n\n # First, try to find a pending publisher.\n pending_publisher = oidc_service.find_publisher(claims, pending=True)\n if pending_publisher is not None:\n factory = ProjectFactory(request)\n\n # If the project already exists, this pending publisher is no longer\n # valid and needs to be removed.\n # NOTE: This is mostly a sanity check, since we dispose of invalidated\n # pending publishers below.\n if pending_publisher.project_name in factory:\n request.db.delete(pending_publisher)\n return _invalid(\n errors=[\n {\n \"code\": \"invalid-pending-publisher\",\n \"description\": \"valid token, but project already exists\",\n }\n ]\n )\n\n # Create the new project, and reify the pending publisher against it.\n project_service = request.find_service(IProjectService)\n new_project = project_service.create_project(\n pending_publisher.project_name,\n pending_publisher.added_by,\n ratelimited=False,\n )\n oidc_service.reify_pending_publisher(pending_publisher, new_project)\n\n # Successfully converting a pending publisher into a normal publisher\n # is a positive signal, so we reset the associated ratelimits.\n ratelimiters = _ratelimiters(request)\n ratelimiters[\"user.oidc\"].clear(pending_publisher.added_by.id)\n ratelimiters[\"ip.oidc\"].clear(request.remote_addr)\n\n # There might be other pending publishers for the same project name,\n # which we've now invalidated by creating the project. These would\n # be disposed of on use, but we explicitly dispose of them here while\n # also sending emails to their owners.\n stale_pending_publishers = (\n request.db.query(PendingOIDCPublisher)\n .filter(\n func.normalize_pep426_name(PendingOIDCPublisher.project_name)\n == func.normalize_pep426_name(pending_publisher.project_name)\n )\n .all()\n )\n for stale_publisher in stale_pending_publishers:\n send_pending_oidc_publisher_invalidated_email(\n request,\n stale_publisher.added_by,\n project_name=stale_publisher.project_name,\n )\n request.db.delete(stale_publisher)\n\n # We either don't have a pending OIDC publisher, or we *did*\n # have one and we've just converted it. Either way, look for a full publisher\n # to actually do the macaroon minting with.\n publisher = oidc_service.find_publisher(claims, pending=False)\n if not publisher:\n return _invalid(\n errors=[\n {\n \"code\": \"invalid-publisher\",\n \"description\": \"valid token, but no corresponding publisher\",\n }\n ]\n )\n\n # At this point, we've verified that the given JWT is valid for the given\n # project. All we need to do is mint a new token.\n # NOTE: For OIDC-minted API tokens, the Macaroon's description string\n # is purely an implementation detail and is not displayed to the user.\n macaroon_service = request.find_service(IMacaroonService, context=None)\n not_before = int(time.time())\n expires_at = not_before + 900\n serialized, dm = macaroon_service.create_macaroon(\n request.domain,\n f\"OpenID token: {publisher.publisher_url} ({not_before})\",\n [\n caveats.OIDCPublisher(oidc_publisher_id=str(publisher.id)),\n caveats.ProjectID(project_ids=[str(p.id) for p in publisher.projects]),\n caveats.Expiration(expires_at=expires_at, not_before=not_before),\n ],\n oidc_publisher_id=publisher.id,\n )\n for project in publisher.projects:\n project.record_event(\n tag=EventTag.Project.ShortLivedAPITokenAdded,\n ip_address=request.remote_addr,\n additional={\n \"expires\": expires_at,\n \"publisher_name\": publisher.publisher_name,\n \"publisher_url\": publisher.publisher_url,\n },\n )\n return {\"success\": True, \"token\": serialized}\n", "path": "warehouse/oidc/views.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom warehouse.oidc.interfaces import IOIDCPublisherService\nfrom warehouse.oidc.services import OIDCPublisherServiceFactory\nfrom warehouse.oidc.utils import GITHUB_OIDC_ISSUER_URL\n\n\ndef includeme(config):\n oidc_publisher_service_class = config.maybe_dotted(\n config.registry.settings[\"oidc.backend\"]\n )\n\n config.register_service_factory(\n OIDCPublisherServiceFactory(\n publisher=\"github\",\n issuer_url=GITHUB_OIDC_ISSUER_URL,\n service_class=oidc_publisher_service_class,\n ),\n IOIDCPublisherService,\n name=\"github\",\n )\n\n # During deployments, we separate auth routes into their own subdomain\n # to simplify caching exclusion.\n auth = config.get_settings().get(\"auth.domain\")\n\n config.add_route(\"oidc.audience\", \"/_/oidc/audience\", domain=auth)\n config.add_route(\"oidc.github.mint_token\", \"/_/oidc/github/mint-token\", domain=auth)\n", "path": "warehouse/oidc/__init__.py"}]}
2,671
447
gh_patches_debug_26572
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-1926
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Converting from half precision to `bfloat16` in Pearson correlation coefficient cause numerical errors ## 🐛 Bug The lack of half-precision op support on CPUs has lead to the decision to convert to `bfloat16` in the calculation of Pearson correlation coefficient. (#1813) However this leads to broadcasting errors when the values are (meaningfully) small. ### To Reproduce ```py >>> import torch >>> a = torch.rand((32,), dtype=torch.half) >>> b = torch.rand((32,), dtype=torch.half) >>> from torchmetrics.functional import pearson_corrcoef >>> pearson_corrcoef(a, b) tensor(0.2240) >>> pearson_corrcoef(a * 1e-1, b * 1e-1) tensor(0.2251) >>> pearson_corrcoef(a * 1e-2, b * 1e-2) tensor(0.2213) >>> pearson_corrcoef(a * 1e-3, b * 1e-3) tensor(0.) >>> pearson_corrcoef(a * 1e-4, b * 1e-4) tensor(nan) ``` Which generally makes sense since there are only 5 exponential bits in `float16`, and some are lost in the operations. However this is not obvious during debugging since 0.001 is not *that* small... ### Expected behavior A warning when the dynamic range is problematic, such as [what SciPy does](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html#scipy-stats-pearsonr). Or there may be some way to preserve more precision? ### Environment - TorchMetrics version (pypi v1.0.1): - Python & PyTorch Version (3.10, 2.0.1): - Any other relevant information such as OS (tested on Linux_x64 and macOS_arm64): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/functional/regression/pearson.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Tuple 15 16 import torch 17 from torch import Tensor 18 19 from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs 20 from torchmetrics.utilities.checks import _check_same_shape 21 22 23 def _pearson_corrcoef_update( 24 preds: Tensor, 25 target: Tensor, 26 mean_x: Tensor, 27 mean_y: Tensor, 28 var_x: Tensor, 29 var_y: Tensor, 30 corr_xy: Tensor, 31 n_prior: Tensor, 32 num_outputs: int, 33 ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: 34 """Update and returns variables required to compute Pearson Correlation Coefficient. 35 36 Check for same shape of input tensors. 37 38 Args: 39 preds: estimated scores 40 target: ground truth scores 41 mean_x: current mean estimate of x tensor 42 mean_y: current mean estimate of y tensor 43 var_x: current variance estimate of x tensor 44 var_y: current variance estimate of y tensor 45 corr_xy: current covariance estimate between x and y tensor 46 n_prior: current number of observed observations 47 num_outputs: Number of outputs in multioutput setting 48 49 """ 50 # Data checking 51 _check_same_shape(preds, target) 52 _check_data_shape_to_num_outputs(preds, target, num_outputs) 53 cond = n_prior.mean() > 0 54 55 n_obs = preds.shape[0] 56 if cond: 57 mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs) 58 my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs) 59 else: 60 mx_new = preds.mean(0) 61 my_new = target.mean(0) 62 63 n_prior += n_obs 64 65 if cond: 66 var_x += ((preds - mx_new) * (preds - mean_x)).sum(0) 67 var_y += ((target - my_new) * (target - mean_y)).sum(0) 68 69 else: 70 var_x += preds.var(0) * (n_obs - 1) 71 var_y += target.var(0) * (n_obs - 1) 72 corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0) 73 mean_x = mx_new 74 mean_y = my_new 75 76 return mean_x, mean_y, var_x, var_y, corr_xy, n_prior 77 78 79 def _pearson_corrcoef_compute( 80 var_x: Tensor, 81 var_y: Tensor, 82 corr_xy: Tensor, 83 nb: Tensor, 84 ) -> Tensor: 85 """Compute the final pearson correlation based on accumulated statistics. 86 87 Args: 88 var_x: variance estimate of x tensor 89 var_y: variance estimate of y tensor 90 corr_xy: covariance estimate between x and y tensor 91 nb: number of observations 92 93 """ 94 var_x /= nb - 1 95 var_y /= nb - 1 96 corr_xy /= nb - 1 97 # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16 98 # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed 99 if var_x.dtype == torch.float16 and var_x.device == torch.device("cpu"): 100 var_x = var_x.bfloat16() 101 var_y = var_y.bfloat16() 102 103 corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze() 104 return torch.clamp(corrcoef, -1.0, 1.0) 105 106 107 def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor: 108 """Compute pearson correlation coefficient. 109 110 Args: 111 preds: estimated scores 112 target: ground truth scores 113 114 Example (single output regression): 115 >>> from torchmetrics.functional.regression import pearson_corrcoef 116 >>> target = torch.tensor([3, -0.5, 2, 7]) 117 >>> preds = torch.tensor([2.5, 0.0, 2, 8]) 118 >>> pearson_corrcoef(preds, target) 119 tensor(0.9849) 120 121 Example (multi output regression): 122 >>> from torchmetrics.functional.regression import pearson_corrcoef 123 >>> target = torch.tensor([[3, -0.5], [2, 7]]) 124 >>> preds = torch.tensor([[2.5, 0.0], [2, 8]]) 125 >>> pearson_corrcoef(preds, target) 126 tensor([1., 1.]) 127 128 """ 129 d = preds.shape[1] if preds.ndim == 2 else 1 130 _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device) 131 mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone() 132 var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone() 133 _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update( 134 preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1] 135 ) 136 return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb) 137 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py --- a/src/torchmetrics/functional/regression/pearson.py +++ b/src/torchmetrics/functional/regression/pearson.py @@ -11,12 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import math from typing import Tuple import torch from torch import Tensor from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs +from torchmetrics.utilities import rank_zero_warn from torchmetrics.utilities.checks import _check_same_shape @@ -100,6 +102,15 @@ var_x = var_x.bfloat16() var_y = var_y.bfloat16() + bound = math.sqrt(torch.finfo(var_x.dtype).eps) + if (var_x < bound).any() or (var_y < bound).any(): + rank_zero_warn( + "The variance of predictions or target is close to zero. This can cause instability in Pearson correlation" + "coefficient, leading to wrong results. Consider re-scaling the input if possible or computing using a" + f"larger dtype (currently using {var_x.dtype}).", + UserWarning, + ) + corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze() return torch.clamp(corrcoef, -1.0, 1.0)
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py\n--- a/src/torchmetrics/functional/regression/pearson.py\n+++ b/src/torchmetrics/functional/regression/pearson.py\n@@ -11,12 +11,14 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+import math\n from typing import Tuple\n \n import torch\n from torch import Tensor\n \n from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\n+from torchmetrics.utilities import rank_zero_warn\n from torchmetrics.utilities.checks import _check_same_shape\n \n \n@@ -100,6 +102,15 @@\n var_x = var_x.bfloat16()\n var_y = var_y.bfloat16()\n \n+ bound = math.sqrt(torch.finfo(var_x.dtype).eps)\n+ if (var_x < bound).any() or (var_y < bound).any():\n+ rank_zero_warn(\n+ \"The variance of predictions or target is close to zero. This can cause instability in Pearson correlation\"\n+ \"coefficient, leading to wrong results. Consider re-scaling the input if possible or computing using a\"\n+ f\"larger dtype (currently using {var_x.dtype}).\",\n+ UserWarning,\n+ )\n+\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n", "issue": "Converting from half precision to `bfloat16` in Pearson correlation coefficient cause numerical errors\n## \ud83d\udc1b Bug\r\n\r\nThe lack of half-precision op support on CPUs has lead to the decision to convert to `bfloat16` in the calculation of Pearson correlation coefficient. (#1813)\r\n\r\nHowever this leads to broadcasting errors when the values are (meaningfully) small.\r\n\r\n\r\n### To Reproduce\r\n\r\n```py\r\n>>> import torch\r\n>>> a = torch.rand((32,), dtype=torch.half)\r\n>>> b = torch.rand((32,), dtype=torch.half)\r\n>>> from torchmetrics.functional import pearson_corrcoef\r\n>>> pearson_corrcoef(a, b)\r\ntensor(0.2240)\r\n>>> pearson_corrcoef(a * 1e-1, b * 1e-1)\r\ntensor(0.2251)\r\n>>> pearson_corrcoef(a * 1e-2, b * 1e-2)\r\ntensor(0.2213)\r\n>>> pearson_corrcoef(a * 1e-3, b * 1e-3)\r\ntensor(0.)\r\n>>> pearson_corrcoef(a * 1e-4, b * 1e-4)\r\ntensor(nan)\r\n```\r\n\r\nWhich generally makes sense since there are only 5 exponential bits in `float16`, and some are lost in the operations. However this is not obvious during debugging since 0.001 is not *that* small...\r\n\r\n### Expected behavior\r\n\r\nA warning when the dynamic range is problematic, such as [what SciPy does](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html#scipy-stats-pearsonr).\r\n\r\nOr there may be some way to preserve more precision?\r\n\r\n### Environment\r\n\r\n- TorchMetrics version (pypi v1.0.1):\r\n- Python & PyTorch Version (3.10, 2.0.1):\r\n- Any other relevant information such as OS (tested on Linux_x64 and macOS_arm64):\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _pearson_corrcoef_update(\n preds: Tensor,\n target: Tensor,\n mean_x: Tensor,\n mean_y: Tensor,\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n n_prior: Tensor,\n num_outputs: int,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Pearson Correlation Coefficient.\n\n Check for same shape of input tensors.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n mean_x: current mean estimate of x tensor\n mean_y: current mean estimate of y tensor\n var_x: current variance estimate of x tensor\n var_y: current variance estimate of y tensor\n corr_xy: current covariance estimate between x and y tensor\n n_prior: current number of observed observations\n num_outputs: Number of outputs in multioutput setting\n\n \"\"\"\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n cond = n_prior.mean() > 0\n\n n_obs = preds.shape[0]\n if cond:\n mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n else:\n mx_new = preds.mean(0)\n my_new = target.mean(0)\n\n n_prior += n_obs\n\n if cond:\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n\n else:\n var_x += preds.var(0) * (n_obs - 1)\n var_y += target.var(0) * (n_obs - 1)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior\n\n\ndef _pearson_corrcoef_compute(\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n nb: Tensor,\n) -> Tensor:\n \"\"\"Compute the final pearson correlation based on accumulated statistics.\n\n Args:\n var_x: variance estimate of x tensor\n var_y: variance estimate of y tensor\n corr_xy: covariance estimate between x and y tensor\n nb: number of observations\n\n \"\"\"\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16\n # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed\n if var_x.dtype == torch.float16 and var_x.device == torch.device(\"cpu\"):\n var_x = var_x.bfloat16()\n var_y = var_y.bfloat16()\n\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n\n\ndef pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute pearson correlation coefficient.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n\n Example (single output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson_corrcoef(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson_corrcoef(preds, target)\n tensor([1., 1.])\n\n \"\"\"\n d = preds.shape[1] if preds.ndim == 2 else 1\n _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)\n mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()\n var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()\n _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(\n preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]\n )\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)\n", "path": "src/torchmetrics/functional/regression/pearson.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _pearson_corrcoef_update(\n preds: Tensor,\n target: Tensor,\n mean_x: Tensor,\n mean_y: Tensor,\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n n_prior: Tensor,\n num_outputs: int,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Pearson Correlation Coefficient.\n\n Check for same shape of input tensors.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n mean_x: current mean estimate of x tensor\n mean_y: current mean estimate of y tensor\n var_x: current variance estimate of x tensor\n var_y: current variance estimate of y tensor\n corr_xy: current covariance estimate between x and y tensor\n n_prior: current number of observed observations\n num_outputs: Number of outputs in multioutput setting\n\n \"\"\"\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n cond = n_prior.mean() > 0\n\n n_obs = preds.shape[0]\n if cond:\n mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n else:\n mx_new = preds.mean(0)\n my_new = target.mean(0)\n\n n_prior += n_obs\n\n if cond:\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n\n else:\n var_x += preds.var(0) * (n_obs - 1)\n var_y += target.var(0) * (n_obs - 1)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior\n\n\ndef _pearson_corrcoef_compute(\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n nb: Tensor,\n) -> Tensor:\n \"\"\"Compute the final pearson correlation based on accumulated statistics.\n\n Args:\n var_x: variance estimate of x tensor\n var_y: variance estimate of y tensor\n corr_xy: covariance estimate between x and y tensor\n nb: number of observations\n\n \"\"\"\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16\n # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed\n if var_x.dtype == torch.float16 and var_x.device == torch.device(\"cpu\"):\n var_x = var_x.bfloat16()\n var_y = var_y.bfloat16()\n\n bound = math.sqrt(torch.finfo(var_x.dtype).eps)\n if (var_x < bound).any() or (var_y < bound).any():\n rank_zero_warn(\n \"The variance of predictions or target is close to zero. This can cause instability in Pearson correlation\"\n \"coefficient, leading to wrong results. Consider re-scaling the input if possible or computing using a\"\n f\"larger dtype (currently using {var_x.dtype}).\",\n UserWarning,\n )\n\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n\n\ndef pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute pearson correlation coefficient.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n\n Example (single output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson_corrcoef(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson_corrcoef(preds, target)\n tensor([1., 1.])\n\n \"\"\"\n d = preds.shape[1] if preds.ndim == 2 else 1\n _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)\n mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()\n var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()\n _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(\n preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]\n )\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)\n", "path": "src/torchmetrics/functional/regression/pearson.py"}]}
2,287
350
gh_patches_debug_36546
rasdani/github-patches
git_diff
weecology__retriever-698
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error downloading eBird_observation data. The URL doesn't work anymore. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `try_install_all.py` Content: ``` 1 """Attempt to install all datasets into all database management systems 2 3 This module, when run, attempts to install datasets from all Retriever scripts 4 in the /scripts folder (except for those listed in IGNORE), for each engine in 5 ENGINE_LIST() from __init__.py. In other words, it runs trys to install using 6 all possible combinations of database platform and script and checks to 7 see if there are any errors. It does not check the values in the database. 8 9 """ 10 from __future__ import print_function 11 from __future__ import absolute_import 12 import os 13 import sys 14 from imp import reload 15 from retriever.lib.tools import choose_engine 16 from retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST 17 18 reload(sys) 19 if hasattr(sys, 'setdefaultencoding'): 20 sys.setdefaultencoding('latin-1') 21 22 MODULE_LIST = MODULE_LIST() 23 ENGINE_LIST = ENGINE_LIST() 24 if len(sys.argv) > 1: 25 ENGINE_LIST = [ 26 e for e in ENGINE_LIST 27 if e.name in sys.argv[1:] or 28 e.abbreviation in sys.argv[1:] 29 ] 30 SCRIPT_LIST = SCRIPT_LIST() 31 TEST_ENGINES = {} 32 IGNORE = ["AvianBodyMass", "FIA", "Bioclim", "PRISM", "vertnet","NPN", "mammsupertree", "eBirdOD"] 33 IGNORE = [dataset.lower() for dataset in IGNORE] 34 35 for engine in ENGINE_LIST: 36 opts = {} 37 print("** %s **" % engine.name) 38 opts["engine"] = engine.abbreviation 39 40 try: 41 TEST_ENGINES[engine.abbreviation] = choose_engine(opts) 42 TEST_ENGINES[engine.abbreviation].get_input() 43 TEST_ENGINES[engine.abbreviation].get_cursor() 44 except: 45 TEST_ENGINES[engine.abbreviation] = None 46 pass 47 48 errors = [] 49 for module in MODULE_LIST: 50 for (key, value) in list(TEST_ENGINES.items()): 51 if module.SCRIPT.shortname.lower() not in IGNORE: 52 if value != None: 53 print("==>", module.__name__, value.name, "..........", module.SCRIPT.shortname) 54 try: 55 module.SCRIPT.download(value) 56 except KeyboardInterrupt: 57 pass 58 except Exception as e: 59 print("ERROR.") 60 errors.append((key, module.__name__, e)) 61 else: 62 errors.append((key, "No connection detected......" + module.SCRIPT.shortname)) 63 64 print('') 65 if errors: 66 print("Engine, Dataset, Error") 67 for error in errors: 68 print(error) 69 else: 70 print("All tests passed") 71 ``` Path: `scripts/eBird_observation.py` Content: ``` 1 #retriever 2 """Data Retriever script for the eBird Observation Dataset""" 3 4 from retriever.lib.templates import Script 5 from retriever.lib.models import Table 6 7 8 class main(Script): 9 def __init__(self, **kwargs): 10 Script.__init__(self, **kwargs) 11 self.name = "eBird Observation Dataset" 12 self.shortname = "eBirdOD" 13 self.ref = "http://ebird.org/content/ebird/news/gbif/" 14 self.urls = {"main": "https://dataone.ornith.cornell.edu/metacat/d1/mn/v1/object/CLOEODDATA.05192014.1"} 15 self.retriever_minimum_version = '2.0.dev' 16 self.version = '1.0' 17 self.description = "A collection of observations from birders through portals managed and maintained by local partner conservation organizations" 18 19 def download(self, engine=None, debug=False): 20 data_file_name = "eBird_Observation_Dataset_2013.csv" 21 Script.download(self, engine, debug) 22 self.engine.download_files_from_archive(self.urls["main"], 23 [data_file_name], 24 filetype='gz') 25 table = (Table("main", delimiter=",")) 26 table.columns=[("BASISOFRECORD",("char", )), 27 ("INSTITUTIONCODE",("char", )), 28 ("COLLECTIONCODE",("char", )), 29 ("CATALOGNUMBER",("char", )), 30 ("OCCURRENCEID",("char", )), 31 ("RECORDEDBY",("char", )), 32 ("YEAR",("int", )), 33 ("MONTH",("int", )), 34 ("DAY",("int", )), 35 ("COUNTRY",("char", )), 36 ("STATEPROVINCE",("char", )), 37 ("COUNTY",("char", )), 38 ("DECIMALLATITUDE",("double", )), 39 ("DECIMALLONGITUDE",("double", )), 40 ("LOCALITY",("char", )), 41 ("KINGDOM",("char", )), 42 ("PHYLUM",("char", )), 43 ("CLASS",("char", )), 44 ("SPORDER",("char", )), 45 ("FAMILY",("char", )), 46 ("GENUS",("char", )), 47 ("SPECIFICEPITHET",("char", )), 48 ("SCIENTIFICNAME",("char", )), 49 ("VERNACULARNAME",("char", )), 50 ("INDIVIDUALCOUNT",("int", ))] 51 engine.table = table 52 engine.create_table() 53 engine.insert_data_from_file(engine.format_filename(data_file_name)) 54 return engine 55 56 SCRIPT = main() 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/eBird_observation.py b/scripts/eBird_observation.py deleted file mode 100644 --- a/scripts/eBird_observation.py +++ /dev/null @@ -1,56 +0,0 @@ -#retriever -"""Data Retriever script for the eBird Observation Dataset""" - -from retriever.lib.templates import Script -from retriever.lib.models import Table - - -class main(Script): - def __init__(self, **kwargs): - Script.__init__(self, **kwargs) - self.name = "eBird Observation Dataset" - self.shortname = "eBirdOD" - self.ref = "http://ebird.org/content/ebird/news/gbif/" - self.urls = {"main": "https://dataone.ornith.cornell.edu/metacat/d1/mn/v1/object/CLOEODDATA.05192014.1"} - self.retriever_minimum_version = '2.0.dev' - self.version = '1.0' - self.description = "A collection of observations from birders through portals managed and maintained by local partner conservation organizations" - - def download(self, engine=None, debug=False): - data_file_name = "eBird_Observation_Dataset_2013.csv" - Script.download(self, engine, debug) - self.engine.download_files_from_archive(self.urls["main"], - [data_file_name], - filetype='gz') - table = (Table("main", delimiter=",")) - table.columns=[("BASISOFRECORD",("char", )), - ("INSTITUTIONCODE",("char", )), - ("COLLECTIONCODE",("char", )), - ("CATALOGNUMBER",("char", )), - ("OCCURRENCEID",("char", )), - ("RECORDEDBY",("char", )), - ("YEAR",("int", )), - ("MONTH",("int", )), - ("DAY",("int", )), - ("COUNTRY",("char", )), - ("STATEPROVINCE",("char", )), - ("COUNTY",("char", )), - ("DECIMALLATITUDE",("double", )), - ("DECIMALLONGITUDE",("double", )), - ("LOCALITY",("char", )), - ("KINGDOM",("char", )), - ("PHYLUM",("char", )), - ("CLASS",("char", )), - ("SPORDER",("char", )), - ("FAMILY",("char", )), - ("GENUS",("char", )), - ("SPECIFICEPITHET",("char", )), - ("SCIENTIFICNAME",("char", )), - ("VERNACULARNAME",("char", )), - ("INDIVIDUALCOUNT",("int", ))] - engine.table = table - engine.create_table() - engine.insert_data_from_file(engine.format_filename(data_file_name)) - return engine - -SCRIPT = main() diff --git a/try_install_all.py b/try_install_all.py --- a/try_install_all.py +++ b/try_install_all.py @@ -29,7 +29,7 @@ ] SCRIPT_LIST = SCRIPT_LIST() TEST_ENGINES = {} -IGNORE = ["AvianBodyMass", "FIA", "Bioclim", "PRISM", "vertnet","NPN", "mammsupertree", "eBirdOD"] +IGNORE = ["AvianBodyMass", "FIA", "Bioclim", "PRISM", "vertnet","NPN", "mammsupertree"] IGNORE = [dataset.lower() for dataset in IGNORE] for engine in ENGINE_LIST:
{"golden_diff": "diff --git a/scripts/eBird_observation.py b/scripts/eBird_observation.py\ndeleted file mode 100644\n--- a/scripts/eBird_observation.py\n+++ /dev/null\n@@ -1,56 +0,0 @@\n-#retriever\n-\"\"\"Data Retriever script for the eBird Observation Dataset\"\"\"\n-\n-from retriever.lib.templates import Script\n-from retriever.lib.models import Table\n-\n-\n-class main(Script):\n- def __init__(self, **kwargs):\n- Script.__init__(self, **kwargs)\n- self.name = \"eBird Observation Dataset\"\n- self.shortname = \"eBirdOD\"\n- self.ref = \"http://ebird.org/content/ebird/news/gbif/\"\n- self.urls = {\"main\": \"https://dataone.ornith.cornell.edu/metacat/d1/mn/v1/object/CLOEODDATA.05192014.1\"}\n- self.retriever_minimum_version = '2.0.dev'\n- self.version = '1.0'\n- self.description = \"A collection of observations from birders through portals managed and maintained by local partner conservation organizations\"\n-\n- def download(self, engine=None, debug=False):\n- data_file_name = \"eBird_Observation_Dataset_2013.csv\"\n- Script.download(self, engine, debug)\n- self.engine.download_files_from_archive(self.urls[\"main\"],\n- [data_file_name],\n- filetype='gz')\n- table = (Table(\"main\", delimiter=\",\"))\n- table.columns=[(\"BASISOFRECORD\",(\"char\", )),\n- (\"INSTITUTIONCODE\",(\"char\", )),\n- (\"COLLECTIONCODE\",(\"char\", )),\n- (\"CATALOGNUMBER\",(\"char\", )),\n- (\"OCCURRENCEID\",(\"char\", )),\n- (\"RECORDEDBY\",(\"char\", )),\n- (\"YEAR\",(\"int\", )),\n- (\"MONTH\",(\"int\", )),\n- (\"DAY\",(\"int\", )),\n- (\"COUNTRY\",(\"char\", )),\n- (\"STATEPROVINCE\",(\"char\", )),\n- (\"COUNTY\",(\"char\", )),\n- (\"DECIMALLATITUDE\",(\"double\", )),\n- (\"DECIMALLONGITUDE\",(\"double\", )),\n- (\"LOCALITY\",(\"char\", )),\n- (\"KINGDOM\",(\"char\", )),\n- (\"PHYLUM\",(\"char\", )),\n- (\"CLASS\",(\"char\", )),\n- (\"SPORDER\",(\"char\", )),\n- (\"FAMILY\",(\"char\", )),\n- (\"GENUS\",(\"char\", )),\n- (\"SPECIFICEPITHET\",(\"char\", )),\n- (\"SCIENTIFICNAME\",(\"char\", )),\n- (\"VERNACULARNAME\",(\"char\", )),\n- (\"INDIVIDUALCOUNT\",(\"int\", ))]\n- engine.table = table\n- engine.create_table()\n- engine.insert_data_from_file(engine.format_filename(data_file_name))\n- return engine\n-\n-SCRIPT = main()\ndiff --git a/try_install_all.py b/try_install_all.py\n--- a/try_install_all.py\n+++ b/try_install_all.py\n@@ -29,7 +29,7 @@\n ]\n SCRIPT_LIST = SCRIPT_LIST()\n TEST_ENGINES = {}\n-IGNORE = [\"AvianBodyMass\", \"FIA\", \"Bioclim\", \"PRISM\", \"vertnet\",\"NPN\", \"mammsupertree\", \"eBirdOD\"]\n+IGNORE = [\"AvianBodyMass\", \"FIA\", \"Bioclim\", \"PRISM\", \"vertnet\",\"NPN\", \"mammsupertree\"]\n IGNORE = [dataset.lower() for dataset in IGNORE]\n \n for engine in ENGINE_LIST:\n", "issue": "Error downloading eBird_observation data.\nThe URL doesn't work anymore.\n\n", "before_files": [{"content": "\"\"\"Attempt to install all datasets into all database management systems\n\nThis module, when run, attempts to install datasets from all Retriever scripts\nin the /scripts folder (except for those listed in IGNORE), for each engine in\nENGINE_LIST() from __init__.py. In other words, it runs trys to install using\nall possible combinations of database platform and script and checks to\nsee if there are any errors. It does not check the values in the database.\n\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport os\nimport sys\nfrom imp import reload\nfrom retriever.lib.tools import choose_engine\nfrom retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST\n\nreload(sys)\nif hasattr(sys, 'setdefaultencoding'):\n sys.setdefaultencoding('latin-1')\n\nMODULE_LIST = MODULE_LIST()\nENGINE_LIST = ENGINE_LIST()\nif len(sys.argv) > 1:\n ENGINE_LIST = [\n e for e in ENGINE_LIST\n if e.name in sys.argv[1:] or\n e.abbreviation in sys.argv[1:]\n ]\nSCRIPT_LIST = SCRIPT_LIST()\nTEST_ENGINES = {}\nIGNORE = [\"AvianBodyMass\", \"FIA\", \"Bioclim\", \"PRISM\", \"vertnet\",\"NPN\", \"mammsupertree\", \"eBirdOD\"]\nIGNORE = [dataset.lower() for dataset in IGNORE]\n\nfor engine in ENGINE_LIST:\n opts = {}\n print(\"** %s **\" % engine.name)\n opts[\"engine\"] = engine.abbreviation\n\n try:\n TEST_ENGINES[engine.abbreviation] = choose_engine(opts)\n TEST_ENGINES[engine.abbreviation].get_input()\n TEST_ENGINES[engine.abbreviation].get_cursor()\n except:\n TEST_ENGINES[engine.abbreviation] = None\n pass\n\nerrors = []\nfor module in MODULE_LIST:\n for (key, value) in list(TEST_ENGINES.items()):\n if module.SCRIPT.shortname.lower() not in IGNORE:\n if value != None:\n print(\"==>\", module.__name__, value.name, \"..........\", module.SCRIPT.shortname)\n try:\n module.SCRIPT.download(value)\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(\"ERROR.\")\n errors.append((key, module.__name__, e))\n else:\n errors.append((key, \"No connection detected......\" + module.SCRIPT.shortname))\n\nprint('')\nif errors:\n print(\"Engine, Dataset, Error\")\n for error in errors:\n print(error)\nelse:\n print(\"All tests passed\")\n", "path": "try_install_all.py"}, {"content": "#retriever\n\"\"\"Data Retriever script for the eBird Observation Dataset\"\"\"\n\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table\n\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.name = \"eBird Observation Dataset\"\n self.shortname = \"eBirdOD\"\n self.ref = \"http://ebird.org/content/ebird/news/gbif/\"\n self.urls = {\"main\": \"https://dataone.ornith.cornell.edu/metacat/d1/mn/v1/object/CLOEODDATA.05192014.1\"}\n self.retriever_minimum_version = '2.0.dev'\n self.version = '1.0'\n self.description = \"A collection of observations from birders through portals managed and maintained by local partner conservation organizations\"\n\n def download(self, engine=None, debug=False):\n data_file_name = \"eBird_Observation_Dataset_2013.csv\"\n Script.download(self, engine, debug)\n self.engine.download_files_from_archive(self.urls[\"main\"],\n [data_file_name],\n filetype='gz')\n table = (Table(\"main\", delimiter=\",\"))\n table.columns=[(\"BASISOFRECORD\",(\"char\", )),\n (\"INSTITUTIONCODE\",(\"char\", )),\n (\"COLLECTIONCODE\",(\"char\", )),\n (\"CATALOGNUMBER\",(\"char\", )),\n (\"OCCURRENCEID\",(\"char\", )),\n (\"RECORDEDBY\",(\"char\", )),\n (\"YEAR\",(\"int\", )),\n (\"MONTH\",(\"int\", )),\n (\"DAY\",(\"int\", )),\n (\"COUNTRY\",(\"char\", )),\n (\"STATEPROVINCE\",(\"char\", )),\n (\"COUNTY\",(\"char\", )),\n (\"DECIMALLATITUDE\",(\"double\", )),\n (\"DECIMALLONGITUDE\",(\"double\", )),\n (\"LOCALITY\",(\"char\", )),\n (\"KINGDOM\",(\"char\", )),\n (\"PHYLUM\",(\"char\", )),\n (\"CLASS\",(\"char\", )),\n (\"SPORDER\",(\"char\", )),\n (\"FAMILY\",(\"char\", )),\n (\"GENUS\",(\"char\", )),\n (\"SPECIFICEPITHET\",(\"char\", )),\n (\"SCIENTIFICNAME\",(\"char\", )),\n (\"VERNACULARNAME\",(\"char\", )),\n (\"INDIVIDUALCOUNT\",(\"int\", ))]\n engine.table = table\n engine.create_table()\n engine.insert_data_from_file(engine.format_filename(data_file_name))\n return engine\n\nSCRIPT = main()\n", "path": "scripts/eBird_observation.py"}], "after_files": [{"content": "\"\"\"Attempt to install all datasets into all database management systems\n\nThis module, when run, attempts to install datasets from all Retriever scripts\nin the /scripts folder (except for those listed in IGNORE), for each engine in\nENGINE_LIST() from __init__.py. In other words, it runs trys to install using\nall possible combinations of database platform and script and checks to\nsee if there are any errors. It does not check the values in the database.\n\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport os\nimport sys\nfrom imp import reload\nfrom retriever.lib.tools import choose_engine\nfrom retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST\n\nreload(sys)\nif hasattr(sys, 'setdefaultencoding'):\n sys.setdefaultencoding('latin-1')\n\nMODULE_LIST = MODULE_LIST()\nENGINE_LIST = ENGINE_LIST()\nif len(sys.argv) > 1:\n ENGINE_LIST = [\n e for e in ENGINE_LIST\n if e.name in sys.argv[1:] or\n e.abbreviation in sys.argv[1:]\n ]\nSCRIPT_LIST = SCRIPT_LIST()\nTEST_ENGINES = {}\nIGNORE = [\"AvianBodyMass\", \"FIA\", \"Bioclim\", \"PRISM\", \"vertnet\",\"NPN\", \"mammsupertree\"]\nIGNORE = [dataset.lower() for dataset in IGNORE]\n\nfor engine in ENGINE_LIST:\n opts = {}\n print(\"** %s **\" % engine.name)\n opts[\"engine\"] = engine.abbreviation\n\n try:\n TEST_ENGINES[engine.abbreviation] = choose_engine(opts)\n TEST_ENGINES[engine.abbreviation].get_input()\n TEST_ENGINES[engine.abbreviation].get_cursor()\n except:\n TEST_ENGINES[engine.abbreviation] = None\n pass\n\nerrors = []\nfor module in MODULE_LIST:\n for (key, value) in list(TEST_ENGINES.items()):\n if module.SCRIPT.shortname.lower() not in IGNORE:\n if value != None:\n print(\"==>\", module.__name__, value.name, \"..........\", module.SCRIPT.shortname)\n try:\n module.SCRIPT.download(value)\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(\"ERROR.\")\n errors.append((key, module.__name__, e))\n else:\n errors.append((key, \"No connection detected......\" + module.SCRIPT.shortname))\n\nprint('')\nif errors:\n print(\"Engine, Dataset, Error\")\n for error in errors:\n print(error)\nelse:\n print(\"All tests passed\")\n", "path": "try_install_all.py"}, {"content": null, "path": "scripts/eBird_observation.py"}]}
1,642
820
gh_patches_debug_29119
rasdani/github-patches
git_diff
googleapis__google-cloud-python-3786
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BigQuery: expose public helper method to convert a list of schema fields to/from a list of schema dictionaries (JSON) I've received some feedback asking how to take a schema from the client library and save it to a JSON file. One reason to do this is the [`bq` command-line tool](https://cloud.google.com/bigquery/bq-command-line-tool#creatingtablefromfile) accepts a schema file, formatted like ``` [ {"name": "name", "type": "string", "mode": "required"}, {"name": "gender", "type": "string", "mode": "nullable"}, {"name": "count", "type": "integer", "mode": "required"} ] ``` Note: this format is the same as the API representation. It would be great if our client libraries could read/write in this format. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bigquery/google/cloud/bigquery/schema.py` Content: ``` 1 # Copyright 2015 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Schemas for BigQuery tables / queries.""" 16 17 18 class SchemaField(object): 19 """Describe a single field within a table schema. 20 21 :type name: str 22 :param name: the name of the field. 23 24 :type field_type: str 25 :param field_type: the type of the field (one of 'STRING', 'INTEGER', 26 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD'). 27 28 :type mode: str 29 :param mode: the mode of the field (one of 'NULLABLE', 'REQUIRED', 30 or 'REPEATED'). 31 32 :type description: str 33 :param description: optional description for the field. 34 35 :type fields: tuple of :class:`SchemaField` 36 :param fields: subfields (requires ``field_type`` of 'RECORD'). 37 """ 38 def __init__(self, name, field_type, mode='NULLABLE', 39 description=None, fields=()): 40 self._name = name 41 self._field_type = field_type 42 self._mode = mode 43 self._description = description 44 self._fields = tuple(fields) 45 46 @property 47 def name(self): 48 """str: The name of the field.""" 49 return self._name 50 51 @property 52 def field_type(self): 53 """str: The type of the field. 54 55 Will be one of 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 56 'TIMESTAMP' or 'RECORD'. 57 """ 58 return self._field_type 59 60 @property 61 def mode(self): 62 """str: The mode of the field. 63 64 Will be one of 'NULLABLE', 'REQUIRED', or 'REPEATED'. 65 """ 66 return self._mode 67 68 @property 69 def is_nullable(self): 70 """Check whether 'mode' is 'nullable'.""" 71 return self._mode == 'NULLABLE' 72 73 @property 74 def description(self): 75 """Optional[str]: Description for the field.""" 76 return self._description 77 78 @property 79 def fields(self): 80 """tuple: Subfields contained in this field. 81 82 If ``field_type`` is not 'RECORD', this property must be 83 empty / unset. 84 """ 85 return self._fields 86 87 def _key(self): 88 """A tuple key that unique-ly describes this field. 89 90 Used to compute this instance's hashcode and evaluate equality. 91 92 Returns: 93 tuple: The contents of this :class:`SchemaField`. 94 """ 95 return ( 96 self._name, 97 self._field_type.lower(), 98 self._mode, 99 self._description, 100 self._fields, 101 ) 102 103 def __eq__(self, other): 104 if not isinstance(other, SchemaField): 105 return NotImplemented 106 return self._key() == other._key() 107 108 def __ne__(self, other): 109 return not self == other 110 111 def __hash__(self): 112 return hash(self._key()) 113 114 def __repr__(self): 115 return 'SchemaField{}'.format(self._key()) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bigquery/google/cloud/bigquery/schema.py b/bigquery/google/cloud/bigquery/schema.py --- a/bigquery/google/cloud/bigquery/schema.py +++ b/bigquery/google/cloud/bigquery/schema.py @@ -43,6 +43,25 @@ self._description = description self._fields = tuple(fields) + @classmethod + def from_api_repr(cls, api_repr): + """Return a ``SchemaField`` object deserialized from a dictionary. + + Args: + api_repr (Mapping[str, str]): The serialized representation + of the SchemaField, such as what is output by + :meth:`to_api_repr`. + + Returns: + SchemaField: The ``SchemaField`` object. + """ + return cls( + field_type=api_repr['type'].upper(), + fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())], + mode=api_repr['mode'].upper(), + name=api_repr['name'], + ) + @property def name(self): """str: The name of the field.""" @@ -84,6 +103,28 @@ """ return self._fields + def to_api_repr(self): + """Return a dictionary representing this schema field. + + Returns: + dict: A dictionary representing the SchemaField in a serialized + form. + """ + # Put together the basic representation. See http://bit.ly/2hOAT5u. + answer = { + 'mode': self.mode.lower(), + 'name': self.name, + 'type': self.field_type.lower(), + } + + # If this is a RECORD type, then sub-fields are also included, + # add this to the serialized representation. + if self.field_type.upper() == 'RECORD': + answer['fields'] = [f.to_api_repr() for f in self.fields] + + # Done; return the serialized dictionary. + return answer + def _key(self): """A tuple key that unique-ly describes this field.
{"golden_diff": "diff --git a/bigquery/google/cloud/bigquery/schema.py b/bigquery/google/cloud/bigquery/schema.py\n--- a/bigquery/google/cloud/bigquery/schema.py\n+++ b/bigquery/google/cloud/bigquery/schema.py\n@@ -43,6 +43,25 @@\n self._description = description\n self._fields = tuple(fields)\n \n+ @classmethod\n+ def from_api_repr(cls, api_repr):\n+ \"\"\"Return a ``SchemaField`` object deserialized from a dictionary.\n+\n+ Args:\n+ api_repr (Mapping[str, str]): The serialized representation\n+ of the SchemaField, such as what is output by\n+ :meth:`to_api_repr`.\n+\n+ Returns:\n+ SchemaField: The ``SchemaField`` object.\n+ \"\"\"\n+ return cls(\n+ field_type=api_repr['type'].upper(),\n+ fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())],\n+ mode=api_repr['mode'].upper(),\n+ name=api_repr['name'],\n+ )\n+\n @property\n def name(self):\n \"\"\"str: The name of the field.\"\"\"\n@@ -84,6 +103,28 @@\n \"\"\"\n return self._fields\n \n+ def to_api_repr(self):\n+ \"\"\"Return a dictionary representing this schema field.\n+\n+ Returns:\n+ dict: A dictionary representing the SchemaField in a serialized\n+ form.\n+ \"\"\"\n+ # Put together the basic representation. See http://bit.ly/2hOAT5u.\n+ answer = {\n+ 'mode': self.mode.lower(),\n+ 'name': self.name,\n+ 'type': self.field_type.lower(),\n+ }\n+\n+ # If this is a RECORD type, then sub-fields are also included,\n+ # add this to the serialized representation.\n+ if self.field_type.upper() == 'RECORD':\n+ answer['fields'] = [f.to_api_repr() for f in self.fields]\n+\n+ # Done; return the serialized dictionary.\n+ return answer\n+\n def _key(self):\n \"\"\"A tuple key that unique-ly describes this field.\n", "issue": "BigQuery: expose public helper method to convert a list of schema fields to/from a list of schema dictionaries (JSON)\nI've received some feedback asking how to take a schema from the client library and save it to a JSON file. One reason to do this is the [`bq` command-line tool](https://cloud.google.com/bigquery/bq-command-line-tool#creatingtablefromfile) accepts a schema file, formatted like\r\n\r\n```\r\n[\r\n {\"name\": \"name\", \"type\": \"string\", \"mode\": \"required\"},\r\n {\"name\": \"gender\", \"type\": \"string\", \"mode\": \"nullable\"},\r\n {\"name\": \"count\", \"type\": \"integer\", \"mode\": \"required\"}\r\n]\r\n```\r\n\r\nNote: this format is the same as the API representation.\r\n\r\nIt would be great if our client libraries could read/write in this format.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Schemas for BigQuery tables / queries.\"\"\"\n\n\nclass SchemaField(object):\n \"\"\"Describe a single field within a table schema.\n\n :type name: str\n :param name: the name of the field.\n\n :type field_type: str\n :param field_type: the type of the field (one of 'STRING', 'INTEGER',\n 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD').\n\n :type mode: str\n :param mode: the mode of the field (one of 'NULLABLE', 'REQUIRED',\n or 'REPEATED').\n\n :type description: str\n :param description: optional description for the field.\n\n :type fields: tuple of :class:`SchemaField`\n :param fields: subfields (requires ``field_type`` of 'RECORD').\n \"\"\"\n def __init__(self, name, field_type, mode='NULLABLE',\n description=None, fields=()):\n self._name = name\n self._field_type = field_type\n self._mode = mode\n self._description = description\n self._fields = tuple(fields)\n\n @property\n def name(self):\n \"\"\"str: The name of the field.\"\"\"\n return self._name\n\n @property\n def field_type(self):\n \"\"\"str: The type of the field.\n\n Will be one of 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN',\n 'TIMESTAMP' or 'RECORD'.\n \"\"\"\n return self._field_type\n\n @property\n def mode(self):\n \"\"\"str: The mode of the field.\n\n Will be one of 'NULLABLE', 'REQUIRED', or 'REPEATED'.\n \"\"\"\n return self._mode\n\n @property\n def is_nullable(self):\n \"\"\"Check whether 'mode' is 'nullable'.\"\"\"\n return self._mode == 'NULLABLE'\n\n @property\n def description(self):\n \"\"\"Optional[str]: Description for the field.\"\"\"\n return self._description\n\n @property\n def fields(self):\n \"\"\"tuple: Subfields contained in this field.\n\n If ``field_type`` is not 'RECORD', this property must be\n empty / unset.\n \"\"\"\n return self._fields\n\n def _key(self):\n \"\"\"A tuple key that unique-ly describes this field.\n\n Used to compute this instance's hashcode and evaluate equality.\n\n Returns:\n tuple: The contents of this :class:`SchemaField`.\n \"\"\"\n return (\n self._name,\n self._field_type.lower(),\n self._mode,\n self._description,\n self._fields,\n )\n\n def __eq__(self, other):\n if not isinstance(other, SchemaField):\n return NotImplemented\n return self._key() == other._key()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._key())\n\n def __repr__(self):\n return 'SchemaField{}'.format(self._key())\n", "path": "bigquery/google/cloud/bigquery/schema.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Schemas for BigQuery tables / queries.\"\"\"\n\n\nclass SchemaField(object):\n \"\"\"Describe a single field within a table schema.\n\n :type name: str\n :param name: the name of the field.\n\n :type field_type: str\n :param field_type: the type of the field (one of 'STRING', 'INTEGER',\n 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD').\n\n :type mode: str\n :param mode: the mode of the field (one of 'NULLABLE', 'REQUIRED',\n or 'REPEATED').\n\n :type description: str\n :param description: optional description for the field.\n\n :type fields: tuple of :class:`SchemaField`\n :param fields: subfields (requires ``field_type`` of 'RECORD').\n \"\"\"\n def __init__(self, name, field_type, mode='NULLABLE',\n description=None, fields=()):\n self._name = name\n self._field_type = field_type\n self._mode = mode\n self._description = description\n self._fields = tuple(fields)\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Return a ``SchemaField`` object deserialized from a dictionary.\n\n Args:\n api_repr (Mapping[str, str]): The serialized representation\n of the SchemaField, such as what is output by\n :meth:`to_api_repr`.\n\n Returns:\n SchemaField: The ``SchemaField`` object.\n \"\"\"\n return cls(\n field_type=api_repr['type'].upper(),\n fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())],\n mode=api_repr['mode'].upper(),\n name=api_repr['name'],\n )\n\n @property\n def name(self):\n \"\"\"str: The name of the field.\"\"\"\n return self._name\n\n @property\n def field_type(self):\n \"\"\"str: The type of the field.\n\n Will be one of 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN',\n 'TIMESTAMP' or 'RECORD'.\n \"\"\"\n return self._field_type\n\n @property\n def mode(self):\n \"\"\"str: The mode of the field.\n\n Will be one of 'NULLABLE', 'REQUIRED', or 'REPEATED'.\n \"\"\"\n return self._mode\n\n @property\n def is_nullable(self):\n \"\"\"Check whether 'mode' is 'nullable'.\"\"\"\n return self._mode == 'NULLABLE'\n\n @property\n def description(self):\n \"\"\"Optional[str]: Description for the field.\"\"\"\n return self._description\n\n @property\n def fields(self):\n \"\"\"tuple: Subfields contained in this field.\n\n If ``field_type`` is not 'RECORD', this property must be\n empty / unset.\n \"\"\"\n return self._fields\n\n def to_api_repr(self):\n \"\"\"Return a dictionary representing this schema field.\n\n Returns:\n dict: A dictionary representing the SchemaField in a serialized\n form.\n \"\"\"\n # Put together the basic representation. See http://bit.ly/2hOAT5u.\n answer = {\n 'mode': self.mode.lower(),\n 'name': self.name,\n 'type': self.field_type.lower(),\n }\n\n # If this is a RECORD type, then sub-fields are also included,\n # add this to the serialized representation.\n if self.field_type.upper() == 'RECORD':\n answer['fields'] = [f.to_api_repr() for f in self.fields]\n\n # Done; return the serialized dictionary.\n return answer\n\n def _key(self):\n \"\"\"A tuple key that unique-ly describes this field.\n\n Used to compute this instance's hashcode and evaluate equality.\n\n Returns:\n tuple: The contents of this :class:`SchemaField`.\n \"\"\"\n return (\n self._name,\n self._field_type.lower(),\n self._mode,\n self._description,\n self._fields,\n )\n\n def __eq__(self, other):\n if not isinstance(other, SchemaField):\n return NotImplemented\n return self._key() == other._key()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._key())\n\n def __repr__(self):\n return 'SchemaField{}'.format(self._key())\n", "path": "bigquery/google/cloud/bigquery/schema.py"}]}
1,469
468
gh_patches_debug_13145
rasdani/github-patches
git_diff
mabel-dev__opteryx-1159
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 🧹 resync SQLoxide AST to SQL and AST visitor appear to have been added --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opteryx/third_party/sqloxide/__init__.py` Content: ``` 1 try: 2 from .sqloxide import parse_sql 3 except ImportError as e: # pragma: no cover 4 print(e) 5 if str(e) != "PyO3 modules may only be initialized once per interpreter process": 6 raise e 7 8 __all__ = ["parse_sql"] 9 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opteryx/third_party/sqloxide/__init__.py b/opteryx/third_party/sqloxide/__init__.py --- a/opteryx/third_party/sqloxide/__init__.py +++ b/opteryx/third_party/sqloxide/__init__.py @@ -1,8 +1,16 @@ -try: - from .sqloxide import parse_sql -except ImportError as e: # pragma: no cover - print(e) - if str(e) != "PyO3 modules may only be initialized once per interpreter process": - raise e - -__all__ = ["parse_sql"] +""" +This module provides an interface to the sqloxide library, which is responsible for parsing SQL, +restoring the Abstract Syntax Tree (AST), and performing various mutations on expressions and relations. + +For more information about sqloxide: https://github.com/wseaton/sqloxide + +This module is not from sqloxide, it is written for Opteryx. +""" + +from .sqloxide import mutate_expressions +from .sqloxide import mutate_relations +from .sqloxide import parse_sql +from .sqloxide import restore_ast + +# Explicitly define the API of this module for external consumers +__all__ = ["parse_sql", "restore_ast", "mutate_expressions", "mutate_relations"]
{"golden_diff": "diff --git a/opteryx/third_party/sqloxide/__init__.py b/opteryx/third_party/sqloxide/__init__.py\n--- a/opteryx/third_party/sqloxide/__init__.py\n+++ b/opteryx/third_party/sqloxide/__init__.py\n@@ -1,8 +1,16 @@\n-try:\n- from .sqloxide import parse_sql\n-except ImportError as e: # pragma: no cover\n- print(e)\n- if str(e) != \"PyO3 modules may only be initialized once per interpreter process\":\n- raise e\n-\n-__all__ = [\"parse_sql\"]\n+\"\"\"\n+This module provides an interface to the sqloxide library, which is responsible for parsing SQL,\n+restoring the Abstract Syntax Tree (AST), and performing various mutations on expressions and relations.\n+\n+For more information about sqloxide: https://github.com/wseaton/sqloxide\n+\n+This module is not from sqloxide, it is written for Opteryx.\n+\"\"\"\n+\n+from .sqloxide import mutate_expressions\n+from .sqloxide import mutate_relations\n+from .sqloxide import parse_sql\n+from .sqloxide import restore_ast\n+\n+# Explicitly define the API of this module for external consumers\n+__all__ = [\"parse_sql\", \"restore_ast\", \"mutate_expressions\", \"mutate_relations\"]\n", "issue": "\ud83e\uddf9 resync SQLoxide\nAST to SQL and AST visitor appear to have been added \n", "before_files": [{"content": "try:\n from .sqloxide import parse_sql\nexcept ImportError as e: # pragma: no cover\n print(e)\n if str(e) != \"PyO3 modules may only be initialized once per interpreter process\":\n raise e\n\n__all__ = [\"parse_sql\"]\n", "path": "opteryx/third_party/sqloxide/__init__.py"}], "after_files": [{"content": "\"\"\"\nThis module provides an interface to the sqloxide library, which is responsible for parsing SQL,\nrestoring the Abstract Syntax Tree (AST), and performing various mutations on expressions and relations.\n\nFor more information about sqloxide: https://github.com/wseaton/sqloxide\n\nThis module is not from sqloxide, it is written for Opteryx.\n\"\"\"\n\nfrom .sqloxide import mutate_expressions\nfrom .sqloxide import mutate_relations\nfrom .sqloxide import parse_sql\nfrom .sqloxide import restore_ast\n\n# Explicitly define the API of this module for external consumers\n__all__ = [\"parse_sql\", \"restore_ast\", \"mutate_expressions\", \"mutate_relations\"]\n", "path": "opteryx/third_party/sqloxide/__init__.py"}]}
356
311
gh_patches_debug_40479
rasdani/github-patches
git_diff
pypa__cibuildwheel-204
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Linux support in AppVeyor FYI https://www.appveyor.com/blog/2018/03/06/appveyor-for-linux/ https://www.appveyor.com/blog/2018/05/15/appveyor-for-linux-is-generally-available/ https://www.appveyor.com/docs/getting-started-with-appveyor-for-linux/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cibuildwheel/__main__.py` Content: ``` 1 from __future__ import print_function 2 import argparse, os, subprocess, sys, textwrap 3 4 import cibuildwheel 5 import cibuildwheel.linux, cibuildwheel.windows, cibuildwheel.macos 6 from cibuildwheel.environment import parse_environment, EnvironmentParseError 7 from cibuildwheel.util import BuildSelector, Unbuffered 8 9 def get_option_from_environment(option_name, platform=None, default=None): 10 ''' 11 Returns an option from the environment, optionally scoped by the platform. 12 13 Example: 14 get_option_from_environment('CIBW_COLOR', platform='macos') 15 16 This will return the value of CIBW_COLOR_MACOS if it exists, otherwise the value of 17 CIBW_COLOR. 18 ''' 19 if platform: 20 option = os.environ.get('%s_%s' % (option_name, platform.upper())) 21 if option is not None: 22 return option 23 24 return os.environ.get(option_name, default) 25 26 27 def main(): 28 parser = argparse.ArgumentParser( 29 description='Build wheels for all the platforms.', 30 epilog=('Most options are supplied via environment variables. ' 31 'See https://github.com/joerick/cibuildwheel#options for info.')) 32 33 parser.add_argument('--platform', 34 choices=['auto', 'linux', 'macos', 'windows'], 35 default=os.environ.get('CIBW_PLATFORM', 'auto'), 36 help=('Platform to build for. For "linux" you need docker running, on Mac ' 37 'or Linux. For "macos", you need a Mac machine, and note that this ' 38 'script is going to automatically install MacPython on your system, ' 39 'so don\'t run on your development machine. For "windows", you need to ' 40 'run in Windows, and it will build and test for all versions of ' 41 'Python at C:\\PythonXX[-x64]. Default: auto.')) 42 parser.add_argument('--output-dir', 43 default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'), 44 help='Destination folder for the wheels.') 45 parser.add_argument('project_dir', 46 default='.', 47 nargs='?', 48 help=('Path to the project that you want wheels for. Default: the current ' 49 'directory.')) 50 51 parser.add_argument('--print-build-identifiers', 52 action='store_true', 53 help='Print the build identifiers matched by the current invocation and exit.') 54 55 args = parser.parse_args() 56 57 detect_obsolete_options() 58 59 if args.platform != 'auto': 60 platform = args.platform 61 else: 62 platform = None 63 64 if os.environ.get('TRAVIS_OS_NAME') == 'linux': 65 platform = 'linux' 66 elif os.environ.get('TRAVIS_OS_NAME') == 'osx': 67 platform = 'macos' 68 elif os.environ.get('TRAVIS_OS_NAME') == 'windows': 69 platform = 'windows' 70 elif 'APPVEYOR' in os.environ: 71 platform = 'windows' 72 elif 'BITRISE_BUILD_NUMBER' in os.environ: 73 platform = 'macos' 74 elif os.environ.get('CIRCLECI'): 75 if sys.platform.startswith('linux'): 76 platform = 'linux' 77 elif sys.platform.startswith('darwin'): 78 platform = 'macos' 79 elif 'AZURE_HTTP_USER_AGENT' in os.environ: 80 if os.environ['AGENT_OS'] == 'Linux': 81 platform = 'linux' 82 elif os.environ['AGENT_OS'] == 'Darwin': 83 platform = 'macos' 84 elif os.environ['AGENT_OS'] == 'Windows_NT': 85 platform = 'windows' 86 87 if platform is None: 88 print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, ' 89 'Travis CI, AppVeyor, and CircleCI are supported. You can run on your development ' 90 'machine using the --platform argument. Check --help output for more ' 91 'information.', 92 file=sys.stderr) 93 exit(2) 94 95 output_dir = args.output_dir 96 test_command = get_option_from_environment('CIBW_TEST_COMMAND', platform=platform) 97 test_requires = get_option_from_environment('CIBW_TEST_REQUIRES', platform=platform, default='').split() 98 test_extras = get_option_from_environment('CIBW_TEST_EXTRAS', platform=platform, default='') 99 project_dir = args.project_dir 100 before_build = get_option_from_environment('CIBW_BEFORE_BUILD', platform=platform) 101 build_verbosity = get_option_from_environment('CIBW_BUILD_VERBOSITY', platform=platform, default='') 102 build_config, skip_config = os.environ.get('CIBW_BUILD', '*'), os.environ.get('CIBW_SKIP', '') 103 environment_config = get_option_from_environment('CIBW_ENVIRONMENT', platform=platform, default='') 104 105 if test_extras: 106 test_extras = '[{0}]'.format(test_extras) 107 108 try: 109 build_verbosity = min(3, max(-3, int(build_verbosity))) 110 except ValueError: 111 build_verbosity = 0 112 113 try: 114 environment = parse_environment(environment_config) 115 except (EnvironmentParseError, ValueError) as e: 116 print('cibuildwheel: Malformed environment option "%s"' % environment_config, file=sys.stderr) 117 import traceback 118 traceback.print_exc(None, sys.stderr) 119 exit(2) 120 121 build_selector = BuildSelector(build_config, skip_config) 122 123 # Add CIBUILDWHEEL environment variable 124 # This needs to be passed on to the docker container in linux.py 125 os.environ['CIBUILDWHEEL'] = '1' 126 127 if not os.path.exists(os.path.join(project_dir, 'setup.py')): 128 print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr) 129 exit(2) 130 131 if args.print_build_identifiers: 132 print_build_identifiers(platform, build_selector) 133 exit(0) 134 135 build_options = dict( 136 project_dir=project_dir, 137 output_dir=output_dir, 138 test_command=test_command, 139 test_requires=test_requires, 140 test_extras=test_extras, 141 before_build=before_build, 142 build_verbosity=build_verbosity, 143 build_selector=build_selector, 144 environment=environment, 145 ) 146 147 if platform == 'linux': 148 manylinux_x86_64_image = os.environ.get('CIBW_MANYLINUX_X86_64_IMAGE', 'manylinux2010') 149 manylinux_i686_image = os.environ.get('CIBW_MANYLINUX_I686_IMAGE', 'manylinux2010') 150 151 default_manylinux_images_x86_64 = {'manylinux1': 'quay.io/pypa/manylinux1_x86_64', 152 'manylinux2010': 'quay.io/pypa/manylinux2010_x86_64'} 153 default_manylinux_images_i686 = {'manylinux1': 'quay.io/pypa/manylinux1_i686', 154 'manylinux2010': 'quay.io/pypa/manylinux2010_i686'} 155 156 build_options.update( 157 manylinux_images={'x86_64': default_manylinux_images_x86_64.get(manylinux_x86_64_image) or manylinux_x86_64_image, 158 'i686': default_manylinux_images_i686.get(manylinux_i686_image) or manylinux_i686_image}, 159 ) 160 elif platform == 'macos': 161 pass 162 elif platform == 'windows': 163 pass 164 165 # Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print' 166 sys.stdout = Unbuffered(sys.stdout) 167 168 print_preamble(platform, build_options) 169 170 if not os.path.exists(output_dir): 171 os.makedirs(output_dir) 172 173 if platform == 'linux': 174 cibuildwheel.linux.build(**build_options) 175 elif platform == 'windows': 176 cibuildwheel.windows.build(**build_options) 177 elif platform == 'macos': 178 cibuildwheel.macos.build(**build_options) 179 else: 180 raise Exception('Unsupported platform') 181 182 183 def detect_obsolete_options(): 184 # Check the old 'MANYLINUX1_*_IMAGE' options 185 for (deprecated, alternative) in [('CIBW_MANYLINUX1_X86_64_IMAGE', 'CIBW_MANYLINUX_X86_64_IMAGE'), 186 ('CIBW_MANYLINUX1_I686_IMAGE', 'CIBW_MANYLINUX_I686_IMAGE')]: 187 if deprecated in os.environ: 188 print("'{}' has been deprecated, and will be removed in a future release. Use the option '{}' instead.".format(deprecated, alternative)) 189 if alternative not in os.environ: 190 print("Using value of option '{}' as replacement for '{}'".format(deprecated, alternative)) 191 os.environ[alternative] = os.environ[deprecated] 192 else: 193 print("Option '{}' is not empty. Please unset '{}'".format(alternative, deprecated)) 194 exit(2) 195 196 # Check for 'manylinux1' in the 'CIBW_BUILD' and 'CIBW_SKIP' options 197 for deprecated in ['CIBW_BUILD', 'CIBW_SKIP']: 198 if deprecated in os.environ and 'manylinux1' in os.environ[deprecated]: 199 print("Build identifiers with 'manylinux1' been deprecated. Replacing all occurences of 'manylinux1' by 'manylinux' in the option '{}'".format(deprecated)) 200 os.environ[deprecated] = os.environ[deprecated].replace('manylinux1', 'manylinux') 201 202 203 def print_preamble(platform, build_options): 204 print(textwrap.dedent(''' 205 _ _ _ _ _ _ _ 206 ___|_| |_ _ _|_| |_| |_ _ _| |_ ___ ___| | 207 | _| | . | | | | | . | | | | | -_| -_| | 208 |___|_|___|___|_|_|___|_____|_|_|___|___|_| 209 ''')) 210 211 print('cibuildwheel version %s\n' % cibuildwheel.__version__) 212 213 214 print('Build options:') 215 print(' platform: %r' % platform) 216 for option, value in sorted(build_options.items()): 217 print(' %s: %r' % (option, value)) 218 219 warnings = detect_warnings(platform, build_options) 220 if warnings: 221 print('\nWarnings:') 222 for warning in warnings: 223 print(' ' + warning) 224 225 print('\nHere we go!\n') 226 227 228 def print_build_identifiers(platform, build_selector): 229 if platform == 'linux': 230 python_configurations = cibuildwheel.linux.get_python_configurations(build_selector) 231 elif platform == 'windows': 232 python_configurations = cibuildwheel.windows.get_python_configurations(build_selector) 233 elif platform == 'macos': 234 python_configurations = cibuildwheel.macos.get_python_configurations(build_selector) 235 else: 236 python_configurations = [] 237 238 for config in python_configurations: 239 print(config.identifier) 240 241 242 def detect_warnings(platform, build_options): 243 warnings = [] 244 245 # warn about deprecated {python} and {pip} 246 for option_name in ['test_command', 'before_build']: 247 option_value = build_options.get(option_name) 248 249 if option_value: 250 if '{python}' in option_value or '{pip}' in option_value: 251 warnings.append(option_name + ": '{python}' and '{pip}' are no longer needed, and will be removed in a future release. Simply use 'python' or 'pip' instead.") 252 253 return warnings 254 255 256 if __name__ == '__main__': 257 main() 258 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py --- a/cibuildwheel/__main__.py +++ b/cibuildwheel/__main__.py @@ -24,6 +24,12 @@ return os.environ.get(option_name, default) +def strtobool(val): + if val.lower() in ('y', 'yes', 't', 'true', 'on', '1'): + return True + return False + + def main(): parser = argparse.ArgumentParser( description='Build wheels for all the platforms.', @@ -38,7 +44,7 @@ 'script is going to automatically install MacPython on your system, ' 'so don\'t run on your development machine. For "windows", you need to ' 'run in Windows, and it will build and test for all versions of ' - 'Python at C:\\PythonXX[-x64]. Default: auto.')) + 'Python. Default: auto.')) parser.add_argument('--output-dir', default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'), help='Destination folder for the wheels.') @@ -59,36 +65,19 @@ if args.platform != 'auto': platform = args.platform else: - platform = None - - if os.environ.get('TRAVIS_OS_NAME') == 'linux': - platform = 'linux' - elif os.environ.get('TRAVIS_OS_NAME') == 'osx': - platform = 'macos' - elif os.environ.get('TRAVIS_OS_NAME') == 'windows': - platform = 'windows' - elif 'APPVEYOR' in os.environ: - platform = 'windows' - elif 'BITRISE_BUILD_NUMBER' in os.environ: - platform = 'macos' - elif os.environ.get('CIRCLECI'): + ci = strtobool(os.environ.get('CI', 'false')) or 'BITRISE_BUILD_NUMBER' in os.environ or 'AZURE_HTTP_USER_AGENT' in os.environ + if ci: if sys.platform.startswith('linux'): platform = 'linux' - elif sys.platform.startswith('darwin'): + elif sys.platform == 'darwin': platform = 'macos' - elif 'AZURE_HTTP_USER_AGENT' in os.environ: - if os.environ['AGENT_OS'] == 'Linux': - platform = 'linux' - elif os.environ['AGENT_OS'] == 'Darwin': - platform = 'macos' - elif os.environ['AGENT_OS'] == 'Windows_NT': + elif sys.platform == 'win32': platform = 'windows' - if platform is None: print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, ' - 'Travis CI, AppVeyor, and CircleCI are supported. You can run on your development ' - 'machine using the --platform argument. Check --help output for more ' - 'information.', + 'Travis CI, AppVeyor, Azure Pipelines and CircleCI are supported. You can run on your ' + 'development machine or other CI providers using the --platform argument. Check --help ' + 'output for more information.', file=sys.stderr) exit(2)
{"golden_diff": "diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py\n--- a/cibuildwheel/__main__.py\n+++ b/cibuildwheel/__main__.py\n@@ -24,6 +24,12 @@\n return os.environ.get(option_name, default)\n \n \n+def strtobool(val):\n+ if val.lower() in ('y', 'yes', 't', 'true', 'on', '1'):\n+ return True\n+ return False\n+\n+\n def main():\n parser = argparse.ArgumentParser(\n description='Build wheels for all the platforms.',\n@@ -38,7 +44,7 @@\n 'script is going to automatically install MacPython on your system, '\n 'so don\\'t run on your development machine. For \"windows\", you need to '\n 'run in Windows, and it will build and test for all versions of '\n- 'Python at C:\\\\PythonXX[-x64]. Default: auto.'))\n+ 'Python. Default: auto.'))\n parser.add_argument('--output-dir',\n default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'),\n help='Destination folder for the wheels.')\n@@ -59,36 +65,19 @@\n if args.platform != 'auto':\n platform = args.platform\n else:\n- platform = None\n-\n- if os.environ.get('TRAVIS_OS_NAME') == 'linux':\n- platform = 'linux'\n- elif os.environ.get('TRAVIS_OS_NAME') == 'osx':\n- platform = 'macos'\n- elif os.environ.get('TRAVIS_OS_NAME') == 'windows':\n- platform = 'windows'\n- elif 'APPVEYOR' in os.environ:\n- platform = 'windows'\n- elif 'BITRISE_BUILD_NUMBER' in os.environ:\n- platform = 'macos'\n- elif os.environ.get('CIRCLECI'):\n+ ci = strtobool(os.environ.get('CI', 'false')) or 'BITRISE_BUILD_NUMBER' in os.environ or 'AZURE_HTTP_USER_AGENT' in os.environ\n+ if ci:\n if sys.platform.startswith('linux'):\n platform = 'linux'\n- elif sys.platform.startswith('darwin'):\n+ elif sys.platform == 'darwin':\n platform = 'macos'\n- elif 'AZURE_HTTP_USER_AGENT' in os.environ:\n- if os.environ['AGENT_OS'] == 'Linux':\n- platform = 'linux'\n- elif os.environ['AGENT_OS'] == 'Darwin':\n- platform = 'macos'\n- elif os.environ['AGENT_OS'] == 'Windows_NT':\n+ elif sys.platform == 'win32':\n platform = 'windows'\n-\n if platform is None:\n print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, '\n- 'Travis CI, AppVeyor, and CircleCI are supported. You can run on your development '\n- 'machine using the --platform argument. Check --help output for more '\n- 'information.',\n+ 'Travis CI, AppVeyor, Azure Pipelines and CircleCI are supported. You can run on your '\n+ 'development machine or other CI providers using the --platform argument. Check --help '\n+ 'output for more information.',\n file=sys.stderr)\n exit(2)\n", "issue": "Linux support in AppVeyor\nFYI\r\nhttps://www.appveyor.com/blog/2018/03/06/appveyor-for-linux/\r\nhttps://www.appveyor.com/blog/2018/05/15/appveyor-for-linux-is-generally-available/\r\nhttps://www.appveyor.com/docs/getting-started-with-appveyor-for-linux/\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse, os, subprocess, sys, textwrap\n\nimport cibuildwheel\nimport cibuildwheel.linux, cibuildwheel.windows, cibuildwheel.macos\nfrom cibuildwheel.environment import parse_environment, EnvironmentParseError\nfrom cibuildwheel.util import BuildSelector, Unbuffered\n\ndef get_option_from_environment(option_name, platform=None, default=None):\n '''\n Returns an option from the environment, optionally scoped by the platform.\n\n Example:\n get_option_from_environment('CIBW_COLOR', platform='macos')\n\n This will return the value of CIBW_COLOR_MACOS if it exists, otherwise the value of\n CIBW_COLOR.\n '''\n if platform:\n option = os.environ.get('%s_%s' % (option_name, platform.upper()))\n if option is not None:\n return option\n\n return os.environ.get(option_name, default)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Build wheels for all the platforms.',\n epilog=('Most options are supplied via environment variables. '\n 'See https://github.com/joerick/cibuildwheel#options for info.'))\n\n parser.add_argument('--platform',\n choices=['auto', 'linux', 'macos', 'windows'],\n default=os.environ.get('CIBW_PLATFORM', 'auto'),\n help=('Platform to build for. For \"linux\" you need docker running, on Mac '\n 'or Linux. For \"macos\", you need a Mac machine, and note that this '\n 'script is going to automatically install MacPython on your system, '\n 'so don\\'t run on your development machine. For \"windows\", you need to '\n 'run in Windows, and it will build and test for all versions of '\n 'Python at C:\\\\PythonXX[-x64]. Default: auto.'))\n parser.add_argument('--output-dir',\n default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'),\n help='Destination folder for the wheels.')\n parser.add_argument('project_dir',\n default='.',\n nargs='?',\n help=('Path to the project that you want wheels for. Default: the current '\n 'directory.'))\n\n parser.add_argument('--print-build-identifiers',\n action='store_true',\n help='Print the build identifiers matched by the current invocation and exit.')\n\n args = parser.parse_args()\n\n detect_obsolete_options()\n\n if args.platform != 'auto':\n platform = args.platform\n else:\n platform = None\n\n if os.environ.get('TRAVIS_OS_NAME') == 'linux':\n platform = 'linux'\n elif os.environ.get('TRAVIS_OS_NAME') == 'osx':\n platform = 'macos'\n elif os.environ.get('TRAVIS_OS_NAME') == 'windows':\n platform = 'windows'\n elif 'APPVEYOR' in os.environ:\n platform = 'windows'\n elif 'BITRISE_BUILD_NUMBER' in os.environ:\n platform = 'macos'\n elif os.environ.get('CIRCLECI'):\n if sys.platform.startswith('linux'):\n platform = 'linux'\n elif sys.platform.startswith('darwin'):\n platform = 'macos'\n elif 'AZURE_HTTP_USER_AGENT' in os.environ:\n if os.environ['AGENT_OS'] == 'Linux':\n platform = 'linux'\n elif os.environ['AGENT_OS'] == 'Darwin':\n platform = 'macos'\n elif os.environ['AGENT_OS'] == 'Windows_NT':\n platform = 'windows'\n\n if platform is None:\n print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, '\n 'Travis CI, AppVeyor, and CircleCI are supported. You can run on your development '\n 'machine using the --platform argument. Check --help output for more '\n 'information.',\n file=sys.stderr)\n exit(2)\n\n output_dir = args.output_dir\n test_command = get_option_from_environment('CIBW_TEST_COMMAND', platform=platform)\n test_requires = get_option_from_environment('CIBW_TEST_REQUIRES', platform=platform, default='').split()\n test_extras = get_option_from_environment('CIBW_TEST_EXTRAS', platform=platform, default='')\n project_dir = args.project_dir\n before_build = get_option_from_environment('CIBW_BEFORE_BUILD', platform=platform)\n build_verbosity = get_option_from_environment('CIBW_BUILD_VERBOSITY', platform=platform, default='')\n build_config, skip_config = os.environ.get('CIBW_BUILD', '*'), os.environ.get('CIBW_SKIP', '')\n environment_config = get_option_from_environment('CIBW_ENVIRONMENT', platform=platform, default='')\n\n if test_extras:\n test_extras = '[{0}]'.format(test_extras)\n\n try:\n build_verbosity = min(3, max(-3, int(build_verbosity)))\n except ValueError:\n build_verbosity = 0\n\n try:\n environment = parse_environment(environment_config)\n except (EnvironmentParseError, ValueError) as e:\n print('cibuildwheel: Malformed environment option \"%s\"' % environment_config, file=sys.stderr)\n import traceback\n traceback.print_exc(None, sys.stderr)\n exit(2)\n\n build_selector = BuildSelector(build_config, skip_config)\n\n # Add CIBUILDWHEEL environment variable\n # This needs to be passed on to the docker container in linux.py\n os.environ['CIBUILDWHEEL'] = '1'\n\n if not os.path.exists(os.path.join(project_dir, 'setup.py')):\n print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr)\n exit(2)\n\n if args.print_build_identifiers:\n print_build_identifiers(platform, build_selector)\n exit(0)\n\n build_options = dict(\n project_dir=project_dir,\n output_dir=output_dir,\n test_command=test_command,\n test_requires=test_requires,\n test_extras=test_extras,\n before_build=before_build,\n build_verbosity=build_verbosity,\n build_selector=build_selector,\n environment=environment,\n )\n\n if platform == 'linux':\n manylinux_x86_64_image = os.environ.get('CIBW_MANYLINUX_X86_64_IMAGE', 'manylinux2010')\n manylinux_i686_image = os.environ.get('CIBW_MANYLINUX_I686_IMAGE', 'manylinux2010')\n\n default_manylinux_images_x86_64 = {'manylinux1': 'quay.io/pypa/manylinux1_x86_64',\n 'manylinux2010': 'quay.io/pypa/manylinux2010_x86_64'}\n default_manylinux_images_i686 = {'manylinux1': 'quay.io/pypa/manylinux1_i686',\n 'manylinux2010': 'quay.io/pypa/manylinux2010_i686'}\n\n build_options.update(\n manylinux_images={'x86_64': default_manylinux_images_x86_64.get(manylinux_x86_64_image) or manylinux_x86_64_image,\n 'i686': default_manylinux_images_i686.get(manylinux_i686_image) or manylinux_i686_image},\n )\n elif platform == 'macos':\n pass\n elif platform == 'windows':\n pass\n\n # Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print'\n sys.stdout = Unbuffered(sys.stdout)\n\n print_preamble(platform, build_options)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if platform == 'linux':\n cibuildwheel.linux.build(**build_options)\n elif platform == 'windows':\n cibuildwheel.windows.build(**build_options)\n elif platform == 'macos':\n cibuildwheel.macos.build(**build_options)\n else:\n raise Exception('Unsupported platform')\n\n\ndef detect_obsolete_options():\n # Check the old 'MANYLINUX1_*_IMAGE' options\n for (deprecated, alternative) in [('CIBW_MANYLINUX1_X86_64_IMAGE', 'CIBW_MANYLINUX_X86_64_IMAGE'),\n ('CIBW_MANYLINUX1_I686_IMAGE', 'CIBW_MANYLINUX_I686_IMAGE')]:\n if deprecated in os.environ:\n print(\"'{}' has been deprecated, and will be removed in a future release. Use the option '{}' instead.\".format(deprecated, alternative))\n if alternative not in os.environ:\n print(\"Using value of option '{}' as replacement for '{}'\".format(deprecated, alternative))\n os.environ[alternative] = os.environ[deprecated]\n else:\n print(\"Option '{}' is not empty. Please unset '{}'\".format(alternative, deprecated))\n exit(2)\n\n # Check for 'manylinux1' in the 'CIBW_BUILD' and 'CIBW_SKIP' options\n for deprecated in ['CIBW_BUILD', 'CIBW_SKIP']:\n if deprecated in os.environ and 'manylinux1' in os.environ[deprecated]:\n print(\"Build identifiers with 'manylinux1' been deprecated. Replacing all occurences of 'manylinux1' by 'manylinux' in the option '{}'\".format(deprecated))\n os.environ[deprecated] = os.environ[deprecated].replace('manylinux1', 'manylinux')\n\n\ndef print_preamble(platform, build_options):\n print(textwrap.dedent('''\n _ _ _ _ _ _ _\n ___|_| |_ _ _|_| |_| |_ _ _| |_ ___ ___| |\n | _| | . | | | | | . | | | | | -_| -_| |\n |___|_|___|___|_|_|___|_____|_|_|___|___|_|\n '''))\n\n print('cibuildwheel version %s\\n' % cibuildwheel.__version__)\n\n\n print('Build options:')\n print(' platform: %r' % platform)\n for option, value in sorted(build_options.items()):\n print(' %s: %r' % (option, value))\n\n warnings = detect_warnings(platform, build_options)\n if warnings:\n print('\\nWarnings:')\n for warning in warnings:\n print(' ' + warning)\n\n print('\\nHere we go!\\n')\n\n\ndef print_build_identifiers(platform, build_selector):\n if platform == 'linux':\n python_configurations = cibuildwheel.linux.get_python_configurations(build_selector)\n elif platform == 'windows':\n python_configurations = cibuildwheel.windows.get_python_configurations(build_selector)\n elif platform == 'macos':\n python_configurations = cibuildwheel.macos.get_python_configurations(build_selector)\n else:\n python_configurations = []\n\n for config in python_configurations:\n print(config.identifier)\n\n\ndef detect_warnings(platform, build_options):\n warnings = []\n\n # warn about deprecated {python} and {pip}\n for option_name in ['test_command', 'before_build']:\n option_value = build_options.get(option_name)\n\n if option_value:\n if '{python}' in option_value or '{pip}' in option_value:\n warnings.append(option_name + \": '{python}' and '{pip}' are no longer needed, and will be removed in a future release. Simply use 'python' or 'pip' instead.\")\n\n return warnings\n\n\nif __name__ == '__main__':\n main()\n", "path": "cibuildwheel/__main__.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse, os, subprocess, sys, textwrap\n\nimport cibuildwheel\nimport cibuildwheel.linux, cibuildwheel.windows, cibuildwheel.macos\nfrom cibuildwheel.environment import parse_environment, EnvironmentParseError\nfrom cibuildwheel.util import BuildSelector, Unbuffered\n\ndef get_option_from_environment(option_name, platform=None, default=None):\n '''\n Returns an option from the environment, optionally scoped by the platform.\n\n Example:\n get_option_from_environment('CIBW_COLOR', platform='macos')\n\n This will return the value of CIBW_COLOR_MACOS if it exists, otherwise the value of\n CIBW_COLOR.\n '''\n if platform:\n option = os.environ.get('%s_%s' % (option_name, platform.upper()))\n if option is not None:\n return option\n\n return os.environ.get(option_name, default)\n\n\ndef strtobool(val):\n if val.lower() in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n return False\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Build wheels for all the platforms.',\n epilog=('Most options are supplied via environment variables. '\n 'See https://github.com/joerick/cibuildwheel#options for info.'))\n\n parser.add_argument('--platform',\n choices=['auto', 'linux', 'macos', 'windows'],\n default=os.environ.get('CIBW_PLATFORM', 'auto'),\n help=('Platform to build for. For \"linux\" you need docker running, on Mac '\n 'or Linux. For \"macos\", you need a Mac machine, and note that this '\n 'script is going to automatically install MacPython on your system, '\n 'so don\\'t run on your development machine. For \"windows\", you need to '\n 'run in Windows, and it will build and test for all versions of '\n 'Python. Default: auto.'))\n parser.add_argument('--output-dir',\n default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'),\n help='Destination folder for the wheels.')\n parser.add_argument('project_dir',\n default='.',\n nargs='?',\n help=('Path to the project that you want wheels for. Default: the current '\n 'directory.'))\n\n parser.add_argument('--print-build-identifiers',\n action='store_true',\n help='Print the build identifiers matched by the current invocation and exit.')\n\n args = parser.parse_args()\n\n detect_obsolete_options()\n\n if args.platform != 'auto':\n platform = args.platform\n else:\n ci = strtobool(os.environ.get('CI', 'false')) or 'BITRISE_BUILD_NUMBER' in os.environ or 'AZURE_HTTP_USER_AGENT' in os.environ\n if ci:\n if sys.platform.startswith('linux'):\n platform = 'linux'\n elif sys.platform == 'darwin':\n platform = 'macos'\n elif sys.platform == 'win32':\n platform = 'windows'\n if platform is None:\n print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, '\n 'Travis CI, AppVeyor, Azure Pipelines and CircleCI are supported. You can run on your '\n 'development machine or other CI providers using the --platform argument. Check --help '\n 'output for more information.',\n file=sys.stderr)\n exit(2)\n\n output_dir = args.output_dir\n test_command = get_option_from_environment('CIBW_TEST_COMMAND', platform=platform)\n test_requires = get_option_from_environment('CIBW_TEST_REQUIRES', platform=platform, default='').split()\n test_extras = get_option_from_environment('CIBW_TEST_EXTRAS', platform=platform, default='')\n project_dir = args.project_dir\n before_build = get_option_from_environment('CIBW_BEFORE_BUILD', platform=platform)\n build_verbosity = get_option_from_environment('CIBW_BUILD_VERBOSITY', platform=platform, default='')\n build_config, skip_config = os.environ.get('CIBW_BUILD', '*'), os.environ.get('CIBW_SKIP', '')\n environment_config = get_option_from_environment('CIBW_ENVIRONMENT', platform=platform, default='')\n\n if test_extras:\n test_extras = '[{0}]'.format(test_extras)\n\n try:\n build_verbosity = min(3, max(-3, int(build_verbosity)))\n except ValueError:\n build_verbosity = 0\n\n try:\n environment = parse_environment(environment_config)\n except (EnvironmentParseError, ValueError) as e:\n print('cibuildwheel: Malformed environment option \"%s\"' % environment_config, file=sys.stderr)\n import traceback\n traceback.print_exc(None, sys.stderr)\n exit(2)\n\n build_selector = BuildSelector(build_config, skip_config)\n\n # Add CIBUILDWHEEL environment variable\n # This needs to be passed on to the docker container in linux.py\n os.environ['CIBUILDWHEEL'] = '1'\n\n if not os.path.exists(os.path.join(project_dir, 'setup.py')):\n print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr)\n exit(2)\n\n if args.print_build_identifiers:\n print_build_identifiers(platform, build_selector)\n exit(0)\n\n build_options = dict(\n project_dir=project_dir,\n output_dir=output_dir,\n test_command=test_command,\n test_requires=test_requires,\n test_extras=test_extras,\n before_build=before_build,\n build_verbosity=build_verbosity,\n build_selector=build_selector,\n environment=environment,\n )\n\n if platform == 'linux':\n manylinux_x86_64_image = os.environ.get('CIBW_MANYLINUX_X86_64_IMAGE', 'manylinux2010')\n manylinux_i686_image = os.environ.get('CIBW_MANYLINUX_I686_IMAGE', 'manylinux2010')\n\n default_manylinux_images_x86_64 = {'manylinux1': 'quay.io/pypa/manylinux1_x86_64',\n 'manylinux2010': 'quay.io/pypa/manylinux2010_x86_64'}\n default_manylinux_images_i686 = {'manylinux1': 'quay.io/pypa/manylinux1_i686',\n 'manylinux2010': 'quay.io/pypa/manylinux2010_i686'}\n\n build_options.update(\n manylinux_images={'x86_64': default_manylinux_images_x86_64.get(manylinux_x86_64_image) or manylinux_x86_64_image,\n 'i686': default_manylinux_images_i686.get(manylinux_i686_image) or manylinux_i686_image},\n )\n elif platform == 'macos':\n pass\n elif platform == 'windows':\n pass\n\n # Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print'\n sys.stdout = Unbuffered(sys.stdout)\n\n print_preamble(platform, build_options)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if platform == 'linux':\n cibuildwheel.linux.build(**build_options)\n elif platform == 'windows':\n cibuildwheel.windows.build(**build_options)\n elif platform == 'macos':\n cibuildwheel.macos.build(**build_options)\n else:\n raise Exception('Unsupported platform')\n\n\ndef detect_obsolete_options():\n # Check the old 'MANYLINUX1_*_IMAGE' options\n for (deprecated, alternative) in [('CIBW_MANYLINUX1_X86_64_IMAGE', 'CIBW_MANYLINUX_X86_64_IMAGE'),\n ('CIBW_MANYLINUX1_I686_IMAGE', 'CIBW_MANYLINUX_I686_IMAGE')]:\n if deprecated in os.environ:\n print(\"'{}' has been deprecated, and will be removed in a future release. Use the option '{}' instead.\".format(deprecated, alternative))\n if alternative not in os.environ:\n print(\"Using value of option '{}' as replacement for '{}'\".format(deprecated, alternative))\n os.environ[alternative] = os.environ[deprecated]\n else:\n print(\"Option '{}' is not empty. Please unset '{}'\".format(alternative, deprecated))\n exit(2)\n\n # Check for 'manylinux1' in the 'CIBW_BUILD' and 'CIBW_SKIP' options\n for deprecated in ['CIBW_BUILD', 'CIBW_SKIP']:\n if deprecated in os.environ and 'manylinux1' in os.environ[deprecated]:\n print(\"Build identifiers with 'manylinux1' been deprecated. Replacing all occurences of 'manylinux1' by 'manylinux' in the option '{}'\".format(deprecated))\n os.environ[deprecated] = os.environ[deprecated].replace('manylinux1', 'manylinux')\n\n\ndef print_preamble(platform, build_options):\n print(textwrap.dedent('''\n _ _ _ _ _ _ _\n ___|_| |_ _ _|_| |_| |_ _ _| |_ ___ ___| |\n | _| | . | | | | | . | | | | | -_| -_| |\n |___|_|___|___|_|_|___|_____|_|_|___|___|_|\n '''))\n\n print('cibuildwheel version %s\\n' % cibuildwheel.__version__)\n\n\n print('Build options:')\n print(' platform: %r' % platform)\n for option, value in sorted(build_options.items()):\n print(' %s: %r' % (option, value))\n\n warnings = detect_warnings(platform, build_options)\n if warnings:\n print('\\nWarnings:')\n for warning in warnings:\n print(' ' + warning)\n\n print('\\nHere we go!\\n')\n\n\ndef print_build_identifiers(platform, build_selector):\n if platform == 'linux':\n python_configurations = cibuildwheel.linux.get_python_configurations(build_selector)\n elif platform == 'windows':\n python_configurations = cibuildwheel.windows.get_python_configurations(build_selector)\n elif platform == 'macos':\n python_configurations = cibuildwheel.macos.get_python_configurations(build_selector)\n else:\n python_configurations = []\n\n for config in python_configurations:\n print(config.identifier)\n\n\ndef detect_warnings(platform, build_options):\n warnings = []\n\n # warn about deprecated {python} and {pip}\n for option_name in ['test_command', 'before_build']:\n option_value = build_options.get(option_name)\n\n if option_value:\n if '{python}' in option_value or '{pip}' in option_value:\n warnings.append(option_name + \": '{python}' and '{pip}' are no longer needed, and will be removed in a future release. Simply use 'python' or 'pip' instead.\")\n\n return warnings\n\n\nif __name__ == '__main__':\n main()\n", "path": "cibuildwheel/__main__.py"}]}
3,564
732
gh_patches_debug_15830
rasdani/github-patches
git_diff
Parsl__parsl-666
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add "all" install option We have several subpackages now. I think it would be good if we had an option to install all subpackages. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 3 with open('parsl/version.py') as f: 4 exec(f.read()) 5 6 with open('requirements.txt') as f: 7 install_requires = f.readlines() 8 9 setup( 10 name='parsl', 11 version=VERSION, 12 description='Simple data dependent workflows in Python', 13 long_description='Simple parallel workflows system for Python', 14 url='https://github.com/Parsl/parsl', 15 author='The Parsl Team', 16 author_email='parsl@googlegroups.com', 17 license='Apache 2.0', 18 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION), 19 package_data={'': ['LICENSE']}, 20 packages=find_packages(), 21 install_requires=install_requires, 22 scripts = ['parsl/executors/high_throughput/process_worker_pool.py', 23 'parsl/executors/extreme_scale/mpi_worker_pool.py'], 24 extras_require = { 25 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'], 26 'aws' : ['boto3'], 27 'jetstream' : ['python-novaclient'], 28 'extreme_scale' : ['mpi4py'], 29 'docs' : ['nbsphinx', 'sphinx_rtd_theme'], 30 'google_cloud' : ['google-auth', 'google-api-python-client'] 31 }, 32 classifiers = [ 33 # Maturity 34 'Development Status :: 3 - Alpha', 35 # Intended audience 36 'Intended Audience :: Developers', 37 # Licence, must match with licence above 38 'License :: OSI Approved :: Apache Software License', 39 # Python versions supported 40 'Programming Language :: Python :: 3.5', 41 'Programming Language :: Python :: 3.6', 42 ], 43 keywords=['Workflows', 'Scientific computing'], 44 ) 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -24,10 +24,17 @@ extras_require = { 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'], 'aws' : ['boto3'], - 'jetstream' : ['python-novaclient'], + # Jetstream is deprecated since the interface has not been maintained. + # 'jetstream' : ['python-novaclient'], 'extreme_scale' : ['mpi4py'], 'docs' : ['nbsphinx', 'sphinx_rtd_theme'], - 'google_cloud' : ['google-auth', 'google-api-python-client'] + 'google_cloud' : ['google-auth', 'google-api-python-client'], + 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy', + 'boto3', + 'mpi4py', + 'nbsphinx', 'sphinx_rtd_theme', + 'google-auth', 'google-api-python-client'] + }, classifiers = [ # Maturity
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,10 +24,17 @@\n extras_require = {\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n- 'jetstream' : ['python-novaclient'],\n+ # Jetstream is deprecated since the interface has not been maintained.\n+ # 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n- 'google_cloud' : ['google-auth', 'google-api-python-client']\n+ 'google_cloud' : ['google-auth', 'google-api-python-client'],\n+ 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',\n+ 'boto3',\n+ 'mpi4py',\n+ 'nbsphinx', 'sphinx_rtd_theme',\n+ 'google-auth', 'google-api-python-client']\n+\n },\n classifiers = [\n # Maturity\n", "issue": "Add \"all\" install option\nWe have several subpackages now. I think it would be good if we had an option to install all subpackages. \n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='parsl@googlegroups.com',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py'],\n extras_require = {\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client']\n },\n classifiers = [\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='parsl@googlegroups.com',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py'],\n extras_require = {\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n # Jetstream is deprecated since the interface has not been maintained.\n # 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',\n 'boto3',\n 'mpi4py',\n 'nbsphinx', 'sphinx_rtd_theme',\n 'google-auth', 'google-api-python-client']\n\n },\n classifiers = [\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py"}]}
755
244
gh_patches_debug_15111
rasdani/github-patches
git_diff
wagtail__wagtail-2621
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add default icon for TableBlock As per https://github.com/torchbox/wagtail/pull/1705#issuecomment-216053655. Best to do this after #2417 is merged, to avoid conflicts. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/contrib/table_block/fields.py` Content: ``` 1 from __future__ import absolute_import, unicode_literals 2 3 import json 4 5 from django import forms 6 from django.template.loader import render_to_string 7 from django.utils import translation 8 from django.utils.functional import cached_property 9 10 from wagtail.utils.widgets import WidgetWithScript 11 from wagtail.wagtailcore.blocks import FieldBlock 12 13 14 class TableInput(WidgetWithScript, forms.HiddenInput): 15 16 def __init__(self, table_options=None, attrs=None): 17 self.table_options = table_options 18 super(TableInput, self).__init__(attrs=attrs) 19 20 def render(self, name, value, attrs=None): 21 original_field_html = super(TableInput, self).render(name, value, attrs) 22 return render_to_string("table_block/widgets/table.html", { 23 'original_field_html': original_field_html, 24 'attrs': attrs, 25 'value': value, 26 }) 27 28 def render_js_init(self, id_, name, value): 29 return "initTable({0}, {1});".format(json.dumps(id_), json.dumps(self.table_options)) 30 31 32 33 class TableBlock(FieldBlock): 34 def __init__(self, required=True, help_text=None, table_options=None, **kwargs): 35 # CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality 36 # natively (via 'label' and 'default') 37 # CharField's 'max_length' and 'min_length' parameters are not exposed as table data needs to 38 # have arbitrary length 39 # table_options can contain any valid handsontable options: http://docs.handsontable.com/0.18.0/Options.html 40 self.field_options = {'required': required, 'help_text': help_text} 41 42 language = translation.get_language() 43 if language is not None and len(language) > 2: 44 language = language[:2] 45 46 default_table_options = { 47 'minSpareRows': 0, 48 'startRows': 3, 49 'startCols': 3, 50 'colHeaders': False, 51 'rowHeaders': False, 52 'contextMenu': True, 53 'editor': 'text', 54 'stretchH': 'all', 55 'height': 108, 56 'language': language, 57 'renderer': 'text', 58 'autoColumnSize': False, 59 } 60 if table_options is not None: 61 default_table_options.update(table_options) 62 self.table_options = default_table_options 63 super(TableBlock, self).__init__(**kwargs) 64 65 @cached_property 66 def field(self): 67 return forms.CharField(widget=TableInput(table_options=self.table_options), **self.field_options) 68 69 def value_from_form(self, value): 70 return json.loads(value) 71 72 def value_for_form(self, value): 73 return json.dumps(value) 74 75 def is_html_renderer(self): 76 return self.table_options['renderer'] == 'html' 77 78 def render(self, value): 79 template = getattr(self.meta, 'template', None) 80 if template and value: 81 table_header = value['data'][0] if value.get('data', None) and len(value['data']) > 0 and value.get('first_row_is_table_header', False) else None 82 first_col_is_header = value.get('first_col_is_header', False) 83 context = { 84 'self': value, 85 self.TEMPLATE_VAR: value, 86 'table_header': table_header, 87 'first_col_is_header': first_col_is_header, 88 'html_renderer': self.is_html_renderer(), 89 'data': value['data'][1:] if table_header else value.get('data', []) 90 } 91 return render_to_string(template, context) 92 else: 93 return self.render_basic(value) 94 95 @property 96 def media(self): 97 return forms.Media( 98 css={'all': ['table_block/css/vendor/handsontable-0.24.2.full.min.css']}, 99 js=['table_block/js/vendor/handsontable-0.24.2.full.min.js', 'table_block/js/table.js'] 100 ) 101 102 class Meta: 103 default = None 104 template = 'table_block/blocks/table.html' 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/contrib/table_block/fields.py b/wagtail/contrib/table_block/fields.py --- a/wagtail/contrib/table_block/fields.py +++ b/wagtail/contrib/table_block/fields.py @@ -29,7 +29,6 @@ return "initTable({0}, {1});".format(json.dumps(id_), json.dumps(self.table_options)) - class TableBlock(FieldBlock): def __init__(self, required=True, help_text=None, table_options=None, **kwargs): # CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality @@ -102,3 +101,4 @@ class Meta: default = None template = 'table_block/blocks/table.html' + icon = "table"
{"golden_diff": "diff --git a/wagtail/contrib/table_block/fields.py b/wagtail/contrib/table_block/fields.py\n--- a/wagtail/contrib/table_block/fields.py\n+++ b/wagtail/contrib/table_block/fields.py\n@@ -29,7 +29,6 @@\n return \"initTable({0}, {1});\".format(json.dumps(id_), json.dumps(self.table_options))\n \n \n-\n class TableBlock(FieldBlock):\n def __init__(self, required=True, help_text=None, table_options=None, **kwargs):\n # CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality\n@@ -102,3 +101,4 @@\n class Meta:\n default = None\n template = 'table_block/blocks/table.html'\n+ icon = \"table\"\n", "issue": "Add default icon for TableBlock\nAs per https://github.com/torchbox/wagtail/pull/1705#issuecomment-216053655. Best to do this after #2417 is merged, to avoid conflicts.\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom django import forms\nfrom django.template.loader import render_to_string\nfrom django.utils import translation\nfrom django.utils.functional import cached_property\n\nfrom wagtail.utils.widgets import WidgetWithScript\nfrom wagtail.wagtailcore.blocks import FieldBlock\n\n\nclass TableInput(WidgetWithScript, forms.HiddenInput):\n\n def __init__(self, table_options=None, attrs=None):\n self.table_options = table_options\n super(TableInput, self).__init__(attrs=attrs)\n\n def render(self, name, value, attrs=None):\n original_field_html = super(TableInput, self).render(name, value, attrs)\n return render_to_string(\"table_block/widgets/table.html\", {\n 'original_field_html': original_field_html,\n 'attrs': attrs,\n 'value': value,\n })\n\n def render_js_init(self, id_, name, value):\n return \"initTable({0}, {1});\".format(json.dumps(id_), json.dumps(self.table_options))\n\n\n\nclass TableBlock(FieldBlock):\n def __init__(self, required=True, help_text=None, table_options=None, **kwargs):\n # CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality\n # natively (via 'label' and 'default')\n # CharField's 'max_length' and 'min_length' parameters are not exposed as table data needs to\n # have arbitrary length\n # table_options can contain any valid handsontable options: http://docs.handsontable.com/0.18.0/Options.html\n self.field_options = {'required': required, 'help_text': help_text}\n\n language = translation.get_language()\n if language is not None and len(language) > 2:\n language = language[:2]\n\n default_table_options = {\n 'minSpareRows': 0,\n 'startRows': 3,\n 'startCols': 3,\n 'colHeaders': False,\n 'rowHeaders': False,\n 'contextMenu': True,\n 'editor': 'text',\n 'stretchH': 'all',\n 'height': 108,\n 'language': language,\n 'renderer': 'text',\n 'autoColumnSize': False,\n }\n if table_options is not None:\n default_table_options.update(table_options)\n self.table_options = default_table_options\n super(TableBlock, self).__init__(**kwargs)\n\n @cached_property\n def field(self):\n return forms.CharField(widget=TableInput(table_options=self.table_options), **self.field_options)\n\n def value_from_form(self, value):\n return json.loads(value)\n\n def value_for_form(self, value):\n return json.dumps(value)\n\n def is_html_renderer(self):\n return self.table_options['renderer'] == 'html'\n\n def render(self, value):\n template = getattr(self.meta, 'template', None)\n if template and value:\n table_header = value['data'][0] if value.get('data', None) and len(value['data']) > 0 and value.get('first_row_is_table_header', False) else None\n first_col_is_header = value.get('first_col_is_header', False)\n context = {\n 'self': value,\n self.TEMPLATE_VAR: value,\n 'table_header': table_header,\n 'first_col_is_header': first_col_is_header,\n 'html_renderer': self.is_html_renderer(),\n 'data': value['data'][1:] if table_header else value.get('data', [])\n }\n return render_to_string(template, context)\n else:\n return self.render_basic(value)\n\n @property\n def media(self):\n return forms.Media(\n css={'all': ['table_block/css/vendor/handsontable-0.24.2.full.min.css']},\n js=['table_block/js/vendor/handsontable-0.24.2.full.min.js', 'table_block/js/table.js']\n )\n\n class Meta:\n default = None\n template = 'table_block/blocks/table.html'\n", "path": "wagtail/contrib/table_block/fields.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom django import forms\nfrom django.template.loader import render_to_string\nfrom django.utils import translation\nfrom django.utils.functional import cached_property\n\nfrom wagtail.utils.widgets import WidgetWithScript\nfrom wagtail.wagtailcore.blocks import FieldBlock\n\n\nclass TableInput(WidgetWithScript, forms.HiddenInput):\n\n def __init__(self, table_options=None, attrs=None):\n self.table_options = table_options\n super(TableInput, self).__init__(attrs=attrs)\n\n def render(self, name, value, attrs=None):\n original_field_html = super(TableInput, self).render(name, value, attrs)\n return render_to_string(\"table_block/widgets/table.html\", {\n 'original_field_html': original_field_html,\n 'attrs': attrs,\n 'value': value,\n })\n\n def render_js_init(self, id_, name, value):\n return \"initTable({0}, {1});\".format(json.dumps(id_), json.dumps(self.table_options))\n\n\nclass TableBlock(FieldBlock):\n def __init__(self, required=True, help_text=None, table_options=None, **kwargs):\n # CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality\n # natively (via 'label' and 'default')\n # CharField's 'max_length' and 'min_length' parameters are not exposed as table data needs to\n # have arbitrary length\n # table_options can contain any valid handsontable options: http://docs.handsontable.com/0.18.0/Options.html\n self.field_options = {'required': required, 'help_text': help_text}\n\n language = translation.get_language()\n if language is not None and len(language) > 2:\n language = language[:2]\n\n default_table_options = {\n 'minSpareRows': 0,\n 'startRows': 3,\n 'startCols': 3,\n 'colHeaders': False,\n 'rowHeaders': False,\n 'contextMenu': True,\n 'editor': 'text',\n 'stretchH': 'all',\n 'height': 108,\n 'language': language,\n 'renderer': 'text',\n 'autoColumnSize': False,\n }\n if table_options is not None:\n default_table_options.update(table_options)\n self.table_options = default_table_options\n super(TableBlock, self).__init__(**kwargs)\n\n @cached_property\n def field(self):\n return forms.CharField(widget=TableInput(table_options=self.table_options), **self.field_options)\n\n def value_from_form(self, value):\n return json.loads(value)\n\n def value_for_form(self, value):\n return json.dumps(value)\n\n def is_html_renderer(self):\n return self.table_options['renderer'] == 'html'\n\n def render(self, value):\n template = getattr(self.meta, 'template', None)\n if template and value:\n table_header = value['data'][0] if value.get('data', None) and len(value['data']) > 0 and value.get('first_row_is_table_header', False) else None\n first_col_is_header = value.get('first_col_is_header', False)\n context = {\n 'self': value,\n self.TEMPLATE_VAR: value,\n 'table_header': table_header,\n 'first_col_is_header': first_col_is_header,\n 'html_renderer': self.is_html_renderer(),\n 'data': value['data'][1:] if table_header else value.get('data', [])\n }\n return render_to_string(template, context)\n else:\n return self.render_basic(value)\n\n @property\n def media(self):\n return forms.Media(\n css={'all': ['table_block/css/vendor/handsontable-0.24.2.full.min.css']},\n js=['table_block/js/vendor/handsontable-0.24.2.full.min.js', 'table_block/js/table.js']\n )\n\n class Meta:\n default = None\n template = 'table_block/blocks/table.html'\n icon = \"table\"\n", "path": "wagtail/contrib/table_block/fields.py"}]}
1,409
180
gh_patches_debug_17716
rasdani/github-patches
git_diff
beetbox__beets-4900
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- scrub: The `auto` option should be a no-op when `import.write` is disabled ### Problem I was importing new tracks with `import.write` disabled and seeing all tracks populated with new tags. With help from the [forum](https://discourse.beets.io/t/tags-are-being-written-to-imported-files-even-though-import-write-no/2068/4), I was able to determine that with `scrub.auto` enabled and `import.write` disabled, the scrub plugin removes tags and rewrites them from the database. The [scrub documentation](https://beets.readthedocs.io/en/stable/plugins/scrub.html#automatic-scrubbing) indicates that scrubbing on import is only active if `import.write` is also enabled. > When importing new files (with import.write turned on) or modifying files’ tags with the beet modify command, beets will first strip all types of tags entirely and then write the database-tracked metadata to the file. I'd think a resolution could be: - Update the documentation to indicate scrub doesn't read the `import.write` setting. - Or scrub _should_ read that setting. - Or, even better, have a separate scrub configuration that disables restoring tags after the scrub. ### Setup * OS: Docker https://hub.docker.com/r/linuxserver/beets * Python version: 3.9.7 * beets version: 1.6.0 * Turning off plugins made problem go away (yes/no): yes My configuration (output of `beet config`) is: ```yaml lyrics: bing_lang_from: [] auto: no fallback: '' sources: genius bing_client_secret: REDACTED bing_lang_to: google_API_key: REDACTED google_engine_ID: REDACTED genius_api_key: REDACTED force: no local: no plugins: - fetchart - embedart - convert - scrub - replaygain - lyrics - lastgenre - the - inline - chroma - web - permissions directory: /music library: /config/musiclibrary.blb art_filename: _cover threaded: yes original_date: yes per_disc_numbering: yes chroma: auto: yes embedart: auto: no maxwidth: 0 compare_threshold: 0 ifempty: no remove_art_file: no quality: 0 convert: auto: no dest: /converted format: aac formats: aac: command: ffmpeg -i $source -c:a aac -vbr 4 $dest extension: m4a mp3: command: /config/gapless-mp3.sh $source $dest extension: mp3 alac: command: ffmpeg -i $source -y -vn -acodec alac $dest extension: m4a flac: ffmpeg -i $source -y -vn -acodec flac $dest opus: ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest ogg: ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest wma: ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest pretend: no link: no hardlink: no threads: 8 id3v23: inherit max_bitrate: 500 tmpdir: quiet: no embed: yes paths: {} no_convert: '' never_convert_lossy_files: no copy_album_art: no album_art_maxwidth: 0 delete_originals: no item_fields: disc_and_track: u'%02i.%02i' % (disc, track) if disctotal > 1 else u'%02i' % (track) the: a: no the: yes format: '{0}, {1}' strip: no patterns: [] paths: default: '%the{$albumartist}/$original_year - $album%aunique{}/$disc_and_track $title' singleton: '%the{$albumartist}/_singles/$original_year - $title' comp: _Compilations/$original_year - $album%aunique{}/$disc_and_track $title albumtype_soundtrack: _Soundtracks/$original_year - $album%aunique{}/$disc_and_track $title permissions: file: 664 dir: 775 import: write: no copy: no move: yes resume: ask incremental: no quiet_fallback: skip timid: no log: /config/beet.log lastgenre: auto: yes source: album whitelist: yes min_weight: 10 count: 1 fallback: canonical: no force: yes separator: ', ' prefer_specific: no title_case: yes fetchart: auto: yes store_source: yes minwidth: 600 sources: - filesystem - coverart - itunes - albumart - amazon maxwidth: 0 quality: 0 max_filesize: 0 enforce_ratio: no cautious: no cover_names: - cover - front - art - album - folder google_key: REDACTED google_engine: 001442825323518660753:hrh5ch1gjzm fanarttv_key: REDACTED lastfm_key: REDACTED high_resolution: no deinterlace: no cover_format: replaygain: auto: yes backend: ffmpeg overwrite: no threads: 8 parallel_on_import: no per_disc: no peak: 'true' targetlevel: 89 r128: [Opus] r128_targetlevel: 84 scrub: auto: yes replace: ^\.: _ '[\x00-\x1f]': _ '[<>:"\?\*\|]': _ '[\xE8-\xEB]': e '[\xEC-\xEF]': i '[\xE2-\xE6]': a '[\xF2-\xF6]': o '[\xF8]': o \.$: _ \s+$: '' web: host: 0.0.0.0 port: 8337 cors: '' cors_supports_credentials: no reverse_proxy: no include_paths: no readonly: yes pathfields: {} album_fields: {} ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `beetsplug/scrub.py` Content: ``` 1 # This file is part of beets. 2 # Copyright 2016, Adrian Sampson. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """Cleans extraneous metadata from files' tags via a command or 16 automatically whenever tags are written. 17 """ 18 19 20 from beets.plugins import BeetsPlugin 21 from beets import ui 22 from beets import util 23 from beets import config 24 import mediafile 25 import mutagen 26 27 _MUTAGEN_FORMATS = { 28 'asf': 'ASF', 29 'apev2': 'APEv2File', 30 'flac': 'FLAC', 31 'id3': 'ID3FileType', 32 'mp3': 'MP3', 33 'mp4': 'MP4', 34 'oggflac': 'OggFLAC', 35 'oggspeex': 'OggSpeex', 36 'oggtheora': 'OggTheora', 37 'oggvorbis': 'OggVorbis', 38 'oggopus': 'OggOpus', 39 'trueaudio': 'TrueAudio', 40 'wavpack': 'WavPack', 41 'monkeysaudio': 'MonkeysAudio', 42 'optimfrog': 'OptimFROG', 43 } 44 45 46 class ScrubPlugin(BeetsPlugin): 47 """Removes extraneous metadata from files' tags.""" 48 def __init__(self): 49 super().__init__() 50 self.config.add({ 51 'auto': True, 52 }) 53 54 if self.config['auto']: 55 self.register_listener("import_task_files", self.import_task_files) 56 57 def commands(self): 58 def scrub_func(lib, opts, args): 59 # Walk through matching files and remove tags. 60 for item in lib.items(ui.decargs(args)): 61 self._log.info('scrubbing: {0}', 62 util.displayable_path(item.path)) 63 self._scrub_item(item, opts.write) 64 65 scrub_cmd = ui.Subcommand('scrub', help='clean audio tags') 66 scrub_cmd.parser.add_option( 67 '-W', '--nowrite', dest='write', 68 action='store_false', default=True, 69 help='leave tags empty') 70 scrub_cmd.func = scrub_func 71 72 return [scrub_cmd] 73 74 @staticmethod 75 def _mutagen_classes(): 76 """Get a list of file type classes from the Mutagen module. 77 """ 78 classes = [] 79 for modname, clsname in _MUTAGEN_FORMATS.items(): 80 mod = __import__(f'mutagen.{modname}', 81 fromlist=[clsname]) 82 classes.append(getattr(mod, clsname)) 83 return classes 84 85 def _scrub(self, path): 86 """Remove all tags from a file. 87 """ 88 for cls in self._mutagen_classes(): 89 # Try opening the file with this type, but just skip in the 90 # event of any error. 91 try: 92 f = cls(util.syspath(path)) 93 except Exception: 94 continue 95 if f.tags is None: 96 continue 97 98 # Remove the tag for this type. 99 try: 100 f.delete() 101 except NotImplementedError: 102 # Some Mutagen metadata subclasses (namely, ASFTag) do not 103 # support .delete(), presumably because it is impossible to 104 # remove them. In this case, we just remove all the tags. 105 for tag in f.keys(): 106 del f[tag] 107 f.save() 108 except (OSError, mutagen.MutagenError) as exc: 109 self._log.error('could not scrub {0}: {1}', 110 util.displayable_path(path), exc) 111 112 def _scrub_item(self, item, restore=True): 113 """Remove tags from an Item's associated file and, if `restore` 114 is enabled, write the database's tags back to the file. 115 """ 116 # Get album art if we need to restore it. 117 if restore: 118 try: 119 mf = mediafile.MediaFile(util.syspath(item.path), 120 config['id3v23'].get(bool)) 121 except mediafile.UnreadableFileError as exc: 122 self._log.error('could not open file to scrub: {0}', 123 exc) 124 return 125 images = mf.images 126 127 # Remove all tags. 128 self._scrub(item.path) 129 130 # Restore tags, if enabled. 131 if restore: 132 self._log.debug('writing new tags after scrub') 133 item.try_write() 134 if images: 135 self._log.debug('restoring art') 136 try: 137 mf = mediafile.MediaFile(util.syspath(item.path), 138 config['id3v23'].get(bool)) 139 mf.images = images 140 mf.save() 141 except mediafile.UnreadableFileError as exc: 142 self._log.error('could not write tags: {0}', exc) 143 144 def import_task_files(self, session, task): 145 """Automatically scrub imported files.""" 146 for item in task.imported_items(): 147 self._log.debug('auto-scrubbing {0}', 148 util.displayable_path(item.path)) 149 self._scrub_item(item) 150 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/beetsplug/scrub.py b/beetsplug/scrub.py --- a/beetsplug/scrub.py +++ b/beetsplug/scrub.py @@ -109,7 +109,7 @@ self._log.error('could not scrub {0}: {1}', util.displayable_path(path), exc) - def _scrub_item(self, item, restore=True): + def _scrub_item(self, item, restore): """Remove tags from an Item's associated file and, if `restore` is enabled, write the database's tags back to the file. """ @@ -146,4 +146,4 @@ for item in task.imported_items(): self._log.debug('auto-scrubbing {0}', util.displayable_path(item.path)) - self._scrub_item(item) + self._scrub_item(item, ui.should_write())
{"golden_diff": "diff --git a/beetsplug/scrub.py b/beetsplug/scrub.py\n--- a/beetsplug/scrub.py\n+++ b/beetsplug/scrub.py\n@@ -109,7 +109,7 @@\n self._log.error('could not scrub {0}: {1}',\n util.displayable_path(path), exc)\n \n- def _scrub_item(self, item, restore=True):\n+ def _scrub_item(self, item, restore):\n \"\"\"Remove tags from an Item's associated file and, if `restore`\n is enabled, write the database's tags back to the file.\n \"\"\"\n@@ -146,4 +146,4 @@\n for item in task.imported_items():\n self._log.debug('auto-scrubbing {0}',\n util.displayable_path(item.path))\n- self._scrub_item(item)\n+ self._scrub_item(item, ui.should_write())\n", "issue": "scrub: The `auto` option should be a no-op when `import.write` is disabled\n### Problem\r\n\r\nI was importing new tracks with `import.write` disabled and seeing all tracks populated with new tags. With help from the [forum](https://discourse.beets.io/t/tags-are-being-written-to-imported-files-even-though-import-write-no/2068/4), I was able to determine that with `scrub.auto` enabled and `import.write` disabled, the scrub plugin removes tags and rewrites them from the database.\r\nThe [scrub documentation](https://beets.readthedocs.io/en/stable/plugins/scrub.html#automatic-scrubbing) indicates that scrubbing on import is only active if `import.write` is also enabled.\r\n> When importing new files (with import.write turned on) or modifying files\u2019 tags with the beet modify command, beets will first strip all types of tags entirely and then write the database-tracked metadata to the file.\r\n\r\nI'd think a resolution could be:\r\n- Update the documentation to indicate scrub doesn't read the `import.write` setting.\r\n- Or scrub _should_ read that setting.\r\n- Or, even better, have a separate scrub configuration that disables restoring tags after the scrub.\r\n\r\n### Setup\r\n\r\n* OS: Docker https://hub.docker.com/r/linuxserver/beets\r\n* Python version: 3.9.7\r\n* beets version: 1.6.0\r\n* Turning off plugins made problem go away (yes/no): yes\r\n\r\nMy configuration (output of `beet config`) is:\r\n\r\n```yaml\r\nlyrics:\r\n bing_lang_from: []\r\n auto: no\r\n fallback: ''\r\n sources: genius\r\n bing_client_secret: REDACTED\r\n bing_lang_to:\r\n google_API_key: REDACTED\r\n google_engine_ID: REDACTED\r\n genius_api_key: REDACTED\r\n force: no\r\n local: no\r\n\r\nplugins:\r\n- fetchart\r\n- embedart\r\n- convert\r\n- scrub\r\n- replaygain\r\n- lyrics\r\n- lastgenre\r\n- the\r\n- inline\r\n- chroma\r\n- web\r\n- permissions\r\ndirectory: /music\r\nlibrary: /config/musiclibrary.blb\r\nart_filename: _cover\r\nthreaded: yes\r\noriginal_date: yes\r\nper_disc_numbering: yes\r\nchroma:\r\n auto: yes\r\nembedart:\r\n auto: no\r\n maxwidth: 0\r\n compare_threshold: 0\r\n ifempty: no\r\n remove_art_file: no\r\n quality: 0\r\nconvert:\r\n auto: no\r\n dest: /converted\r\n format: aac\r\n formats:\r\n aac:\r\n command: ffmpeg -i $source -c:a aac -vbr 4 $dest\r\n extension: m4a\r\n mp3:\r\n command: /config/gapless-mp3.sh $source $dest\r\n extension: mp3\r\n alac:\r\n command: ffmpeg -i $source -y -vn -acodec alac $dest\r\n extension: m4a\r\n flac: ffmpeg -i $source -y -vn -acodec flac $dest\r\n opus: ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest\r\n ogg: ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest\r\n wma: ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest\r\n pretend: no\r\n link: no\r\n hardlink: no\r\n threads: 8\r\n id3v23: inherit\r\n max_bitrate: 500\r\n tmpdir:\r\n quiet: no\r\n embed: yes\r\n\r\n paths: {}\r\n no_convert: ''\r\n never_convert_lossy_files: no\r\n copy_album_art: no\r\n album_art_maxwidth: 0\r\n delete_originals: no\r\nitem_fields:\r\n disc_and_track: u'%02i.%02i' % (disc, track) if disctotal > 1 else u'%02i' % (track)\r\nthe:\r\n a: no\r\n the: yes\r\n format: '{0}, {1}'\r\n strip: no\r\n patterns: []\r\n\r\npaths:\r\n default: '%the{$albumartist}/$original_year - $album%aunique{}/$disc_and_track $title'\r\n singleton: '%the{$albumartist}/_singles/$original_year - $title'\r\n comp: _Compilations/$original_year - $album%aunique{}/$disc_and_track $title\r\n albumtype_soundtrack: _Soundtracks/$original_year - $album%aunique{}/$disc_and_track $title\r\npermissions:\r\n file: 664\r\n dir: 775\r\n\r\nimport:\r\n write: no\r\n copy: no\r\n move: yes\r\n resume: ask\r\n incremental: no\r\n quiet_fallback: skip\r\n timid: no\r\n log: /config/beet.log\r\nlastgenre:\r\n auto: yes\r\n source: album\r\n whitelist: yes\r\n min_weight: 10\r\n count: 1\r\n fallback:\r\n canonical: no\r\n force: yes\r\n separator: ', '\r\n prefer_specific: no\r\n title_case: yes\r\nfetchart:\r\n auto: yes\r\n store_source: yes\r\n minwidth: 600\r\n sources:\r\n - filesystem\r\n - coverart\r\n - itunes\r\n - albumart\r\n - amazon\r\n maxwidth: 0\r\n quality: 0\r\n max_filesize: 0\r\n enforce_ratio: no\r\n cautious: no\r\n cover_names:\r\n - cover\r\n - front\r\n - art\r\n - album\r\n - folder\r\n google_key: REDACTED\r\n google_engine: 001442825323518660753:hrh5ch1gjzm\r\n fanarttv_key: REDACTED\r\n lastfm_key: REDACTED\r\n high_resolution: no\r\n deinterlace: no\r\n cover_format:\r\nreplaygain:\r\n auto: yes\r\n backend: ffmpeg\r\n overwrite: no\r\n threads: 8\r\n parallel_on_import: no\r\n per_disc: no\r\n peak: 'true'\r\n targetlevel: 89\r\n r128: [Opus]\r\n r128_targetlevel: 84\r\nscrub:\r\n auto: yes\r\n\r\nreplace:\r\n ^\\.: _\r\n '[\\x00-\\x1f]': _\r\n '[<>:\"\\?\\*\\|]': _\r\n '[\\xE8-\\xEB]': e\r\n '[\\xEC-\\xEF]': i\r\n '[\\xE2-\\xE6]': a\r\n '[\\xF2-\\xF6]': o\r\n '[\\xF8]': o\r\n \\.$: _\r\n \\s+$: ''\r\nweb:\r\n host: 0.0.0.0\r\n port: 8337\r\n cors: ''\r\n cors_supports_credentials: no\r\n reverse_proxy: no\r\n include_paths: no\r\n readonly: yes\r\npathfields: {}\r\nalbum_fields: {}\r\n\r\n```\r\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Cleans extraneous metadata from files' tags via a command or\nautomatically whenever tags are written.\n\"\"\"\n\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import ui\nfrom beets import util\nfrom beets import config\nimport mediafile\nimport mutagen\n\n_MUTAGEN_FORMATS = {\n 'asf': 'ASF',\n 'apev2': 'APEv2File',\n 'flac': 'FLAC',\n 'id3': 'ID3FileType',\n 'mp3': 'MP3',\n 'mp4': 'MP4',\n 'oggflac': 'OggFLAC',\n 'oggspeex': 'OggSpeex',\n 'oggtheora': 'OggTheora',\n 'oggvorbis': 'OggVorbis',\n 'oggopus': 'OggOpus',\n 'trueaudio': 'TrueAudio',\n 'wavpack': 'WavPack',\n 'monkeysaudio': 'MonkeysAudio',\n 'optimfrog': 'OptimFROG',\n}\n\n\nclass ScrubPlugin(BeetsPlugin):\n \"\"\"Removes extraneous metadata from files' tags.\"\"\"\n def __init__(self):\n super().__init__()\n self.config.add({\n 'auto': True,\n })\n\n if self.config['auto']:\n self.register_listener(\"import_task_files\", self.import_task_files)\n\n def commands(self):\n def scrub_func(lib, opts, args):\n # Walk through matching files and remove tags.\n for item in lib.items(ui.decargs(args)):\n self._log.info('scrubbing: {0}',\n util.displayable_path(item.path))\n self._scrub_item(item, opts.write)\n\n scrub_cmd = ui.Subcommand('scrub', help='clean audio tags')\n scrub_cmd.parser.add_option(\n '-W', '--nowrite', dest='write',\n action='store_false', default=True,\n help='leave tags empty')\n scrub_cmd.func = scrub_func\n\n return [scrub_cmd]\n\n @staticmethod\n def _mutagen_classes():\n \"\"\"Get a list of file type classes from the Mutagen module.\n \"\"\"\n classes = []\n for modname, clsname in _MUTAGEN_FORMATS.items():\n mod = __import__(f'mutagen.{modname}',\n fromlist=[clsname])\n classes.append(getattr(mod, clsname))\n return classes\n\n def _scrub(self, path):\n \"\"\"Remove all tags from a file.\n \"\"\"\n for cls in self._mutagen_classes():\n # Try opening the file with this type, but just skip in the\n # event of any error.\n try:\n f = cls(util.syspath(path))\n except Exception:\n continue\n if f.tags is None:\n continue\n\n # Remove the tag for this type.\n try:\n f.delete()\n except NotImplementedError:\n # Some Mutagen metadata subclasses (namely, ASFTag) do not\n # support .delete(), presumably because it is impossible to\n # remove them. In this case, we just remove all the tags.\n for tag in f.keys():\n del f[tag]\n f.save()\n except (OSError, mutagen.MutagenError) as exc:\n self._log.error('could not scrub {0}: {1}',\n util.displayable_path(path), exc)\n\n def _scrub_item(self, item, restore=True):\n \"\"\"Remove tags from an Item's associated file and, if `restore`\n is enabled, write the database's tags back to the file.\n \"\"\"\n # Get album art if we need to restore it.\n if restore:\n try:\n mf = mediafile.MediaFile(util.syspath(item.path),\n config['id3v23'].get(bool))\n except mediafile.UnreadableFileError as exc:\n self._log.error('could not open file to scrub: {0}',\n exc)\n return\n images = mf.images\n\n # Remove all tags.\n self._scrub(item.path)\n\n # Restore tags, if enabled.\n if restore:\n self._log.debug('writing new tags after scrub')\n item.try_write()\n if images:\n self._log.debug('restoring art')\n try:\n mf = mediafile.MediaFile(util.syspath(item.path),\n config['id3v23'].get(bool))\n mf.images = images\n mf.save()\n except mediafile.UnreadableFileError as exc:\n self._log.error('could not write tags: {0}', exc)\n\n def import_task_files(self, session, task):\n \"\"\"Automatically scrub imported files.\"\"\"\n for item in task.imported_items():\n self._log.debug('auto-scrubbing {0}',\n util.displayable_path(item.path))\n self._scrub_item(item)\n", "path": "beetsplug/scrub.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Cleans extraneous metadata from files' tags via a command or\nautomatically whenever tags are written.\n\"\"\"\n\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import ui\nfrom beets import util\nfrom beets import config\nimport mediafile\nimport mutagen\n\n_MUTAGEN_FORMATS = {\n 'asf': 'ASF',\n 'apev2': 'APEv2File',\n 'flac': 'FLAC',\n 'id3': 'ID3FileType',\n 'mp3': 'MP3',\n 'mp4': 'MP4',\n 'oggflac': 'OggFLAC',\n 'oggspeex': 'OggSpeex',\n 'oggtheora': 'OggTheora',\n 'oggvorbis': 'OggVorbis',\n 'oggopus': 'OggOpus',\n 'trueaudio': 'TrueAudio',\n 'wavpack': 'WavPack',\n 'monkeysaudio': 'MonkeysAudio',\n 'optimfrog': 'OptimFROG',\n}\n\n\nclass ScrubPlugin(BeetsPlugin):\n \"\"\"Removes extraneous metadata from files' tags.\"\"\"\n def __init__(self):\n super().__init__()\n self.config.add({\n 'auto': True,\n })\n\n if self.config['auto']:\n self.register_listener(\"import_task_files\", self.import_task_files)\n\n def commands(self):\n def scrub_func(lib, opts, args):\n # Walk through matching files and remove tags.\n for item in lib.items(ui.decargs(args)):\n self._log.info('scrubbing: {0}',\n util.displayable_path(item.path))\n self._scrub_item(item, opts.write)\n\n scrub_cmd = ui.Subcommand('scrub', help='clean audio tags')\n scrub_cmd.parser.add_option(\n '-W', '--nowrite', dest='write',\n action='store_false', default=True,\n help='leave tags empty')\n scrub_cmd.func = scrub_func\n\n return [scrub_cmd]\n\n @staticmethod\n def _mutagen_classes():\n \"\"\"Get a list of file type classes from the Mutagen module.\n \"\"\"\n classes = []\n for modname, clsname in _MUTAGEN_FORMATS.items():\n mod = __import__(f'mutagen.{modname}',\n fromlist=[clsname])\n classes.append(getattr(mod, clsname))\n return classes\n\n def _scrub(self, path):\n \"\"\"Remove all tags from a file.\n \"\"\"\n for cls in self._mutagen_classes():\n # Try opening the file with this type, but just skip in the\n # event of any error.\n try:\n f = cls(util.syspath(path))\n except Exception:\n continue\n if f.tags is None:\n continue\n\n # Remove the tag for this type.\n try:\n f.delete()\n except NotImplementedError:\n # Some Mutagen metadata subclasses (namely, ASFTag) do not\n # support .delete(), presumably because it is impossible to\n # remove them. In this case, we just remove all the tags.\n for tag in f.keys():\n del f[tag]\n f.save()\n except (OSError, mutagen.MutagenError) as exc:\n self._log.error('could not scrub {0}: {1}',\n util.displayable_path(path), exc)\n\n def _scrub_item(self, item, restore):\n \"\"\"Remove tags from an Item's associated file and, if `restore`\n is enabled, write the database's tags back to the file.\n \"\"\"\n # Get album art if we need to restore it.\n if restore:\n try:\n mf = mediafile.MediaFile(util.syspath(item.path),\n config['id3v23'].get(bool))\n except mediafile.UnreadableFileError as exc:\n self._log.error('could not open file to scrub: {0}',\n exc)\n return\n images = mf.images\n\n # Remove all tags.\n self._scrub(item.path)\n\n # Restore tags, if enabled.\n if restore:\n self._log.debug('writing new tags after scrub')\n item.try_write()\n if images:\n self._log.debug('restoring art')\n try:\n mf = mediafile.MediaFile(util.syspath(item.path),\n config['id3v23'].get(bool))\n mf.images = images\n mf.save()\n except mediafile.UnreadableFileError as exc:\n self._log.error('could not write tags: {0}', exc)\n\n def import_task_files(self, session, task):\n \"\"\"Automatically scrub imported files.\"\"\"\n for item in task.imported_items():\n self._log.debug('auto-scrubbing {0}',\n util.displayable_path(item.path))\n self._scrub_item(item, ui.should_write())\n", "path": "beetsplug/scrub.py"}]}
3,366
205
gh_patches_debug_39300
rasdani/github-patches
git_diff
lightly-ai__lightly-303
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add documentation for creating and uploading embeddings through the pip package (non cli) The CLI way of obtaining and uploading embeddings is described. It's pretty minimal but works. However, there is no information on how to do this yourself without using the CLI. I would prefer steps starting from having a pre-trained model. ```python class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout(0.25) self.dropout2 = nn.Dropout(0.5) self.fc1 = nn.Linear(9216, 128) self.fc2 = nn.Linear(128, 10) def embed(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout1(x) x = torch.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dropout2(x) x = self.fc2(x) return x def forward(self, x): x = self.embed(x) output = F.log_softmax(x, dim=1) return output model = Net() # let's assume here model is pre-trained # I can get the embeddings and predictions using embedding = model.embed(x) prediction = model(x) # TODO: how to create embeddings and upload them to lightly ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lightly/api/api_workflow_client.py` Content: ``` 1 import time 2 import random 3 import time 4 import warnings 5 from io import IOBase 6 from typing import * 7 8 import requests 9 from requests import Response 10 11 from lightly.__init__ import __version__ 12 from lightly.api.api_workflow_datasets import _DatasetsMixin 13 from lightly.api.api_workflow_download_dataset import _DownloadDatasetMixin 14 from lightly.api.api_workflow_sampling import _SamplingMixin 15 from lightly.api.api_workflow_upload_dataset import _UploadDatasetMixin 16 from lightly.api.api_workflow_upload_embeddings import _UploadEmbeddingsMixin 17 from lightly.api.utils import getenv 18 from lightly.api.version_checking import get_minimum_compatible_version, version_compare 19 from lightly.openapi_generated.swagger_client import TagData, ScoresApi, QuotaApi 20 from lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi 21 from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi 22 from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi 23 from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi 24 from lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi 25 from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi 26 from lightly.openapi_generated.swagger_client.api.tags_api import TagsApi 27 from lightly.openapi_generated.swagger_client.api_client import ApiClient 28 from lightly.openapi_generated.swagger_client.configuration import Configuration 29 from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData 30 31 32 class ApiWorkflowClient(_UploadEmbeddingsMixin, _SamplingMixin, _UploadDatasetMixin, _DownloadDatasetMixin, 33 _DatasetsMixin): 34 """Provides a uniform interface to communicate with the api 35 36 The APIWorkflowClient is used to communicaate with the Lightly API. The client 37 can run also more complex workflows which include multiple API calls at once. 38 39 The client can be used in combination with the active learning agent. 40 41 Args: 42 token: 43 the token of the user, provided in webapp 44 dataset_id: 45 the id of the dataset, provided in webapp. \ 46 If it is not set, but used by a workflow, \ 47 the last modfied dataset is taken by default. 48 embedding_id: 49 the id of the embedding to use. If it is not set, \ 50 but used by a workflow, the newest embedding is taken by default 51 """ 52 53 def __init__(self, token: str, dataset_id: str = None, embedding_id: str = None): 54 55 self.check_version_compatibility() 56 57 configuration = Configuration() 58 configuration.host = getenv('LIGHTLY_SERVER_LOCATION', 'https://api.lightly.ai') 59 configuration.api_key = {'token': token} 60 api_client = ApiClient(configuration=configuration) 61 self.api_client = api_client 62 63 self.token = token 64 if dataset_id is not None: 65 self._dataset_id = dataset_id 66 if embedding_id is not None: 67 self.embedding_id = embedding_id 68 69 self.datasets_api = DatasetsApi(api_client=self.api_client) 70 self.samplings_api = SamplingsApi(api_client=self.api_client) 71 self.jobs_api = JobsApi(api_client=self.api_client) 72 self.tags_api = TagsApi(api_client=self.api_client) 73 self.embeddings_api = EmbeddingsApi(api_client=api_client) 74 self.mappings_api = MappingsApi(api_client=api_client) 75 self.scores_api = ScoresApi(api_client=api_client) 76 self.samples_api = SamplesApi(api_client=api_client) 77 self.quota_api = QuotaApi(api_client=api_client) 78 79 def check_version_compatibility(self): 80 minimum_version = get_minimum_compatible_version() 81 if version_compare(__version__, minimum_version) < 0: 82 raise ValueError(f"Incompatible Version of lightly pip package. " 83 f"Please upgrade to at least version {minimum_version} " 84 f"to be able to access the api and webapp") 85 86 @property 87 def dataset_id(self) -> str: 88 ''' Returns the dataset_id 89 90 If the dataset_id is set, it is returned. 91 If it is unset, then the dataset_id of the last modified dataset is taken. 92 93 ''' 94 try: 95 return self._dataset_id 96 except AttributeError: 97 all_datasets: List[DatasetData] = self.datasets_api.get_datasets() 98 datasets_sorted = sorted(all_datasets, key=lambda dataset: dataset.last_modified_at) 99 last_modified_dataset = datasets_sorted[-1] 100 self._dataset_id = last_modified_dataset.id 101 warnings.warn(UserWarning(f"Dataset has not been specified, " 102 f"taking the last modified dataset {last_modified_dataset.name} as default dataset.")) 103 return self._dataset_id 104 105 def _get_all_tags(self) -> List[TagData]: 106 return self.tags_api.get_tags_by_dataset_id(self.dataset_id) 107 108 def _order_list_by_filenames(self, filenames_for_list: List[str], list_to_order: List[object]) -> List[object]: 109 """Orders a list such that it is in the order of the filenames specified on the server. 110 111 Args: 112 filenames_for_list: 113 The filenames of samples in a specific order 114 list_to_order: 115 Some values belonging to the samples 116 117 Returns: 118 The list reordered. The same reorder applied on the filenames_for_list 119 would put them in the order of the filenames in self.filenames_on_server 120 121 """ 122 assert len(filenames_for_list) == len(list_to_order) 123 dict_by_filenames = dict(zip(filenames_for_list, list_to_order)) 124 list_ordered = [dict_by_filenames[filename] for filename in self.filenames_on_server 125 if filename in filenames_for_list] 126 return list_ordered 127 128 @property 129 def filenames_on_server(self): 130 if not hasattr(self, "_filenames_on_server"): 131 self._filenames_on_server = self.mappings_api. \ 132 get_sample_mappings_by_dataset_id(dataset_id=self.dataset_id, field="fileName") 133 return self._filenames_on_server 134 135 def upload_file_with_signed_url(self, file: IOBase, signed_write_url: str, 136 max_backoff: int = 32, max_retries: int = 5) -> Response: 137 """Uploads a file to a url via a put request. 138 139 Args: 140 file: 141 The file to upload. 142 signed_write_url: 143 The url to upload the file to. As no authorization is used, 144 the url must be a signed write url. 145 max_backoff: 146 Maximal backoff before retrying. 147 max_retries: 148 Maximum number of retries before timing out. 149 150 Returns: 151 The response of the put request, usually a 200 for the success case. 152 153 """ 154 155 response = requests.put(signed_write_url, data=file) 156 157 if response.status_code != 200: 158 msg = f'Failed PUT request to {signed_write_url} with status_code' 159 msg += f'{response.status__code}!' 160 raise RuntimeError(msg) 161 162 return response 163 ``` Path: `lightly/api/api_workflow_upload_embeddings.py` Content: ``` 1 import csv 2 from typing import List 3 4 from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData 5 from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData 6 7 8 9 def _is_valid_filename(filename: str): 10 """Returns False if the filename is misformatted. 11 12 """ 13 invalid_characters = [','] 14 for character in invalid_characters: 15 if character in filename: 16 return False 17 return True 18 19 20 class _UploadEmbeddingsMixin: 21 22 def set_embedding_id_by_name(self, embedding_name: str = None): 23 embeddings: List[DatasetEmbeddingData] = \ 24 self.embeddings_api.get_embeddings_by_dataset_id(dataset_id=self.dataset_id) 25 26 if embedding_name is None: 27 self.embedding_id = embeddings[-1].id 28 return 29 30 try: 31 self.embedding_id = next(embedding.id for embedding in embeddings if embedding.name == embedding_name) 32 except StopIteration: 33 raise ValueError(f"No embedding with name {embedding_name} found on the server.") 34 35 def upload_embeddings(self, path_to_embeddings_csv: str, name: str): 36 """Uploads embeddings to the server. 37 38 First checks that the specified embedding name is not on ther server. If it is, the upload is aborted. 39 Then creates a new csv with the embeddings in the order specified on the server. Next it uploads it to the server. 40 The received embedding_id is saved as a property of self. 41 Args: 42 path_to_embeddings_csv: the filepath to the .csv containing the embeddings, e.g. "path/to/embeddings.csv" 43 name: The name of the embedding. If an embedding with such a name already exists on the server, 44 the upload is aborted. 45 46 Returns: 47 None 48 49 """ 50 # get the names of the current embeddings on the server: 51 embeddings_on_server: List[DatasetEmbeddingData] = \ 52 self.embeddings_api.get_embeddings_by_dataset_id(dataset_id=self.dataset_id) 53 names_embeddings_on_server = [embedding.name for embedding in embeddings_on_server] 54 55 if name in names_embeddings_on_server: 56 print(f"Aborting upload, embedding with name='{name}' already exists.") 57 self.embedding_id = next(embedding for embedding in embeddings_on_server if embedding.name == name).id 58 return 59 60 # create a new csv with the filenames in the desired order 61 path_to_ordered_embeddings_csv = self._order_csv_by_filenames( 62 path_to_embeddings_csv=path_to_embeddings_csv) 63 64 # get the URL to upload the csv to 65 response: WriteCSVUrlData = \ 66 self.embeddings_api.get_embeddings_csv_write_url_by_id(self.dataset_id, name=name) 67 self.embedding_id = response.embedding_id 68 signed_write_url = response.signed_write_url 69 70 # upload the csv to the URL 71 with open(path_to_ordered_embeddings_csv, 'rb') as file_ordered_embeddings_csv: 72 self.upload_file_with_signed_url(file=file_ordered_embeddings_csv, signed_write_url=signed_write_url) 73 74 def _order_csv_by_filenames(self, path_to_embeddings_csv: str) -> str: 75 """Orders the rows in a csv according to the order specified on the server and saves it as a new file. 76 77 Args: 78 path_to_embeddings_csv: 79 the path to the csv to order 80 81 Returns: 82 the filepath to the new csv 83 84 """ 85 with open(path_to_embeddings_csv, 'r') as f: 86 data = csv.reader(f) 87 88 rows = list(data) 89 header_row = rows[0] 90 rows_without_header = rows[1:] 91 index_filenames = header_row.index('filenames') 92 filenames = [row[index_filenames] for row in rows_without_header] 93 94 if len(filenames) != len(self.filenames_on_server): 95 raise ValueError(f'There are {len(filenames)} rows in the embedding file, but ' 96 f'{len(self.filenames_on_server)} filenames/samples on the server.') 97 if set(filenames) != set(self.filenames_on_server): 98 raise ValueError(f'The filenames in the embedding file and the filenames on the server do not align') 99 invalid_filenames = [f for f in filenames if not _is_valid_filename(f)] 100 if len(invalid_filenames) > 0: 101 raise ValueError(f'Invalid filename(s) in embedding file: {invalid_filenames}') 102 103 rows_without_header_ordered = self._order_list_by_filenames(filenames, rows_without_header) 104 105 rows_to_write = [header_row] 106 rows_to_write += rows_without_header_ordered 107 108 path_to_ordered_embeddings_csv = path_to_embeddings_csv.replace('.csv', '_sorted.csv') 109 with open(path_to_ordered_embeddings_csv, 'w') as f: 110 writer = csv.writer(f) 111 writer.writerows(rows_to_write) 112 113 return path_to_ordered_embeddings_csv 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lightly/api/api_workflow_client.py b/lightly/api/api_workflow_client.py --- a/lightly/api/api_workflow_client.py +++ b/lightly/api/api_workflow_client.py @@ -85,12 +85,11 @@ @property def dataset_id(self) -> str: - ''' Returns the dataset_id + '''The current dataset_id. If the dataset_id is set, it is returned. - If it is unset, then the dataset_id of the last modified dataset is taken. - - ''' + If it is not set, then the dataset_id of the last modified dataset is selected. + ''' try: return self._dataset_id except AttributeError: @@ -127,6 +126,9 @@ @property def filenames_on_server(self): + '''The list of the filenames in the dataset. + + ''' if not hasattr(self, "_filenames_on_server"): self._filenames_on_server = self.mappings_api. \ get_sample_mappings_by_dataset_id(dataset_id=self.dataset_id, field="fileName") diff --git a/lightly/api/api_workflow_upload_embeddings.py b/lightly/api/api_workflow_upload_embeddings.py --- a/lightly/api/api_workflow_upload_embeddings.py +++ b/lightly/api/api_workflow_upload_embeddings.py @@ -20,6 +20,15 @@ class _UploadEmbeddingsMixin: def set_embedding_id_by_name(self, embedding_name: str = None): + """Sets the embedding id of the client by embedding name. + + Args: + embedding_name: + Name under which the embedding was uploaded. + + Raises: + ValueError if the embedding does not exist. + """ embeddings: List[DatasetEmbeddingData] = \ self.embeddings_api.get_embeddings_by_dataset_id(dataset_id=self.dataset_id) @@ -38,14 +47,14 @@ First checks that the specified embedding name is not on ther server. If it is, the upload is aborted. Then creates a new csv with the embeddings in the order specified on the server. Next it uploads it to the server. The received embedding_id is saved as a property of self. + Args: - path_to_embeddings_csv: the filepath to the .csv containing the embeddings, e.g. "path/to/embeddings.csv" - name: The name of the embedding. If an embedding with such a name already exists on the server, + path_to_embeddings_csv: + The path to the .csv containing the embeddings, e.g. "path/to/embeddings.csv" + name: + The name of the embedding. If an embedding with such a name already exists on the server, the upload is aborted. - Returns: - None - """ # get the names of the current embeddings on the server: embeddings_on_server: List[DatasetEmbeddingData] = \
{"golden_diff": "diff --git a/lightly/api/api_workflow_client.py b/lightly/api/api_workflow_client.py\n--- a/lightly/api/api_workflow_client.py\n+++ b/lightly/api/api_workflow_client.py\n@@ -85,12 +85,11 @@\n \n @property\n def dataset_id(self) -> str:\n- ''' Returns the dataset_id\n+ '''The current dataset_id.\n \n If the dataset_id is set, it is returned.\n- If it is unset, then the dataset_id of the last modified dataset is taken.\n-\n- '''\n+ If it is not set, then the dataset_id of the last modified dataset is selected.\n+ ''' \n try:\n return self._dataset_id\n except AttributeError:\n@@ -127,6 +126,9 @@\n \n @property\n def filenames_on_server(self):\n+ '''The list of the filenames in the dataset.\n+\n+ '''\n if not hasattr(self, \"_filenames_on_server\"):\n self._filenames_on_server = self.mappings_api. \\\n get_sample_mappings_by_dataset_id(dataset_id=self.dataset_id, field=\"fileName\")\ndiff --git a/lightly/api/api_workflow_upload_embeddings.py b/lightly/api/api_workflow_upload_embeddings.py\n--- a/lightly/api/api_workflow_upload_embeddings.py\n+++ b/lightly/api/api_workflow_upload_embeddings.py\n@@ -20,6 +20,15 @@\n class _UploadEmbeddingsMixin:\n \n def set_embedding_id_by_name(self, embedding_name: str = None):\n+ \"\"\"Sets the embedding id of the client by embedding name.\n+\n+ Args:\n+ embedding_name:\n+ Name under which the embedding was uploaded.\n+ \n+ Raises:\n+ ValueError if the embedding does not exist.\n+ \"\"\"\n embeddings: List[DatasetEmbeddingData] = \\\n self.embeddings_api.get_embeddings_by_dataset_id(dataset_id=self.dataset_id)\n \n@@ -38,14 +47,14 @@\n First checks that the specified embedding name is not on ther server. If it is, the upload is aborted.\n Then creates a new csv with the embeddings in the order specified on the server. Next it uploads it to the server.\n The received embedding_id is saved as a property of self.\n+\n Args:\n- path_to_embeddings_csv: the filepath to the .csv containing the embeddings, e.g. \"path/to/embeddings.csv\"\n- name: The name of the embedding. If an embedding with such a name already exists on the server,\n+ path_to_embeddings_csv:\n+ The path to the .csv containing the embeddings, e.g. \"path/to/embeddings.csv\"\n+ name:\n+ The name of the embedding. If an embedding with such a name already exists on the server,\n the upload is aborted.\n \n- Returns:\n- None\n-\n \"\"\"\n # get the names of the current embeddings on the server:\n embeddings_on_server: List[DatasetEmbeddingData] = \\\n", "issue": "Add documentation for creating and uploading embeddings through the pip package (non cli)\nThe CLI way of obtaining and uploading embeddings is described. It's pretty minimal but works. However, there is no information on how to do this yourself without using the CLI. \r\n\r\nI would prefer steps starting from having a pre-trained model.\r\n```python\r\nclass Net(nn.Module):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\r\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\r\n self.dropout1 = nn.Dropout(0.25)\r\n self.dropout2 = nn.Dropout(0.5)\r\n self.fc1 = nn.Linear(9216, 128)\r\n self.fc2 = nn.Linear(128, 10)\r\n\r\n def embed(self, x):\r\n x = self.conv1(x)\r\n x = F.relu(x)\r\n x = self.conv2(x)\r\n x = F.relu(x)\r\n x = F.max_pool2d(x, 2)\r\n x = self.dropout1(x)\r\n x = torch.flatten(x, 1)\r\n x = self.fc1(x)\r\n x = F.relu(x)\r\n x = self.dropout2(x)\r\n x = self.fc2(x)\r\n return x\r\n\r\n def forward(self, x):\r\n x = self.embed(x)\r\n output = F.log_softmax(x, dim=1)\r\n return output\r\n\r\nmodel = Net()\r\n\r\n# let's assume here model is pre-trained\r\n# I can get the embeddings and predictions using\r\nembedding = model.embed(x)\r\nprediction = model(x)\r\n\r\n# TODO: how to create embeddings and upload them to lightly\r\n```\n", "before_files": [{"content": "import time\nimport random\nimport time\nimport warnings\nfrom io import IOBase\nfrom typing import *\n\nimport requests\nfrom requests import Response\n\nfrom lightly.__init__ import __version__\nfrom lightly.api.api_workflow_datasets import _DatasetsMixin\nfrom lightly.api.api_workflow_download_dataset import _DownloadDatasetMixin\nfrom lightly.api.api_workflow_sampling import _SamplingMixin\nfrom lightly.api.api_workflow_upload_dataset import _UploadDatasetMixin\nfrom lightly.api.api_workflow_upload_embeddings import _UploadEmbeddingsMixin\nfrom lightly.api.utils import getenv\nfrom lightly.api.version_checking import get_minimum_compatible_version, version_compare\nfrom lightly.openapi_generated.swagger_client import TagData, ScoresApi, QuotaApi\nfrom lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi\nfrom lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi\nfrom lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi\nfrom lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi\nfrom lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi\nfrom lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi\nfrom lightly.openapi_generated.swagger_client.api.tags_api import TagsApi\nfrom lightly.openapi_generated.swagger_client.api_client import ApiClient\nfrom lightly.openapi_generated.swagger_client.configuration import Configuration\nfrom lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData\n\n\nclass ApiWorkflowClient(_UploadEmbeddingsMixin, _SamplingMixin, _UploadDatasetMixin, _DownloadDatasetMixin,\n _DatasetsMixin):\n \"\"\"Provides a uniform interface to communicate with the api \n \n The APIWorkflowClient is used to communicaate with the Lightly API. The client\n can run also more complex workflows which include multiple API calls at once.\n \n The client can be used in combination with the active learning agent. \n\n Args:\n token:\n the token of the user, provided in webapp\n dataset_id:\n the id of the dataset, provided in webapp. \\\n If it is not set, but used by a workflow, \\\n the last modfied dataset is taken by default.\n embedding_id:\n the id of the embedding to use. If it is not set, \\\n but used by a workflow, the newest embedding is taken by default\n \"\"\"\n\n def __init__(self, token: str, dataset_id: str = None, embedding_id: str = None):\n\n self.check_version_compatibility()\n\n configuration = Configuration()\n configuration.host = getenv('LIGHTLY_SERVER_LOCATION', 'https://api.lightly.ai')\n configuration.api_key = {'token': token}\n api_client = ApiClient(configuration=configuration)\n self.api_client = api_client\n\n self.token = token\n if dataset_id is not None:\n self._dataset_id = dataset_id\n if embedding_id is not None:\n self.embedding_id = embedding_id\n\n self.datasets_api = DatasetsApi(api_client=self.api_client)\n self.samplings_api = SamplingsApi(api_client=self.api_client)\n self.jobs_api = JobsApi(api_client=self.api_client)\n self.tags_api = TagsApi(api_client=self.api_client)\n self.embeddings_api = EmbeddingsApi(api_client=api_client)\n self.mappings_api = MappingsApi(api_client=api_client)\n self.scores_api = ScoresApi(api_client=api_client)\n self.samples_api = SamplesApi(api_client=api_client)\n self.quota_api = QuotaApi(api_client=api_client)\n\n def check_version_compatibility(self):\n minimum_version = get_minimum_compatible_version()\n if version_compare(__version__, minimum_version) < 0:\n raise ValueError(f\"Incompatible Version of lightly pip package. \"\n f\"Please upgrade to at least version {minimum_version} \"\n f\"to be able to access the api and webapp\")\n\n @property\n def dataset_id(self) -> str:\n ''' Returns the dataset_id\n\n If the dataset_id is set, it is returned.\n If it is unset, then the dataset_id of the last modified dataset is taken.\n\n '''\n try:\n return self._dataset_id\n except AttributeError:\n all_datasets: List[DatasetData] = self.datasets_api.get_datasets()\n datasets_sorted = sorted(all_datasets, key=lambda dataset: dataset.last_modified_at)\n last_modified_dataset = datasets_sorted[-1]\n self._dataset_id = last_modified_dataset.id\n warnings.warn(UserWarning(f\"Dataset has not been specified, \"\n f\"taking the last modified dataset {last_modified_dataset.name} as default dataset.\"))\n return self._dataset_id\n\n def _get_all_tags(self) -> List[TagData]:\n return self.tags_api.get_tags_by_dataset_id(self.dataset_id)\n\n def _order_list_by_filenames(self, filenames_for_list: List[str], list_to_order: List[object]) -> List[object]:\n \"\"\"Orders a list such that it is in the order of the filenames specified on the server.\n\n Args:\n filenames_for_list:\n The filenames of samples in a specific order\n list_to_order:\n Some values belonging to the samples\n\n Returns:\n The list reordered. The same reorder applied on the filenames_for_list\n would put them in the order of the filenames in self.filenames_on_server\n\n \"\"\"\n assert len(filenames_for_list) == len(list_to_order)\n dict_by_filenames = dict(zip(filenames_for_list, list_to_order))\n list_ordered = [dict_by_filenames[filename] for filename in self.filenames_on_server\n if filename in filenames_for_list]\n return list_ordered\n\n @property\n def filenames_on_server(self):\n if not hasattr(self, \"_filenames_on_server\"):\n self._filenames_on_server = self.mappings_api. \\\n get_sample_mappings_by_dataset_id(dataset_id=self.dataset_id, field=\"fileName\")\n return self._filenames_on_server\n\n def upload_file_with_signed_url(self, file: IOBase, signed_write_url: str,\n max_backoff: int = 32, max_retries: int = 5) -> Response:\n \"\"\"Uploads a file to a url via a put request.\n\n Args:\n file:\n The file to upload.\n signed_write_url:\n The url to upload the file to. As no authorization is used,\n the url must be a signed write url.\n max_backoff:\n Maximal backoff before retrying.\n max_retries:\n Maximum number of retries before timing out.\n\n Returns:\n The response of the put request, usually a 200 for the success case.\n\n \"\"\"\n\n response = requests.put(signed_write_url, data=file)\n\n if response.status_code != 200:\n msg = f'Failed PUT request to {signed_write_url} with status_code'\n msg += f'{response.status__code}!'\n raise RuntimeError(msg)\n\n return response\n", "path": "lightly/api/api_workflow_client.py"}, {"content": "import csv\nfrom typing import List\n\nfrom lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData\nfrom lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData\n\n\n\ndef _is_valid_filename(filename: str):\n \"\"\"Returns False if the filename is misformatted.\n\n \"\"\"\n invalid_characters = [',']\n for character in invalid_characters:\n if character in filename:\n return False\n return True\n\n\nclass _UploadEmbeddingsMixin:\n\n def set_embedding_id_by_name(self, embedding_name: str = None):\n embeddings: List[DatasetEmbeddingData] = \\\n self.embeddings_api.get_embeddings_by_dataset_id(dataset_id=self.dataset_id)\n\n if embedding_name is None:\n self.embedding_id = embeddings[-1].id\n return\n\n try:\n self.embedding_id = next(embedding.id for embedding in embeddings if embedding.name == embedding_name)\n except StopIteration:\n raise ValueError(f\"No embedding with name {embedding_name} found on the server.\")\n\n def upload_embeddings(self, path_to_embeddings_csv: str, name: str):\n \"\"\"Uploads embeddings to the server.\n\n First checks that the specified embedding name is not on ther server. If it is, the upload is aborted.\n Then creates a new csv with the embeddings in the order specified on the server. Next it uploads it to the server.\n The received embedding_id is saved as a property of self.\n Args:\n path_to_embeddings_csv: the filepath to the .csv containing the embeddings, e.g. \"path/to/embeddings.csv\"\n name: The name of the embedding. If an embedding with such a name already exists on the server,\n the upload is aborted.\n\n Returns:\n None\n\n \"\"\"\n # get the names of the current embeddings on the server:\n embeddings_on_server: List[DatasetEmbeddingData] = \\\n self.embeddings_api.get_embeddings_by_dataset_id(dataset_id=self.dataset_id)\n names_embeddings_on_server = [embedding.name for embedding in embeddings_on_server]\n\n if name in names_embeddings_on_server:\n print(f\"Aborting upload, embedding with name='{name}' already exists.\")\n self.embedding_id = next(embedding for embedding in embeddings_on_server if embedding.name == name).id\n return\n\n # create a new csv with the filenames in the desired order\n path_to_ordered_embeddings_csv = self._order_csv_by_filenames(\n path_to_embeddings_csv=path_to_embeddings_csv)\n\n # get the URL to upload the csv to\n response: WriteCSVUrlData = \\\n self.embeddings_api.get_embeddings_csv_write_url_by_id(self.dataset_id, name=name)\n self.embedding_id = response.embedding_id\n signed_write_url = response.signed_write_url\n\n # upload the csv to the URL\n with open(path_to_ordered_embeddings_csv, 'rb') as file_ordered_embeddings_csv:\n self.upload_file_with_signed_url(file=file_ordered_embeddings_csv, signed_write_url=signed_write_url)\n\n def _order_csv_by_filenames(self, path_to_embeddings_csv: str) -> str:\n \"\"\"Orders the rows in a csv according to the order specified on the server and saves it as a new file.\n\n Args:\n path_to_embeddings_csv:\n the path to the csv to order\n\n Returns:\n the filepath to the new csv\n\n \"\"\"\n with open(path_to_embeddings_csv, 'r') as f:\n data = csv.reader(f)\n\n rows = list(data)\n header_row = rows[0]\n rows_without_header = rows[1:]\n index_filenames = header_row.index('filenames')\n filenames = [row[index_filenames] for row in rows_without_header]\n\n if len(filenames) != len(self.filenames_on_server):\n raise ValueError(f'There are {len(filenames)} rows in the embedding file, but '\n f'{len(self.filenames_on_server)} filenames/samples on the server.')\n if set(filenames) != set(self.filenames_on_server):\n raise ValueError(f'The filenames in the embedding file and the filenames on the server do not align')\n invalid_filenames = [f for f in filenames if not _is_valid_filename(f)]\n if len(invalid_filenames) > 0:\n raise ValueError(f'Invalid filename(s) in embedding file: {invalid_filenames}')\n\n rows_without_header_ordered = self._order_list_by_filenames(filenames, rows_without_header)\n\n rows_to_write = [header_row]\n rows_to_write += rows_without_header_ordered\n\n path_to_ordered_embeddings_csv = path_to_embeddings_csv.replace('.csv', '_sorted.csv')\n with open(path_to_ordered_embeddings_csv, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(rows_to_write)\n\n return path_to_ordered_embeddings_csv\n", "path": "lightly/api/api_workflow_upload_embeddings.py"}], "after_files": [{"content": "import time\nimport random\nimport time\nimport warnings\nfrom io import IOBase\nfrom typing import *\n\nimport requests\nfrom requests import Response\n\nfrom lightly.__init__ import __version__\nfrom lightly.api.api_workflow_datasets import _DatasetsMixin\nfrom lightly.api.api_workflow_download_dataset import _DownloadDatasetMixin\nfrom lightly.api.api_workflow_sampling import _SamplingMixin\nfrom lightly.api.api_workflow_upload_dataset import _UploadDatasetMixin\nfrom lightly.api.api_workflow_upload_embeddings import _UploadEmbeddingsMixin\nfrom lightly.api.utils import getenv\nfrom lightly.api.version_checking import get_minimum_compatible_version, version_compare\nfrom lightly.openapi_generated.swagger_client import TagData, ScoresApi, QuotaApi\nfrom lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi\nfrom lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi\nfrom lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi\nfrom lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi\nfrom lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi\nfrom lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi\nfrom lightly.openapi_generated.swagger_client.api.tags_api import TagsApi\nfrom lightly.openapi_generated.swagger_client.api_client import ApiClient\nfrom lightly.openapi_generated.swagger_client.configuration import Configuration\nfrom lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData\n\n\nclass ApiWorkflowClient(_UploadEmbeddingsMixin, _SamplingMixin, _UploadDatasetMixin, _DownloadDatasetMixin,\n _DatasetsMixin):\n \"\"\"Provides a uniform interface to communicate with the api \n \n The APIWorkflowClient is used to communicaate with the Lightly API. The client\n can run also more complex workflows which include multiple API calls at once.\n \n The client can be used in combination with the active learning agent. \n\n Args:\n token:\n the token of the user, provided in webapp\n dataset_id:\n the id of the dataset, provided in webapp. \\\n If it is not set, but used by a workflow, \\\n the last modfied dataset is taken by default.\n embedding_id:\n the id of the embedding to use. If it is not set, \\\n but used by a workflow, the newest embedding is taken by default\n \"\"\"\n\n def __init__(self, token: str, dataset_id: str = None, embedding_id: str = None):\n\n self.check_version_compatibility()\n\n configuration = Configuration()\n configuration.host = getenv('LIGHTLY_SERVER_LOCATION', 'https://api.lightly.ai')\n configuration.api_key = {'token': token}\n api_client = ApiClient(configuration=configuration)\n self.api_client = api_client\n\n self.token = token\n if dataset_id is not None:\n self._dataset_id = dataset_id\n if embedding_id is not None:\n self.embedding_id = embedding_id\n\n self.datasets_api = DatasetsApi(api_client=self.api_client)\n self.samplings_api = SamplingsApi(api_client=self.api_client)\n self.jobs_api = JobsApi(api_client=self.api_client)\n self.tags_api = TagsApi(api_client=self.api_client)\n self.embeddings_api = EmbeddingsApi(api_client=api_client)\n self.mappings_api = MappingsApi(api_client=api_client)\n self.scores_api = ScoresApi(api_client=api_client)\n self.samples_api = SamplesApi(api_client=api_client)\n self.quota_api = QuotaApi(api_client=api_client)\n\n def check_version_compatibility(self):\n minimum_version = get_minimum_compatible_version()\n if version_compare(__version__, minimum_version) < 0:\n raise ValueError(f\"Incompatible Version of lightly pip package. \"\n f\"Please upgrade to at least version {minimum_version} \"\n f\"to be able to access the api and webapp\")\n\n @property\n def dataset_id(self) -> str:\n '''The current dataset_id.\n\n If the dataset_id is set, it is returned.\n If it is not set, then the dataset_id of the last modified dataset is selected.\n ''' \n try:\n return self._dataset_id\n except AttributeError:\n all_datasets: List[DatasetData] = self.datasets_api.get_datasets()\n datasets_sorted = sorted(all_datasets, key=lambda dataset: dataset.last_modified_at)\n last_modified_dataset = datasets_sorted[-1]\n self._dataset_id = last_modified_dataset.id\n warnings.warn(UserWarning(f\"Dataset has not been specified, \"\n f\"taking the last modified dataset {last_modified_dataset.name} as default dataset.\"))\n return self._dataset_id\n\n def _get_all_tags(self) -> List[TagData]:\n return self.tags_api.get_tags_by_dataset_id(self.dataset_id)\n\n def _order_list_by_filenames(self, filenames_for_list: List[str], list_to_order: List[object]) -> List[object]:\n \"\"\"Orders a list such that it is in the order of the filenames specified on the server.\n\n Args:\n filenames_for_list:\n The filenames of samples in a specific order\n list_to_order:\n Some values belonging to the samples\n\n Returns:\n The list reordered. The same reorder applied on the filenames_for_list\n would put them in the order of the filenames in self.filenames_on_server\n\n \"\"\"\n assert len(filenames_for_list) == len(list_to_order)\n dict_by_filenames = dict(zip(filenames_for_list, list_to_order))\n list_ordered = [dict_by_filenames[filename] for filename in self.filenames_on_server\n if filename in filenames_for_list]\n return list_ordered\n\n @property\n def filenames_on_server(self):\n '''The list of the filenames in the dataset.\n\n '''\n if not hasattr(self, \"_filenames_on_server\"):\n self._filenames_on_server = self.mappings_api. \\\n get_sample_mappings_by_dataset_id(dataset_id=self.dataset_id, field=\"fileName\")\n return self._filenames_on_server\n\n def upload_file_with_signed_url(self, file: IOBase, signed_write_url: str,\n max_backoff: int = 32, max_retries: int = 5) -> Response:\n \"\"\"Uploads a file to a url via a put request.\n\n Args:\n file:\n The file to upload.\n signed_write_url:\n The url to upload the file to. As no authorization is used,\n the url must be a signed write url.\n max_backoff:\n Maximal backoff before retrying.\n max_retries:\n Maximum number of retries before timing out.\n\n Returns:\n The response of the put request, usually a 200 for the success case.\n\n \"\"\"\n\n response = requests.put(signed_write_url, data=file)\n\n if response.status_code != 200:\n msg = f'Failed PUT request to {signed_write_url} with status_code'\n msg += f'{response.status__code}!'\n raise RuntimeError(msg)\n\n return response\n", "path": "lightly/api/api_workflow_client.py"}, {"content": "import csv\nfrom typing import List\n\nfrom lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData\nfrom lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData\n\n\n\ndef _is_valid_filename(filename: str):\n \"\"\"Returns False if the filename is misformatted.\n\n \"\"\"\n invalid_characters = [',']\n for character in invalid_characters:\n if character in filename:\n return False\n return True\n\n\nclass _UploadEmbeddingsMixin:\n\n def set_embedding_id_by_name(self, embedding_name: str = None):\n \"\"\"Sets the embedding id of the client by embedding name.\n\n Args:\n embedding_name:\n Name under which the embedding was uploaded.\n \n Raises:\n ValueError if the embedding does not exist.\n \"\"\"\n embeddings: List[DatasetEmbeddingData] = \\\n self.embeddings_api.get_embeddings_by_dataset_id(dataset_id=self.dataset_id)\n\n if embedding_name is None:\n self.embedding_id = embeddings[-1].id\n return\n\n try:\n self.embedding_id = next(embedding.id for embedding in embeddings if embedding.name == embedding_name)\n except StopIteration:\n raise ValueError(f\"No embedding with name {embedding_name} found on the server.\")\n\n def upload_embeddings(self, path_to_embeddings_csv: str, name: str):\n \"\"\"Uploads embeddings to the server.\n\n First checks that the specified embedding name is not on ther server. If it is, the upload is aborted.\n Then creates a new csv with the embeddings in the order specified on the server. Next it uploads it to the server.\n The received embedding_id is saved as a property of self.\n\n Args:\n path_to_embeddings_csv:\n The path to the .csv containing the embeddings, e.g. \"path/to/embeddings.csv\"\n name:\n The name of the embedding. If an embedding with such a name already exists on the server,\n the upload is aborted.\n\n \"\"\"\n # get the names of the current embeddings on the server:\n embeddings_on_server: List[DatasetEmbeddingData] = \\\n self.embeddings_api.get_embeddings_by_dataset_id(dataset_id=self.dataset_id)\n names_embeddings_on_server = [embedding.name for embedding in embeddings_on_server]\n\n if name in names_embeddings_on_server:\n print(f\"Aborting upload, embedding with name='{name}' already exists.\")\n self.embedding_id = next(embedding for embedding in embeddings_on_server if embedding.name == name).id\n return\n\n # create a new csv with the filenames in the desired order\n path_to_ordered_embeddings_csv = self._order_csv_by_filenames(\n path_to_embeddings_csv=path_to_embeddings_csv)\n\n # get the URL to upload the csv to\n response: WriteCSVUrlData = \\\n self.embeddings_api.get_embeddings_csv_write_url_by_id(self.dataset_id, name=name)\n self.embedding_id = response.embedding_id\n signed_write_url = response.signed_write_url\n\n # upload the csv to the URL\n with open(path_to_ordered_embeddings_csv, 'rb') as file_ordered_embeddings_csv:\n self.upload_file_with_signed_url(file=file_ordered_embeddings_csv, signed_write_url=signed_write_url)\n\n def _order_csv_by_filenames(self, path_to_embeddings_csv: str) -> str:\n \"\"\"Orders the rows in a csv according to the order specified on the server and saves it as a new file.\n\n Args:\n path_to_embeddings_csv:\n the path to the csv to order\n\n Returns:\n the filepath to the new csv\n\n \"\"\"\n with open(path_to_embeddings_csv, 'r') as f:\n data = csv.reader(f)\n\n rows = list(data)\n header_row = rows[0]\n rows_without_header = rows[1:]\n index_filenames = header_row.index('filenames')\n filenames = [row[index_filenames] for row in rows_without_header]\n\n if len(filenames) != len(self.filenames_on_server):\n raise ValueError(f'There are {len(filenames)} rows in the embedding file, but '\n f'{len(self.filenames_on_server)} filenames/samples on the server.')\n if set(filenames) != set(self.filenames_on_server):\n raise ValueError(f'The filenames in the embedding file and the filenames on the server do not align')\n invalid_filenames = [f for f in filenames if not _is_valid_filename(f)]\n if len(invalid_filenames) > 0:\n raise ValueError(f'Invalid filename(s) in embedding file: {invalid_filenames}')\n\n rows_without_header_ordered = self._order_list_by_filenames(filenames, rows_without_header)\n\n rows_to_write = [header_row]\n rows_to_write += rows_without_header_ordered\n\n path_to_ordered_embeddings_csv = path_to_embeddings_csv.replace('.csv', '_sorted.csv')\n with open(path_to_ordered_embeddings_csv, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(rows_to_write)\n\n return path_to_ordered_embeddings_csv\n", "path": "lightly/api/api_workflow_upload_embeddings.py"}]}
3,743
634
gh_patches_debug_27387
rasdani/github-patches
git_diff
e-valuation__EvaP-1250
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Participant count incorrect on results index E.g. for "Lehrerworkshop bei der Endrunde des Bundeswettbewerb Informatik (Sommersemester 2017)" the overview shows "0/0", or for MINT-Camps it says "1/1" or "3/3" participants - the correct numbers are 12, 24 and 22. ![capture](https://user-images.githubusercontent.com/2188983/43835155-2096edf6-9b11-11e8-98ea-b6f8e62231fa.PNG) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `evap/results/views.py` Content: ``` 1 from collections import defaultdict 2 from statistics import median 3 4 from django.conf import settings 5 from django.db.models import QuerySet, Prefetch, Count 6 from django.core.cache import caches 7 from django.core.cache.utils import make_template_fragment_key 8 from django.core.exceptions import PermissionDenied 9 from django.shortcuts import get_object_or_404, render 10 from django.template.loader import get_template 11 from django.contrib.auth.decorators import login_required 12 from django.utils import translation 13 14 from evap.evaluation.models import Semester, Degree, Contribution, Course, CourseType 15 from evap.evaluation.auth import internal_required 16 from evap.results.tools import collect_results, calculate_average_distribution, distribution_to_grade, \ 17 TextAnswer, TextResult, HeadingResult, get_single_result_rating_result 18 19 20 def get_course_result_template_fragment_cache_key(course_id, language, can_user_see_results_page): 21 return make_template_fragment_key('course_result_template_fragment', [course_id, language, can_user_see_results_page]) 22 23 24 def delete_template_cache(course): 25 assert course.state != 'published' 26 caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'en', True)) 27 caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'en', False)) 28 caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'de', True)) 29 caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'de', False)) 30 31 32 def warm_up_template_cache(courses): 33 courses = get_courses_with_prefetched_data(courses) 34 current_language = translation.get_language() 35 try: 36 for course in courses: 37 assert course.state == 'published' 38 translation.activate('en') 39 get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=True)) 40 get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=False)) 41 translation.activate('de') 42 get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=True)) 43 get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=False)) 44 assert get_course_result_template_fragment_cache_key(course.id, 'en', True) in caches['results'] 45 assert get_course_result_template_fragment_cache_key(course.id, 'en', False) in caches['results'] 46 assert get_course_result_template_fragment_cache_key(course.id, 'de', True) in caches['results'] 47 assert get_course_result_template_fragment_cache_key(course.id, 'de', False) in caches['results'] 48 finally: 49 translation.activate(current_language) # reset to previously set language to prevent unwanted side effects 50 51 52 def get_courses_with_prefetched_data(courses): 53 if isinstance(courses, QuerySet): 54 courses = (courses 55 .annotate(num_participants=Count("participants", distinct=True), num_voters=Count("voters", distinct=True)) 56 .select_related("type") 57 .prefetch_related( 58 "degrees", 59 "semester", 60 Prefetch("contributions", queryset=Contribution.objects.filter(responsible=True).select_related("contributor"), to_attr="responsible_contributions") 61 ) 62 ) 63 for course in courses: 64 course.responsible_contributors = [contribution.contributor for contribution in course.responsible_contributions] 65 for course in courses: 66 if not course.is_single_result: 67 course.distribution = calculate_average_distribution(course) 68 course.avg_grade = distribution_to_grade(course.distribution) 69 else: 70 course.single_result_rating_result = get_single_result_rating_result(course) 71 return courses 72 73 74 @internal_required 75 def index(request): 76 semesters = Semester.get_all_with_published_unarchived_results() 77 courses = Course.objects.filter(semester__in=semesters, state='published') 78 courses = [course for course in courses if course.can_user_see_course(request.user)] 79 80 if request.user.is_reviewer: 81 additional_courses = Course.objects.filter(semester__in=semesters, state__in=['in_evaluation', 'evaluated', 'reviewed']) 82 courses += get_courses_with_prefetched_data(additional_courses) 83 84 course_pks = [course.pk for course in courses] 85 degrees = Degree.objects.filter(courses__pk__in=course_pks).distinct() 86 course_types = CourseType.objects.filter(courses__pk__in=course_pks).distinct() 87 template_data = dict( 88 courses=courses, 89 degrees=degrees, 90 course_types=sorted(course_types, key=lambda course_type: course_type.name), 91 semesters=semesters, 92 ) 93 return render(request, "results_index.html", template_data) 94 95 96 @login_required 97 def course_detail(request, semester_id, course_id): 98 semester = get_object_or_404(Semester, id=semester_id) 99 course = get_object_or_404(semester.course_set, id=course_id, semester=semester) 100 101 if not course.can_user_see_results_page(request.user): 102 raise PermissionDenied 103 104 course_result = collect_results(course) 105 106 if request.user.is_reviewer: 107 public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view. 108 else: 109 public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view. 110 111 # redirect to non-public view if there is none because the results have not been published 112 if not course.can_publish_rating_results: 113 public_view = False 114 115 represented_users = list(request.user.represented_users.all()) + [request.user] 116 117 # remove text answers if the user may not see them 118 for questionnaire_result in course_result.questionnaire_results: 119 for question_result in questionnaire_result.question_results: 120 if isinstance(question_result, TextResult): 121 question_result.answers = [answer for answer in question_result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)] 122 # remove empty TextResults 123 questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0] 124 125 # filter empty headings 126 for questionnaire_result in course_result.questionnaire_results: 127 filtered_question_results = [] 128 for index, question_result in enumerate(questionnaire_result.question_results): 129 # filter out if there are no more questions or the next question is also a heading question 130 if isinstance(question_result, HeadingResult): 131 if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult): 132 continue 133 filtered_question_results.append(question_result) 134 questionnaire_result.question_results = filtered_question_results 135 136 # remove empty questionnaire_results and contribution_results 137 for contribution_result in course_result.contribution_results: 138 contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results] 139 course_result.contribution_results = [contribution_result for contribution_result in course_result.contribution_results if contribution_result.questionnaire_results] 140 141 add_warnings(course, course_result) 142 143 # split course_result into different lists 144 course_questionnaire_results_top = [] 145 course_questionnaire_results_bottom = [] 146 contributor_contribution_results = [] 147 for contribution_result in course_result.contribution_results: 148 if contribution_result.contributor is None: 149 for questionnaire_result in contribution_result.questionnaire_results: 150 if questionnaire_result.questionnaire.is_below_contributors: 151 course_questionnaire_results_bottom.append(questionnaire_result) 152 else: 153 course_questionnaire_results_top.append(questionnaire_result) 154 else: 155 contributor_contribution_results.append(contribution_result) 156 157 if not contributor_contribution_results: 158 course_questionnaire_results_top += course_questionnaire_results_bottom 159 course_questionnaire_results_bottom = [] 160 161 course.distribution = calculate_average_distribution(course) 162 course.avg_grade = distribution_to_grade(course.distribution) 163 164 template_data = dict( 165 course=course, 166 course_questionnaire_results_top=course_questionnaire_results_top, 167 course_questionnaire_results_bottom=course_questionnaire_results_bottom, 168 contributor_contribution_results=contributor_contribution_results, 169 reviewer=request.user.is_reviewer, 170 contributor=course.is_user_contributor_or_delegate(request.user), 171 can_download_grades=request.user.can_download_grades, 172 public_view=public_view) 173 return render(request, "results_course_detail.html", template_data) 174 175 176 def add_warnings(course, course_result): 177 if not course.can_publish_rating_results: 178 return 179 180 # calculate the median values of how many people answered a questionnaire across all contributions 181 questionnaire_max_answers = defaultdict(list) 182 for questionnaire_result in course_result.questionnaire_results: 183 max_answers = max((question_result.count_sum for question_result in questionnaire_result.question_results if question_result.question.is_rating_question), default=0) 184 questionnaire_max_answers[questionnaire_result.questionnaire].append(max_answers) 185 186 questionnaire_warning_thresholds = {} 187 for questionnaire, max_answers_list in questionnaire_max_answers.items(): 188 questionnaire_warning_thresholds[questionnaire] = max(settings.RESULTS_WARNING_PERCENTAGE * median(max_answers_list), settings.RESULTS_WARNING_COUNT) 189 190 for questionnaire_result in course_result.questionnaire_results: 191 rating_results = [question_result for question_result in questionnaire_result.question_results if question_result.question.is_rating_question] 192 max_answers = max((rating_result.count_sum for rating_result in rating_results), default=0) 193 questionnaire_result.warning = 0 < max_answers < questionnaire_warning_thresholds[questionnaire_result.questionnaire] 194 195 for rating_result in rating_results: 196 rating_result.warning = questionnaire_result.warning or rating_result.has_answers and rating_result.count_sum < questionnaire_warning_thresholds[questionnaire_result.questionnaire] 197 198 199 def user_can_see_text_answer(user, represented_users, text_answer, public_view=False): 200 assert text_answer.state in [TextAnswer.PRIVATE, TextAnswer.PUBLISHED] 201 202 if public_view: 203 return False 204 if user.is_reviewer: 205 return True 206 207 contributor = text_answer.contribution.contributor 208 209 if text_answer.is_private: 210 return contributor == user 211 212 if text_answer.is_published: 213 if text_answer.contribution.responsible: 214 return contributor == user or user in contributor.delegates.all() 215 216 if contributor in represented_users: 217 return True 218 if text_answer.contribution.course.contributions.filter( 219 contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists(): 220 return True 221 if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter( 222 contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists(): 223 return True 224 225 return False 226 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/evap/results/views.py b/evap/results/views.py --- a/evap/results/views.py +++ b/evap/results/views.py @@ -51,8 +51,9 @@ def get_courses_with_prefetched_data(courses): if isinstance(courses, QuerySet): + participant_counts = courses.annotate(num_participants=Count("participants")).values_list("num_participants", flat=True) + voter_counts = courses.annotate(num_voters=Count("voters")).values_list("num_voters", flat=True) courses = (courses - .annotate(num_participants=Count("participants", distinct=True), num_voters=Count("voters", distinct=True)) .select_related("type") .prefetch_related( "degrees", @@ -60,7 +61,10 @@ Prefetch("contributions", queryset=Contribution.objects.filter(responsible=True).select_related("contributor"), to_attr="responsible_contributions") ) ) - for course in courses: + for course, participant_count, voter_count in zip(courses, participant_counts, voter_counts): + if course._participant_count is None: + course.num_participants = participant_count + course.num_voters = voter_count course.responsible_contributors = [contribution.contributor for contribution in course.responsible_contributions] for course in courses: if not course.is_single_result:
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -51,8 +51,9 @@\n \n def get_courses_with_prefetched_data(courses):\n if isinstance(courses, QuerySet):\n+ participant_counts = courses.annotate(num_participants=Count(\"participants\")).values_list(\"num_participants\", flat=True)\n+ voter_counts = courses.annotate(num_voters=Count(\"voters\")).values_list(\"num_voters\", flat=True)\n courses = (courses\n- .annotate(num_participants=Count(\"participants\", distinct=True), num_voters=Count(\"voters\", distinct=True))\n .select_related(\"type\")\n .prefetch_related(\n \"degrees\",\n@@ -60,7 +61,10 @@\n Prefetch(\"contributions\", queryset=Contribution.objects.filter(responsible=True).select_related(\"contributor\"), to_attr=\"responsible_contributions\")\n )\n )\n- for course in courses:\n+ for course, participant_count, voter_count in zip(courses, participant_counts, voter_counts):\n+ if course._participant_count is None:\n+ course.num_participants = participant_count\n+ course.num_voters = voter_count\n course.responsible_contributors = [contribution.contributor for contribution in course.responsible_contributions]\n for course in courses:\n if not course.is_single_result:\n", "issue": "Participant count incorrect on results index\nE.g. for \"Lehrerworkshop bei der Endrunde des Bundeswettbewerb Informatik (Sommersemester 2017)\" the overview shows \"0/0\", or for MINT-Camps it says \"1/1\" or \"3/3\" participants - the correct numbers are 12, 24 and 22.\r\n\r\n![capture](https://user-images.githubusercontent.com/2188983/43835155-2096edf6-9b11-11e8-98ea-b6f8e62231fa.PNG)\r\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom statistics import median\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet, Prefetch, Count\nfrom django.core.cache import caches\nfrom django.core.cache.utils import make_template_fragment_key\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template.loader import get_template\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import translation\n\nfrom evap.evaluation.models import Semester, Degree, Contribution, Course, CourseType\nfrom evap.evaluation.auth import internal_required\nfrom evap.results.tools import collect_results, calculate_average_distribution, distribution_to_grade, \\\n TextAnswer, TextResult, HeadingResult, get_single_result_rating_result\n\n\ndef get_course_result_template_fragment_cache_key(course_id, language, can_user_see_results_page):\n return make_template_fragment_key('course_result_template_fragment', [course_id, language, can_user_see_results_page])\n\n\ndef delete_template_cache(course):\n assert course.state != 'published'\n caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'en', True))\n caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'en', False))\n caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'de', True))\n caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'de', False))\n\n\ndef warm_up_template_cache(courses):\n courses = get_courses_with_prefetched_data(courses)\n current_language = translation.get_language()\n try:\n for course in courses:\n assert course.state == 'published'\n translation.activate('en')\n get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=True))\n get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=False))\n translation.activate('de')\n get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=True))\n get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=False))\n assert get_course_result_template_fragment_cache_key(course.id, 'en', True) in caches['results']\n assert get_course_result_template_fragment_cache_key(course.id, 'en', False) in caches['results']\n assert get_course_result_template_fragment_cache_key(course.id, 'de', True) in caches['results']\n assert get_course_result_template_fragment_cache_key(course.id, 'de', False) in caches['results']\n finally:\n translation.activate(current_language) # reset to previously set language to prevent unwanted side effects\n\n\ndef get_courses_with_prefetched_data(courses):\n if isinstance(courses, QuerySet):\n courses = (courses\n .annotate(num_participants=Count(\"participants\", distinct=True), num_voters=Count(\"voters\", distinct=True))\n .select_related(\"type\")\n .prefetch_related(\n \"degrees\",\n \"semester\",\n Prefetch(\"contributions\", queryset=Contribution.objects.filter(responsible=True).select_related(\"contributor\"), to_attr=\"responsible_contributions\")\n )\n )\n for course in courses:\n course.responsible_contributors = [contribution.contributor for contribution in course.responsible_contributions]\n for course in courses:\n if not course.is_single_result:\n course.distribution = calculate_average_distribution(course)\n course.avg_grade = distribution_to_grade(course.distribution)\n else:\n course.single_result_rating_result = get_single_result_rating_result(course)\n return courses\n\n\n@internal_required\ndef index(request):\n semesters = Semester.get_all_with_published_unarchived_results()\n courses = Course.objects.filter(semester__in=semesters, state='published')\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n if request.user.is_reviewer:\n additional_courses = Course.objects.filter(semester__in=semesters, state__in=['in_evaluation', 'evaluated', 'reviewed'])\n courses += get_courses_with_prefetched_data(additional_courses)\n\n course_pks = [course.pk for course in courses]\n degrees = Degree.objects.filter(courses__pk__in=course_pks).distinct()\n course_types = CourseType.objects.filter(courses__pk__in=course_pks).distinct()\n template_data = dict(\n courses=courses,\n degrees=degrees,\n course_types=sorted(course_types, key=lambda course_type: course_type.name),\n semesters=semesters,\n )\n return render(request, \"results_index.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results_page(request.user):\n raise PermissionDenied\n\n course_result = collect_results(course)\n\n if request.user.is_reviewer:\n public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n # redirect to non-public view if there is none because the results have not been published\n if not course.can_publish_rating_results:\n public_view = False\n\n represented_users = list(request.user.represented_users.all()) + [request.user]\n\n # remove text answers if the user may not see them\n for questionnaire_result in course_result.questionnaire_results:\n for question_result in questionnaire_result.question_results:\n if isinstance(question_result, TextResult):\n question_result.answers = [answer for answer in question_result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n # remove empty TextResults\n questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0]\n\n # filter empty headings\n for questionnaire_result in course_result.questionnaire_results:\n filtered_question_results = []\n for index, question_result in enumerate(questionnaire_result.question_results):\n # filter out if there are no more questions or the next question is also a heading question\n if isinstance(question_result, HeadingResult):\n if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):\n continue\n filtered_question_results.append(question_result)\n questionnaire_result.question_results = filtered_question_results\n\n # remove empty questionnaire_results and contribution_results\n for contribution_result in course_result.contribution_results:\n contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results]\n course_result.contribution_results = [contribution_result for contribution_result in course_result.contribution_results if contribution_result.questionnaire_results]\n\n add_warnings(course, course_result)\n\n # split course_result into different lists\n course_questionnaire_results_top = []\n course_questionnaire_results_bottom = []\n contributor_contribution_results = []\n for contribution_result in course_result.contribution_results:\n if contribution_result.contributor is None:\n for questionnaire_result in contribution_result.questionnaire_results:\n if questionnaire_result.questionnaire.is_below_contributors:\n course_questionnaire_results_bottom.append(questionnaire_result)\n else:\n course_questionnaire_results_top.append(questionnaire_result)\n else:\n contributor_contribution_results.append(contribution_result)\n\n if not contributor_contribution_results:\n course_questionnaire_results_top += course_questionnaire_results_bottom\n course_questionnaire_results_bottom = []\n\n course.distribution = calculate_average_distribution(course)\n course.avg_grade = distribution_to_grade(course.distribution)\n\n template_data = dict(\n course=course,\n course_questionnaire_results_top=course_questionnaire_results_top,\n course_questionnaire_results_bottom=course_questionnaire_results_bottom,\n contributor_contribution_results=contributor_contribution_results,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef add_warnings(course, course_result):\n if not course.can_publish_rating_results:\n return\n\n # calculate the median values of how many people answered a questionnaire across all contributions\n questionnaire_max_answers = defaultdict(list)\n for questionnaire_result in course_result.questionnaire_results:\n max_answers = max((question_result.count_sum for question_result in questionnaire_result.question_results if question_result.question.is_rating_question), default=0)\n questionnaire_max_answers[questionnaire_result.questionnaire].append(max_answers)\n\n questionnaire_warning_thresholds = {}\n for questionnaire, max_answers_list in questionnaire_max_answers.items():\n questionnaire_warning_thresholds[questionnaire] = max(settings.RESULTS_WARNING_PERCENTAGE * median(max_answers_list), settings.RESULTS_WARNING_COUNT)\n\n for questionnaire_result in course_result.questionnaire_results:\n rating_results = [question_result for question_result in questionnaire_result.question_results if question_result.question.is_rating_question]\n max_answers = max((rating_result.count_sum for rating_result in rating_results), default=0)\n questionnaire_result.warning = 0 < max_answers < questionnaire_warning_thresholds[questionnaire_result.questionnaire]\n\n for rating_result in rating_results:\n rating_result.warning = questionnaire_result.warning or rating_result.has_answers and rating_result.count_sum < questionnaire_warning_thresholds[questionnaire_result.questionnaire]\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n assert text_answer.state in [TextAnswer.PRIVATE, TextAnswer.PUBLISHED]\n\n if public_view:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom statistics import median\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet, Prefetch, Count\nfrom django.core.cache import caches\nfrom django.core.cache.utils import make_template_fragment_key\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template.loader import get_template\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import translation\n\nfrom evap.evaluation.models import Semester, Degree, Contribution, Course, CourseType\nfrom evap.evaluation.auth import internal_required\nfrom evap.results.tools import collect_results, calculate_average_distribution, distribution_to_grade, \\\n TextAnswer, TextResult, HeadingResult, get_single_result_rating_result\n\n\ndef get_course_result_template_fragment_cache_key(course_id, language, can_user_see_results_page):\n return make_template_fragment_key('course_result_template_fragment', [course_id, language, can_user_see_results_page])\n\n\ndef delete_template_cache(course):\n assert course.state != 'published'\n caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'en', True))\n caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'en', False))\n caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'de', True))\n caches['results'].delete(get_course_result_template_fragment_cache_key(course.id, 'de', False))\n\n\ndef warm_up_template_cache(courses):\n courses = get_courses_with_prefetched_data(courses)\n current_language = translation.get_language()\n try:\n for course in courses:\n assert course.state == 'published'\n translation.activate('en')\n get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=True))\n get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=False))\n translation.activate('de')\n get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=True))\n get_template('results_index_course.html').render(dict(course=course, can_user_see_results_page=False))\n assert get_course_result_template_fragment_cache_key(course.id, 'en', True) in caches['results']\n assert get_course_result_template_fragment_cache_key(course.id, 'en', False) in caches['results']\n assert get_course_result_template_fragment_cache_key(course.id, 'de', True) in caches['results']\n assert get_course_result_template_fragment_cache_key(course.id, 'de', False) in caches['results']\n finally:\n translation.activate(current_language) # reset to previously set language to prevent unwanted side effects\n\n\ndef get_courses_with_prefetched_data(courses):\n if isinstance(courses, QuerySet):\n participant_counts = courses.annotate(num_participants=Count(\"participants\")).values_list(\"num_participants\", flat=True)\n voter_counts = courses.annotate(num_voters=Count(\"voters\")).values_list(\"num_voters\", flat=True)\n courses = (courses\n .select_related(\"type\")\n .prefetch_related(\n \"degrees\",\n \"semester\",\n Prefetch(\"contributions\", queryset=Contribution.objects.filter(responsible=True).select_related(\"contributor\"), to_attr=\"responsible_contributions\")\n )\n )\n for course, participant_count, voter_count in zip(courses, participant_counts, voter_counts):\n if course._participant_count is None:\n course.num_participants = participant_count\n course.num_voters = voter_count\n course.responsible_contributors = [contribution.contributor for contribution in course.responsible_contributions]\n for course in courses:\n if not course.is_single_result:\n course.distribution = calculate_average_distribution(course)\n course.avg_grade = distribution_to_grade(course.distribution)\n else:\n course.single_result_rating_result = get_single_result_rating_result(course)\n return courses\n\n\n@internal_required\ndef index(request):\n semesters = Semester.get_all_with_published_unarchived_results()\n courses = Course.objects.filter(semester__in=semesters, state='published')\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n if request.user.is_reviewer:\n additional_courses = Course.objects.filter(semester__in=semesters, state__in=['in_evaluation', 'evaluated', 'reviewed'])\n courses += get_courses_with_prefetched_data(additional_courses)\n\n course_pks = [course.pk for course in courses]\n degrees = Degree.objects.filter(courses__pk__in=course_pks).distinct()\n course_types = CourseType.objects.filter(courses__pk__in=course_pks).distinct()\n template_data = dict(\n courses=courses,\n degrees=degrees,\n course_types=sorted(course_types, key=lambda course_type: course_type.name),\n semesters=semesters,\n )\n return render(request, \"results_index.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results_page(request.user):\n raise PermissionDenied\n\n course_result = collect_results(course)\n\n if request.user.is_reviewer:\n public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n # redirect to non-public view if there is none because the results have not been published\n if not course.can_publish_rating_results:\n public_view = False\n\n represented_users = list(request.user.represented_users.all()) + [request.user]\n\n # remove text answers if the user may not see them\n for questionnaire_result in course_result.questionnaire_results:\n for question_result in questionnaire_result.question_results:\n if isinstance(question_result, TextResult):\n question_result.answers = [answer for answer in question_result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n # remove empty TextResults\n questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0]\n\n # filter empty headings\n for questionnaire_result in course_result.questionnaire_results:\n filtered_question_results = []\n for index, question_result in enumerate(questionnaire_result.question_results):\n # filter out if there are no more questions or the next question is also a heading question\n if isinstance(question_result, HeadingResult):\n if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):\n continue\n filtered_question_results.append(question_result)\n questionnaire_result.question_results = filtered_question_results\n\n # remove empty questionnaire_results and contribution_results\n for contribution_result in course_result.contribution_results:\n contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results]\n course_result.contribution_results = [contribution_result for contribution_result in course_result.contribution_results if contribution_result.questionnaire_results]\n\n add_warnings(course, course_result)\n\n # split course_result into different lists\n course_questionnaire_results_top = []\n course_questionnaire_results_bottom = []\n contributor_contribution_results = []\n for contribution_result in course_result.contribution_results:\n if contribution_result.contributor is None:\n for questionnaire_result in contribution_result.questionnaire_results:\n if questionnaire_result.questionnaire.is_below_contributors:\n course_questionnaire_results_bottom.append(questionnaire_result)\n else:\n course_questionnaire_results_top.append(questionnaire_result)\n else:\n contributor_contribution_results.append(contribution_result)\n\n if not contributor_contribution_results:\n course_questionnaire_results_top += course_questionnaire_results_bottom\n course_questionnaire_results_bottom = []\n\n course.distribution = calculate_average_distribution(course)\n course.avg_grade = distribution_to_grade(course.distribution)\n\n template_data = dict(\n course=course,\n course_questionnaire_results_top=course_questionnaire_results_top,\n course_questionnaire_results_bottom=course_questionnaire_results_bottom,\n contributor_contribution_results=contributor_contribution_results,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef add_warnings(course, course_result):\n if not course.can_publish_rating_results:\n return\n\n # calculate the median values of how many people answered a questionnaire across all contributions\n questionnaire_max_answers = defaultdict(list)\n for questionnaire_result in course_result.questionnaire_results:\n max_answers = max((question_result.count_sum for question_result in questionnaire_result.question_results if question_result.question.is_rating_question), default=0)\n questionnaire_max_answers[questionnaire_result.questionnaire].append(max_answers)\n\n questionnaire_warning_thresholds = {}\n for questionnaire, max_answers_list in questionnaire_max_answers.items():\n questionnaire_warning_thresholds[questionnaire] = max(settings.RESULTS_WARNING_PERCENTAGE * median(max_answers_list), settings.RESULTS_WARNING_COUNT)\n\n for questionnaire_result in course_result.questionnaire_results:\n rating_results = [question_result for question_result in questionnaire_result.question_results if question_result.question.is_rating_question]\n max_answers = max((rating_result.count_sum for rating_result in rating_results), default=0)\n questionnaire_result.warning = 0 < max_answers < questionnaire_warning_thresholds[questionnaire_result.questionnaire]\n\n for rating_result in rating_results:\n rating_result.warning = questionnaire_result.warning or rating_result.has_answers and rating_result.count_sum < questionnaire_warning_thresholds[questionnaire_result.questionnaire]\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n assert text_answer.state in [TextAnswer.PRIVATE, TextAnswer.PUBLISHED]\n\n if public_view:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
3,229
310
gh_patches_debug_23780
rasdani/github-patches
git_diff
pypa__setuptools-2256
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unexpected behavior when distutils is invoked before Setuptools TL;DR We are forced to use distutils because setuptools has broken symlink processing and this causes `distutils.errors.DistutilsClassError: command class <class '__main__.SDistCommand'> must subclass Command`. It works with `setuptools<48` and the changelog doesn't document any breaking behaviors for this version. Repro: ```console $ git clone https://github.com/ansible/ansible.git $ cd ansible $ pip install -U 'setuptools>=48' $ python setup.py sdist ``` (tried under Python 3.8) Ref: https://github.com/ansible/ansible/issues/70456 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/distutils_patch.py` Content: ``` 1 """ 2 Ensure that the local copy of distutils is preferred over stdlib. 3 4 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 5 for more motivation. 6 """ 7 8 import sys 9 import re 10 import os 11 import importlib 12 import warnings 13 14 15 def clear_distutils(): 16 if 'distutils' not in sys.modules: 17 return 18 warnings.warn("Setuptools is replacing distutils") 19 mods = [name for name in sys.modules if re.match(r'distutils\b', name)] 20 for name in mods: 21 del sys.modules[name] 22 23 24 def enabled(): 25 """ 26 Allow selection of distutils by environment variable. 27 """ 28 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') 29 return which == 'local' 30 31 32 def ensure_local_distutils(): 33 clear_distutils() 34 distutils = importlib.import_module('setuptools._distutils') 35 distutils.__name__ = 'distutils' 36 sys.modules['distutils'] = distutils 37 38 # sanity check that submodules load as expected 39 core = importlib.import_module('distutils.core') 40 assert '_distutils' in core.__file__, core.__file__ 41 42 43 if enabled(): 44 ensure_local_distutils() 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/distutils_patch.py b/setuptools/distutils_patch.py --- a/setuptools/distutils_patch.py +++ b/setuptools/distutils_patch.py @@ -12,10 +12,26 @@ import warnings +is_pypy = '__pypy__' in sys.builtin_module_names + + +def warn_distutils_present(): + if 'distutils' not in sys.modules: + return + if is_pypy and sys.version_info < (3, 7): + # PyPy for 3.6 unconditionally imports distutils, so bypass the warning + # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 + return + warnings.warn( + "Distutils was imported before Setuptools. This usage is discouraged " + "and may exhibit undesirable behaviors or errors. Please use " + "Setuptools' objects directly or at least import Setuptools first.") + + def clear_distutils(): if 'distutils' not in sys.modules: return - warnings.warn("Setuptools is replacing distutils") + warnings.warn("Setuptools is replacing distutils.") mods = [name for name in sys.modules if re.match(r'distutils\b', name)] for name in mods: del sys.modules[name] @@ -40,5 +56,6 @@ assert '_distutils' in core.__file__, core.__file__ +warn_distutils_present() if enabled(): ensure_local_distutils()
{"golden_diff": "diff --git a/setuptools/distutils_patch.py b/setuptools/distutils_patch.py\n--- a/setuptools/distutils_patch.py\n+++ b/setuptools/distutils_patch.py\n@@ -12,10 +12,26 @@\n import warnings\n \n \n+is_pypy = '__pypy__' in sys.builtin_module_names\n+\n+\n+def warn_distutils_present():\n+ if 'distutils' not in sys.modules:\n+ return\n+ if is_pypy and sys.version_info < (3, 7):\n+ # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n+ # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n+ return\n+ warnings.warn(\n+ \"Distutils was imported before Setuptools. This usage is discouraged \"\n+ \"and may exhibit undesirable behaviors or errors. Please use \"\n+ \"Setuptools' objects directly or at least import Setuptools first.\")\n+\n+\n def clear_distutils():\n if 'distutils' not in sys.modules:\n return\n- warnings.warn(\"Setuptools is replacing distutils\")\n+ warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n@@ -40,5 +56,6 @@\n assert '_distutils' in core.__file__, core.__file__\n \n \n+warn_distutils_present()\n if enabled():\n ensure_local_distutils()\n", "issue": "Unexpected behavior when distutils is invoked before Setuptools\nTL;DR We are forced to use distutils because setuptools has broken symlink processing and this causes `distutils.errors.DistutilsClassError: command class <class '__main__.SDistCommand'> must subclass Command`.\r\n\r\nIt works with `setuptools<48` and the changelog doesn't document any breaking behaviors for this version.\r\n\r\nRepro:\r\n```console\r\n$ git clone https://github.com/ansible/ansible.git\r\n$ cd ansible\r\n$ pip install -U 'setuptools>=48'\r\n$ python setup.py sdist\r\n```\r\n(tried under Python 3.8)\r\n\r\nRef: https://github.com/ansible/ansible/issues/70456\n", "before_files": [{"content": "\"\"\"\nEnsure that the local copy of distutils is preferred over stdlib.\n\nSee https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\nfor more motivation.\n\"\"\"\n\nimport sys\nimport re\nimport os\nimport importlib\nimport warnings\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\nif enabled():\n ensure_local_distutils()\n", "path": "setuptools/distutils_patch.py"}], "after_files": [{"content": "\"\"\"\nEnsure that the local copy of distutils is preferred over stdlib.\n\nSee https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\nfor more motivation.\n\"\"\"\n\nimport sys\nimport re\nimport os\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools. This usage is discouraged \"\n \"and may exhibit undesirable behaviors or errors. Please use \"\n \"Setuptools' objects directly or at least import Setuptools first.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\nwarn_distutils_present()\nif enabled():\n ensure_local_distutils()\n", "path": "setuptools/distutils_patch.py"}]}
770
378
gh_patches_debug_9418
rasdani/github-patches
git_diff
python__mypy-2973
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- When `a` has type `Any`, after `assert a is None`, the rest of the block is considered dead code Steps to reproduce: 1. Check out [this repo](https://github.com/benkuhn/mypy-repro) 2. Install the packages from `requirements.txt` 3. Run `mypy repro.py` from the repo root. Expected output: ``` repro.py:4: error: Revealed type is 'Any' repro.py:6: error: Revealed type is 'builtins.None' ``` Actual output: ``` repro.py:4: error: Revealed type is 'Any' ``` The issue goes away if you disable strict Optional checking. This is preventing a significant amount of our codebase from being checked right now, so I would appreciate any suggestions for a workaround! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mypy/meet.py` Content: ``` 1 from collections import OrderedDict 2 from typing import List, Optional 3 4 from mypy.join import is_similar_callables, combine_similar_callables, join_type_list 5 from mypy.types import ( 6 Type, AnyType, TypeVisitor, UnboundType, Void, ErrorType, NoneTyp, TypeVarType, 7 Instance, CallableType, TupleType, TypedDictType, ErasedType, TypeList, UnionType, PartialType, 8 DeletedType, UninhabitedType, TypeType 9 ) 10 from mypy.subtypes import is_equivalent, is_subtype 11 12 from mypy import experiments 13 14 # TODO Describe this module. 15 16 17 def meet_types(s: Type, t: Type) -> Type: 18 """Return the greatest lower bound of two types.""" 19 if isinstance(s, ErasedType): 20 return s 21 if isinstance(s, AnyType): 22 return t 23 if isinstance(s, UnionType) and not isinstance(t, UnionType): 24 s, t = t, s 25 return t.accept(TypeMeetVisitor(s)) 26 27 28 def meet_simple(s: Type, t: Type, default_right: bool = True) -> Type: 29 if s == t: 30 return s 31 if isinstance(s, UnionType): 32 return UnionType.make_simplified_union([meet_types(x, t) for x in s.items]) 33 elif not is_overlapping_types(s, t, use_promotions=True): 34 if experiments.STRICT_OPTIONAL: 35 return UninhabitedType() 36 else: 37 return NoneTyp() 38 else: 39 if default_right: 40 return t 41 else: 42 return s 43 44 45 def is_overlapping_types(t: Type, s: Type, use_promotions: bool = False) -> bool: 46 """Can a value of type t be a value of type s, or vice versa? 47 48 Note that this effectively checks against erased types, since type 49 variables are erased at runtime and the overlapping check is based 50 on runtime behavior. 51 52 If use_promotions is True, also consider type promotions (int and 53 float would only be overlapping if it's True). 54 55 This does not consider multiple inheritance. For example, A and B in 56 the following example are not considered overlapping, even though 57 via C they can be overlapping: 58 59 class A: ... 60 class B: ... 61 class C(A, B): ... 62 63 The rationale is that this case is usually very unlikely as multiple 64 inheritance is rare. Also, we can't reliably determine whether 65 multiple inheritance actually occurs somewhere in a program, due to 66 stub files hiding implementation details, dynamic loading etc. 67 68 TODO: Don't consider tuples always overlapping. 69 TODO: Don't consider callables always overlapping. 70 TODO: Don't consider type variables with values always overlapping. 71 """ 72 # Since we are effectively working with the erased types, we only 73 # need to handle occurrences of TypeVarType at the top level. 74 if isinstance(t, TypeVarType): 75 t = t.erase_to_union_or_bound() 76 if isinstance(s, TypeVarType): 77 s = s.erase_to_union_or_bound() 78 if isinstance(t, Instance): 79 if isinstance(s, Instance): 80 # Consider two classes non-disjoint if one is included in the mro 81 # of another. 82 if use_promotions: 83 # Consider cases like int vs float to be overlapping where 84 # there is only a type promotion relationship but not proper 85 # subclassing. 86 if t.type._promote and is_overlapping_types(t.type._promote, s): 87 return True 88 if s.type._promote and is_overlapping_types(s.type._promote, t): 89 return True 90 return t.type in s.type.mro or s.type in t.type.mro 91 if isinstance(t, UnionType): 92 return any(is_overlapping_types(item, s) 93 for item in t.items) 94 if isinstance(s, UnionType): 95 return any(is_overlapping_types(t, item) 96 for item in s.items) 97 if isinstance(t, TypeType) and isinstance(s, TypeType): 98 # If both types are TypeType, compare their inner types. 99 return is_overlapping_types(t.item, s.item, use_promotions) 100 elif isinstance(t, TypeType) or isinstance(s, TypeType): 101 # If exactly only one of t or s is a TypeType, check if one of them 102 # is an `object` or a `type` and otherwise assume no overlap. 103 other = s if isinstance(t, TypeType) else t 104 if isinstance(other, Instance): 105 return other.type.fullname() in {'builtins.object', 'builtins.type'} 106 else: 107 return False 108 if experiments.STRICT_OPTIONAL: 109 if isinstance(t, NoneTyp) != isinstance(s, NoneTyp): 110 # NoneTyp does not overlap with other non-Union types under strict Optional checking 111 return False 112 # We conservatively assume that non-instance, non-union, and non-TypeType types can overlap 113 # any other types. 114 return True 115 116 117 class TypeMeetVisitor(TypeVisitor[Type]): 118 def __init__(self, s: Type) -> None: 119 self.s = s 120 121 def visit_unbound_type(self, t: UnboundType) -> Type: 122 if isinstance(self.s, Void) or isinstance(self.s, ErrorType): 123 return ErrorType() 124 elif isinstance(self.s, NoneTyp): 125 if experiments.STRICT_OPTIONAL: 126 return AnyType() 127 else: 128 return self.s 129 elif isinstance(self.s, UninhabitedType): 130 return self.s 131 else: 132 return AnyType() 133 134 def visit_error_type(self, t: ErrorType) -> Type: 135 return t 136 137 def visit_type_list(self, t: TypeList) -> Type: 138 assert False, 'Not supported' 139 140 def visit_any(self, t: AnyType) -> Type: 141 return self.s 142 143 def visit_union_type(self, t: UnionType) -> Type: 144 if isinstance(self.s, UnionType): 145 meets = [] # type: List[Type] 146 for x in t.items: 147 for y in self.s.items: 148 meets.append(meet_types(x, y)) 149 else: 150 meets = [meet_types(x, self.s) 151 for x in t.items] 152 return UnionType.make_simplified_union(meets) 153 154 def visit_void(self, t: Void) -> Type: 155 if isinstance(self.s, Void): 156 return t 157 else: 158 return ErrorType() 159 160 def visit_none_type(self, t: NoneTyp) -> Type: 161 if experiments.STRICT_OPTIONAL: 162 if isinstance(self.s, NoneTyp) or (isinstance(self.s, Instance) and 163 self.s.type.fullname() == 'builtins.object'): 164 return t 165 else: 166 return UninhabitedType() 167 else: 168 if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType): 169 return t 170 else: 171 return ErrorType() 172 173 def visit_uninhabited_type(self, t: UninhabitedType) -> Type: 174 if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType): 175 return t 176 else: 177 return ErrorType() 178 179 def visit_deleted_type(self, t: DeletedType) -> Type: 180 if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType): 181 if isinstance(self.s, NoneTyp): 182 if experiments.STRICT_OPTIONAL: 183 return t 184 else: 185 return self.s 186 elif isinstance(self.s, UninhabitedType): 187 return self.s 188 else: 189 return t 190 else: 191 return ErrorType() 192 193 def visit_erased_type(self, t: ErasedType) -> Type: 194 return self.s 195 196 def visit_type_var(self, t: TypeVarType) -> Type: 197 if isinstance(self.s, TypeVarType) and self.s.id == t.id: 198 return self.s 199 else: 200 return self.default(self.s) 201 202 def visit_instance(self, t: Instance) -> Type: 203 if isinstance(self.s, Instance): 204 si = self.s 205 if t.type == si.type: 206 if is_subtype(t, self.s) or is_subtype(self.s, t): 207 # Combine type arguments. We could have used join below 208 # equivalently. 209 args = [] # type: List[Type] 210 for i in range(len(t.args)): 211 args.append(self.meet(t.args[i], si.args[i])) 212 return Instance(t.type, args) 213 else: 214 if experiments.STRICT_OPTIONAL: 215 return UninhabitedType() 216 else: 217 return NoneTyp() 218 else: 219 if is_subtype(t, self.s): 220 return t 221 elif is_subtype(self.s, t): 222 # See also above comment. 223 return self.s 224 else: 225 if experiments.STRICT_OPTIONAL: 226 return UninhabitedType() 227 else: 228 return NoneTyp() 229 elif isinstance(self.s, TypeType): 230 return meet_types(t, self.s) 231 elif isinstance(self.s, TupleType): 232 return meet_types(t, self.s) 233 else: 234 return self.default(self.s) 235 236 def visit_callable_type(self, t: CallableType) -> Type: 237 if isinstance(self.s, CallableType) and is_similar_callables(t, self.s): 238 return combine_similar_callables(t, self.s) 239 else: 240 return self.default(self.s) 241 242 def visit_tuple_type(self, t: TupleType) -> Type: 243 if isinstance(self.s, TupleType) and self.s.length() == t.length(): 244 items = [] # type: List[Type] 245 for i in range(t.length()): 246 items.append(self.meet(t.items[i], self.s.items[i])) 247 # TODO: What if the fallbacks are different? 248 return TupleType(items, t.fallback) 249 # meet(Tuple[t1, t2, <...>], Tuple[s, ...]) == Tuple[meet(t1, s), meet(t2, s), <...>]. 250 elif (isinstance(self.s, Instance) and 251 self.s.type.fullname() == 'builtins.tuple' and self.s.args): 252 return t.copy_modified(items=[meet_types(it, self.s.args[0]) for it in t.items]) 253 else: 254 return self.default(self.s) 255 256 def visit_typeddict_type(self, t: TypedDictType) -> Type: 257 if isinstance(self.s, TypedDictType): 258 for (_, l, r) in self.s.zip(t): 259 if not is_equivalent(l, r): 260 return self.default(self.s) 261 items = OrderedDict([ 262 (item_name, s_item_type or t_item_type) 263 for (item_name, s_item_type, t_item_type) in self.s.zipall(t) 264 ]) 265 mapping_value_type = join_type_list(list(items.values())) 266 fallback = self.s.create_anonymous_fallback(value_type=mapping_value_type) 267 return TypedDictType(items, fallback) 268 else: 269 return self.default(self.s) 270 271 def visit_partial_type(self, t: PartialType) -> Type: 272 # We can't determine the meet of partial types. We should never get here. 273 assert False, 'Internal error' 274 275 def visit_type_type(self, t: TypeType) -> Type: 276 if isinstance(self.s, TypeType): 277 typ = self.meet(t.item, self.s.item) 278 if not isinstance(typ, NoneTyp): 279 typ = TypeType(typ, line=t.line) 280 return typ 281 elif isinstance(self.s, Instance) and self.s.type.fullname() == 'builtins.type': 282 return t 283 else: 284 return self.default(self.s) 285 286 def meet(self, s: Type, t: Type) -> Type: 287 return meet_types(s, t) 288 289 def default(self, typ: Type) -> Type: 290 if isinstance(typ, UnboundType): 291 return AnyType() 292 elif isinstance(typ, Void) or isinstance(typ, ErrorType): 293 return ErrorType() 294 else: 295 if experiments.STRICT_OPTIONAL: 296 return UninhabitedType() 297 else: 298 return NoneTyp() 299 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mypy/meet.py b/mypy/meet.py --- a/mypy/meet.py +++ b/mypy/meet.py @@ -69,6 +69,10 @@ TODO: Don't consider callables always overlapping. TODO: Don't consider type variables with values always overlapping. """ + # Any overlaps with everything + if isinstance(t, AnyType) or isinstance(s, AnyType): + return True + # Since we are effectively working with the erased types, we only # need to handle occurrences of TypeVarType at the top level. if isinstance(t, TypeVarType):
{"golden_diff": "diff --git a/mypy/meet.py b/mypy/meet.py\n--- a/mypy/meet.py\n+++ b/mypy/meet.py\n@@ -69,6 +69,10 @@\n TODO: Don't consider callables always overlapping.\n TODO: Don't consider type variables with values always overlapping.\n \"\"\"\n+ # Any overlaps with everything\n+ if isinstance(t, AnyType) or isinstance(s, AnyType):\n+ return True\n+\n # Since we are effectively working with the erased types, we only\n # need to handle occurrences of TypeVarType at the top level.\n if isinstance(t, TypeVarType):\n", "issue": "When `a` has type `Any`, after `assert a is None`, the rest of the block is considered dead code\nSteps to reproduce:\r\n\r\n1. Check out [this repo](https://github.com/benkuhn/mypy-repro)\r\n2. Install the packages from `requirements.txt`\r\n3. Run `mypy repro.py` from the repo root.\r\n\r\nExpected output:\r\n```\r\nrepro.py:4: error: Revealed type is 'Any'\r\nrepro.py:6: error: Revealed type is 'builtins.None'\r\n```\r\n\r\nActual output:\r\n```\r\nrepro.py:4: error: Revealed type is 'Any'\r\n```\r\n\r\nThe issue goes away if you disable strict Optional checking.\r\n\r\nThis is preventing a significant amount of our codebase from being checked right now, so I would appreciate any suggestions for a workaround!\n", "before_files": [{"content": "from collections import OrderedDict\nfrom typing import List, Optional\n\nfrom mypy.join import is_similar_callables, combine_similar_callables, join_type_list\nfrom mypy.types import (\n Type, AnyType, TypeVisitor, UnboundType, Void, ErrorType, NoneTyp, TypeVarType,\n Instance, CallableType, TupleType, TypedDictType, ErasedType, TypeList, UnionType, PartialType,\n DeletedType, UninhabitedType, TypeType\n)\nfrom mypy.subtypes import is_equivalent, is_subtype\n\nfrom mypy import experiments\n\n# TODO Describe this module.\n\n\ndef meet_types(s: Type, t: Type) -> Type:\n \"\"\"Return the greatest lower bound of two types.\"\"\"\n if isinstance(s, ErasedType):\n return s\n if isinstance(s, AnyType):\n return t\n if isinstance(s, UnionType) and not isinstance(t, UnionType):\n s, t = t, s\n return t.accept(TypeMeetVisitor(s))\n\n\ndef meet_simple(s: Type, t: Type, default_right: bool = True) -> Type:\n if s == t:\n return s\n if isinstance(s, UnionType):\n return UnionType.make_simplified_union([meet_types(x, t) for x in s.items])\n elif not is_overlapping_types(s, t, use_promotions=True):\n if experiments.STRICT_OPTIONAL:\n return UninhabitedType()\n else:\n return NoneTyp()\n else:\n if default_right:\n return t\n else:\n return s\n\n\ndef is_overlapping_types(t: Type, s: Type, use_promotions: bool = False) -> bool:\n \"\"\"Can a value of type t be a value of type s, or vice versa?\n\n Note that this effectively checks against erased types, since type\n variables are erased at runtime and the overlapping check is based\n on runtime behavior.\n\n If use_promotions is True, also consider type promotions (int and\n float would only be overlapping if it's True).\n\n This does not consider multiple inheritance. For example, A and B in\n the following example are not considered overlapping, even though\n via C they can be overlapping:\n\n class A: ...\n class B: ...\n class C(A, B): ...\n\n The rationale is that this case is usually very unlikely as multiple\n inheritance is rare. Also, we can't reliably determine whether\n multiple inheritance actually occurs somewhere in a program, due to\n stub files hiding implementation details, dynamic loading etc.\n\n TODO: Don't consider tuples always overlapping.\n TODO: Don't consider callables always overlapping.\n TODO: Don't consider type variables with values always overlapping.\n \"\"\"\n # Since we are effectively working with the erased types, we only\n # need to handle occurrences of TypeVarType at the top level.\n if isinstance(t, TypeVarType):\n t = t.erase_to_union_or_bound()\n if isinstance(s, TypeVarType):\n s = s.erase_to_union_or_bound()\n if isinstance(t, Instance):\n if isinstance(s, Instance):\n # Consider two classes non-disjoint if one is included in the mro\n # of another.\n if use_promotions:\n # Consider cases like int vs float to be overlapping where\n # there is only a type promotion relationship but not proper\n # subclassing.\n if t.type._promote and is_overlapping_types(t.type._promote, s):\n return True\n if s.type._promote and is_overlapping_types(s.type._promote, t):\n return True\n return t.type in s.type.mro or s.type in t.type.mro\n if isinstance(t, UnionType):\n return any(is_overlapping_types(item, s)\n for item in t.items)\n if isinstance(s, UnionType):\n return any(is_overlapping_types(t, item)\n for item in s.items)\n if isinstance(t, TypeType) and isinstance(s, TypeType):\n # If both types are TypeType, compare their inner types.\n return is_overlapping_types(t.item, s.item, use_promotions)\n elif isinstance(t, TypeType) or isinstance(s, TypeType):\n # If exactly only one of t or s is a TypeType, check if one of them\n # is an `object` or a `type` and otherwise assume no overlap.\n other = s if isinstance(t, TypeType) else t\n if isinstance(other, Instance):\n return other.type.fullname() in {'builtins.object', 'builtins.type'}\n else:\n return False\n if experiments.STRICT_OPTIONAL:\n if isinstance(t, NoneTyp) != isinstance(s, NoneTyp):\n # NoneTyp does not overlap with other non-Union types under strict Optional checking\n return False\n # We conservatively assume that non-instance, non-union, and non-TypeType types can overlap\n # any other types.\n return True\n\n\nclass TypeMeetVisitor(TypeVisitor[Type]):\n def __init__(self, s: Type) -> None:\n self.s = s\n\n def visit_unbound_type(self, t: UnboundType) -> Type:\n if isinstance(self.s, Void) or isinstance(self.s, ErrorType):\n return ErrorType()\n elif isinstance(self.s, NoneTyp):\n if experiments.STRICT_OPTIONAL:\n return AnyType()\n else:\n return self.s\n elif isinstance(self.s, UninhabitedType):\n return self.s\n else:\n return AnyType()\n\n def visit_error_type(self, t: ErrorType) -> Type:\n return t\n\n def visit_type_list(self, t: TypeList) -> Type:\n assert False, 'Not supported'\n\n def visit_any(self, t: AnyType) -> Type:\n return self.s\n\n def visit_union_type(self, t: UnionType) -> Type:\n if isinstance(self.s, UnionType):\n meets = [] # type: List[Type]\n for x in t.items:\n for y in self.s.items:\n meets.append(meet_types(x, y))\n else:\n meets = [meet_types(x, self.s)\n for x in t.items]\n return UnionType.make_simplified_union(meets)\n\n def visit_void(self, t: Void) -> Type:\n if isinstance(self.s, Void):\n return t\n else:\n return ErrorType()\n\n def visit_none_type(self, t: NoneTyp) -> Type:\n if experiments.STRICT_OPTIONAL:\n if isinstance(self.s, NoneTyp) or (isinstance(self.s, Instance) and\n self.s.type.fullname() == 'builtins.object'):\n return t\n else:\n return UninhabitedType()\n else:\n if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType):\n return t\n else:\n return ErrorType()\n\n def visit_uninhabited_type(self, t: UninhabitedType) -> Type:\n if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType):\n return t\n else:\n return ErrorType()\n\n def visit_deleted_type(self, t: DeletedType) -> Type:\n if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType):\n if isinstance(self.s, NoneTyp):\n if experiments.STRICT_OPTIONAL:\n return t\n else:\n return self.s\n elif isinstance(self.s, UninhabitedType):\n return self.s\n else:\n return t\n else:\n return ErrorType()\n\n def visit_erased_type(self, t: ErasedType) -> Type:\n return self.s\n\n def visit_type_var(self, t: TypeVarType) -> Type:\n if isinstance(self.s, TypeVarType) and self.s.id == t.id:\n return self.s\n else:\n return self.default(self.s)\n\n def visit_instance(self, t: Instance) -> Type:\n if isinstance(self.s, Instance):\n si = self.s\n if t.type == si.type:\n if is_subtype(t, self.s) or is_subtype(self.s, t):\n # Combine type arguments. We could have used join below\n # equivalently.\n args = [] # type: List[Type]\n for i in range(len(t.args)):\n args.append(self.meet(t.args[i], si.args[i]))\n return Instance(t.type, args)\n else:\n if experiments.STRICT_OPTIONAL:\n return UninhabitedType()\n else:\n return NoneTyp()\n else:\n if is_subtype(t, self.s):\n return t\n elif is_subtype(self.s, t):\n # See also above comment.\n return self.s\n else:\n if experiments.STRICT_OPTIONAL:\n return UninhabitedType()\n else:\n return NoneTyp()\n elif isinstance(self.s, TypeType):\n return meet_types(t, self.s)\n elif isinstance(self.s, TupleType):\n return meet_types(t, self.s)\n else:\n return self.default(self.s)\n\n def visit_callable_type(self, t: CallableType) -> Type:\n if isinstance(self.s, CallableType) and is_similar_callables(t, self.s):\n return combine_similar_callables(t, self.s)\n else:\n return self.default(self.s)\n\n def visit_tuple_type(self, t: TupleType) -> Type:\n if isinstance(self.s, TupleType) and self.s.length() == t.length():\n items = [] # type: List[Type]\n for i in range(t.length()):\n items.append(self.meet(t.items[i], self.s.items[i]))\n # TODO: What if the fallbacks are different?\n return TupleType(items, t.fallback)\n # meet(Tuple[t1, t2, <...>], Tuple[s, ...]) == Tuple[meet(t1, s), meet(t2, s), <...>].\n elif (isinstance(self.s, Instance) and\n self.s.type.fullname() == 'builtins.tuple' and self.s.args):\n return t.copy_modified(items=[meet_types(it, self.s.args[0]) for it in t.items])\n else:\n return self.default(self.s)\n\n def visit_typeddict_type(self, t: TypedDictType) -> Type:\n if isinstance(self.s, TypedDictType):\n for (_, l, r) in self.s.zip(t):\n if not is_equivalent(l, r):\n return self.default(self.s)\n items = OrderedDict([\n (item_name, s_item_type or t_item_type)\n for (item_name, s_item_type, t_item_type) in self.s.zipall(t)\n ])\n mapping_value_type = join_type_list(list(items.values()))\n fallback = self.s.create_anonymous_fallback(value_type=mapping_value_type)\n return TypedDictType(items, fallback)\n else:\n return self.default(self.s)\n\n def visit_partial_type(self, t: PartialType) -> Type:\n # We can't determine the meet of partial types. We should never get here.\n assert False, 'Internal error'\n\n def visit_type_type(self, t: TypeType) -> Type:\n if isinstance(self.s, TypeType):\n typ = self.meet(t.item, self.s.item)\n if not isinstance(typ, NoneTyp):\n typ = TypeType(typ, line=t.line)\n return typ\n elif isinstance(self.s, Instance) and self.s.type.fullname() == 'builtins.type':\n return t\n else:\n return self.default(self.s)\n\n def meet(self, s: Type, t: Type) -> Type:\n return meet_types(s, t)\n\n def default(self, typ: Type) -> Type:\n if isinstance(typ, UnboundType):\n return AnyType()\n elif isinstance(typ, Void) or isinstance(typ, ErrorType):\n return ErrorType()\n else:\n if experiments.STRICT_OPTIONAL:\n return UninhabitedType()\n else:\n return NoneTyp()\n", "path": "mypy/meet.py"}], "after_files": [{"content": "from collections import OrderedDict\nfrom typing import List, Optional\n\nfrom mypy.join import is_similar_callables, combine_similar_callables, join_type_list\nfrom mypy.types import (\n Type, AnyType, TypeVisitor, UnboundType, Void, ErrorType, NoneTyp, TypeVarType,\n Instance, CallableType, TupleType, TypedDictType, ErasedType, TypeList, UnionType, PartialType,\n DeletedType, UninhabitedType, TypeType\n)\nfrom mypy.subtypes import is_equivalent, is_subtype\n\nfrom mypy import experiments\n\n# TODO Describe this module.\n\n\ndef meet_types(s: Type, t: Type) -> Type:\n \"\"\"Return the greatest lower bound of two types.\"\"\"\n if isinstance(s, ErasedType):\n return s\n if isinstance(s, AnyType):\n return t\n if isinstance(s, UnionType) and not isinstance(t, UnionType):\n s, t = t, s\n return t.accept(TypeMeetVisitor(s))\n\n\ndef meet_simple(s: Type, t: Type, default_right: bool = True) -> Type:\n if s == t:\n return s\n if isinstance(s, UnionType):\n return UnionType.make_simplified_union([meet_types(x, t) for x in s.items])\n elif not is_overlapping_types(s, t, use_promotions=True):\n if experiments.STRICT_OPTIONAL:\n return UninhabitedType()\n else:\n return NoneTyp()\n else:\n if default_right:\n return t\n else:\n return s\n\n\ndef is_overlapping_types(t: Type, s: Type, use_promotions: bool = False) -> bool:\n \"\"\"Can a value of type t be a value of type s, or vice versa?\n\n Note that this effectively checks against erased types, since type\n variables are erased at runtime and the overlapping check is based\n on runtime behavior.\n\n If use_promotions is True, also consider type promotions (int and\n float would only be overlapping if it's True).\n\n This does not consider multiple inheritance. For example, A and B in\n the following example are not considered overlapping, even though\n via C they can be overlapping:\n\n class A: ...\n class B: ...\n class C(A, B): ...\n\n The rationale is that this case is usually very unlikely as multiple\n inheritance is rare. Also, we can't reliably determine whether\n multiple inheritance actually occurs somewhere in a program, due to\n stub files hiding implementation details, dynamic loading etc.\n\n TODO: Don't consider tuples always overlapping.\n TODO: Don't consider callables always overlapping.\n TODO: Don't consider type variables with values always overlapping.\n \"\"\"\n # Any overlaps with everything\n if isinstance(t, AnyType) or isinstance(s, AnyType):\n return True\n\n # Since we are effectively working with the erased types, we only\n # need to handle occurrences of TypeVarType at the top level.\n if isinstance(t, TypeVarType):\n t = t.erase_to_union_or_bound()\n if isinstance(s, TypeVarType):\n s = s.erase_to_union_or_bound()\n if isinstance(t, Instance):\n if isinstance(s, Instance):\n # Consider two classes non-disjoint if one is included in the mro\n # of another.\n if use_promotions:\n # Consider cases like int vs float to be overlapping where\n # there is only a type promotion relationship but not proper\n # subclassing.\n if t.type._promote and is_overlapping_types(t.type._promote, s):\n return True\n if s.type._promote and is_overlapping_types(s.type._promote, t):\n return True\n return t.type in s.type.mro or s.type in t.type.mro\n if isinstance(t, UnionType):\n return any(is_overlapping_types(item, s)\n for item in t.items)\n if isinstance(s, UnionType):\n return any(is_overlapping_types(t, item)\n for item in s.items)\n if isinstance(t, TypeType) and isinstance(s, TypeType):\n # If both types are TypeType, compare their inner types.\n return is_overlapping_types(t.item, s.item, use_promotions)\n elif isinstance(t, TypeType) or isinstance(s, TypeType):\n # If exactly only one of t or s is a TypeType, check if one of them\n # is an `object` or a `type` and otherwise assume no overlap.\n other = s if isinstance(t, TypeType) else t\n if isinstance(other, Instance):\n return other.type.fullname() in {'builtins.object', 'builtins.type'}\n else:\n return False\n if experiments.STRICT_OPTIONAL:\n if isinstance(t, NoneTyp) != isinstance(s, NoneTyp):\n # NoneTyp does not overlap with other non-Union types under strict Optional checking\n return False\n # We conservatively assume that non-instance, non-union, and non-TypeType types can overlap\n # any other types.\n return True\n\n\nclass TypeMeetVisitor(TypeVisitor[Type]):\n def __init__(self, s: Type) -> None:\n self.s = s\n\n def visit_unbound_type(self, t: UnboundType) -> Type:\n if isinstance(self.s, Void) or isinstance(self.s, ErrorType):\n return ErrorType()\n elif isinstance(self.s, NoneTyp):\n if experiments.STRICT_OPTIONAL:\n return AnyType()\n else:\n return self.s\n elif isinstance(self.s, UninhabitedType):\n return self.s\n else:\n return AnyType()\n\n def visit_error_type(self, t: ErrorType) -> Type:\n return t\n\n def visit_type_list(self, t: TypeList) -> Type:\n assert False, 'Not supported'\n\n def visit_any(self, t: AnyType) -> Type:\n return self.s\n\n def visit_union_type(self, t: UnionType) -> Type:\n if isinstance(self.s, UnionType):\n meets = [] # type: List[Type]\n for x in t.items:\n for y in self.s.items:\n meets.append(meet_types(x, y))\n else:\n meets = [meet_types(x, self.s)\n for x in t.items]\n return UnionType.make_simplified_union(meets)\n\n def visit_void(self, t: Void) -> Type:\n if isinstance(self.s, Void):\n return t\n else:\n return ErrorType()\n\n def visit_none_type(self, t: NoneTyp) -> Type:\n if experiments.STRICT_OPTIONAL:\n if isinstance(self.s, NoneTyp) or (isinstance(self.s, Instance) and\n self.s.type.fullname() == 'builtins.object'):\n return t\n else:\n return UninhabitedType()\n else:\n if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType):\n return t\n else:\n return ErrorType()\n\n def visit_uninhabited_type(self, t: UninhabitedType) -> Type:\n if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType):\n return t\n else:\n return ErrorType()\n\n def visit_deleted_type(self, t: DeletedType) -> Type:\n if not isinstance(self.s, Void) and not isinstance(self.s, ErrorType):\n if isinstance(self.s, NoneTyp):\n if experiments.STRICT_OPTIONAL:\n return t\n else:\n return self.s\n elif isinstance(self.s, UninhabitedType):\n return self.s\n else:\n return t\n else:\n return ErrorType()\n\n def visit_erased_type(self, t: ErasedType) -> Type:\n return self.s\n\n def visit_type_var(self, t: TypeVarType) -> Type:\n if isinstance(self.s, TypeVarType) and self.s.id == t.id:\n return self.s\n else:\n return self.default(self.s)\n\n def visit_instance(self, t: Instance) -> Type:\n if isinstance(self.s, Instance):\n si = self.s\n if t.type == si.type:\n if is_subtype(t, self.s) or is_subtype(self.s, t):\n # Combine type arguments. We could have used join below\n # equivalently.\n args = [] # type: List[Type]\n for i in range(len(t.args)):\n args.append(self.meet(t.args[i], si.args[i]))\n return Instance(t.type, args)\n else:\n if experiments.STRICT_OPTIONAL:\n return UninhabitedType()\n else:\n return NoneTyp()\n else:\n if is_subtype(t, self.s):\n return t\n elif is_subtype(self.s, t):\n # See also above comment.\n return self.s\n else:\n if experiments.STRICT_OPTIONAL:\n return UninhabitedType()\n else:\n return NoneTyp()\n elif isinstance(self.s, TypeType):\n return meet_types(t, self.s)\n elif isinstance(self.s, TupleType):\n return meet_types(t, self.s)\n else:\n return self.default(self.s)\n\n def visit_callable_type(self, t: CallableType) -> Type:\n if isinstance(self.s, CallableType) and is_similar_callables(t, self.s):\n return combine_similar_callables(t, self.s)\n else:\n return self.default(self.s)\n\n def visit_tuple_type(self, t: TupleType) -> Type:\n if isinstance(self.s, TupleType) and self.s.length() == t.length():\n items = [] # type: List[Type]\n for i in range(t.length()):\n items.append(self.meet(t.items[i], self.s.items[i]))\n # TODO: What if the fallbacks are different?\n return TupleType(items, t.fallback)\n # meet(Tuple[t1, t2, <...>], Tuple[s, ...]) == Tuple[meet(t1, s), meet(t2, s), <...>].\n elif (isinstance(self.s, Instance) and\n self.s.type.fullname() == 'builtins.tuple' and self.s.args):\n return t.copy_modified(items=[meet_types(it, self.s.args[0]) for it in t.items])\n else:\n return self.default(self.s)\n\n def visit_typeddict_type(self, t: TypedDictType) -> Type:\n if isinstance(self.s, TypedDictType):\n for (_, l, r) in self.s.zip(t):\n if not is_equivalent(l, r):\n return self.default(self.s)\n items = OrderedDict([\n (item_name, s_item_type or t_item_type)\n for (item_name, s_item_type, t_item_type) in self.s.zipall(t)\n ])\n mapping_value_type = join_type_list(list(items.values()))\n fallback = self.s.create_anonymous_fallback(value_type=mapping_value_type)\n return TypedDictType(items, fallback)\n else:\n return self.default(self.s)\n\n def visit_partial_type(self, t: PartialType) -> Type:\n # We can't determine the meet of partial types. We should never get here.\n assert False, 'Internal error'\n\n def visit_type_type(self, t: TypeType) -> Type:\n if isinstance(self.s, TypeType):\n typ = self.meet(t.item, self.s.item)\n if not isinstance(typ, NoneTyp):\n typ = TypeType(typ, line=t.line)\n return typ\n elif isinstance(self.s, Instance) and self.s.type.fullname() == 'builtins.type':\n return t\n else:\n return self.default(self.s)\n\n def meet(self, s: Type, t: Type) -> Type:\n return meet_types(s, t)\n\n def default(self, typ: Type) -> Type:\n if isinstance(typ, UnboundType):\n return AnyType()\n elif isinstance(typ, Void) or isinstance(typ, ErrorType):\n return ErrorType()\n else:\n if experiments.STRICT_OPTIONAL:\n return UninhabitedType()\n else:\n return NoneTyp()\n", "path": "mypy/meet.py"}]}
3,822
140
gh_patches_debug_3506
rasdani/github-patches
git_diff
vega__altair-692
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Two renderers with same name Working with a fresh Anaconda installation of Jupyter 3.6. Followed Altair Notebook installation instructions. Basic example (flower petals scatter plot) rendered but with JSON underneath graph (issue #634). I thought this might be due to having selected the wrong renderer., so I went to list the available renderers. `alt.renderers.names()` returns `['default', 'json', 'notebook', 'notebook']` Secretly hoping the second `notebook` renderer solves #634. In any case, I think you shouldn't be able to have two renderers with the same name. (hs teacher, maybe I'm missing something) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `altair/utils/plugin_registry.py` Content: ``` 1 from typing import Callable, Generic, List, TypeVar, Union, cast 2 3 import entrypoints 4 5 6 PluginType = TypeVar('PluginType') 7 8 9 class PluginRegistry(Generic[PluginType]): 10 """A registry for plugins. 11 12 This is a plugin registry that allows plugins to be loaded/registered 13 in two ways: 14 15 1. Through an explicit call to ``.register(name, value)``. 16 2. By looking for other Python packages that are installed and provide 17 a setuptools entry point group. 18 19 When you create an instance of this class, provide the name of the 20 entry point group to use:: 21 22 reg = PluginRegister('my_entrypoint_group') 23 24 """ 25 # this is a mapping of name to error message to allow custom error messages 26 # in case an entrypoint is not found 27 entrypoint_err_messages = {} 28 29 def __init__(self, entry_point_group: str = '', plugin_type=object) -> None: 30 """Create a PluginRegistry for a named entry point group. 31 32 Parameters 33 ========== 34 entry_point_group: str 35 The name of the entry point group. 36 plugin_type: object 37 A type that will optionally be used for runtime type checking of 38 loaded plugins using isinstance. 39 """ 40 self.entry_point_group = entry_point_group 41 self.plugin_type = plugin_type 42 self._active = None # type: None 43 self._active_name = '' # type: str 44 self._plugins = {} # type: dict 45 self._options = {} # type: dict 46 47 def register(self, name: str, value: Union[PluginType,None]) -> PluginType: 48 """Register a plugin by name and value. 49 50 This method is used for explicit registration of a plugin and shouldn't be 51 used to manage entry point managed plugins, which are auto-loaded. 52 53 Parameters 54 ========== 55 name: str 56 The name of the plugin. 57 value: PluginType or None 58 The actual plugin object to register or None to unregister that plugin. 59 60 Returns 61 ======= 62 plugin: PluginType 63 The plugin that was registered or unregistered. 64 """ 65 if value is None and name in self._plugins: 66 return self._plugins.pop(name) 67 else: 68 assert isinstance(value, self.plugin_type) 69 self._plugins[name] = value 70 return value 71 72 73 def names(self) -> List[str]: 74 """List the names of the registered and entry points plugins.""" 75 exts = list(self._plugins.keys()) 76 more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)] 77 exts.extend(more_exts) 78 return exts 79 80 81 def enable(self, name: str) -> None: 82 """Enable a plugin by name.""" 83 if name not in self._plugins: 84 try: 85 ep = entrypoints.get_single(self.entry_point_group, name) 86 except entrypoints.NoSuchEntryPoint as err: 87 if name in self.entrypoint_err_messages: 88 raise ValueError(self.entrypoint_err_messages[name]) 89 else: 90 raise 91 value = cast(PluginType, ep.load()) 92 assert isinstance(value, self.plugin_type) 93 self.register(name, value) 94 self._active_name = name 95 self._active = self._plugins[name] 96 97 @property 98 def active(self) -> str: 99 """Return the name of the currently active plugin""" 100 return self._active_name 101 102 def get(self) -> PluginType: 103 """Return the currently active plugin.""" 104 return self._active 105 106 def __repr__(self) -> str: 107 return ("{0}(active={1!r}, registered={2!r})" 108 "".format(self.__class__.__name__, 109 self._active_name, 110 list(self.names()))) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/altair/utils/plugin_registry.py b/altair/utils/plugin_registry.py --- a/altair/utils/plugin_registry.py +++ b/altair/utils/plugin_registry.py @@ -75,7 +75,7 @@ exts = list(self._plugins.keys()) more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)] exts.extend(more_exts) - return exts + return sorted(set(exts)) def enable(self, name: str) -> None:
{"golden_diff": "diff --git a/altair/utils/plugin_registry.py b/altair/utils/plugin_registry.py\n--- a/altair/utils/plugin_registry.py\n+++ b/altair/utils/plugin_registry.py\n@@ -75,7 +75,7 @@\n exts = list(self._plugins.keys())\n more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)]\n exts.extend(more_exts)\n- return exts\n+ return sorted(set(exts))\n \n \n def enable(self, name: str) -> None:\n", "issue": "Two renderers with same name\nWorking with a fresh Anaconda installation of Jupyter 3.6. Followed Altair Notebook installation instructions. Basic example (flower petals scatter plot) rendered but with JSON underneath graph (issue #634). I thought this might be due to having selected the wrong renderer., so I went to list the available renderers. \r\n\r\n`alt.renderers.names()`\r\nreturns\r\n`['default', 'json', 'notebook', 'notebook']`\r\n\r\nSecretly hoping the second `notebook` renderer solves #634. In any case, I think you shouldn't be able to have two renderers with the same name. \r\n\r\n(hs teacher, maybe I'm missing something)\n", "before_files": [{"content": "from typing import Callable, Generic, List, TypeVar, Union, cast\n\nimport entrypoints\n\n\nPluginType = TypeVar('PluginType')\n\n\nclass PluginRegistry(Generic[PluginType]):\n \"\"\"A registry for plugins.\n\n This is a plugin registry that allows plugins to be loaded/registered\n in two ways:\n\n 1. Through an explicit call to ``.register(name, value)``.\n 2. By looking for other Python packages that are installed and provide\n a setuptools entry point group.\n\n When you create an instance of this class, provide the name of the\n entry point group to use::\n\n reg = PluginRegister('my_entrypoint_group')\n\n \"\"\"\n # this is a mapping of name to error message to allow custom error messages\n # in case an entrypoint is not found\n entrypoint_err_messages = {}\n\n def __init__(self, entry_point_group: str = '', plugin_type=object) -> None:\n \"\"\"Create a PluginRegistry for a named entry point group.\n\n Parameters\n ==========\n entry_point_group: str\n The name of the entry point group.\n plugin_type: object\n A type that will optionally be used for runtime type checking of\n loaded plugins using isinstance.\n \"\"\"\n self.entry_point_group = entry_point_group\n self.plugin_type = plugin_type\n self._active = None # type: None\n self._active_name = '' # type: str\n self._plugins = {} # type: dict\n self._options = {} # type: dict\n\n def register(self, name: str, value: Union[PluginType,None]) -> PluginType:\n \"\"\"Register a plugin by name and value.\n\n This method is used for explicit registration of a plugin and shouldn't be\n used to manage entry point managed plugins, which are auto-loaded.\n\n Parameters\n ==========\n name: str\n The name of the plugin.\n value: PluginType or None\n The actual plugin object to register or None to unregister that plugin.\n\n Returns\n =======\n plugin: PluginType\n The plugin that was registered or unregistered.\n \"\"\"\n if value is None and name in self._plugins:\n return self._plugins.pop(name)\n else:\n assert isinstance(value, self.plugin_type)\n self._plugins[name] = value\n return value\n\n\n def names(self) -> List[str]:\n \"\"\"List the names of the registered and entry points plugins.\"\"\"\n exts = list(self._plugins.keys())\n more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)]\n exts.extend(more_exts)\n return exts\n\n\n def enable(self, name: str) -> None:\n \"\"\"Enable a plugin by name.\"\"\"\n if name not in self._plugins:\n try:\n ep = entrypoints.get_single(self.entry_point_group, name)\n except entrypoints.NoSuchEntryPoint as err:\n if name in self.entrypoint_err_messages:\n raise ValueError(self.entrypoint_err_messages[name])\n else:\n raise\n value = cast(PluginType, ep.load())\n assert isinstance(value, self.plugin_type)\n self.register(name, value)\n self._active_name = name\n self._active = self._plugins[name]\n\n @property\n def active(self) -> str:\n \"\"\"Return the name of the currently active plugin\"\"\"\n return self._active_name\n\n def get(self) -> PluginType:\n \"\"\"Return the currently active plugin.\"\"\"\n return self._active\n\n def __repr__(self) -> str:\n return (\"{0}(active={1!r}, registered={2!r})\"\n \"\".format(self.__class__.__name__,\n self._active_name,\n list(self.names())))\n", "path": "altair/utils/plugin_registry.py"}], "after_files": [{"content": "from typing import Callable, Generic, List, TypeVar, Union, cast\n\nimport entrypoints\n\n\nPluginType = TypeVar('PluginType')\n\n\nclass PluginRegistry(Generic[PluginType]):\n \"\"\"A registry for plugins.\n\n This is a plugin registry that allows plugins to be loaded/registered\n in two ways:\n\n 1. Through an explicit call to ``.register(name, value)``.\n 2. By looking for other Python packages that are installed and provide\n a setuptools entry point group.\n\n When you create an instance of this class, provide the name of the\n entry point group to use::\n\n reg = PluginRegister('my_entrypoint_group')\n\n \"\"\"\n # this is a mapping of name to error message to allow custom error messages\n # in case an entrypoint is not found\n entrypoint_err_messages = {}\n\n def __init__(self, entry_point_group: str = '', plugin_type=object) -> None:\n \"\"\"Create a PluginRegistry for a named entry point group.\n\n Parameters\n ==========\n entry_point_group: str\n The name of the entry point group.\n plugin_type: object\n A type that will optionally be used for runtime type checking of\n loaded plugins using isinstance.\n \"\"\"\n self.entry_point_group = entry_point_group\n self.plugin_type = plugin_type\n self._active = None # type: None\n self._active_name = '' # type: str\n self._plugins = {} # type: dict\n self._options = {} # type: dict\n\n def register(self, name: str, value: Union[PluginType,None]) -> PluginType:\n \"\"\"Register a plugin by name and value.\n\n This method is used for explicit registration of a plugin and shouldn't be\n used to manage entry point managed plugins, which are auto-loaded.\n\n Parameters\n ==========\n name: str\n The name of the plugin.\n value: PluginType or None\n The actual plugin object to register or None to unregister that plugin.\n\n Returns\n =======\n plugin: PluginType\n The plugin that was registered or unregistered.\n \"\"\"\n if value is None and name in self._plugins:\n return self._plugins.pop(name)\n else:\n assert isinstance(value, self.plugin_type)\n self._plugins[name] = value\n return value\n\n\n def names(self) -> List[str]:\n \"\"\"List the names of the registered and entry points plugins.\"\"\"\n exts = list(self._plugins.keys())\n more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)]\n exts.extend(more_exts)\n return sorted(set(exts))\n\n\n def enable(self, name: str) -> None:\n \"\"\"Enable a plugin by name.\"\"\"\n if name not in self._plugins:\n try:\n ep = entrypoints.get_single(self.entry_point_group, name)\n except entrypoints.NoSuchEntryPoint as err:\n if name in self.entrypoint_err_messages:\n raise ValueError(self.entrypoint_err_messages[name])\n else:\n raise\n value = cast(PluginType, ep.load())\n assert isinstance(value, self.plugin_type)\n self.register(name, value)\n self._active_name = name\n self._active = self._plugins[name]\n\n @property\n def active(self) -> str:\n \"\"\"Return the name of the currently active plugin\"\"\"\n return self._active_name\n\n def get(self) -> PluginType:\n \"\"\"Return the currently active plugin.\"\"\"\n return self._active\n\n def __repr__(self) -> str:\n return (\"{0}(active={1!r}, registered={2!r})\"\n \"\".format(self.__class__.__name__,\n self._active_name,\n list(self.names())))\n", "path": "altair/utils/plugin_registry.py"}]}
1,455
119
gh_patches_debug_18401
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-941
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [ENH] Add `softmax` to math submodule In reviewing the `math` module, I'd like to add `softmax` to the functions. This will complete the "activation" functions :) as we have sigmoids but no softmax. [ENH] Add `softmax` to math submodule In reviewing the `math` module, I'd like to add `softmax` to the functions. This will complete the "activation" functions :) as we have sigmoids but no softmax. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `janitor/math.py` Content: ``` 1 """ Miscellaneous mathematical operators. """ 2 3 import warnings 4 from typing import Tuple 5 6 import numpy as np 7 import pandas as pd 8 import pandas_flavor as pf 9 from pandas.api.types import is_numeric_dtype 10 from scipy.special import expit 11 from scipy.stats import norm 12 13 14 @pf.register_series_method 15 def log(s: pd.Series, error: str = "warn") -> pd.Series: 16 """ 17 Take natural logarithm of the Series. 18 19 :param s: Input Series. 20 :param error: Determines behavior when taking the log of nonpositive 21 entries. If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, 22 then a `RuntimeError` is thrown. Otherwise, nothing is thrown and 23 log of nonpositive values is `np.nan`; defaults to `'warn'`. 24 :raises RuntimeError: Raised when there are nonpositive values in the 25 Series and `error='raise'`. 26 :return: Transformed Series. 27 """ 28 s = s.copy() 29 nonpositive = s <= 0 30 if (nonpositive).any(): 31 msg = f"Log taken on {nonpositive.sum()} nonpositive value(s)" 32 if error.lower() == "warn": 33 warnings.warn(msg, RuntimeWarning) 34 if error.lower() == "raise": 35 raise RuntimeError(msg) 36 else: 37 pass 38 s[nonpositive] = np.nan 39 return np.log(s) 40 41 42 @pf.register_series_method 43 def exp(s: pd.Series) -> pd.Series: 44 """ 45 Take the exponential transform of the series. 46 47 :param s: Input Series. 48 :return: Transformed Series. 49 """ 50 return np.exp(s) 51 52 53 @pf.register_series_method 54 def sigmoid(s: pd.Series) -> pd.Series: 55 """ 56 Take the sigmoid transform of the series where: 57 58 ```python 59 sigmoid(x) = 1 / (1 + exp(-x)) 60 ``` 61 62 :param s: Input Series. 63 :return: Transformed Series. 64 """ 65 return expit(s) 66 67 68 @pf.register_series_method 69 def logit(s: pd.Series, error: str = "warn") -> pd.Series: 70 """ 71 Take logit transform of the Series where: 72 73 ```python 74 logit(p) = log(p/(1-p)) 75 ``` 76 77 :param s: Input Series. 78 :param error: Determines behavior when `s / (1-s)` is outside of `(0, 1)`. 79 If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, then a 80 `RuntimeError` is thrown. Otherwise, nothing is thrown and `np.nan` 81 is returned for the problematic entries; defaults to `'warn'`. 82 :return: Transformed Series. 83 :raises RuntimeError: if `error` is set to `'raise'`. 84 """ 85 s = s.copy() 86 odds_ratio = s / (1 - s) 87 outside_support = (odds_ratio <= 0) | (odds_ratio >= 1) 88 if (outside_support).any(): 89 msg = f"Odds ratio for {outside_support.sum()} value(s) \ 90 are outside of (0, 1)" 91 if error.lower() == "warn": 92 warnings.warn(msg, RuntimeWarning) 93 if error.lower() == "raise": 94 raise RuntimeError(msg) 95 else: 96 pass 97 odds_ratio[outside_support] = np.nan 98 return odds_ratio.log(error="ignore") 99 100 101 @pf.register_series_method 102 def normal_cdf(s: pd.Series) -> pd.Series: 103 """ 104 Transforms the Series via the CDF of the Normal distribution. 105 106 :param s: Input Series. 107 :return: Transformed Series. 108 """ 109 return pd.Series(norm.cdf(s), index=s.index) 110 111 112 @pf.register_series_method 113 def probit(s: pd.Series, error: str = "warn") -> pd.Series: 114 """ 115 Transforms the Series via the inverse CDF of the Normal distribution. 116 117 :param s: Input Series. 118 :param error: Determines behavior when `s` is outside of `(0, 1)`. 119 If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, then 120 a `RuntimeError` is thrown. Otherwise, nothing is thrown and `np.nan` 121 is returned for the problematic entries; defaults to `'warn'`. 122 :raises RuntimeError: Raised when there are problematic values 123 in the Series and `error='raise'`. 124 :return: Transformed Series 125 """ 126 s = s.copy() 127 outside_support = (s <= 0) | (s >= 1) 128 if (outside_support).any(): 129 msg = f"{outside_support.sum()} value(s) are outside of (0, 1)" 130 if error.lower() == "warn": 131 warnings.warn(msg, RuntimeWarning) 132 if error.lower() == "raise": 133 raise RuntimeError(msg) 134 else: 135 pass 136 s[outside_support] = np.nan 137 with np.errstate(all="ignore"): 138 out = pd.Series(norm.ppf(s), index=s.index) 139 return out 140 141 142 @pf.register_series_method 143 def z_score( 144 s: pd.Series, 145 moments_dict: dict = None, 146 keys: Tuple[str, str] = ("mean", "std"), 147 ) -> pd.Series: 148 """ 149 Transforms the Series into z-scores where: 150 151 ```python 152 z = (s - s.mean()) / s.std() 153 ``` 154 155 :param s: Input Series. 156 :param moments_dict: If not `None`, then the mean and standard 157 deviation used to compute the z-score transformation is 158 saved as entries in `moments_dict` with keys determined by 159 the `keys` argument; defaults to `None`. 160 :param keys: Determines the keys saved in `moments_dict` 161 if moments are saved; defaults to (`'mean'`, `'std'`). 162 :return: Transformed Series. 163 """ 164 mean = s.mean() 165 std = s.std() 166 if std == 0: 167 return 0 168 if moments_dict is not None: 169 moments_dict[keys[0]] = mean 170 moments_dict[keys[1]] = std 171 return (s - mean) / std 172 173 174 @pf.register_series_method 175 def ecdf(s: pd.Series) -> Tuple[np.ndarray, np.ndarray]: 176 """ 177 Return cumulative distribution of values in a series. 178 179 Intended to be used with the following pattern: 180 181 ```python 182 df = pd.DataFrame(...) 183 184 # Obtain ECDF values to be plotted 185 x, y = df["column_name"].ecdf() 186 187 # Plot ECDF values 188 plt.scatter(x, y) 189 ``` 190 191 Null values must be dropped from the series, 192 otherwise a `ValueError` is raised. 193 194 Also, if the `dtype` of the series is not numeric, 195 a `TypeError` is raised. 196 197 :param s: A pandas series. `dtype` should be numeric. 198 :returns: `(x, y)`. 199 `x`: sorted array of values. 200 `y`: cumulative fraction of data points with value `x` or lower. 201 :raises TypeError: if series is not numeric. 202 :raises ValueError: if series contains nulls. 203 """ 204 if not is_numeric_dtype(s): 205 raise TypeError(f"series {s.name} must be numeric!") 206 if not s.isna().sum() == 0: 207 raise ValueError(f"series {s.name} contains nulls. Please drop them.") 208 209 n = len(s) 210 x = np.sort(s) 211 y = np.arange(1, n + 1) / n 212 213 return x, y 214 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/janitor/math.py b/janitor/math.py --- a/janitor/math.py +++ b/janitor/math.py @@ -8,6 +8,7 @@ import pandas_flavor as pf from pandas.api.types import is_numeric_dtype from scipy.special import expit +from scipy.special import softmax as scipy_softmax from scipy.stats import norm @@ -65,6 +66,27 @@ return expit(s) +@pf.register_series_method +def softmax(s: pd.Series) -> pd.Series: + """ + Take the softmax transform of the series. + + The softmax function transforms each element of a collection by + computing the exponential of each element divided by the sum of the + exponentials of all the elements. + + That is, if x is a one-dimensional numpy array or pandas Series: + + ```python + softmax(x) = exp(x)/sum(exp(x)) + ``` + + :param s: Input Series. + :return: Transformed Series. + """ + return scipy_softmax(s) + + @pf.register_series_method def logit(s: pd.Series, error: str = "warn") -> pd.Series: """
{"golden_diff": "diff --git a/janitor/math.py b/janitor/math.py\n--- a/janitor/math.py\n+++ b/janitor/math.py\n@@ -8,6 +8,7 @@\n import pandas_flavor as pf\n from pandas.api.types import is_numeric_dtype\n from scipy.special import expit\n+from scipy.special import softmax as scipy_softmax\n from scipy.stats import norm\n \n \n@@ -65,6 +66,27 @@\n return expit(s)\n \n \n+@pf.register_series_method\n+def softmax(s: pd.Series) -> pd.Series:\n+ \"\"\"\n+ Take the softmax transform of the series.\n+\n+ The softmax function transforms each element of a collection by\n+ computing the exponential of each element divided by the sum of the\n+ exponentials of all the elements.\n+\n+ That is, if x is a one-dimensional numpy array or pandas Series:\n+\n+ ```python\n+ softmax(x) = exp(x)/sum(exp(x))\n+ ```\n+\n+ :param s: Input Series.\n+ :return: Transformed Series.\n+ \"\"\"\n+ return scipy_softmax(s)\n+\n+\n @pf.register_series_method\n def logit(s: pd.Series, error: str = \"warn\") -> pd.Series:\n \"\"\"\n", "issue": "[ENH] Add `softmax` to math submodule\nIn reviewing the `math` module, I'd like to add `softmax` to the functions. This will complete the \"activation\" functions :) as we have sigmoids but no softmax.\n[ENH] Add `softmax` to math submodule\nIn reviewing the `math` module, I'd like to add `softmax` to the functions. This will complete the \"activation\" functions :) as we have sigmoids but no softmax.\n", "before_files": [{"content": "\"\"\" Miscellaneous mathematical operators. \"\"\"\n\nimport warnings\nfrom typing import Tuple\n\nimport numpy as np\nimport pandas as pd\nimport pandas_flavor as pf\nfrom pandas.api.types import is_numeric_dtype\nfrom scipy.special import expit\nfrom scipy.stats import norm\n\n\n@pf.register_series_method\ndef log(s: pd.Series, error: str = \"warn\") -> pd.Series:\n \"\"\"\n Take natural logarithm of the Series.\n\n :param s: Input Series.\n :param error: Determines behavior when taking the log of nonpositive\n entries. If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`,\n then a `RuntimeError` is thrown. Otherwise, nothing is thrown and\n log of nonpositive values is `np.nan`; defaults to `'warn'`.\n :raises RuntimeError: Raised when there are nonpositive values in the\n Series and `error='raise'`.\n :return: Transformed Series.\n \"\"\"\n s = s.copy()\n nonpositive = s <= 0\n if (nonpositive).any():\n msg = f\"Log taken on {nonpositive.sum()} nonpositive value(s)\"\n if error.lower() == \"warn\":\n warnings.warn(msg, RuntimeWarning)\n if error.lower() == \"raise\":\n raise RuntimeError(msg)\n else:\n pass\n s[nonpositive] = np.nan\n return np.log(s)\n\n\n@pf.register_series_method\ndef exp(s: pd.Series) -> pd.Series:\n \"\"\"\n Take the exponential transform of the series.\n\n :param s: Input Series.\n :return: Transformed Series.\n \"\"\"\n return np.exp(s)\n\n\n@pf.register_series_method\ndef sigmoid(s: pd.Series) -> pd.Series:\n \"\"\"\n Take the sigmoid transform of the series where:\n\n ```python\n sigmoid(x) = 1 / (1 + exp(-x))\n ```\n\n :param s: Input Series.\n :return: Transformed Series.\n \"\"\"\n return expit(s)\n\n\n@pf.register_series_method\ndef logit(s: pd.Series, error: str = \"warn\") -> pd.Series:\n \"\"\"\n Take logit transform of the Series where:\n\n ```python\n logit(p) = log(p/(1-p))\n ```\n\n :param s: Input Series.\n :param error: Determines behavior when `s / (1-s)` is outside of `(0, 1)`.\n If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, then a\n `RuntimeError` is thrown. Otherwise, nothing is thrown and `np.nan`\n is returned for the problematic entries; defaults to `'warn'`.\n :return: Transformed Series.\n :raises RuntimeError: if `error` is set to `'raise'`.\n \"\"\"\n s = s.copy()\n odds_ratio = s / (1 - s)\n outside_support = (odds_ratio <= 0) | (odds_ratio >= 1)\n if (outside_support).any():\n msg = f\"Odds ratio for {outside_support.sum()} value(s) \\\nare outside of (0, 1)\"\n if error.lower() == \"warn\":\n warnings.warn(msg, RuntimeWarning)\n if error.lower() == \"raise\":\n raise RuntimeError(msg)\n else:\n pass\n odds_ratio[outside_support] = np.nan\n return odds_ratio.log(error=\"ignore\")\n\n\n@pf.register_series_method\ndef normal_cdf(s: pd.Series) -> pd.Series:\n \"\"\"\n Transforms the Series via the CDF of the Normal distribution.\n\n :param s: Input Series.\n :return: Transformed Series.\n \"\"\"\n return pd.Series(norm.cdf(s), index=s.index)\n\n\n@pf.register_series_method\ndef probit(s: pd.Series, error: str = \"warn\") -> pd.Series:\n \"\"\"\n Transforms the Series via the inverse CDF of the Normal distribution.\n\n :param s: Input Series.\n :param error: Determines behavior when `s` is outside of `(0, 1)`.\n If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, then\n a `RuntimeError` is thrown. Otherwise, nothing is thrown and `np.nan`\n is returned for the problematic entries; defaults to `'warn'`.\n :raises RuntimeError: Raised when there are problematic values\n in the Series and `error='raise'`.\n :return: Transformed Series\n \"\"\"\n s = s.copy()\n outside_support = (s <= 0) | (s >= 1)\n if (outside_support).any():\n msg = f\"{outside_support.sum()} value(s) are outside of (0, 1)\"\n if error.lower() == \"warn\":\n warnings.warn(msg, RuntimeWarning)\n if error.lower() == \"raise\":\n raise RuntimeError(msg)\n else:\n pass\n s[outside_support] = np.nan\n with np.errstate(all=\"ignore\"):\n out = pd.Series(norm.ppf(s), index=s.index)\n return out\n\n\n@pf.register_series_method\ndef z_score(\n s: pd.Series,\n moments_dict: dict = None,\n keys: Tuple[str, str] = (\"mean\", \"std\"),\n) -> pd.Series:\n \"\"\"\n Transforms the Series into z-scores where:\n\n ```python\n z = (s - s.mean()) / s.std()\n ```\n\n :param s: Input Series.\n :param moments_dict: If not `None`, then the mean and standard\n deviation used to compute the z-score transformation is\n saved as entries in `moments_dict` with keys determined by\n the `keys` argument; defaults to `None`.\n :param keys: Determines the keys saved in `moments_dict`\n if moments are saved; defaults to (`'mean'`, `'std'`).\n :return: Transformed Series.\n \"\"\"\n mean = s.mean()\n std = s.std()\n if std == 0:\n return 0\n if moments_dict is not None:\n moments_dict[keys[0]] = mean\n moments_dict[keys[1]] = std\n return (s - mean) / std\n\n\n@pf.register_series_method\ndef ecdf(s: pd.Series) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Return cumulative distribution of values in a series.\n\n Intended to be used with the following pattern:\n\n ```python\n df = pd.DataFrame(...)\n\n # Obtain ECDF values to be plotted\n x, y = df[\"column_name\"].ecdf()\n\n # Plot ECDF values\n plt.scatter(x, y)\n ```\n\n Null values must be dropped from the series,\n otherwise a `ValueError` is raised.\n\n Also, if the `dtype` of the series is not numeric,\n a `TypeError` is raised.\n\n :param s: A pandas series. `dtype` should be numeric.\n :returns: `(x, y)`.\n `x`: sorted array of values.\n `y`: cumulative fraction of data points with value `x` or lower.\n :raises TypeError: if series is not numeric.\n :raises ValueError: if series contains nulls.\n \"\"\"\n if not is_numeric_dtype(s):\n raise TypeError(f\"series {s.name} must be numeric!\")\n if not s.isna().sum() == 0:\n raise ValueError(f\"series {s.name} contains nulls. Please drop them.\")\n\n n = len(s)\n x = np.sort(s)\n y = np.arange(1, n + 1) / n\n\n return x, y\n", "path": "janitor/math.py"}], "after_files": [{"content": "\"\"\" Miscellaneous mathematical operators. \"\"\"\n\nimport warnings\nfrom typing import Tuple\n\nimport numpy as np\nimport pandas as pd\nimport pandas_flavor as pf\nfrom pandas.api.types import is_numeric_dtype\nfrom scipy.special import expit\nfrom scipy.special import softmax as scipy_softmax\nfrom scipy.stats import norm\n\n\n@pf.register_series_method\ndef log(s: pd.Series, error: str = \"warn\") -> pd.Series:\n \"\"\"\n Take natural logarithm of the Series.\n\n :param s: Input Series.\n :param error: Determines behavior when taking the log of nonpositive\n entries. If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`,\n then a `RuntimeError` is thrown. Otherwise, nothing is thrown and\n log of nonpositive values is `np.nan`; defaults to `'warn'`.\n :raises RuntimeError: Raised when there are nonpositive values in the\n Series and `error='raise'`.\n :return: Transformed Series.\n \"\"\"\n s = s.copy()\n nonpositive = s <= 0\n if (nonpositive).any():\n msg = f\"Log taken on {nonpositive.sum()} nonpositive value(s)\"\n if error.lower() == \"warn\":\n warnings.warn(msg, RuntimeWarning)\n if error.lower() == \"raise\":\n raise RuntimeError(msg)\n else:\n pass\n s[nonpositive] = np.nan\n return np.log(s)\n\n\n@pf.register_series_method\ndef exp(s: pd.Series) -> pd.Series:\n \"\"\"\n Take the exponential transform of the series.\n\n :param s: Input Series.\n :return: Transformed Series.\n \"\"\"\n return np.exp(s)\n\n\n@pf.register_series_method\ndef sigmoid(s: pd.Series) -> pd.Series:\n \"\"\"\n Take the sigmoid transform of the series where:\n\n ```python\n sigmoid(x) = 1 / (1 + exp(-x))\n ```\n\n :param s: Input Series.\n :return: Transformed Series.\n \"\"\"\n return expit(s)\n\n\n@pf.register_series_method\ndef softmax(s: pd.Series) -> pd.Series:\n \"\"\"\n Take the softmax transform of the series.\n\n The softmax function transforms each element of a collection by\n computing the exponential of each element divided by the sum of the\n exponentials of all the elements.\n\n That is, if x is a one-dimensional numpy array or pandas Series:\n\n ```python\n softmax(x) = exp(x)/sum(exp(x))\n ```\n\n :param s: Input Series.\n :return: Transformed Series.\n \"\"\"\n return scipy_softmax(s)\n\n\n@pf.register_series_method\ndef logit(s: pd.Series, error: str = \"warn\") -> pd.Series:\n \"\"\"\n Take logit transform of the Series where:\n\n ```python\n logit(p) = log(p/(1-p))\n ```\n\n :param s: Input Series.\n :param error: Determines behavior when `s / (1-s)` is outside of `(0, 1)`.\n If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, then a\n `RuntimeError` is thrown. Otherwise, nothing is thrown and `np.nan`\n is returned for the problematic entries; defaults to `'warn'`.\n :return: Transformed Series.\n :raises RuntimeError: if `error` is set to `'raise'`.\n \"\"\"\n s = s.copy()\n odds_ratio = s / (1 - s)\n outside_support = (odds_ratio <= 0) | (odds_ratio >= 1)\n if (outside_support).any():\n msg = f\"Odds ratio for {outside_support.sum()} value(s) \\\nare outside of (0, 1)\"\n if error.lower() == \"warn\":\n warnings.warn(msg, RuntimeWarning)\n if error.lower() == \"raise\":\n raise RuntimeError(msg)\n else:\n pass\n odds_ratio[outside_support] = np.nan\n return odds_ratio.log(error=\"ignore\")\n\n\n@pf.register_series_method\ndef normal_cdf(s: pd.Series) -> pd.Series:\n \"\"\"\n Transforms the Series via the CDF of the Normal distribution.\n\n :param s: Input Series.\n :return: Transformed Series.\n \"\"\"\n return pd.Series(norm.cdf(s), index=s.index)\n\n\n@pf.register_series_method\ndef probit(s: pd.Series, error: str = \"warn\") -> pd.Series:\n \"\"\"\n Transforms the Series via the inverse CDF of the Normal distribution.\n\n :param s: Input Series.\n :param error: Determines behavior when `s` is outside of `(0, 1)`.\n If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, then\n a `RuntimeError` is thrown. Otherwise, nothing is thrown and `np.nan`\n is returned for the problematic entries; defaults to `'warn'`.\n :raises RuntimeError: Raised when there are problematic values\n in the Series and `error='raise'`.\n :return: Transformed Series\n \"\"\"\n s = s.copy()\n outside_support = (s <= 0) | (s >= 1)\n if (outside_support).any():\n msg = f\"{outside_support.sum()} value(s) are outside of (0, 1)\"\n if error.lower() == \"warn\":\n warnings.warn(msg, RuntimeWarning)\n if error.lower() == \"raise\":\n raise RuntimeError(msg)\n else:\n pass\n s[outside_support] = np.nan\n with np.errstate(all=\"ignore\"):\n out = pd.Series(norm.ppf(s), index=s.index)\n return out\n\n\n@pf.register_series_method\ndef z_score(\n s: pd.Series,\n moments_dict: dict = None,\n keys: Tuple[str, str] = (\"mean\", \"std\"),\n) -> pd.Series:\n \"\"\"\n Transforms the Series into z-scores where:\n\n ```python\n z = (s - s.mean()) / s.std()\n ```\n\n :param s: Input Series.\n :param moments_dict: If not `None`, then the mean and standard\n deviation used to compute the z-score transformation is\n saved as entries in `moments_dict` with keys determined by\n the `keys` argument; defaults to `None`.\n :param keys: Determines the keys saved in `moments_dict`\n if moments are saved; defaults to (`'mean'`, `'std'`).\n :return: Transformed Series.\n \"\"\"\n mean = s.mean()\n std = s.std()\n if std == 0:\n return 0\n if moments_dict is not None:\n moments_dict[keys[0]] = mean\n moments_dict[keys[1]] = std\n return (s - mean) / std\n\n\n@pf.register_series_method\ndef ecdf(s: pd.Series) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Return cumulative distribution of values in a series.\n\n Intended to be used with the following pattern:\n\n ```python\n df = pd.DataFrame(...)\n\n # Obtain ECDF values to be plotted\n x, y = df[\"column_name\"].ecdf()\n\n # Plot ECDF values\n plt.scatter(x, y)\n ```\n\n Null values must be dropped from the series,\n otherwise a `ValueError` is raised.\n\n Also, if the `dtype` of the series is not numeric,\n a `TypeError` is raised.\n\n :param s: A pandas series. `dtype` should be numeric.\n :returns: `(x, y)`.\n `x`: sorted array of values.\n `y`: cumulative fraction of data points with value `x` or lower.\n :raises TypeError: if series is not numeric.\n :raises ValueError: if series contains nulls.\n \"\"\"\n if not is_numeric_dtype(s):\n raise TypeError(f\"series {s.name} must be numeric!\")\n if not s.isna().sum() == 0:\n raise ValueError(f\"series {s.name} contains nulls. Please drop them.\")\n\n n = len(s)\n x = np.sort(s)\n y = np.arange(1, n + 1) / n\n\n return x, y\n", "path": "janitor/math.py"}]}
2,547
270
gh_patches_debug_25592
rasdani/github-patches
git_diff
scverse__scanpy-155
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Recipes with plotting option throw import error Some of the preprocessing recipes have a `plot` argument, but as far as I can tell, they'll only throw an error. `recipe_zheng17` and `recipe_seurat` have the lines: ```python if plot: from .. import plotting as pl # should not import at the top of the file pl.filter_genes_dispersion(filter_result, log=True) ``` But `plotting` doesn't have the function `filter_genes_dispersion` exposed. Here's an example of the error using `scanpy` pulled from github, but the same issue occurs on the release on pypi: ```python In [1]: import numpy as np ...: import pandas as pd ...: import scanpy.api as sc ...: ...: sc.settings.verbosity = 3 # verbosity: errors (0), warnings (1), info (2), hints (3) ...: sc.settings.set_figure_params(dpi=80) # low dpi (dots per inch) yields small inline figures ...: sc.logging.print_versions() /Users/isaac/miniconda3/envs/scanpy/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters adatascanpy==1.0.4+91.ge9ae4ff anndata==0.6 numpy==1.14.3 scipy==1.1.0 pandas==0.22.0 scikit-learn==0.19.1 statsmodels==0.8.0 In [2]: adata = sc.read("./data/pbmc3k_filtered_gene_bc_matrices/hg19/matrix.mtx").T --> This might be very slow. Consider passing `cache=True`, which enables much faster reading from a cache file. In [3]: sc.pp.recipe_zheng17(adata, plot=True) running recipe zheng17 --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-3-c19f237f1c6e> in <module>() ----> 1 sc.pp.recipe_zheng17(adata, plot=True) ~/github/scanpy/scanpy/preprocessing/recipes.py in recipe_zheng17(adata, n_top_genes, log, plot, copy) 106 if plot: 107 from .. import plotting as pl # should not import at the top of the file --> 108 pl.filter_genes_dispersion(filter_result, log=True) 109 # actually filter the genes, the following is the inplace version of 110 # adata = adata[:, filter_result.gene_subset] AttributeError: module 'scanpy.plotting' has no attribute 'filter_genes_dispersion' ``` It looks like there's a pretty easy fix here, so I'd be up for making a pull request if you'd like. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scanpy/preprocessing/recipes.py` Content: ``` 1 """Preprocessing recipes from the literature 2 """ 3 4 from . import simple as pp 5 from .. import logging as logg 6 7 8 def recipe_weinreb17(adata, log=True, mean_threshold=0.01, cv_threshold=2, 9 n_pcs=50, svd_solver='randomized', random_state=0, 10 copy=False): 11 """Normalization and filtering as of [Weinreb17]_. 12 13 Expects non-logarithmized data. If using logarithmized data, pass `log=False`. 14 15 Parameters 16 ---------- 17 adata : :class:`~scanpy.api.AnnData` 18 Annotated data matrix. 19 copy : bool (default: False) 20 Return a copy if true. 21 """ 22 from scipy.sparse import issparse 23 if issparse(adata.X): 24 raise ValueError('`recipe_weinreb16 does not support sparse matrices.') 25 if copy: adata = adata.copy() 26 if log: pp.log1p(adata) 27 adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X, 28 max_fraction=0.05, 29 mult_with_mean=True) 30 gene_subset = pp.filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold) 31 adata._inplace_subset_var(gene_subset) # this modifies the object itself 32 X_pca = pp.pca(pp.zscore_deprecated(adata.X), 33 n_comps=n_pcs, svd_solver=svd_solver, random_state=random_state) 34 # update adata 35 adata.obsm['X_pca'] = X_pca 36 return adata if copy else None 37 38 39 def recipe_seurat(adata, log=True, plot=False, copy=False): 40 """Normalization and filtering as of Seurat [Satija15]_. 41 42 This uses a particular preprocessing. 43 44 Expects non-logarithmized data. If using logarithmized data, pass `log=False`. 45 """ 46 if copy: adata = adata.copy() 47 pp.filter_cells(adata, min_genes=200) 48 pp.filter_genes(adata, min_cells=3) 49 pp.normalize_per_cell(adata, counts_per_cell_after=1e4) 50 filter_result = pp.filter_genes_dispersion( 51 adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log) 52 if plot: 53 from .. import plotting as pl # should not import at the top of the file 54 pl.filter_genes_dispersion(filter_result, log=not log) 55 adata._inplace_subset_var(filter_result.gene_subset) # filter genes 56 if log: pp.log1p(adata) 57 pp.scale(adata, max_value=10) 58 return adata if copy else None 59 60 61 def recipe_zheng17(adata, n_top_genes=1000, log=True, plot=False, copy=False): 62 """Normalization and filtering as of [Zheng17]_. 63 64 Reproduces the preprocessing of [Zheng17]_ - the Cell Ranger R Kit of 10x 65 Genomics. 66 67 Expects non-logarithmized data. If using logarithmized data, pass `log=False`. 68 69 The recipe runs the following steps:: 70 71 sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count 72 sc.pp.normalize_per_cell( # normalize with total UMI count per cell 73 adata, key_n_counts='n_counts_all') 74 filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes 75 adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False) 76 adata = adata[:, filter_result.gene_subset] # subset the genes 77 sc.pp.normalize_per_cell(adata) # renormalize after filtering 78 if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1) 79 sc.pp.scale(adata) # scale to unit variance and shift to zero mean 80 81 82 Parameters 83 ---------- 84 adata : :class:`~scanpy.api.AnnData` 85 Annotated data matrix. 86 n_top_genes : `int`, optional (default: 1000) 87 Number of genes to keep. 88 log : `bool`, optional (default: `True`) 89 Take logarithm. 90 plot : `bool`, optional (default: `True`) 91 Show a plot of the gene dispersion vs. mean relation. 92 copy : `bool`, optional (default: `False`) 93 Return a copy of `adata` instead of updating it. 94 95 Returns 96 ------- 97 Returns or updates `adata` depending on `copy`. 98 """ 99 logg.info('running recipe zheng17', reset=True) 100 if copy: adata = adata.copy() 101 pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count 102 pp.normalize_per_cell(adata, # normalize with total UMI count per cell 103 key_n_counts='n_counts_all') 104 filter_result = pp.filter_genes_dispersion( 105 adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False) 106 if plot: 107 from .. import plotting as pl # should not import at the top of the file 108 pl.filter_genes_dispersion(filter_result, log=True) 109 # actually filter the genes, the following is the inplace version of 110 # adata = adata[:, filter_result.gene_subset] 111 adata._inplace_subset_var(filter_result.gene_subset) # filter genes 112 pp.normalize_per_cell(adata) # renormalize after filtering 113 if log: pp.log1p(adata) # log transform: X = log(X + 1) 114 pp.scale(adata) 115 logg.info(' finished', time=True) 116 return adata if copy else None 117 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scanpy/preprocessing/recipes.py b/scanpy/preprocessing/recipes.py --- a/scanpy/preprocessing/recipes.py +++ b/scanpy/preprocessing/recipes.py @@ -51,7 +51,7 @@ adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log) if plot: from .. import plotting as pl # should not import at the top of the file - pl.filter_genes_dispersion(filter_result, log=not log) + pl.preprocessing.filter_genes_dispersion(filter_result, log=not log) adata._inplace_subset_var(filter_result.gene_subset) # filter genes if log: pp.log1p(adata) pp.scale(adata, max_value=10) @@ -105,7 +105,7 @@ adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False) if plot: from .. import plotting as pl # should not import at the top of the file - pl.filter_genes_dispersion(filter_result, log=True) + pl.preprocessing.filter_genes_dispersion(filter_result, log=True) # actually filter the genes, the following is the inplace version of # adata = adata[:, filter_result.gene_subset] adata._inplace_subset_var(filter_result.gene_subset) # filter genes
{"golden_diff": "diff --git a/scanpy/preprocessing/recipes.py b/scanpy/preprocessing/recipes.py\n--- a/scanpy/preprocessing/recipes.py\n+++ b/scanpy/preprocessing/recipes.py\n@@ -51,7 +51,7 @@\n adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)\n if plot:\n from .. import plotting as pl # should not import at the top of the file\n- pl.filter_genes_dispersion(filter_result, log=not log)\n+ pl.preprocessing.filter_genes_dispersion(filter_result, log=not log)\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n if log: pp.log1p(adata)\n pp.scale(adata, max_value=10)\n@@ -105,7 +105,7 @@\n adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)\n if plot:\n from .. import plotting as pl # should not import at the top of the file\n- pl.filter_genes_dispersion(filter_result, log=True)\n+ pl.preprocessing.filter_genes_dispersion(filter_result, log=True)\n # actually filter the genes, the following is the inplace version of\n # adata = adata[:, filter_result.gene_subset]\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n", "issue": "Recipes with plotting option throw import error\nSome of the preprocessing recipes have a `plot` argument, but as far as I can tell, they'll only throw an error. `recipe_zheng17` and `recipe_seurat` have the lines:\r\n\r\n```python\r\n if plot:\r\n from .. import plotting as pl # should not import at the top of the file\r\n pl.filter_genes_dispersion(filter_result, log=True)\r\n```\r\n\r\nBut `plotting` doesn't have the function `filter_genes_dispersion` exposed. Here's an example of the error using `scanpy` pulled from github, but the same issue occurs on the release on pypi:\r\n\r\n```python\r\nIn [1]: import numpy as np\r\n ...: import pandas as pd\r\n ...: import scanpy.api as sc\r\n ...: \r\n ...: sc.settings.verbosity = 3 # verbosity: errors (0), warnings (1), info (2), hints (3)\r\n ...: sc.settings.set_figure_params(dpi=80) # low dpi (dots per inch) yields small inline figures\r\n ...: sc.logging.print_versions()\r\n/Users/isaac/miniconda3/envs/scanpy/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n from ._conv import register_converters as _register_converters\r\nadatascanpy==1.0.4+91.ge9ae4ff anndata==0.6 numpy==1.14.3 scipy==1.1.0 pandas==0.22.0 scikit-learn==0.19.1 statsmodels==0.8.0 \r\n\r\nIn [2]: adata = sc.read(\"./data/pbmc3k_filtered_gene_bc_matrices/hg19/matrix.mtx\").T\r\n--> This might be very slow. Consider passing `cache=True`, which enables much faster reading from a cache file.\r\nIn [3]: sc.pp.recipe_zheng17(adata, plot=True)\r\nrunning recipe zheng17\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-3-c19f237f1c6e> in <module>()\r\n----> 1 sc.pp.recipe_zheng17(adata, plot=True)\r\n\r\n~/github/scanpy/scanpy/preprocessing/recipes.py in recipe_zheng17(adata, n_top_genes, log, plot, copy)\r\n 106 if plot:\r\n 107 from .. import plotting as pl # should not import at the top of the file\r\n--> 108 pl.filter_genes_dispersion(filter_result, log=True)\r\n 109 # actually filter the genes, the following is the inplace version of\r\n 110 # adata = adata[:, filter_result.gene_subset]\r\n\r\nAttributeError: module 'scanpy.plotting' has no attribute 'filter_genes_dispersion'\r\n```\r\n\r\nIt looks like there's a pretty easy fix here, so I'd be up for making a pull request if you'd like.\n", "before_files": [{"content": "\"\"\"Preprocessing recipes from the literature\n\"\"\"\n\nfrom . import simple as pp\nfrom .. import logging as logg\n\n\ndef recipe_weinreb17(adata, log=True, mean_threshold=0.01, cv_threshold=2,\n n_pcs=50, svd_solver='randomized', random_state=0,\n copy=False):\n \"\"\"Normalization and filtering as of [Weinreb17]_.\n\n Expects non-logarithmized data. If using logarithmized data, pass `log=False`.\n\n Parameters\n ----------\n adata : :class:`~scanpy.api.AnnData`\n Annotated data matrix.\n copy : bool (default: False)\n Return a copy if true.\n \"\"\"\n from scipy.sparse import issparse\n if issparse(adata.X):\n raise ValueError('`recipe_weinreb16 does not support sparse matrices.')\n if copy: adata = adata.copy()\n if log: pp.log1p(adata)\n adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X,\n max_fraction=0.05,\n mult_with_mean=True)\n gene_subset = pp.filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)\n adata._inplace_subset_var(gene_subset) # this modifies the object itself\n X_pca = pp.pca(pp.zscore_deprecated(adata.X),\n n_comps=n_pcs, svd_solver=svd_solver, random_state=random_state)\n # update adata\n adata.obsm['X_pca'] = X_pca\n return adata if copy else None\n\n\ndef recipe_seurat(adata, log=True, plot=False, copy=False):\n \"\"\"Normalization and filtering as of Seurat [Satija15]_.\n\n This uses a particular preprocessing.\n\n Expects non-logarithmized data. If using logarithmized data, pass `log=False`.\n \"\"\"\n if copy: adata = adata.copy()\n pp.filter_cells(adata, min_genes=200)\n pp.filter_genes(adata, min_cells=3)\n pp.normalize_per_cell(adata, counts_per_cell_after=1e4)\n filter_result = pp.filter_genes_dispersion(\n adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)\n if plot:\n from .. import plotting as pl # should not import at the top of the file\n pl.filter_genes_dispersion(filter_result, log=not log)\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n if log: pp.log1p(adata)\n pp.scale(adata, max_value=10)\n return adata if copy else None\n\n\ndef recipe_zheng17(adata, n_top_genes=1000, log=True, plot=False, copy=False):\n \"\"\"Normalization and filtering as of [Zheng17]_.\n\n Reproduces the preprocessing of [Zheng17]_ - the Cell Ranger R Kit of 10x\n Genomics.\n\n Expects non-logarithmized data. If using logarithmized data, pass `log=False`.\n\n The recipe runs the following steps::\n\n sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count\n sc.pp.normalize_per_cell( # normalize with total UMI count per cell\n adata, key_n_counts='n_counts_all')\n filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes\n adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)\n adata = adata[:, filter_result.gene_subset] # subset the genes\n sc.pp.normalize_per_cell(adata) # renormalize after filtering\n if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)\n sc.pp.scale(adata) # scale to unit variance and shift to zero mean\n\n\n Parameters\n ----------\n adata : :class:`~scanpy.api.AnnData`\n Annotated data matrix.\n n_top_genes : `int`, optional (default: 1000)\n Number of genes to keep.\n log : `bool`, optional (default: `True`)\n Take logarithm.\n plot : `bool`, optional (default: `True`)\n Show a plot of the gene dispersion vs. mean relation.\n copy : `bool`, optional (default: `False`)\n Return a copy of `adata` instead of updating it.\n\n Returns\n -------\n Returns or updates `adata` depending on `copy`.\n \"\"\"\n logg.info('running recipe zheng17', reset=True)\n if copy: adata = adata.copy()\n pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count\n pp.normalize_per_cell(adata, # normalize with total UMI count per cell\n key_n_counts='n_counts_all')\n filter_result = pp.filter_genes_dispersion(\n adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)\n if plot:\n from .. import plotting as pl # should not import at the top of the file\n pl.filter_genes_dispersion(filter_result, log=True)\n # actually filter the genes, the following is the inplace version of\n # adata = adata[:, filter_result.gene_subset]\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n pp.normalize_per_cell(adata) # renormalize after filtering\n if log: pp.log1p(adata) # log transform: X = log(X + 1)\n pp.scale(adata)\n logg.info(' finished', time=True)\n return adata if copy else None\n", "path": "scanpy/preprocessing/recipes.py"}], "after_files": [{"content": "\"\"\"Preprocessing recipes from the literature\n\"\"\"\n\nfrom . import simple as pp\nfrom .. import logging as logg\n\n\ndef recipe_weinreb17(adata, log=True, mean_threshold=0.01, cv_threshold=2,\n n_pcs=50, svd_solver='randomized', random_state=0,\n copy=False):\n \"\"\"Normalization and filtering as of [Weinreb17]_.\n\n Expects non-logarithmized data. If using logarithmized data, pass `log=False`.\n\n Parameters\n ----------\n adata : :class:`~scanpy.api.AnnData`\n Annotated data matrix.\n copy : bool (default: False)\n Return a copy if true.\n \"\"\"\n from scipy.sparse import issparse\n if issparse(adata.X):\n raise ValueError('`recipe_weinreb16 does not support sparse matrices.')\n if copy: adata = adata.copy()\n if log: pp.log1p(adata)\n adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X,\n max_fraction=0.05,\n mult_with_mean=True)\n gene_subset = pp.filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)\n adata._inplace_subset_var(gene_subset) # this modifies the object itself\n X_pca = pp.pca(pp.zscore_deprecated(adata.X),\n n_comps=n_pcs, svd_solver=svd_solver, random_state=random_state)\n # update adata\n adata.obsm['X_pca'] = X_pca\n return adata if copy else None\n\n\ndef recipe_seurat(adata, log=True, plot=False, copy=False):\n \"\"\"Normalization and filtering as of Seurat [Satija15]_.\n\n This uses a particular preprocessing.\n\n Expects non-logarithmized data. If using logarithmized data, pass `log=False`.\n \"\"\"\n if copy: adata = adata.copy()\n pp.filter_cells(adata, min_genes=200)\n pp.filter_genes(adata, min_cells=3)\n pp.normalize_per_cell(adata, counts_per_cell_after=1e4)\n filter_result = pp.filter_genes_dispersion(\n adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log)\n if plot:\n from .. import plotting as pl # should not import at the top of the file\n pl.preprocessing.filter_genes_dispersion(filter_result, log=not log)\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n if log: pp.log1p(adata)\n pp.scale(adata, max_value=10)\n return adata if copy else None\n\n\ndef recipe_zheng17(adata, n_top_genes=1000, log=True, plot=False, copy=False):\n \"\"\"Normalization and filtering as of [Zheng17]_.\n\n Reproduces the preprocessing of [Zheng17]_ - the Cell Ranger R Kit of 10x\n Genomics.\n\n Expects non-logarithmized data. If using logarithmized data, pass `log=False`.\n\n The recipe runs the following steps::\n\n sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count\n sc.pp.normalize_per_cell( # normalize with total UMI count per cell\n adata, key_n_counts='n_counts_all')\n filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes\n adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)\n adata = adata[:, filter_result.gene_subset] # subset the genes\n sc.pp.normalize_per_cell(adata) # renormalize after filtering\n if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)\n sc.pp.scale(adata) # scale to unit variance and shift to zero mean\n\n\n Parameters\n ----------\n adata : :class:`~scanpy.api.AnnData`\n Annotated data matrix.\n n_top_genes : `int`, optional (default: 1000)\n Number of genes to keep.\n log : `bool`, optional (default: `True`)\n Take logarithm.\n plot : `bool`, optional (default: `True`)\n Show a plot of the gene dispersion vs. mean relation.\n copy : `bool`, optional (default: `False`)\n Return a copy of `adata` instead of updating it.\n\n Returns\n -------\n Returns or updates `adata` depending on `copy`.\n \"\"\"\n logg.info('running recipe zheng17', reset=True)\n if copy: adata = adata.copy()\n pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count\n pp.normalize_per_cell(adata, # normalize with total UMI count per cell\n key_n_counts='n_counts_all')\n filter_result = pp.filter_genes_dispersion(\n adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)\n if plot:\n from .. import plotting as pl # should not import at the top of the file\n pl.preprocessing.filter_genes_dispersion(filter_result, log=True)\n # actually filter the genes, the following is the inplace version of\n # adata = adata[:, filter_result.gene_subset]\n adata._inplace_subset_var(filter_result.gene_subset) # filter genes\n pp.normalize_per_cell(adata) # renormalize after filtering\n if log: pp.log1p(adata) # log transform: X = log(X + 1)\n pp.scale(adata)\n logg.info(' finished', time=True)\n return adata if copy else None\n", "path": "scanpy/preprocessing/recipes.py"}]}
2,503
318
gh_patches_debug_51314
rasdani/github-patches
git_diff
scikit-image__scikit-image-2643
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- module 'skimage.filters' has no attribute 'denoise_tv_chambolle' There are a couple of undefined symbols in [`filters` module](https://github.com/scikit-image/scikit-image/blob/master/skimage/filters/__init__.py#L46-L48) Thus `from skimage.filters import *` gives: ``` AttributeError: module 'skimage.filters' has no attribute 'denoise_tv_chambolle' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `skimage/filters/__init__.py` Content: ``` 1 from .lpi_filter import inverse, wiener, LPIFilter2D 2 from ._gaussian import gaussian 3 from .edges import (sobel, sobel_h, sobel_v, 4 scharr, scharr_h, scharr_v, 5 prewitt, prewitt_h, prewitt_v, 6 roberts, roberts_pos_diag, roberts_neg_diag, 7 laplace) 8 from ._rank_order import rank_order 9 from ._gabor import gabor_kernel, gabor 10 from ._frangi import frangi, hessian 11 from .thresholding import (threshold_local, 12 threshold_adaptive, threshold_otsu, threshold_yen, 13 threshold_isodata, threshold_li, threshold_minimum, 14 threshold_mean, threshold_triangle, 15 threshold_niblack, threshold_sauvola, 16 try_all_threshold) 17 from . import rank 18 from .rank import median 19 20 from .._shared.utils import deprecated, copy_func 21 22 23 gaussian_filter = copy_func(gaussian, name='gaussian_filter') 24 gaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter) 25 gabor_filter = copy_func(gabor, name='gabor_filter') 26 gabor_filter = deprecated('skimage.filters.gabor')(gabor_filter) 27 28 __all__ = ['inverse', 29 'wiener', 30 'LPIFilter2D', 31 'gaussian', 32 'median', 33 'sobel', 34 'sobel_h', 35 'sobel_v', 36 'scharr', 37 'scharr_h', 38 'scharr_v', 39 'prewitt', 40 'prewitt_h', 41 'prewitt_v', 42 'roberts', 43 'roberts_pos_diag', 44 'roberts_neg_diag', 45 'laplace', 46 'denoise_tv_chambolle', 47 'denoise_bilateral', 48 'denoise_tv_bregman', 49 'rank_order', 50 'gabor_kernel', 51 'gabor', 52 'try_all_threshold', 53 'frangi', 54 'hessian', 55 'threshold_adaptive', 56 'threshold_otsu', 57 'threshold_yen', 58 'threshold_isodata', 59 'threshold_li', 60 'threshold_minimum', 61 'threshold_mean', 62 'threshold_niblack', 63 'threshold_sauvola', 64 'threshold_triangle', 65 'rank'] 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py --- a/skimage/filters/__init__.py +++ b/skimage/filters/__init__.py @@ -43,9 +43,6 @@ 'roberts_pos_diag', 'roberts_neg_diag', 'laplace', - 'denoise_tv_chambolle', - 'denoise_bilateral', - 'denoise_tv_bregman', 'rank_order', 'gabor_kernel', 'gabor',
{"golden_diff": "diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py\n--- a/skimage/filters/__init__.py\n+++ b/skimage/filters/__init__.py\n@@ -43,9 +43,6 @@\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n- 'denoise_tv_chambolle',\n- 'denoise_bilateral',\n- 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n", "issue": "module 'skimage.filters' has no attribute 'denoise_tv_chambolle' \nThere are a couple of undefined symbols in [`filters` module](https://github.com/scikit-image/scikit-image/blob/master/skimage/filters/__init__.py#L46-L48)\r\n\r\nThus `from skimage.filters import *` gives:\r\n\r\n```\r\nAttributeError: module 'skimage.filters' has no attribute 'denoise_tv_chambolle'\r\n```\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom ._frangi import frangi, hessian\nfrom .thresholding import (threshold_local,\n threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n try_all_threshold)\nfrom . import rank\nfrom .rank import median\n\nfrom .._shared.utils import deprecated, copy_func\n\n\ngaussian_filter = copy_func(gaussian, name='gaussian_filter')\ngaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)\ngabor_filter = copy_func(gabor, name='gabor_filter')\ngabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'frangi',\n 'hessian',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'rank']\n", "path": "skimage/filters/__init__.py"}], "after_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom ._frangi import frangi, hessian\nfrom .thresholding import (threshold_local,\n threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n try_all_threshold)\nfrom . import rank\nfrom .rank import median\n\nfrom .._shared.utils import deprecated, copy_func\n\n\ngaussian_filter = copy_func(gaussian, name='gaussian_filter')\ngaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)\ngabor_filter = copy_func(gabor, name='gabor_filter')\ngabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'frangi',\n 'hessian',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'rank']\n", "path": "skimage/filters/__init__.py"}]}
972
125
gh_patches_debug_14631
rasdani/github-patches
git_diff
joke2k__faker-1840
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect type annotation on Generator.add_provider * Faker version: 18.3.2 * OS: MacOS The type annotation on `Generator.add_provider` is needlessly restrictive - the implementation clearly supports passing either a type or an instance, but the type annotation only allows instances: ```python # faker/generator.py def add_provider(self, provider: "BaseProvider") -> None: if isinstance(provider, type): provider = provider(self) ``` ### Steps to reproduce 1. Write code that adds a provider class to a `Generator` 2. Run mypy ```python class MyCustomProvider(BaseProvider): pass fake = Factory.create("en_US") fake.add_provider(MyCustomProvider) ``` ### Expected behavior `mypy` check should pass ### Actual behavior `mypy` produces an error: ``` error: Argument 1 to "add_provider" of "Generator" has incompatible type "Type[MyCustomProvider]"; expected "BaseProvider" [arg-type] ``` Incorrect type annotation on Generator.add_provider * Faker version: 18.3.2 * OS: MacOS The type annotation on `Generator.add_provider` is needlessly restrictive - the implementation clearly supports passing either a type or an instance, but the type annotation only allows instances: ```python # faker/generator.py def add_provider(self, provider: "BaseProvider") -> None: if isinstance(provider, type): provider = provider(self) ``` ### Steps to reproduce 1. Write code that adds a provider class to a `Generator` 2. Run mypy ```python class MyCustomProvider(BaseProvider): pass fake = Factory.create("en_US") fake.add_provider(MyCustomProvider) ``` ### Expected behavior `mypy` check should pass ### Actual behavior `mypy` produces an error: ``` error: Argument 1 to "add_provider" of "Generator" has incompatible type "Type[MyCustomProvider]"; expected "BaseProvider" [arg-type] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/generator.py` Content: ``` 1 import random as random_module 2 import re 3 4 from typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Optional 5 6 from .typing import SeedType 7 8 if TYPE_CHECKING: 9 from .providers import BaseProvider 10 11 _re_token = re.compile(r"\{\{\s*(\w+)(:\s*\w+?)?\s*\}\}") 12 random = random_module.Random() 13 mod_random = random # compat with name released in 0.8 14 15 16 Sentinel = object() 17 18 19 class Generator: 20 __config: Dict[str, Dict[Hashable, Any]] = { 21 "arguments": {}, 22 } 23 24 _is_seeded = False 25 _global_seed = Sentinel 26 27 def __init__(self, **config: Dict) -> None: 28 self.providers: List["BaseProvider"] = [] 29 self.__config = dict(list(self.__config.items()) + list(config.items())) 30 self.__random = random 31 32 def add_provider(self, provider: "BaseProvider") -> None: 33 if isinstance(provider, type): 34 provider = provider(self) 35 36 self.providers.insert(0, provider) 37 38 for method_name in dir(provider): 39 # skip 'private' method 40 if method_name.startswith("_"): 41 continue 42 43 faker_function = getattr(provider, method_name) 44 45 if callable(faker_function): 46 # add all faker method to generator 47 self.set_formatter(method_name, faker_function) 48 49 def provider(self, name: str) -> Optional["BaseProvider"]: 50 try: 51 lst = [p for p in self.get_providers() if hasattr(p, "__provider__") and p.__provider__ == name.lower()] 52 return lst[0] 53 except IndexError: 54 return None 55 56 def get_providers(self) -> List["BaseProvider"]: 57 """Returns added providers.""" 58 return self.providers 59 60 @property 61 def random(self) -> random_module.Random: 62 return self.__random 63 64 @random.setter 65 def random(self, value: random_module.Random) -> None: 66 self.__random = value 67 68 def seed_instance(self, seed: Optional[SeedType] = None) -> "Generator": 69 """Calls random.seed""" 70 if self.__random == random: 71 # create per-instance random obj when first time seed_instance() is 72 # called 73 self.__random = random_module.Random() 74 self.__random.seed(seed) 75 self._is_seeded = True 76 return self 77 78 @classmethod 79 def seed(cls, seed: Optional[SeedType] = None) -> None: 80 random.seed(seed) 81 cls._global_seed = seed 82 cls._is_seeded = True 83 84 def format(self, formatter: str, *args: Any, **kwargs: Any) -> str: 85 """ 86 This is a secure way to make a fake from another Provider. 87 """ 88 return self.get_formatter(formatter)(*args, **kwargs) 89 90 def get_formatter(self, formatter: str) -> Callable: 91 try: 92 return getattr(self, formatter) 93 except AttributeError: 94 if "locale" in self.__config: 95 msg = f'Unknown formatter {formatter!r} with locale {self.__config["locale"]!r}' 96 else: 97 raise AttributeError(f"Unknown formatter {formatter!r}") 98 raise AttributeError(msg) 99 100 def set_formatter(self, name: str, method: Callable) -> None: 101 """ 102 This method adds a provider method to generator. 103 Override this method to add some decoration or logging stuff. 104 """ 105 setattr(self, name, method) 106 107 def set_arguments(self, group: str, argument: str, value: Optional[Any] = None) -> None: 108 """ 109 Creates an argument group, with an individual argument or a dictionary 110 of arguments. The argument groups is used to apply arguments to tokens, 111 when using the generator.parse() method. To further manage argument 112 groups, use get_arguments() and del_arguments() methods. 113 114 generator.set_arguments('small', 'max_value', 10) 115 generator.set_arguments('small', {'min_value': 5, 'max_value': 10}) 116 """ 117 if group not in self.__config["arguments"]: 118 self.__config["arguments"][group] = {} 119 120 if isinstance(argument, dict): 121 self.__config["arguments"][group] = argument 122 elif not isinstance(argument, str): 123 raise ValueError("Arguments must be either a string or dictionary") 124 else: 125 self.__config["arguments"][group][argument] = value 126 127 def get_arguments(self, group: str, argument: Optional[str] = None) -> Any: 128 """ 129 Get the value of an argument configured within a argument group, or 130 the entire group as a dictionary. Used in conjunction with the 131 set_arguments() method. 132 133 generator.get_arguments('small', 'max_value') 134 generator.get_arguments('small') 135 """ 136 if group in self.__config["arguments"] and argument: 137 result = self.__config["arguments"][group].get(argument) 138 else: 139 result = self.__config["arguments"].get(group) 140 141 return result 142 143 def del_arguments(self, group: str, argument: Optional[str] = None) -> Any: 144 """ 145 Delete an argument from an argument group or the entire argument group. 146 Used in conjunction with the set_arguments() method. 147 148 generator.del_arguments('small') 149 generator.del_arguments('small', 'max_value') 150 """ 151 if group in self.__config["arguments"]: 152 if argument: 153 result = self.__config["arguments"][group].pop(argument) 154 else: 155 result = self.__config["arguments"].pop(group) 156 else: 157 result = None 158 159 return result 160 161 def parse(self, text: str) -> str: 162 """ 163 Replaces tokens like '{{ tokenName }}' or '{{tokenName}}' in a string with 164 the result from the token method call. Arguments can be parsed by using an 165 argument group. For more information on the use of argument groups, please 166 refer to the set_arguments() method. 167 168 Example: 169 170 generator.set_arguments('red_rgb', {'hue': 'red', 'color_format': 'rgb'}) 171 generator.set_arguments('small', 'max_value', 10) 172 173 generator.parse('{{ color:red_rgb }} - {{ pyint:small }}') 174 """ 175 return _re_token.sub(self.__format_token, text) 176 177 def __format_token(self, matches): 178 formatter, argument_group = list(matches.groups()) 179 argument_group = argument_group.lstrip(":").strip() if argument_group else "" 180 181 if argument_group: 182 try: 183 arguments = self.__config["arguments"][argument_group] 184 except KeyError: 185 raise AttributeError(f"Unknown argument group {argument_group!r}") 186 187 formatted = str(self.format(formatter, **arguments)) 188 else: 189 formatted = str(self.format(formatter)) 190 191 return "".join(formatted) 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/generator.py b/faker/generator.py --- a/faker/generator.py +++ b/faker/generator.py @@ -1,7 +1,7 @@ import random as random_module import re -from typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Optional +from typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Optional, Type, Union from .typing import SeedType @@ -29,7 +29,7 @@ self.__config = dict(list(self.__config.items()) + list(config.items())) self.__random = random - def add_provider(self, provider: "BaseProvider") -> None: + def add_provider(self, provider: Union["BaseProvider", Type["BaseProvider"]]) -> None: if isinstance(provider, type): provider = provider(self)
{"golden_diff": "diff --git a/faker/generator.py b/faker/generator.py\n--- a/faker/generator.py\n+++ b/faker/generator.py\n@@ -1,7 +1,7 @@\n import random as random_module\n import re\n \n-from typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Optional\n+from typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Optional, Type, Union\n \n from .typing import SeedType\n \n@@ -29,7 +29,7 @@\n self.__config = dict(list(self.__config.items()) + list(config.items()))\n self.__random = random\n \n- def add_provider(self, provider: \"BaseProvider\") -> None:\n+ def add_provider(self, provider: Union[\"BaseProvider\", Type[\"BaseProvider\"]]) -> None:\n if isinstance(provider, type):\n provider = provider(self)\n", "issue": "Incorrect type annotation on Generator.add_provider\n* Faker version: 18.3.2\r\n* OS: MacOS\r\n\r\nThe type annotation on `Generator.add_provider` is needlessly restrictive - the implementation clearly supports passing either a type or an instance, but the type annotation only allows instances:\r\n\r\n```python\r\n # faker/generator.py\r\n def add_provider(self, provider: \"BaseProvider\") -> None:\r\n if isinstance(provider, type):\r\n provider = provider(self)\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. Write code that adds a provider class to a `Generator`\r\n2. Run mypy\r\n\r\n```python\r\nclass MyCustomProvider(BaseProvider):\r\n pass\r\n\r\nfake = Factory.create(\"en_US\")\r\nfake.add_provider(MyCustomProvider)\r\n```\r\n\r\n### Expected behavior\r\n\r\n`mypy` check should pass\r\n\r\n### Actual behavior\r\n\r\n`mypy` produces an error:\r\n\r\n```\r\nerror: Argument 1 to \"add_provider\" of \"Generator\" has incompatible type \"Type[MyCustomProvider]\"; expected \"BaseProvider\" [arg-type]\r\n```\r\n\nIncorrect type annotation on Generator.add_provider\n* Faker version: 18.3.2\r\n* OS: MacOS\r\n\r\nThe type annotation on `Generator.add_provider` is needlessly restrictive - the implementation clearly supports passing either a type or an instance, but the type annotation only allows instances:\r\n\r\n```python\r\n # faker/generator.py\r\n def add_provider(self, provider: \"BaseProvider\") -> None:\r\n if isinstance(provider, type):\r\n provider = provider(self)\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. Write code that adds a provider class to a `Generator`\r\n2. Run mypy\r\n\r\n```python\r\nclass MyCustomProvider(BaseProvider):\r\n pass\r\n\r\nfake = Factory.create(\"en_US\")\r\nfake.add_provider(MyCustomProvider)\r\n```\r\n\r\n### Expected behavior\r\n\r\n`mypy` check should pass\r\n\r\n### Actual behavior\r\n\r\n`mypy` produces an error:\r\n\r\n```\r\nerror: Argument 1 to \"add_provider\" of \"Generator\" has incompatible type \"Type[MyCustomProvider]\"; expected \"BaseProvider\" [arg-type]\r\n```\r\n\n", "before_files": [{"content": "import random as random_module\nimport re\n\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Optional\n\nfrom .typing import SeedType\n\nif TYPE_CHECKING:\n from .providers import BaseProvider\n\n_re_token = re.compile(r\"\\{\\{\\s*(\\w+)(:\\s*\\w+?)?\\s*\\}\\}\")\nrandom = random_module.Random()\nmod_random = random # compat with name released in 0.8\n\n\nSentinel = object()\n\n\nclass Generator:\n __config: Dict[str, Dict[Hashable, Any]] = {\n \"arguments\": {},\n }\n\n _is_seeded = False\n _global_seed = Sentinel\n\n def __init__(self, **config: Dict) -> None:\n self.providers: List[\"BaseProvider\"] = []\n self.__config = dict(list(self.__config.items()) + list(config.items()))\n self.__random = random\n\n def add_provider(self, provider: \"BaseProvider\") -> None:\n if isinstance(provider, type):\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith(\"_\"):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if callable(faker_function):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name: str) -> Optional[\"BaseProvider\"]:\n try:\n lst = [p for p in self.get_providers() if hasattr(p, \"__provider__\") and p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self) -> List[\"BaseProvider\"]:\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self) -> random_module.Random:\n return self.__random\n\n @random.setter\n def random(self, value: random_module.Random) -> None:\n self.__random = value\n\n def seed_instance(self, seed: Optional[SeedType] = None) -> \"Generator\":\n \"\"\"Calls random.seed\"\"\"\n if self.__random == random:\n # create per-instance random obj when first time seed_instance() is\n # called\n self.__random = random_module.Random()\n self.__random.seed(seed)\n self._is_seeded = True\n return self\n\n @classmethod\n def seed(cls, seed: Optional[SeedType] = None) -> None:\n random.seed(seed)\n cls._global_seed = seed\n cls._is_seeded = True\n\n def format(self, formatter: str, *args: Any, **kwargs: Any) -> str:\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter: str) -> Callable:\n try:\n return getattr(self, formatter)\n except AttributeError:\n if \"locale\" in self.__config:\n msg = f'Unknown formatter {formatter!r} with locale {self.__config[\"locale\"]!r}'\n else:\n raise AttributeError(f\"Unknown formatter {formatter!r}\")\n raise AttributeError(msg)\n\n def set_formatter(self, name: str, method: Callable) -> None:\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def set_arguments(self, group: str, argument: str, value: Optional[Any] = None) -> None:\n \"\"\"\n Creates an argument group, with an individual argument or a dictionary\n of arguments. The argument groups is used to apply arguments to tokens,\n when using the generator.parse() method. To further manage argument\n groups, use get_arguments() and del_arguments() methods.\n\n generator.set_arguments('small', 'max_value', 10)\n generator.set_arguments('small', {'min_value': 5, 'max_value': 10})\n \"\"\"\n if group not in self.__config[\"arguments\"]:\n self.__config[\"arguments\"][group] = {}\n\n if isinstance(argument, dict):\n self.__config[\"arguments\"][group] = argument\n elif not isinstance(argument, str):\n raise ValueError(\"Arguments must be either a string or dictionary\")\n else:\n self.__config[\"arguments\"][group][argument] = value\n\n def get_arguments(self, group: str, argument: Optional[str] = None) -> Any:\n \"\"\"\n Get the value of an argument configured within a argument group, or\n the entire group as a dictionary. Used in conjunction with the\n set_arguments() method.\n\n generator.get_arguments('small', 'max_value')\n generator.get_arguments('small')\n \"\"\"\n if group in self.__config[\"arguments\"] and argument:\n result = self.__config[\"arguments\"][group].get(argument)\n else:\n result = self.__config[\"arguments\"].get(group)\n\n return result\n\n def del_arguments(self, group: str, argument: Optional[str] = None) -> Any:\n \"\"\"\n Delete an argument from an argument group or the entire argument group.\n Used in conjunction with the set_arguments() method.\n\n generator.del_arguments('small')\n generator.del_arguments('small', 'max_value')\n \"\"\"\n if group in self.__config[\"arguments\"]:\n if argument:\n result = self.__config[\"arguments\"][group].pop(argument)\n else:\n result = self.__config[\"arguments\"].pop(group)\n else:\n result = None\n\n return result\n\n def parse(self, text: str) -> str:\n \"\"\"\n Replaces tokens like '{{ tokenName }}' or '{{tokenName}}' in a string with\n the result from the token method call. Arguments can be parsed by using an\n argument group. For more information on the use of argument groups, please\n refer to the set_arguments() method.\n\n Example:\n\n generator.set_arguments('red_rgb', {'hue': 'red', 'color_format': 'rgb'})\n generator.set_arguments('small', 'max_value', 10)\n\n generator.parse('{{ color:red_rgb }} - {{ pyint:small }}')\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter, argument_group = list(matches.groups())\n argument_group = argument_group.lstrip(\":\").strip() if argument_group else \"\"\n\n if argument_group:\n try:\n arguments = self.__config[\"arguments\"][argument_group]\n except KeyError:\n raise AttributeError(f\"Unknown argument group {argument_group!r}\")\n\n formatted = str(self.format(formatter, **arguments))\n else:\n formatted = str(self.format(formatter))\n\n return \"\".join(formatted)\n", "path": "faker/generator.py"}], "after_files": [{"content": "import random as random_module\nimport re\n\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Optional, Type, Union\n\nfrom .typing import SeedType\n\nif TYPE_CHECKING:\n from .providers import BaseProvider\n\n_re_token = re.compile(r\"\\{\\{\\s*(\\w+)(:\\s*\\w+?)?\\s*\\}\\}\")\nrandom = random_module.Random()\nmod_random = random # compat with name released in 0.8\n\n\nSentinel = object()\n\n\nclass Generator:\n __config: Dict[str, Dict[Hashable, Any]] = {\n \"arguments\": {},\n }\n\n _is_seeded = False\n _global_seed = Sentinel\n\n def __init__(self, **config: Dict) -> None:\n self.providers: List[\"BaseProvider\"] = []\n self.__config = dict(list(self.__config.items()) + list(config.items()))\n self.__random = random\n\n def add_provider(self, provider: Union[\"BaseProvider\", Type[\"BaseProvider\"]]) -> None:\n if isinstance(provider, type):\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith(\"_\"):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if callable(faker_function):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name: str) -> Optional[\"BaseProvider\"]:\n try:\n lst = [p for p in self.get_providers() if hasattr(p, \"__provider__\") and p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self) -> List[\"BaseProvider\"]:\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self) -> random_module.Random:\n return self.__random\n\n @random.setter\n def random(self, value: random_module.Random) -> None:\n self.__random = value\n\n def seed_instance(self, seed: Optional[SeedType] = None) -> \"Generator\":\n \"\"\"Calls random.seed\"\"\"\n if self.__random == random:\n # create per-instance random obj when first time seed_instance() is\n # called\n self.__random = random_module.Random()\n self.__random.seed(seed)\n self._is_seeded = True\n return self\n\n @classmethod\n def seed(cls, seed: Optional[SeedType] = None) -> None:\n random.seed(seed)\n cls._global_seed = seed\n cls._is_seeded = True\n\n def format(self, formatter: str, *args: Any, **kwargs: Any) -> str:\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter: str) -> Callable:\n try:\n return getattr(self, formatter)\n except AttributeError:\n if \"locale\" in self.__config:\n msg = f'Unknown formatter {formatter!r} with locale {self.__config[\"locale\"]!r}'\n else:\n raise AttributeError(f\"Unknown formatter {formatter!r}\")\n raise AttributeError(msg)\n\n def set_formatter(self, name: str, method: Callable) -> None:\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def set_arguments(self, group: str, argument: str, value: Optional[Any] = None) -> None:\n \"\"\"\n Creates an argument group, with an individual argument or a dictionary\n of arguments. The argument groups is used to apply arguments to tokens,\n when using the generator.parse() method. To further manage argument\n groups, use get_arguments() and del_arguments() methods.\n\n generator.set_arguments('small', 'max_value', 10)\n generator.set_arguments('small', {'min_value': 5, 'max_value': 10})\n \"\"\"\n if group not in self.__config[\"arguments\"]:\n self.__config[\"arguments\"][group] = {}\n\n if isinstance(argument, dict):\n self.__config[\"arguments\"][group] = argument\n elif not isinstance(argument, str):\n raise ValueError(\"Arguments must be either a string or dictionary\")\n else:\n self.__config[\"arguments\"][group][argument] = value\n\n def get_arguments(self, group: str, argument: Optional[str] = None) -> Any:\n \"\"\"\n Get the value of an argument configured within a argument group, or\n the entire group as a dictionary. Used in conjunction with the\n set_arguments() method.\n\n generator.get_arguments('small', 'max_value')\n generator.get_arguments('small')\n \"\"\"\n if group in self.__config[\"arguments\"] and argument:\n result = self.__config[\"arguments\"][group].get(argument)\n else:\n result = self.__config[\"arguments\"].get(group)\n\n return result\n\n def del_arguments(self, group: str, argument: Optional[str] = None) -> Any:\n \"\"\"\n Delete an argument from an argument group or the entire argument group.\n Used in conjunction with the set_arguments() method.\n\n generator.del_arguments('small')\n generator.del_arguments('small', 'max_value')\n \"\"\"\n if group in self.__config[\"arguments\"]:\n if argument:\n result = self.__config[\"arguments\"][group].pop(argument)\n else:\n result = self.__config[\"arguments\"].pop(group)\n else:\n result = None\n\n return result\n\n def parse(self, text: str) -> str:\n \"\"\"\n Replaces tokens like '{{ tokenName }}' or '{{tokenName}}' in a string with\n the result from the token method call. Arguments can be parsed by using an\n argument group. For more information on the use of argument groups, please\n refer to the set_arguments() method.\n\n Example:\n\n generator.set_arguments('red_rgb', {'hue': 'red', 'color_format': 'rgb'})\n generator.set_arguments('small', 'max_value', 10)\n\n generator.parse('{{ color:red_rgb }} - {{ pyint:small }}')\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter, argument_group = list(matches.groups())\n argument_group = argument_group.lstrip(\":\").strip() if argument_group else \"\"\n\n if argument_group:\n try:\n arguments = self.__config[\"arguments\"][argument_group]\n except KeyError:\n raise AttributeError(f\"Unknown argument group {argument_group!r}\")\n\n formatted = str(self.format(formatter, **arguments))\n else:\n formatted = str(self.format(formatter))\n\n return \"\".join(formatted)\n", "path": "faker/generator.py"}]}
2,634
191
gh_patches_debug_12472
rasdani/github-patches
git_diff
azavea__raster-vision-506
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unit Tests: `command` Directory --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rastervision/utils/files.py` Content: ``` 1 import os 2 import shutil 3 from threading import Timer 4 import logging 5 6 from google.protobuf import json_format 7 8 from rastervision.filesystem.filesystem import FileSystem 9 from rastervision.filesystem.filesystem import ProtobufParseException 10 from rastervision.filesystem.local_filesystem import make_dir 11 12 log = logging.getLogger(__name__) 13 14 15 def get_local_path(uri, download_dir, fs=None): 16 """Convert a URI into a corresponding local path. 17 18 If a uri is local, return it. If it's remote, we generate a path for it 19 within download_dir. For an S3 path of form s3://<bucket>/<key>, the path 20 is <download_dir>/s3/<bucket>/<key>. 21 22 Args: 23 uri: (string) URI of file 24 download_dir: (string) path to directory 25 fs: Optional FileSystem to use 26 27 Returns: 28 (string) a local path 29 """ 30 if uri is None: 31 return None 32 33 if not fs: 34 fs = FileSystem.get_file_system(uri, 'r') 35 path = fs.local_path(uri, download_dir) 36 37 return path 38 39 40 def sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None): 41 """Synchronize a local to a local or remote directory. 42 43 Transfers files from source to destination directories so that the 44 destination has all the source files. If delete is True, also delete 45 files in the destination to match those in the source directory. 46 47 Args: 48 src_dir_uri: (string) URI of local source directory 49 dest_dir_uri: (string) URI of destination directory 50 delete: (bool) 51 fs: Optional FileSystem to use for destination 52 """ 53 if not fs: 54 fs = FileSystem.get_file_system(dest_dir_uri, 'w') 55 fs.sync_to_dir(src_dir_uri, dest_dir_uri, delete=delete) 56 57 58 def sync_from_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None): 59 """Synchronize a local or remote directory to a local directory. 60 61 Transfers files from source to destination directories so that the 62 destination has all the source files. If delete is True, also delete 63 files in the destination to match those in the source directory. 64 65 Args: 66 src_dir_uri: (string) URI of source directory 67 dest_dir_uri: (string) URI of local destination directory 68 delete: (bool) 69 fs: Optional FileSystem to use 70 """ 71 if not fs: 72 fs = FileSystem.get_file_system(src_dir_uri, 'r') 73 fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete) 74 75 76 def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, fs=None): 77 """Start syncing a directory on a schedule. 78 79 Calls sync_to_dir on a schedule. 80 81 Args: 82 src_dir_uri: (string) Path of the local source directory 83 dest_dir_uri: (string) URI of destination directory 84 sync_interval: (int) period in seconds for syncing 85 fs: Optional FileSystem to use 86 """ 87 88 def _sync_dir(): 89 log.info('Syncing {} to {}...'.format(src_dir_uri, dest_dir_uri)) 90 sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=fs) 91 92 class SyncThread: 93 def __init__(self): 94 thread = Timer(sync_interval, _sync_dir) 95 thread.daemon = True 96 thread.start() 97 self.thread = thread 98 99 def __enter__(self): 100 return self.thread 101 102 def __exit__(self, type, value, traceback): 103 self.thread.cancel() 104 105 return SyncThread() 106 107 108 def download_if_needed(uri, download_dir, fs=None): 109 """Download a file into a directory if it's remote. 110 111 If uri is local, there is no need to download the file. 112 113 Args: 114 uri: (string) URI of file 115 download_dir: (string) local directory to download file into 116 fs: Optional FileSystem to use. 117 118 Returns: 119 (string) path to local file 120 121 Raises: 122 NotReadableError if URI cannot be read from 123 """ 124 if uri is None: 125 return None 126 127 if not fs: 128 fs = FileSystem.get_file_system(uri, 'r') 129 130 path = get_local_path(uri, download_dir, fs=fs) 131 make_dir(path, use_dirname=True) 132 133 if path != uri: 134 log.info('Downloading {} to {}'.format(uri, path)) 135 136 fs.copy_from(uri, path) 137 138 return path 139 140 141 def download_or_copy(uri, target_dir, fs=None): 142 """Downloads or copies a file to a directory 143 144 Args: 145 uri: (string) URI of file 146 target_dir: (string) local directory to copy file to 147 fs: Optional FileSystem to use 148 """ 149 local_path = download_if_needed(uri, target_dir, fs=fs) 150 shutil.copy(local_path, target_dir) 151 return local_path 152 153 154 def file_exists(uri, fs=None): 155 if not fs: 156 fs = FileSystem.get_file_system(uri, 'r') 157 return fs.file_exists(uri) 158 159 160 def list_paths(uri, ext=None, fs=None): 161 if uri is None: 162 return None 163 164 if not fs: 165 fs = FileSystem.get_file_system(uri, 'r') 166 167 return fs.list_paths(uri, ext=ext) 168 169 170 def upload_or_copy(src_path, dst_uri, fs=None): 171 """Upload a file if the destination is remote. 172 173 If dst_uri is local, the file is copied. 174 175 Args: 176 src_path: (string) path to source file 177 dst_uri: (string) URI of destination for file 178 fs: Optional FileSystem to use 179 Raises: 180 NotWritableError if URI cannot be written to 181 """ 182 if dst_uri is None: 183 return 184 185 if not (os.path.isfile(src_path) or os.path.isdir(src_path)): 186 raise Exception('{} does not exist.'.format(src_path)) 187 188 if not src_path == dst_uri: 189 log.info('Uploading {} to {}'.format(src_path, dst_uri)) 190 191 if not fs: 192 fs = FileSystem.get_file_system(dst_uri, 'w') 193 fs.copy_to(src_path, dst_uri) 194 195 196 def file_to_str(uri, fs=None): 197 """Download contents of text file into a string. 198 199 Args: 200 uri: (string) URI of file 201 fs: Optional FileSystem to use 202 203 Returns: 204 (string) with contents of text file 205 206 Raises: 207 NotReadableError if URI cannot be read from 208 """ 209 if not fs: 210 fs = FileSystem.get_file_system(uri, 'r') 211 return fs.read_str(uri) 212 213 214 def str_to_file(content_str, uri, fs=None): 215 """Writes string to text file. 216 217 Args: 218 content_str: string to write 219 uri: (string) URI of file to write 220 fs: Optional FileSystem to use 221 222 Raise: 223 NotWritableError if file_uri cannot be written 224 """ 225 if not fs: 226 fs = FileSystem.get_file_system(uri, 'r') 227 return fs.write_str(uri, content_str) 228 229 230 def load_json_config(uri, message, fs=None): 231 """Load a JSON-formatted protobuf config file. 232 233 Args: 234 uri: (string) URI of config file 235 message: (google.protobuf.message.Message) empty protobuf message of 236 to load the config into. The type needs to match the content of 237 uri. 238 fs: Optional FileSystem to use. 239 240 Returns: 241 the same message passed as input with fields filled in from uri 242 243 Raises: 244 ProtobufParseException if uri cannot be parsed 245 """ 246 try: 247 return json_format.Parse(file_to_str(uri, fs=fs), message) 248 except json_format.ParseError as e: 249 error_msg = ('Problem parsing protobuf file {}. '.format(uri) + 250 'You might need to run scripts/compile') 251 raise ProtobufParseException(error_msg) from e 252 253 254 def save_json_config(message, uri, fs=None): 255 """Save a protobuf object to a JSON file. 256 257 Args: 258 message: (google.protobuf.message.Message) protobuf message 259 uri: (string) URI of JSON file to write message to 260 fs: Optional FileSystem to use 261 262 Raises: 263 NotWritableError if uri cannot be written 264 """ 265 json_str = json_format.MessageToJson(message) 266 str_to_file(json_str, uri, fs=fs) 267 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rastervision/utils/files.py b/rastervision/utils/files.py --- a/rastervision/utils/files.py +++ b/rastervision/utils/files.py @@ -73,7 +73,8 @@ fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete) -def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, fs=None): +def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, + fs=None): # pragma: no cover """Start syncing a directory on a schedule. Calls sync_to_dir on a schedule. @@ -157,7 +158,7 @@ return fs.file_exists(uri) -def list_paths(uri, ext=None, fs=None): +def list_paths(uri, ext='', fs=None): if uri is None: return None
{"golden_diff": "diff --git a/rastervision/utils/files.py b/rastervision/utils/files.py\n--- a/rastervision/utils/files.py\n+++ b/rastervision/utils/files.py\n@@ -73,7 +73,8 @@\n fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete)\n \n \n-def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, fs=None):\n+def start_sync(src_dir_uri, dest_dir_uri, sync_interval=600,\n+ fs=None): # pragma: no cover\n \"\"\"Start syncing a directory on a schedule.\n \n Calls sync_to_dir on a schedule.\n@@ -157,7 +158,7 @@\n return fs.file_exists(uri)\n \n \n-def list_paths(uri, ext=None, fs=None):\n+def list_paths(uri, ext='', fs=None):\n if uri is None:\n return None\n", "issue": "Unit Tests: `command` Directory\n\n", "before_files": [{"content": "import os\nimport shutil\nfrom threading import Timer\nimport logging\n\nfrom google.protobuf import json_format\n\nfrom rastervision.filesystem.filesystem import FileSystem\nfrom rastervision.filesystem.filesystem import ProtobufParseException\nfrom rastervision.filesystem.local_filesystem import make_dir\n\nlog = logging.getLogger(__name__)\n\n\ndef get_local_path(uri, download_dir, fs=None):\n \"\"\"Convert a URI into a corresponding local path.\n\n If a uri is local, return it. If it's remote, we generate a path for it\n within download_dir. For an S3 path of form s3://<bucket>/<key>, the path\n is <download_dir>/s3/<bucket>/<key>.\n\n Args:\n uri: (string) URI of file\n download_dir: (string) path to directory\n fs: Optional FileSystem to use\n\n Returns:\n (string) a local path\n \"\"\"\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n path = fs.local_path(uri, download_dir)\n\n return path\n\n\ndef sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None):\n \"\"\"Synchronize a local to a local or remote directory.\n\n Transfers files from source to destination directories so that the\n destination has all the source files. If delete is True, also delete\n files in the destination to match those in the source directory.\n\n Args:\n src_dir_uri: (string) URI of local source directory\n dest_dir_uri: (string) URI of destination directory\n delete: (bool)\n fs: Optional FileSystem to use for destination\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(dest_dir_uri, 'w')\n fs.sync_to_dir(src_dir_uri, dest_dir_uri, delete=delete)\n\n\ndef sync_from_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None):\n \"\"\"Synchronize a local or remote directory to a local directory.\n\n Transfers files from source to destination directories so that the\n destination has all the source files. If delete is True, also delete\n files in the destination to match those in the source directory.\n\n Args:\n src_dir_uri: (string) URI of source directory\n dest_dir_uri: (string) URI of local destination directory\n delete: (bool)\n fs: Optional FileSystem to use\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(src_dir_uri, 'r')\n fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete)\n\n\ndef start_sync(src_dir_uri, dest_dir_uri, sync_interval=600, fs=None):\n \"\"\"Start syncing a directory on a schedule.\n\n Calls sync_to_dir on a schedule.\n\n Args:\n src_dir_uri: (string) Path of the local source directory\n dest_dir_uri: (string) URI of destination directory\n sync_interval: (int) period in seconds for syncing\n fs: Optional FileSystem to use\n \"\"\"\n\n def _sync_dir():\n log.info('Syncing {} to {}...'.format(src_dir_uri, dest_dir_uri))\n sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=fs)\n\n class SyncThread:\n def __init__(self):\n thread = Timer(sync_interval, _sync_dir)\n thread.daemon = True\n thread.start()\n self.thread = thread\n\n def __enter__(self):\n return self.thread\n\n def __exit__(self, type, value, traceback):\n self.thread.cancel()\n\n return SyncThread()\n\n\ndef download_if_needed(uri, download_dir, fs=None):\n \"\"\"Download a file into a directory if it's remote.\n\n If uri is local, there is no need to download the file.\n\n Args:\n uri: (string) URI of file\n download_dir: (string) local directory to download file into\n fs: Optional FileSystem to use.\n\n Returns:\n (string) path to local file\n\n Raises:\n NotReadableError if URI cannot be read from\n \"\"\"\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n\n path = get_local_path(uri, download_dir, fs=fs)\n make_dir(path, use_dirname=True)\n\n if path != uri:\n log.info('Downloading {} to {}'.format(uri, path))\n\n fs.copy_from(uri, path)\n\n return path\n\n\ndef download_or_copy(uri, target_dir, fs=None):\n \"\"\"Downloads or copies a file to a directory\n\n Args:\n uri: (string) URI of file\n target_dir: (string) local directory to copy file to\n fs: Optional FileSystem to use\n \"\"\"\n local_path = download_if_needed(uri, target_dir, fs=fs)\n shutil.copy(local_path, target_dir)\n return local_path\n\n\ndef file_exists(uri, fs=None):\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.file_exists(uri)\n\n\ndef list_paths(uri, ext=None, fs=None):\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n\n return fs.list_paths(uri, ext=ext)\n\n\ndef upload_or_copy(src_path, dst_uri, fs=None):\n \"\"\"Upload a file if the destination is remote.\n\n If dst_uri is local, the file is copied.\n\n Args:\n src_path: (string) path to source file\n dst_uri: (string) URI of destination for file\n fs: Optional FileSystem to use\n Raises:\n NotWritableError if URI cannot be written to\n \"\"\"\n if dst_uri is None:\n return\n\n if not (os.path.isfile(src_path) or os.path.isdir(src_path)):\n raise Exception('{} does not exist.'.format(src_path))\n\n if not src_path == dst_uri:\n log.info('Uploading {} to {}'.format(src_path, dst_uri))\n\n if not fs:\n fs = FileSystem.get_file_system(dst_uri, 'w')\n fs.copy_to(src_path, dst_uri)\n\n\ndef file_to_str(uri, fs=None):\n \"\"\"Download contents of text file into a string.\n\n Args:\n uri: (string) URI of file\n fs: Optional FileSystem to use\n\n Returns:\n (string) with contents of text file\n\n Raises:\n NotReadableError if URI cannot be read from\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.read_str(uri)\n\n\ndef str_to_file(content_str, uri, fs=None):\n \"\"\"Writes string to text file.\n\n Args:\n content_str: string to write\n uri: (string) URI of file to write\n fs: Optional FileSystem to use\n\n Raise:\n NotWritableError if file_uri cannot be written\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.write_str(uri, content_str)\n\n\ndef load_json_config(uri, message, fs=None):\n \"\"\"Load a JSON-formatted protobuf config file.\n\n Args:\n uri: (string) URI of config file\n message: (google.protobuf.message.Message) empty protobuf message of\n to load the config into. The type needs to match the content of\n uri.\n fs: Optional FileSystem to use.\n\n Returns:\n the same message passed as input with fields filled in from uri\n\n Raises:\n ProtobufParseException if uri cannot be parsed\n \"\"\"\n try:\n return json_format.Parse(file_to_str(uri, fs=fs), message)\n except json_format.ParseError as e:\n error_msg = ('Problem parsing protobuf file {}. '.format(uri) +\n 'You might need to run scripts/compile')\n raise ProtobufParseException(error_msg) from e\n\n\ndef save_json_config(message, uri, fs=None):\n \"\"\"Save a protobuf object to a JSON file.\n\n Args:\n message: (google.protobuf.message.Message) protobuf message\n uri: (string) URI of JSON file to write message to\n fs: Optional FileSystem to use\n\n Raises:\n NotWritableError if uri cannot be written\n \"\"\"\n json_str = json_format.MessageToJson(message)\n str_to_file(json_str, uri, fs=fs)\n", "path": "rastervision/utils/files.py"}], "after_files": [{"content": "import os\nimport shutil\nfrom threading import Timer\nimport logging\n\nfrom google.protobuf import json_format\n\nfrom rastervision.filesystem.filesystem import FileSystem\nfrom rastervision.filesystem.filesystem import ProtobufParseException\nfrom rastervision.filesystem.local_filesystem import make_dir\n\nlog = logging.getLogger(__name__)\n\n\ndef get_local_path(uri, download_dir, fs=None):\n \"\"\"Convert a URI into a corresponding local path.\n\n If a uri is local, return it. If it's remote, we generate a path for it\n within download_dir. For an S3 path of form s3://<bucket>/<key>, the path\n is <download_dir>/s3/<bucket>/<key>.\n\n Args:\n uri: (string) URI of file\n download_dir: (string) path to directory\n fs: Optional FileSystem to use\n\n Returns:\n (string) a local path\n \"\"\"\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n path = fs.local_path(uri, download_dir)\n\n return path\n\n\ndef sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None):\n \"\"\"Synchronize a local to a local or remote directory.\n\n Transfers files from source to destination directories so that the\n destination has all the source files. If delete is True, also delete\n files in the destination to match those in the source directory.\n\n Args:\n src_dir_uri: (string) URI of local source directory\n dest_dir_uri: (string) URI of destination directory\n delete: (bool)\n fs: Optional FileSystem to use for destination\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(dest_dir_uri, 'w')\n fs.sync_to_dir(src_dir_uri, dest_dir_uri, delete=delete)\n\n\ndef sync_from_dir(src_dir_uri, dest_dir_uri, delete=False, fs=None):\n \"\"\"Synchronize a local or remote directory to a local directory.\n\n Transfers files from source to destination directories so that the\n destination has all the source files. If delete is True, also delete\n files in the destination to match those in the source directory.\n\n Args:\n src_dir_uri: (string) URI of source directory\n dest_dir_uri: (string) URI of local destination directory\n delete: (bool)\n fs: Optional FileSystem to use\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(src_dir_uri, 'r')\n fs.sync_from_dir(src_dir_uri, dest_dir_uri, delete=delete)\n\n\ndef start_sync(src_dir_uri, dest_dir_uri, sync_interval=600,\n fs=None): # pragma: no cover\n \"\"\"Start syncing a directory on a schedule.\n\n Calls sync_to_dir on a schedule.\n\n Args:\n src_dir_uri: (string) Path of the local source directory\n dest_dir_uri: (string) URI of destination directory\n sync_interval: (int) period in seconds for syncing\n fs: Optional FileSystem to use\n \"\"\"\n\n def _sync_dir():\n log.info('Syncing {} to {}...'.format(src_dir_uri, dest_dir_uri))\n sync_to_dir(src_dir_uri, dest_dir_uri, delete=False, fs=fs)\n\n class SyncThread:\n def __init__(self):\n thread = Timer(sync_interval, _sync_dir)\n thread.daemon = True\n thread.start()\n self.thread = thread\n\n def __enter__(self):\n return self.thread\n\n def __exit__(self, type, value, traceback):\n self.thread.cancel()\n\n return SyncThread()\n\n\ndef download_if_needed(uri, download_dir, fs=None):\n \"\"\"Download a file into a directory if it's remote.\n\n If uri is local, there is no need to download the file.\n\n Args:\n uri: (string) URI of file\n download_dir: (string) local directory to download file into\n fs: Optional FileSystem to use.\n\n Returns:\n (string) path to local file\n\n Raises:\n NotReadableError if URI cannot be read from\n \"\"\"\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n\n path = get_local_path(uri, download_dir, fs=fs)\n make_dir(path, use_dirname=True)\n\n if path != uri:\n log.info('Downloading {} to {}'.format(uri, path))\n\n fs.copy_from(uri, path)\n\n return path\n\n\ndef download_or_copy(uri, target_dir, fs=None):\n \"\"\"Downloads or copies a file to a directory\n\n Args:\n uri: (string) URI of file\n target_dir: (string) local directory to copy file to\n fs: Optional FileSystem to use\n \"\"\"\n local_path = download_if_needed(uri, target_dir, fs=fs)\n shutil.copy(local_path, target_dir)\n return local_path\n\n\ndef file_exists(uri, fs=None):\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.file_exists(uri)\n\n\ndef list_paths(uri, ext='', fs=None):\n if uri is None:\n return None\n\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n\n return fs.list_paths(uri, ext=ext)\n\n\ndef upload_or_copy(src_path, dst_uri, fs=None):\n \"\"\"Upload a file if the destination is remote.\n\n If dst_uri is local, the file is copied.\n\n Args:\n src_path: (string) path to source file\n dst_uri: (string) URI of destination for file\n fs: Optional FileSystem to use\n Raises:\n NotWritableError if URI cannot be written to\n \"\"\"\n if dst_uri is None:\n return\n\n if not (os.path.isfile(src_path) or os.path.isdir(src_path)):\n raise Exception('{} does not exist.'.format(src_path))\n\n if not src_path == dst_uri:\n log.info('Uploading {} to {}'.format(src_path, dst_uri))\n\n if not fs:\n fs = FileSystem.get_file_system(dst_uri, 'w')\n fs.copy_to(src_path, dst_uri)\n\n\ndef file_to_str(uri, fs=None):\n \"\"\"Download contents of text file into a string.\n\n Args:\n uri: (string) URI of file\n fs: Optional FileSystem to use\n\n Returns:\n (string) with contents of text file\n\n Raises:\n NotReadableError if URI cannot be read from\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.read_str(uri)\n\n\ndef str_to_file(content_str, uri, fs=None):\n \"\"\"Writes string to text file.\n\n Args:\n content_str: string to write\n uri: (string) URI of file to write\n fs: Optional FileSystem to use\n\n Raise:\n NotWritableError if file_uri cannot be written\n \"\"\"\n if not fs:\n fs = FileSystem.get_file_system(uri, 'r')\n return fs.write_str(uri, content_str)\n\n\ndef load_json_config(uri, message, fs=None):\n \"\"\"Load a JSON-formatted protobuf config file.\n\n Args:\n uri: (string) URI of config file\n message: (google.protobuf.message.Message) empty protobuf message of\n to load the config into. The type needs to match the content of\n uri.\n fs: Optional FileSystem to use.\n\n Returns:\n the same message passed as input with fields filled in from uri\n\n Raises:\n ProtobufParseException if uri cannot be parsed\n \"\"\"\n try:\n return json_format.Parse(file_to_str(uri, fs=fs), message)\n except json_format.ParseError as e:\n error_msg = ('Problem parsing protobuf file {}. '.format(uri) +\n 'You might need to run scripts/compile')\n raise ProtobufParseException(error_msg) from e\n\n\ndef save_json_config(message, uri, fs=None):\n \"\"\"Save a protobuf object to a JSON file.\n\n Args:\n message: (google.protobuf.message.Message) protobuf message\n uri: (string) URI of JSON file to write message to\n fs: Optional FileSystem to use\n\n Raises:\n NotWritableError if uri cannot be written\n \"\"\"\n json_str = json_format.MessageToJson(message)\n str_to_file(json_str, uri, fs=fs)\n", "path": "rastervision/utils/files.py"}]}
2,795
196
gh_patches_debug_19300
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-16427
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Ignore subdomains on reddit URLs ### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.09*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. - [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.09** ### Before submitting an *issue* make sure you have: - [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections - [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones - [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser ### What is the purpose of your *issue*? - [ ] Bug report (encountered problems with youtube-dl) - [x] Site support request (request for adding support for a new site) - [ ] Feature request (request for a new functionality) - [ ] Question - [ ] Other --- ### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**): - Single video: https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/ Note that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights. --- ### Description of your *issue*, suggested solution and other information Various subs use dm or nm subdomain which apply different css, there's also np subdomain which does the same. reddit also has a massive number of [2 alphabet subdomains to serve the site in different languages](https://np.reddit.com/r/redditdev/comments/2dh18g/is_there_a_list_of_redditcom_subdomains_not/). youtube-dl already supports www and old, but it's not possible to add support for each and every one of them so it'd be better to ignore it altogether since it doesn't affect reddit detection or .json API availability. I'll submit a PR if this solution is acceptable. Ignore subdomains on reddit URLs ### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.09*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. - [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.09** ### Before submitting an *issue* make sure you have: - [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections - [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones - [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser ### What is the purpose of your *issue*? - [ ] Bug report (encountered problems with youtube-dl) - [x] Site support request (request for adding support for a new site) - [ ] Feature request (request for a new functionality) - [ ] Question - [ ] Other --- ### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**): - Single video: https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/ Note that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights. --- ### Description of your *issue*, suggested solution and other information Various subs use dm or nm subdomain which apply different css, there's also np subdomain which does the same. reddit also has a massive number of [2 alphabet subdomains to serve the site in different languages](https://np.reddit.com/r/redditdev/comments/2dh18g/is_there_a_list_of_redditcom_subdomains_not/). youtube-dl already supports www and old, but it's not possible to add support for each and every one of them so it'd be better to ignore it altogether since it doesn't affect reddit detection or .json API availability. I'll submit a PR if this solution is acceptable. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `youtube_dl/extractor/reddit.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import re 4 5 from .common import InfoExtractor 6 from ..utils import ( 7 ExtractorError, 8 int_or_none, 9 float_or_none, 10 ) 11 12 13 class RedditIE(InfoExtractor): 14 _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)' 15 _TEST = { 16 # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/ 17 'url': 'https://v.redd.it/zv89llsvexdz', 18 'md5': '0a070c53eba7ec4534d95a5a1259e253', 19 'info_dict': { 20 'id': 'zv89llsvexdz', 21 'ext': 'mp4', 22 'title': 'zv89llsvexdz', 23 }, 24 'params': { 25 'format': 'bestvideo', 26 }, 27 } 28 29 def _real_extract(self, url): 30 video_id = self._match_id(url) 31 32 formats = self._extract_m3u8_formats( 33 'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id, 34 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) 35 36 formats.extend(self._extract_mpd_formats( 37 'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id, 38 mpd_id='dash', fatal=False)) 39 40 self._sort_formats(formats) 41 42 return { 43 'id': video_id, 44 'title': video_id, 45 'formats': formats, 46 } 47 48 49 class RedditRIE(InfoExtractor): 50 _VALID_URL = r'(?P<url>https?://(?:(?:www|old)\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))' 51 _TESTS = [{ 52 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/', 53 'info_dict': { 54 'id': 'zv89llsvexdz', 55 'ext': 'mp4', 56 'title': 'That small heart attack.', 57 'thumbnail': r're:^https?://.*\.jpg$', 58 'timestamp': 1501941939, 59 'upload_date': '20170805', 60 'uploader': 'Antw87', 61 'like_count': int, 62 'dislike_count': int, 63 'comment_count': int, 64 'age_limit': 0, 65 }, 66 'params': { 67 'format': 'bestvideo', 68 'skip_download': True, 69 }, 70 }, { 71 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj', 72 'only_matching': True, 73 }, { 74 # imgur 75 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 76 'only_matching': True, 77 }, { 78 # imgur @ old reddit 79 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 80 'only_matching': True, 81 }, { 82 # streamable 83 'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/', 84 'only_matching': True, 85 }, { 86 # youtube 87 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/', 88 'only_matching': True, 89 }] 90 91 def _real_extract(self, url): 92 mobj = re.match(self._VALID_URL, url) 93 url, video_id = mobj.group('url', 'id') 94 95 video_id = self._match_id(url) 96 97 data = self._download_json( 98 url + '/.json', video_id)[0]['data']['children'][0]['data'] 99 100 video_url = data['url'] 101 102 # Avoid recursing into the same reddit URL 103 if 'reddit.com/' in video_url and '/%s/' % video_id in video_url: 104 raise ExtractorError('No media found', expected=True) 105 106 over_18 = data.get('over_18') 107 if over_18 is True: 108 age_limit = 18 109 elif over_18 is False: 110 age_limit = 0 111 else: 112 age_limit = None 113 114 return { 115 '_type': 'url_transparent', 116 'url': video_url, 117 'title': data.get('title'), 118 'thumbnail': data.get('thumbnail'), 119 'timestamp': float_or_none(data.get('created_utc')), 120 'uploader': data.get('author'), 121 'like_count': int_or_none(data.get('ups')), 122 'dislike_count': int_or_none(data.get('downs')), 123 'comment_count': int_or_none(data.get('num_comments')), 124 'age_limit': age_limit, 125 } 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/youtube_dl/extractor/reddit.py b/youtube_dl/extractor/reddit.py --- a/youtube_dl/extractor/reddit.py +++ b/youtube_dl/extractor/reddit.py @@ -47,7 +47,7 @@ class RedditRIE(InfoExtractor): - _VALID_URL = r'(?P<url>https?://(?:(?:www|old)\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))' + _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))' _TESTS = [{ 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/', 'info_dict': { @@ -86,6 +86,10 @@ # youtube 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/', 'only_matching': True, + }, { + # reddit video @ nm reddit + 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/', + 'only_matching': True, }] def _real_extract(self, url):
{"golden_diff": "diff --git a/youtube_dl/extractor/reddit.py b/youtube_dl/extractor/reddit.py\n--- a/youtube_dl/extractor/reddit.py\n+++ b/youtube_dl/extractor/reddit.py\n@@ -47,7 +47,7 @@\n \n \n class RedditRIE(InfoExtractor):\n- _VALID_URL = r'(?P<url>https?://(?:(?:www|old)\\.)?reddit\\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'\n+ _VALID_URL = r'(?P<url>https?://(?:[^/]+\\.)?reddit\\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'\n _TESTS = [{\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',\n 'info_dict': {\n@@ -86,6 +86,10 @@\n # youtube\n 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',\n 'only_matching': True,\n+ }, {\n+ # reddit video @ nm reddit\n+ 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',\n+ 'only_matching': True,\n }]\n \n def _real_extract(self, url):\n", "issue": "Ignore subdomains on reddit URLs\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.09*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.09**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [ ] Bug report (encountered problems with youtube-dl)\r\n- [x] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):\r\n- Single video: https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/\r\n\r\n\r\nNote that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights.\r\n\r\n---\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nVarious subs use dm or nm subdomain which apply different css, there's also np subdomain which does the same. reddit also has a massive number of [2 alphabet subdomains to serve the site in different languages](https://np.reddit.com/r/redditdev/comments/2dh18g/is_there_a_list_of_redditcom_subdomains_not/).\r\n\r\nyoutube-dl already supports www and old, but it's not possible to add support for each and every one of them so it'd be better to ignore it altogether since it doesn't affect reddit detection or .json API availability.\r\n\r\nI'll submit a PR if this solution is acceptable.\nIgnore subdomains on reddit URLs\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.09*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.09**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [ ] Bug report (encountered problems with youtube-dl)\r\n- [x] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):\r\n- Single video: https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/\r\n\r\n\r\nNote that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights.\r\n\r\n---\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nVarious subs use dm or nm subdomain which apply different css, there's also np subdomain which does the same. reddit also has a massive number of [2 alphabet subdomains to serve the site in different languages](https://np.reddit.com/r/redditdev/comments/2dh18g/is_there_a_list_of_redditcom_subdomains_not/).\r\n\r\nyoutube-dl already supports www and old, but it's not possible to add support for each and every one of them so it'd be better to ignore it altogether since it doesn't affect reddit detection or .json API availability.\r\n\r\nI'll submit a PR if this solution is acceptable.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n int_or_none,\n float_or_none,\n)\n\n\nclass RedditIE(InfoExtractor):\n _VALID_URL = r'https?://v\\.redd\\.it/(?P<id>[^/?#&]+)'\n _TEST = {\n # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/\n 'url': 'https://v.redd.it/zv89llsvexdz',\n 'md5': '0a070c53eba7ec4534d95a5a1259e253',\n 'info_dict': {\n 'id': 'zv89llsvexdz',\n 'ext': 'mp4',\n 'title': 'zv89llsvexdz',\n },\n 'params': {\n 'format': 'bestvideo',\n },\n }\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n formats = self._extract_m3u8_formats(\n 'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,\n 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)\n\n formats.extend(self._extract_mpd_formats(\n 'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,\n mpd_id='dash', fatal=False))\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': video_id,\n 'formats': formats,\n }\n\n\nclass RedditRIE(InfoExtractor):\n _VALID_URL = r'(?P<url>https?://(?:(?:www|old)\\.)?reddit\\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'\n _TESTS = [{\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',\n 'info_dict': {\n 'id': 'zv89llsvexdz',\n 'ext': 'mp4',\n 'title': 'That small heart attack.',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'timestamp': 1501941939,\n 'upload_date': '20170805',\n 'uploader': 'Antw87',\n 'like_count': int,\n 'dislike_count': int,\n 'comment_count': int,\n 'age_limit': 0,\n },\n 'params': {\n 'format': 'bestvideo',\n 'skip_download': True,\n },\n }, {\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',\n 'only_matching': True,\n }, {\n # imgur\n 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',\n 'only_matching': True,\n }, {\n # imgur @ old reddit\n 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',\n 'only_matching': True,\n }, {\n # streamable\n 'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',\n 'only_matching': True,\n }, {\n # youtube\n 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n url, video_id = mobj.group('url', 'id')\n\n video_id = self._match_id(url)\n\n data = self._download_json(\n url + '/.json', video_id)[0]['data']['children'][0]['data']\n\n video_url = data['url']\n\n # Avoid recursing into the same reddit URL\n if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:\n raise ExtractorError('No media found', expected=True)\n\n over_18 = data.get('over_18')\n if over_18 is True:\n age_limit = 18\n elif over_18 is False:\n age_limit = 0\n else:\n age_limit = None\n\n return {\n '_type': 'url_transparent',\n 'url': video_url,\n 'title': data.get('title'),\n 'thumbnail': data.get('thumbnail'),\n 'timestamp': float_or_none(data.get('created_utc')),\n 'uploader': data.get('author'),\n 'like_count': int_or_none(data.get('ups')),\n 'dislike_count': int_or_none(data.get('downs')),\n 'comment_count': int_or_none(data.get('num_comments')),\n 'age_limit': age_limit,\n }\n", "path": "youtube_dl/extractor/reddit.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n int_or_none,\n float_or_none,\n)\n\n\nclass RedditIE(InfoExtractor):\n _VALID_URL = r'https?://v\\.redd\\.it/(?P<id>[^/?#&]+)'\n _TEST = {\n # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/\n 'url': 'https://v.redd.it/zv89llsvexdz',\n 'md5': '0a070c53eba7ec4534d95a5a1259e253',\n 'info_dict': {\n 'id': 'zv89llsvexdz',\n 'ext': 'mp4',\n 'title': 'zv89llsvexdz',\n },\n 'params': {\n 'format': 'bestvideo',\n },\n }\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n formats = self._extract_m3u8_formats(\n 'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,\n 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)\n\n formats.extend(self._extract_mpd_formats(\n 'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,\n mpd_id='dash', fatal=False))\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': video_id,\n 'formats': formats,\n }\n\n\nclass RedditRIE(InfoExtractor):\n _VALID_URL = r'(?P<url>https?://(?:[^/]+\\.)?reddit\\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'\n _TESTS = [{\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',\n 'info_dict': {\n 'id': 'zv89llsvexdz',\n 'ext': 'mp4',\n 'title': 'That small heart attack.',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'timestamp': 1501941939,\n 'upload_date': '20170805',\n 'uploader': 'Antw87',\n 'like_count': int,\n 'dislike_count': int,\n 'comment_count': int,\n 'age_limit': 0,\n },\n 'params': {\n 'format': 'bestvideo',\n 'skip_download': True,\n },\n }, {\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',\n 'only_matching': True,\n }, {\n # imgur\n 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',\n 'only_matching': True,\n }, {\n # imgur @ old reddit\n 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',\n 'only_matching': True,\n }, {\n # streamable\n 'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',\n 'only_matching': True,\n }, {\n # youtube\n 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',\n 'only_matching': True,\n }, {\n # reddit video @ nm reddit\n 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n url, video_id = mobj.group('url', 'id')\n\n video_id = self._match_id(url)\n\n data = self._download_json(\n url + '/.json', video_id)[0]['data']['children'][0]['data']\n\n video_url = data['url']\n\n # Avoid recursing into the same reddit URL\n if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:\n raise ExtractorError('No media found', expected=True)\n\n over_18 = data.get('over_18')\n if over_18 is True:\n age_limit = 18\n elif over_18 is False:\n age_limit = 0\n else:\n age_limit = None\n\n return {\n '_type': 'url_transparent',\n 'url': video_url,\n 'title': data.get('title'),\n 'thumbnail': data.get('thumbnail'),\n 'timestamp': float_or_none(data.get('created_utc')),\n 'uploader': data.get('author'),\n 'like_count': int_or_none(data.get('ups')),\n 'dislike_count': int_or_none(data.get('downs')),\n 'comment_count': int_or_none(data.get('num_comments')),\n 'age_limit': age_limit,\n }\n", "path": "youtube_dl/extractor/reddit.py"}]}
2,929
326
gh_patches_debug_35109
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1823
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- B3 propagator package does not confirm to spec. According to the spec, `OTEL_PROPAGATORS` env var should support two types of B3 propagators, `b3` and `b3multi`. `b3` is supposed to inject a single header with `-` separate values. https://github.com/openzipkin/b3-propagation#single-header `b3multi` is supposed to inject one header per value. https://github.com/openzipkin/b3-propagation#multiple-headers Currently, we have a single implement at `opentelemetry.propgators.b3.B3Format` which actually is the multi-header implementation. This implementation is exposed as an propagator entry-point with the name `b3` instead of `b3multi`. This means setting `OTEL_PROPAGATORS=b3` will actually use a multi-header B3 propagator instead of single header while as setting `OTEL_PROPAGATORS=b3multi` will raise an exception. IMO, this is a bug and should be fixed as follows: 1. Add two new propagators called `B3SingleFormat` and `B3MultiFormat`. 2. Deprecate the existing `B3Format` propagator without changing behavior. Deprecation notice should recommend using `B3MultiFormat`. 3. Change `b3` entrypoint to point to `B3SingleFormat` implementation (*breaking behavioral change*). 4. Add a new `b3multi` entrypoint to point to `B3MultiFormat` implementation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import typing 16 from re import compile as re_compile 17 18 import opentelemetry.trace as trace 19 from opentelemetry.context import Context 20 from opentelemetry.propagators.textmap import ( 21 CarrierT, 22 Getter, 23 Setter, 24 TextMapPropagator, 25 default_getter, 26 default_setter, 27 ) 28 from opentelemetry.trace import format_span_id, format_trace_id 29 30 31 class B3Format(TextMapPropagator): 32 """Propagator for the B3 HTTP header format. 33 34 See: https://github.com/openzipkin/b3-propagation 35 """ 36 37 SINGLE_HEADER_KEY = "b3" 38 TRACE_ID_KEY = "x-b3-traceid" 39 SPAN_ID_KEY = "x-b3-spanid" 40 PARENT_SPAN_ID_KEY = "x-b3-parentspanid" 41 SAMPLED_KEY = "x-b3-sampled" 42 FLAGS_KEY = "x-b3-flags" 43 _SAMPLE_PROPAGATE_VALUES = set(["1", "True", "true", "d"]) 44 _trace_id_regex = re_compile(r"[\da-fA-F]{16}|[\da-fA-F]{32}") 45 _span_id_regex = re_compile(r"[\da-fA-F]{16}") 46 47 def extract( 48 self, 49 carrier: CarrierT, 50 context: typing.Optional[Context] = None, 51 getter: Getter = default_getter, 52 ) -> Context: 53 if context is None: 54 context = Context() 55 trace_id = trace.INVALID_TRACE_ID 56 span_id = trace.INVALID_SPAN_ID 57 sampled = "0" 58 flags = None 59 60 single_header = _extract_first_element( 61 getter.get(carrier, self.SINGLE_HEADER_KEY) 62 ) 63 if single_header: 64 # The b3 spec calls for the sampling state to be 65 # "deferred", which is unspecified. This concept does not 66 # translate to SpanContext, so we set it as recorded. 67 sampled = "1" 68 fields = single_header.split("-", 4) 69 70 if len(fields) == 1: 71 sampled = fields[0] 72 elif len(fields) == 2: 73 trace_id, span_id = fields 74 elif len(fields) == 3: 75 trace_id, span_id, sampled = fields 76 elif len(fields) == 4: 77 trace_id, span_id, sampled, _ = fields 78 else: 79 trace_id = ( 80 _extract_first_element(getter.get(carrier, self.TRACE_ID_KEY)) 81 or trace_id 82 ) 83 span_id = ( 84 _extract_first_element(getter.get(carrier, self.SPAN_ID_KEY)) 85 or span_id 86 ) 87 sampled = ( 88 _extract_first_element(getter.get(carrier, self.SAMPLED_KEY)) 89 or sampled 90 ) 91 flags = ( 92 _extract_first_element(getter.get(carrier, self.FLAGS_KEY)) 93 or flags 94 ) 95 96 if ( 97 trace_id == trace.INVALID_TRACE_ID 98 or span_id == trace.INVALID_SPAN_ID 99 or self._trace_id_regex.fullmatch(trace_id) is None 100 or self._span_id_regex.fullmatch(span_id) is None 101 ): 102 return context 103 104 trace_id = int(trace_id, 16) 105 span_id = int(span_id, 16) 106 options = 0 107 # The b3 spec provides no defined behavior for both sample and 108 # flag values set. Since the setting of at least one implies 109 # the desire for some form of sampling, propagate if either 110 # header is set to allow. 111 if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == "1": 112 options |= trace.TraceFlags.SAMPLED 113 114 return trace.set_span_in_context( 115 trace.NonRecordingSpan( 116 trace.SpanContext( 117 # trace an span ids are encoded in hex, so must be converted 118 trace_id=trace_id, 119 span_id=span_id, 120 is_remote=True, 121 trace_flags=trace.TraceFlags(options), 122 trace_state=trace.TraceState(), 123 ) 124 ), 125 context, 126 ) 127 128 def inject( 129 self, 130 carrier: CarrierT, 131 context: typing.Optional[Context] = None, 132 setter: Setter = default_setter, 133 ) -> None: 134 span = trace.get_current_span(context=context) 135 136 span_context = span.get_span_context() 137 if span_context == trace.INVALID_SPAN_CONTEXT: 138 return 139 140 sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0 141 setter.set( 142 carrier, 143 self.TRACE_ID_KEY, 144 format_trace_id(span_context.trace_id), 145 ) 146 setter.set( 147 carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id) 148 ) 149 span_parent = getattr(span, "parent", None) 150 if span_parent is not None: 151 setter.set( 152 carrier, 153 self.PARENT_SPAN_ID_KEY, 154 format_span_id(span_parent.span_id), 155 ) 156 setter.set(carrier, self.SAMPLED_KEY, "1" if sampled else "0") 157 158 @property 159 def fields(self) -> typing.Set[str]: 160 return { 161 self.TRACE_ID_KEY, 162 self.SPAN_ID_KEY, 163 self.PARENT_SPAN_ID_KEY, 164 self.SAMPLED_KEY, 165 } 166 167 168 def _extract_first_element( 169 items: typing.Iterable[CarrierT], 170 ) -> typing.Optional[CarrierT]: 171 if items is None: 172 return None 173 return next(iter(items), None) 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py b/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py --- a/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py +++ b/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py @@ -15,6 +15,8 @@ import typing from re import compile as re_compile +from deprecated import deprecated + import opentelemetry.trace as trace from opentelemetry.context import Context from opentelemetry.propagators.textmap import ( @@ -28,10 +30,11 @@ from opentelemetry.trace import format_span_id, format_trace_id -class B3Format(TextMapPropagator): - """Propagator for the B3 HTTP header format. +class B3MultiFormat(TextMapPropagator): + """Propagator for the B3 HTTP multi-header format. See: https://github.com/openzipkin/b3-propagation + https://github.com/openzipkin/b3-propagation#multiple-headers """ SINGLE_HEADER_KEY = "b3" @@ -165,6 +168,53 @@ } +class B3SingleFormat(B3MultiFormat): + """Propagator for the B3 HTTP single-header format. + + See: https://github.com/openzipkin/b3-propagation + https://github.com/openzipkin/b3-propagation#single-header + """ + + def inject( + self, + carrier: CarrierT, + context: typing.Optional[Context] = None, + setter: Setter = default_setter, + ) -> None: + span = trace.get_current_span(context=context) + + span_context = span.get_span_context() + if span_context == trace.INVALID_SPAN_CONTEXT: + return + + sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0 + + fields = [ + format_trace_id(span_context.trace_id), + format_span_id(span_context.span_id), + "1" if sampled else "0", + ] + + span_parent = getattr(span, "parent", None) + if span_parent: + fields.append(format_span_id(span_parent.span_id)) + + setter.set(carrier, self.SINGLE_HEADER_KEY, "-".join(fields)) + + @property + def fields(self) -> typing.Set[str]: + return {self.SINGLE_HEADER_KEY} + + +class B3Format(B3MultiFormat): + @deprecated( + version="1.2.0", + reason="B3Format is deprecated in favor of B3MultiFormat", + ) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def _extract_first_element( items: typing.Iterable[CarrierT], ) -> typing.Optional[CarrierT]:
{"golden_diff": "diff --git a/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py b/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py\n--- a/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py\n+++ b/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py\n@@ -15,6 +15,8 @@\n import typing\n from re import compile as re_compile\n \n+from deprecated import deprecated\n+\n import opentelemetry.trace as trace\n from opentelemetry.context import Context\n from opentelemetry.propagators.textmap import (\n@@ -28,10 +30,11 @@\n from opentelemetry.trace import format_span_id, format_trace_id\n \n \n-class B3Format(TextMapPropagator):\n- \"\"\"Propagator for the B3 HTTP header format.\n+class B3MultiFormat(TextMapPropagator):\n+ \"\"\"Propagator for the B3 HTTP multi-header format.\n \n See: https://github.com/openzipkin/b3-propagation\n+ https://github.com/openzipkin/b3-propagation#multiple-headers\n \"\"\"\n \n SINGLE_HEADER_KEY = \"b3\"\n@@ -165,6 +168,53 @@\n }\n \n \n+class B3SingleFormat(B3MultiFormat):\n+ \"\"\"Propagator for the B3 HTTP single-header format.\n+\n+ See: https://github.com/openzipkin/b3-propagation\n+ https://github.com/openzipkin/b3-propagation#single-header\n+ \"\"\"\n+\n+ def inject(\n+ self,\n+ carrier: CarrierT,\n+ context: typing.Optional[Context] = None,\n+ setter: Setter = default_setter,\n+ ) -> None:\n+ span = trace.get_current_span(context=context)\n+\n+ span_context = span.get_span_context()\n+ if span_context == trace.INVALID_SPAN_CONTEXT:\n+ return\n+\n+ sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0\n+\n+ fields = [\n+ format_trace_id(span_context.trace_id),\n+ format_span_id(span_context.span_id),\n+ \"1\" if sampled else \"0\",\n+ ]\n+\n+ span_parent = getattr(span, \"parent\", None)\n+ if span_parent:\n+ fields.append(format_span_id(span_parent.span_id))\n+\n+ setter.set(carrier, self.SINGLE_HEADER_KEY, \"-\".join(fields))\n+\n+ @property\n+ def fields(self) -> typing.Set[str]:\n+ return {self.SINGLE_HEADER_KEY}\n+\n+\n+class B3Format(B3MultiFormat):\n+ @deprecated(\n+ version=\"1.2.0\",\n+ reason=\"B3Format is deprecated in favor of B3MultiFormat\",\n+ )\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+\n+\n def _extract_first_element(\n items: typing.Iterable[CarrierT],\n ) -> typing.Optional[CarrierT]:\n", "issue": "B3 propagator package does not confirm to spec.\nAccording to the spec, `OTEL_PROPAGATORS` env var should support two types of B3 propagators, `b3` and `b3multi`. \r\n\r\n`b3` is supposed to inject a single header with `-` separate values. https://github.com/openzipkin/b3-propagation#single-header\r\n`b3multi` is supposed to inject one header per value. https://github.com/openzipkin/b3-propagation#multiple-headers\r\n\r\nCurrently, we have a single implement at `opentelemetry.propgators.b3.B3Format` which actually is the multi-header implementation. \r\n\r\nThis implementation is exposed as an propagator entry-point with the name `b3` instead of `b3multi`.\r\n\r\nThis means setting `OTEL_PROPAGATORS=b3` will actually use a multi-header B3 propagator instead of single header while as setting `OTEL_PROPAGATORS=b3multi` will raise an exception.\r\n\r\nIMO, this is a bug and should be fixed as follows:\r\n\r\n1. Add two new propagators called `B3SingleFormat` and `B3MultiFormat`.\r\n2. Deprecate the existing `B3Format` propagator without changing behavior. Deprecation notice should recommend using `B3MultiFormat`.\r\n3. Change `b3` entrypoint to point to `B3SingleFormat` implementation (*breaking behavioral change*).\r\n4. Add a new `b3multi` entrypoint to point to `B3MultiFormat` implementation.\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport typing\nfrom re import compile as re_compile\n\nimport opentelemetry.trace as trace\nfrom opentelemetry.context import Context\nfrom opentelemetry.propagators.textmap import (\n CarrierT,\n Getter,\n Setter,\n TextMapPropagator,\n default_getter,\n default_setter,\n)\nfrom opentelemetry.trace import format_span_id, format_trace_id\n\n\nclass B3Format(TextMapPropagator):\n \"\"\"Propagator for the B3 HTTP header format.\n\n See: https://github.com/openzipkin/b3-propagation\n \"\"\"\n\n SINGLE_HEADER_KEY = \"b3\"\n TRACE_ID_KEY = \"x-b3-traceid\"\n SPAN_ID_KEY = \"x-b3-spanid\"\n PARENT_SPAN_ID_KEY = \"x-b3-parentspanid\"\n SAMPLED_KEY = \"x-b3-sampled\"\n FLAGS_KEY = \"x-b3-flags\"\n _SAMPLE_PROPAGATE_VALUES = set([\"1\", \"True\", \"true\", \"d\"])\n _trace_id_regex = re_compile(r\"[\\da-fA-F]{16}|[\\da-fA-F]{32}\")\n _span_id_regex = re_compile(r\"[\\da-fA-F]{16}\")\n\n def extract(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n getter: Getter = default_getter,\n ) -> Context:\n if context is None:\n context = Context()\n trace_id = trace.INVALID_TRACE_ID\n span_id = trace.INVALID_SPAN_ID\n sampled = \"0\"\n flags = None\n\n single_header = _extract_first_element(\n getter.get(carrier, self.SINGLE_HEADER_KEY)\n )\n if single_header:\n # The b3 spec calls for the sampling state to be\n # \"deferred\", which is unspecified. This concept does not\n # translate to SpanContext, so we set it as recorded.\n sampled = \"1\"\n fields = single_header.split(\"-\", 4)\n\n if len(fields) == 1:\n sampled = fields[0]\n elif len(fields) == 2:\n trace_id, span_id = fields\n elif len(fields) == 3:\n trace_id, span_id, sampled = fields\n elif len(fields) == 4:\n trace_id, span_id, sampled, _ = fields\n else:\n trace_id = (\n _extract_first_element(getter.get(carrier, self.TRACE_ID_KEY))\n or trace_id\n )\n span_id = (\n _extract_first_element(getter.get(carrier, self.SPAN_ID_KEY))\n or span_id\n )\n sampled = (\n _extract_first_element(getter.get(carrier, self.SAMPLED_KEY))\n or sampled\n )\n flags = (\n _extract_first_element(getter.get(carrier, self.FLAGS_KEY))\n or flags\n )\n\n if (\n trace_id == trace.INVALID_TRACE_ID\n or span_id == trace.INVALID_SPAN_ID\n or self._trace_id_regex.fullmatch(trace_id) is None\n or self._span_id_regex.fullmatch(span_id) is None\n ):\n return context\n\n trace_id = int(trace_id, 16)\n span_id = int(span_id, 16)\n options = 0\n # The b3 spec provides no defined behavior for both sample and\n # flag values set. Since the setting of at least one implies\n # the desire for some form of sampling, propagate if either\n # header is set to allow.\n if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == \"1\":\n options |= trace.TraceFlags.SAMPLED\n\n return trace.set_span_in_context(\n trace.NonRecordingSpan(\n trace.SpanContext(\n # trace an span ids are encoded in hex, so must be converted\n trace_id=trace_id,\n span_id=span_id,\n is_remote=True,\n trace_flags=trace.TraceFlags(options),\n trace_state=trace.TraceState(),\n )\n ),\n context,\n )\n\n def inject(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n setter: Setter = default_setter,\n ) -> None:\n span = trace.get_current_span(context=context)\n\n span_context = span.get_span_context()\n if span_context == trace.INVALID_SPAN_CONTEXT:\n return\n\n sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0\n setter.set(\n carrier,\n self.TRACE_ID_KEY,\n format_trace_id(span_context.trace_id),\n )\n setter.set(\n carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id)\n )\n span_parent = getattr(span, \"parent\", None)\n if span_parent is not None:\n setter.set(\n carrier,\n self.PARENT_SPAN_ID_KEY,\n format_span_id(span_parent.span_id),\n )\n setter.set(carrier, self.SAMPLED_KEY, \"1\" if sampled else \"0\")\n\n @property\n def fields(self) -> typing.Set[str]:\n return {\n self.TRACE_ID_KEY,\n self.SPAN_ID_KEY,\n self.PARENT_SPAN_ID_KEY,\n self.SAMPLED_KEY,\n }\n\n\ndef _extract_first_element(\n items: typing.Iterable[CarrierT],\n) -> typing.Optional[CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport typing\nfrom re import compile as re_compile\n\nfrom deprecated import deprecated\n\nimport opentelemetry.trace as trace\nfrom opentelemetry.context import Context\nfrom opentelemetry.propagators.textmap import (\n CarrierT,\n Getter,\n Setter,\n TextMapPropagator,\n default_getter,\n default_setter,\n)\nfrom opentelemetry.trace import format_span_id, format_trace_id\n\n\nclass B3MultiFormat(TextMapPropagator):\n \"\"\"Propagator for the B3 HTTP multi-header format.\n\n See: https://github.com/openzipkin/b3-propagation\n https://github.com/openzipkin/b3-propagation#multiple-headers\n \"\"\"\n\n SINGLE_HEADER_KEY = \"b3\"\n TRACE_ID_KEY = \"x-b3-traceid\"\n SPAN_ID_KEY = \"x-b3-spanid\"\n PARENT_SPAN_ID_KEY = \"x-b3-parentspanid\"\n SAMPLED_KEY = \"x-b3-sampled\"\n FLAGS_KEY = \"x-b3-flags\"\n _SAMPLE_PROPAGATE_VALUES = set([\"1\", \"True\", \"true\", \"d\"])\n _trace_id_regex = re_compile(r\"[\\da-fA-F]{16}|[\\da-fA-F]{32}\")\n _span_id_regex = re_compile(r\"[\\da-fA-F]{16}\")\n\n def extract(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n getter: Getter = default_getter,\n ) -> Context:\n if context is None:\n context = Context()\n trace_id = trace.INVALID_TRACE_ID\n span_id = trace.INVALID_SPAN_ID\n sampled = \"0\"\n flags = None\n\n single_header = _extract_first_element(\n getter.get(carrier, self.SINGLE_HEADER_KEY)\n )\n if single_header:\n # The b3 spec calls for the sampling state to be\n # \"deferred\", which is unspecified. This concept does not\n # translate to SpanContext, so we set it as recorded.\n sampled = \"1\"\n fields = single_header.split(\"-\", 4)\n\n if len(fields) == 1:\n sampled = fields[0]\n elif len(fields) == 2:\n trace_id, span_id = fields\n elif len(fields) == 3:\n trace_id, span_id, sampled = fields\n elif len(fields) == 4:\n trace_id, span_id, sampled, _ = fields\n else:\n trace_id = (\n _extract_first_element(getter.get(carrier, self.TRACE_ID_KEY))\n or trace_id\n )\n span_id = (\n _extract_first_element(getter.get(carrier, self.SPAN_ID_KEY))\n or span_id\n )\n sampled = (\n _extract_first_element(getter.get(carrier, self.SAMPLED_KEY))\n or sampled\n )\n flags = (\n _extract_first_element(getter.get(carrier, self.FLAGS_KEY))\n or flags\n )\n\n if (\n trace_id == trace.INVALID_TRACE_ID\n or span_id == trace.INVALID_SPAN_ID\n or self._trace_id_regex.fullmatch(trace_id) is None\n or self._span_id_regex.fullmatch(span_id) is None\n ):\n return context\n\n trace_id = int(trace_id, 16)\n span_id = int(span_id, 16)\n options = 0\n # The b3 spec provides no defined behavior for both sample and\n # flag values set. Since the setting of at least one implies\n # the desire for some form of sampling, propagate if either\n # header is set to allow.\n if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == \"1\":\n options |= trace.TraceFlags.SAMPLED\n\n return trace.set_span_in_context(\n trace.NonRecordingSpan(\n trace.SpanContext(\n # trace an span ids are encoded in hex, so must be converted\n trace_id=trace_id,\n span_id=span_id,\n is_remote=True,\n trace_flags=trace.TraceFlags(options),\n trace_state=trace.TraceState(),\n )\n ),\n context,\n )\n\n def inject(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n setter: Setter = default_setter,\n ) -> None:\n span = trace.get_current_span(context=context)\n\n span_context = span.get_span_context()\n if span_context == trace.INVALID_SPAN_CONTEXT:\n return\n\n sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0\n setter.set(\n carrier,\n self.TRACE_ID_KEY,\n format_trace_id(span_context.trace_id),\n )\n setter.set(\n carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id)\n )\n span_parent = getattr(span, \"parent\", None)\n if span_parent is not None:\n setter.set(\n carrier,\n self.PARENT_SPAN_ID_KEY,\n format_span_id(span_parent.span_id),\n )\n setter.set(carrier, self.SAMPLED_KEY, \"1\" if sampled else \"0\")\n\n @property\n def fields(self) -> typing.Set[str]:\n return {\n self.TRACE_ID_KEY,\n self.SPAN_ID_KEY,\n self.PARENT_SPAN_ID_KEY,\n self.SAMPLED_KEY,\n }\n\n\nclass B3SingleFormat(B3MultiFormat):\n \"\"\"Propagator for the B3 HTTP single-header format.\n\n See: https://github.com/openzipkin/b3-propagation\n https://github.com/openzipkin/b3-propagation#single-header\n \"\"\"\n\n def inject(\n self,\n carrier: CarrierT,\n context: typing.Optional[Context] = None,\n setter: Setter = default_setter,\n ) -> None:\n span = trace.get_current_span(context=context)\n\n span_context = span.get_span_context()\n if span_context == trace.INVALID_SPAN_CONTEXT:\n return\n\n sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0\n\n fields = [\n format_trace_id(span_context.trace_id),\n format_span_id(span_context.span_id),\n \"1\" if sampled else \"0\",\n ]\n\n span_parent = getattr(span, \"parent\", None)\n if span_parent:\n fields.append(format_span_id(span_parent.span_id))\n\n setter.set(carrier, self.SINGLE_HEADER_KEY, \"-\".join(fields))\n\n @property\n def fields(self) -> typing.Set[str]:\n return {self.SINGLE_HEADER_KEY}\n\n\nclass B3Format(B3MultiFormat):\n @deprecated(\n version=\"1.2.0\",\n reason=\"B3Format is deprecated in favor of B3MultiFormat\",\n )\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef _extract_first_element(\n items: typing.Iterable[CarrierT],\n) -> typing.Optional[CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/__init__.py"}]}
2,327
701
gh_patches_debug_19018
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-512
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Django log levels are ignored by core agent No matter what I did to the Django log levels, I kept getting this message when running Django management tasks: ``` [2020-04-02T09:36:06][core_agent][INFO] Initializing logger with log level: Info ``` This is super annoying, because this causes programs in my crontab to start sending out lots of e-mails because previously quiet tasks would now start producing output. After a lot of trial and error, I figured out that you can set `SCOUT_LOG_LEVEL` in Django settings to suppress this message. This really should be fixed so the agent also honors the Django logging settings, or at least documented under the logging section in the Python documentation. It's really non-obvious that this the agent is a separate thing which ignores the Django logging settings. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/scout_apm/core/core_agent_manager.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import hashlib 5 import json 6 import logging 7 import os 8 import subprocess 9 import tarfile 10 import time 11 12 from urllib3.exceptions import HTTPError 13 14 from scout_apm.compat import urllib3_cert_pool_manager 15 from scout_apm.core.config import scout_config 16 17 logger = logging.getLogger(__name__) 18 19 20 class CoreAgentManager(object): 21 def __init__(self): 22 self.core_agent_bin_path = None 23 self.core_agent_bin_version = None 24 self.core_agent_dir = "{}/{}".format( 25 scout_config.value("core_agent_dir"), 26 scout_config.value("core_agent_full_name"), 27 ) 28 self.downloader = CoreAgentDownloader( 29 self.core_agent_dir, scout_config.value("core_agent_full_name") 30 ) 31 32 def launch(self): 33 if not scout_config.value("core_agent_launch"): 34 logger.debug( 35 "Not attempting to launch Core Agent " 36 "due to 'core_agent_launch' setting." 37 ) 38 return False 39 40 if not self.verify(): 41 if not scout_config.value("core_agent_download"): 42 logger.debug( 43 "Not attempting to download Core Agent due " 44 "to 'core_agent_download' setting." 45 ) 46 return False 47 48 self.download() 49 50 if not self.verify(): 51 logger.debug("Failed to verify Core Agent. Not launching Core Agent.") 52 return False 53 54 return self.run() 55 56 def download(self): 57 self.downloader.download() 58 59 def run(self): 60 try: 61 subprocess.check_call( 62 ( 63 self.agent_binary() 64 + self.daemonize_flag() 65 + self.log_level() 66 + self.log_file() 67 + self.config_file() 68 + self.socket_path() 69 ), 70 close_fds=True, 71 ) 72 except Exception: 73 # TODO detect failure of launch properly 74 logger.exception("Error running Core Agent") 75 return False 76 return True 77 78 def agent_binary(self): 79 return [self.core_agent_bin_path, "start"] 80 81 def daemonize_flag(self): 82 return ["--daemonize", "true"] 83 84 def socket_path(self): 85 # Old deprecated name "socket_path" 86 socket_path = scout_config.value("socket_path") 87 if socket_path is None: 88 socket_path = scout_config.value("core_agent_socket_path") 89 return ["--socket", socket_path] 90 91 def log_level(self): 92 # Old deprecated name "log_level" 93 log_level = scout_config.value("log_level") 94 if log_level is None: 95 log_level = scout_config.value("core_agent_log_level") 96 return ["--log-level", log_level] 97 98 def log_file(self): 99 # Old deprecated name "log_file" 100 path = scout_config.value("log_file") 101 if path is None: 102 path = scout_config.value("core_agent_log_file") 103 104 if path is not None: 105 return ["--log-file", path] 106 else: 107 return [] 108 109 def config_file(self): 110 # Old deprecated name "config_file" 111 path = scout_config.value("config_file") 112 if path is None: 113 path = scout_config.value("core_agent_config_file") 114 115 if path is not None: 116 return ["--config-file", path] 117 else: 118 return [] 119 120 def verify(self): 121 manifest = CoreAgentManifest(self.core_agent_dir + "/manifest.json") 122 if not manifest.is_valid(): 123 logger.debug( 124 "Core Agent verification failed: CoreAgentManifest is not valid." 125 ) 126 self.core_agent_bin_path = None 127 self.core_agent_bin_version = None 128 return False 129 130 bin_path = self.core_agent_dir + "/" + manifest.bin_name 131 if sha256_digest(bin_path) == manifest.sha256: 132 self.core_agent_bin_path = bin_path 133 self.core_agent_bin_version = manifest.bin_version 134 return True 135 else: 136 logger.debug("Core Agent verification failed: SHA mismatch.") 137 self.core_agent_bin_path = None 138 self.core_agent_bin_version = None 139 return False 140 141 142 class CoreAgentDownloader(object): 143 def __init__(self, download_destination, core_agent_full_name): 144 self.stale_download_secs = 120 145 self.destination = download_destination 146 self.core_agent_full_name = core_agent_full_name 147 self.package_location = self.destination + "/{}.tgz".format( 148 self.core_agent_full_name 149 ) 150 self.download_lock_path = self.destination + "/download.lock" 151 self.download_lock_fd = None 152 153 def download(self): 154 self.create_core_agent_dir() 155 self.obtain_download_lock() 156 if self.download_lock_fd is not None: 157 try: 158 downloaded = self.download_package() 159 if downloaded: 160 self.untar() 161 except (OSError, HTTPError): 162 logger.exception("Exception raised while downloading Core Agent") 163 finally: 164 self.release_download_lock() 165 166 def create_core_agent_dir(self): 167 try: 168 os.makedirs(self.destination, scout_config.core_agent_permissions()) 169 except OSError: 170 pass 171 172 def obtain_download_lock(self): 173 self.clean_stale_download_lock() 174 try: 175 self.download_lock_fd = os.open( 176 self.download_lock_path, 177 os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK, 178 ) 179 except OSError as exc: 180 logger.debug( 181 "Could not obtain download lock on %s", 182 self.download_lock_path, 183 exc_info=exc, 184 ) 185 self.download_lock_fd = None 186 187 def clean_stale_download_lock(self): 188 try: 189 delta = time.time() - os.stat(self.download_lock_path).st_ctime 190 if delta > self.stale_download_secs: 191 logger.debug("Clearing stale download lock file.") 192 os.unlink(self.download_lock_path) 193 except OSError: 194 pass 195 196 def release_download_lock(self): 197 if self.download_lock_fd is not None: 198 os.unlink(self.download_lock_path) 199 os.close(self.download_lock_fd) 200 201 def download_package(self): 202 full_url = self.full_url() 203 logger.debug("Downloading: %s to %s", full_url, self.package_location) 204 http = urllib3_cert_pool_manager() 205 response = http.request( 206 "GET", full_url, preload_content=False, timeout=10.0, retries=3 207 ) 208 try: 209 if response.status != 200: 210 return False 211 with open(self.package_location, "wb") as fp: 212 for chunk in response.stream(): 213 fp.write(chunk) 214 finally: 215 response.release_conn() 216 return True 217 218 def untar(self): 219 t = tarfile.open(self.package_location, "r") 220 t.extractall(self.destination) 221 222 def full_url(self): 223 return "{root_url}/{core_agent_full_name}.tgz".format( 224 root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name 225 ) 226 227 def root_url(self): 228 return scout_config.value("download_url") 229 230 231 class CoreAgentManifest(object): 232 def __init__(self, path): 233 self.manifest_path = path 234 self.bin_name = None 235 self.bin_version = None 236 self.sha256 = None 237 self.valid = False 238 try: 239 self.parse() 240 # noqa for this issue: https://github.com/PyCQA/flake8-bugbear/issues/110 241 except (ValueError, TypeError, OSError, IOError) as exc: # noqa: B014 242 logger.debug("Error parsing Core Agent Manifest", exc_info=exc) 243 244 def parse(self): 245 logger.debug("Parsing Core Agent manifest path: %s", self.manifest_path) 246 with open(self.manifest_path) as manifest_file: 247 self.raw = manifest_file.read() 248 self.json = json.loads(self.raw) 249 self.version = self.json["version"] 250 self.bin_version = self.json["core_agent_version"] 251 self.bin_name = self.json["core_agent_binary"] 252 self.sha256 = self.json["core_agent_binary_sha256"] 253 self.valid = True 254 logger.debug("Core Agent manifest json: %s", self.json) 255 256 def is_valid(self): 257 return self.valid 258 259 260 def sha256_digest(filename, block_size=65536): 261 try: 262 sha256 = hashlib.sha256() 263 with open(filename, "rb") as f: 264 for block in iter(lambda: f.read(block_size), b""): 265 sha256.update(block) 266 return sha256.hexdigest() 267 except OSError as exc: 268 logger.debug("Error on digest", exc_info=exc) 269 return None 270 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/core/core_agent_manager.py b/src/scout_apm/core/core_agent_manager.py --- a/src/scout_apm/core/core_agent_manager.py +++ b/src/scout_apm/core/core_agent_manager.py @@ -58,17 +58,19 @@ def run(self): try: - subprocess.check_call( - ( - self.agent_binary() - + self.daemonize_flag() - + self.log_level() - + self.log_file() - + self.config_file() - + self.socket_path() - ), - close_fds=True, - ) + with open(os.devnull) as devnull: + subprocess.check_call( + ( + self.agent_binary() + + self.daemonize_flag() + + self.log_level() + + self.log_file() + + self.config_file() + + self.socket_path() + ), + close_fds=True, + stdout=devnull, + ) except Exception: # TODO detect failure of launch properly logger.exception("Error running Core Agent")
{"golden_diff": "diff --git a/src/scout_apm/core/core_agent_manager.py b/src/scout_apm/core/core_agent_manager.py\n--- a/src/scout_apm/core/core_agent_manager.py\n+++ b/src/scout_apm/core/core_agent_manager.py\n@@ -58,17 +58,19 @@\n \n def run(self):\n try:\n- subprocess.check_call(\n- (\n- self.agent_binary()\n- + self.daemonize_flag()\n- + self.log_level()\n- + self.log_file()\n- + self.config_file()\n- + self.socket_path()\n- ),\n- close_fds=True,\n- )\n+ with open(os.devnull) as devnull:\n+ subprocess.check_call(\n+ (\n+ self.agent_binary()\n+ + self.daemonize_flag()\n+ + self.log_level()\n+ + self.log_file()\n+ + self.config_file()\n+ + self.socket_path()\n+ ),\n+ close_fds=True,\n+ stdout=devnull,\n+ )\n except Exception:\n # TODO detect failure of launch properly\n logger.exception(\"Error running Core Agent\")\n", "issue": "Django log levels are ignored by core agent\nNo matter what I did to the Django log levels, I kept getting this message when running Django management tasks:\r\n\r\n```\r\n[2020-04-02T09:36:06][core_agent][INFO] Initializing logger with log level: Info\r\n```\r\n\r\nThis is super annoying, because this causes programs in my crontab to start sending out lots of e-mails because previously quiet tasks would now start producing output.\r\n\r\nAfter a lot of trial and error, I figured out that you can set `SCOUT_LOG_LEVEL` in Django settings to suppress this message.\r\n\r\nThis really should be fixed so the agent also honors the Django logging settings, or at least documented under the logging section in the Python documentation. It's really non-obvious that this the agent is a separate thing which ignores the Django logging settings.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport hashlib\nimport json\nimport logging\nimport os\nimport subprocess\nimport tarfile\nimport time\n\nfrom urllib3.exceptions import HTTPError\n\nfrom scout_apm.compat import urllib3_cert_pool_manager\nfrom scout_apm.core.config import scout_config\n\nlogger = logging.getLogger(__name__)\n\n\nclass CoreAgentManager(object):\n def __init__(self):\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n self.core_agent_dir = \"{}/{}\".format(\n scout_config.value(\"core_agent_dir\"),\n scout_config.value(\"core_agent_full_name\"),\n )\n self.downloader = CoreAgentDownloader(\n self.core_agent_dir, scout_config.value(\"core_agent_full_name\")\n )\n\n def launch(self):\n if not scout_config.value(\"core_agent_launch\"):\n logger.debug(\n \"Not attempting to launch Core Agent \"\n \"due to 'core_agent_launch' setting.\"\n )\n return False\n\n if not self.verify():\n if not scout_config.value(\"core_agent_download\"):\n logger.debug(\n \"Not attempting to download Core Agent due \"\n \"to 'core_agent_download' setting.\"\n )\n return False\n\n self.download()\n\n if not self.verify():\n logger.debug(\"Failed to verify Core Agent. Not launching Core Agent.\")\n return False\n\n return self.run()\n\n def download(self):\n self.downloader.download()\n\n def run(self):\n try:\n subprocess.check_call(\n (\n self.agent_binary()\n + self.daemonize_flag()\n + self.log_level()\n + self.log_file()\n + self.config_file()\n + self.socket_path()\n ),\n close_fds=True,\n )\n except Exception:\n # TODO detect failure of launch properly\n logger.exception(\"Error running Core Agent\")\n return False\n return True\n\n def agent_binary(self):\n return [self.core_agent_bin_path, \"start\"]\n\n def daemonize_flag(self):\n return [\"--daemonize\", \"true\"]\n\n def socket_path(self):\n # Old deprecated name \"socket_path\"\n socket_path = scout_config.value(\"socket_path\")\n if socket_path is None:\n socket_path = scout_config.value(\"core_agent_socket_path\")\n return [\"--socket\", socket_path]\n\n def log_level(self):\n # Old deprecated name \"log_level\"\n log_level = scout_config.value(\"log_level\")\n if log_level is None:\n log_level = scout_config.value(\"core_agent_log_level\")\n return [\"--log-level\", log_level]\n\n def log_file(self):\n # Old deprecated name \"log_file\"\n path = scout_config.value(\"log_file\")\n if path is None:\n path = scout_config.value(\"core_agent_log_file\")\n\n if path is not None:\n return [\"--log-file\", path]\n else:\n return []\n\n def config_file(self):\n # Old deprecated name \"config_file\"\n path = scout_config.value(\"config_file\")\n if path is None:\n path = scout_config.value(\"core_agent_config_file\")\n\n if path is not None:\n return [\"--config-file\", path]\n else:\n return []\n\n def verify(self):\n manifest = CoreAgentManifest(self.core_agent_dir + \"/manifest.json\")\n if not manifest.is_valid():\n logger.debug(\n \"Core Agent verification failed: CoreAgentManifest is not valid.\"\n )\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n bin_path = self.core_agent_dir + \"/\" + manifest.bin_name\n if sha256_digest(bin_path) == manifest.sha256:\n self.core_agent_bin_path = bin_path\n self.core_agent_bin_version = manifest.bin_version\n return True\n else:\n logger.debug(\"Core Agent verification failed: SHA mismatch.\")\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n\nclass CoreAgentDownloader(object):\n def __init__(self, download_destination, core_agent_full_name):\n self.stale_download_secs = 120\n self.destination = download_destination\n self.core_agent_full_name = core_agent_full_name\n self.package_location = self.destination + \"/{}.tgz\".format(\n self.core_agent_full_name\n )\n self.download_lock_path = self.destination + \"/download.lock\"\n self.download_lock_fd = None\n\n def download(self):\n self.create_core_agent_dir()\n self.obtain_download_lock()\n if self.download_lock_fd is not None:\n try:\n downloaded = self.download_package()\n if downloaded:\n self.untar()\n except (OSError, HTTPError):\n logger.exception(\"Exception raised while downloading Core Agent\")\n finally:\n self.release_download_lock()\n\n def create_core_agent_dir(self):\n try:\n os.makedirs(self.destination, scout_config.core_agent_permissions())\n except OSError:\n pass\n\n def obtain_download_lock(self):\n self.clean_stale_download_lock()\n try:\n self.download_lock_fd = os.open(\n self.download_lock_path,\n os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK,\n )\n except OSError as exc:\n logger.debug(\n \"Could not obtain download lock on %s\",\n self.download_lock_path,\n exc_info=exc,\n )\n self.download_lock_fd = None\n\n def clean_stale_download_lock(self):\n try:\n delta = time.time() - os.stat(self.download_lock_path).st_ctime\n if delta > self.stale_download_secs:\n logger.debug(\"Clearing stale download lock file.\")\n os.unlink(self.download_lock_path)\n except OSError:\n pass\n\n def release_download_lock(self):\n if self.download_lock_fd is not None:\n os.unlink(self.download_lock_path)\n os.close(self.download_lock_fd)\n\n def download_package(self):\n full_url = self.full_url()\n logger.debug(\"Downloading: %s to %s\", full_url, self.package_location)\n http = urllib3_cert_pool_manager()\n response = http.request(\n \"GET\", full_url, preload_content=False, timeout=10.0, retries=3\n )\n try:\n if response.status != 200:\n return False\n with open(self.package_location, \"wb\") as fp:\n for chunk in response.stream():\n fp.write(chunk)\n finally:\n response.release_conn()\n return True\n\n def untar(self):\n t = tarfile.open(self.package_location, \"r\")\n t.extractall(self.destination)\n\n def full_url(self):\n return \"{root_url}/{core_agent_full_name}.tgz\".format(\n root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name\n )\n\n def root_url(self):\n return scout_config.value(\"download_url\")\n\n\nclass CoreAgentManifest(object):\n def __init__(self, path):\n self.manifest_path = path\n self.bin_name = None\n self.bin_version = None\n self.sha256 = None\n self.valid = False\n try:\n self.parse()\n # noqa for this issue: https://github.com/PyCQA/flake8-bugbear/issues/110\n except (ValueError, TypeError, OSError, IOError) as exc: # noqa: B014\n logger.debug(\"Error parsing Core Agent Manifest\", exc_info=exc)\n\n def parse(self):\n logger.debug(\"Parsing Core Agent manifest path: %s\", self.manifest_path)\n with open(self.manifest_path) as manifest_file:\n self.raw = manifest_file.read()\n self.json = json.loads(self.raw)\n self.version = self.json[\"version\"]\n self.bin_version = self.json[\"core_agent_version\"]\n self.bin_name = self.json[\"core_agent_binary\"]\n self.sha256 = self.json[\"core_agent_binary_sha256\"]\n self.valid = True\n logger.debug(\"Core Agent manifest json: %s\", self.json)\n\n def is_valid(self):\n return self.valid\n\n\ndef sha256_digest(filename, block_size=65536):\n try:\n sha256 = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(block_size), b\"\"):\n sha256.update(block)\n return sha256.hexdigest()\n except OSError as exc:\n logger.debug(\"Error on digest\", exc_info=exc)\n return None\n", "path": "src/scout_apm/core/core_agent_manager.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport hashlib\nimport json\nimport logging\nimport os\nimport subprocess\nimport tarfile\nimport time\n\nfrom urllib3.exceptions import HTTPError\n\nfrom scout_apm.compat import urllib3_cert_pool_manager\nfrom scout_apm.core.config import scout_config\n\nlogger = logging.getLogger(__name__)\n\n\nclass CoreAgentManager(object):\n def __init__(self):\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n self.core_agent_dir = \"{}/{}\".format(\n scout_config.value(\"core_agent_dir\"),\n scout_config.value(\"core_agent_full_name\"),\n )\n self.downloader = CoreAgentDownloader(\n self.core_agent_dir, scout_config.value(\"core_agent_full_name\")\n )\n\n def launch(self):\n if not scout_config.value(\"core_agent_launch\"):\n logger.debug(\n \"Not attempting to launch Core Agent \"\n \"due to 'core_agent_launch' setting.\"\n )\n return False\n\n if not self.verify():\n if not scout_config.value(\"core_agent_download\"):\n logger.debug(\n \"Not attempting to download Core Agent due \"\n \"to 'core_agent_download' setting.\"\n )\n return False\n\n self.download()\n\n if not self.verify():\n logger.debug(\"Failed to verify Core Agent. Not launching Core Agent.\")\n return False\n\n return self.run()\n\n def download(self):\n self.downloader.download()\n\n def run(self):\n try:\n with open(os.devnull) as devnull:\n subprocess.check_call(\n (\n self.agent_binary()\n + self.daemonize_flag()\n + self.log_level()\n + self.log_file()\n + self.config_file()\n + self.socket_path()\n ),\n close_fds=True,\n stdout=devnull,\n )\n except Exception:\n # TODO detect failure of launch properly\n logger.exception(\"Error running Core Agent\")\n return False\n return True\n\n def agent_binary(self):\n return [self.core_agent_bin_path, \"start\"]\n\n def daemonize_flag(self):\n return [\"--daemonize\", \"true\"]\n\n def socket_path(self):\n # Old deprecated name \"socket_path\"\n socket_path = scout_config.value(\"socket_path\")\n if socket_path is None:\n socket_path = scout_config.value(\"core_agent_socket_path\")\n return [\"--socket\", socket_path]\n\n def log_level(self):\n # Old deprecated name \"log_level\"\n log_level = scout_config.value(\"log_level\")\n if log_level is None:\n log_level = scout_config.value(\"core_agent_log_level\")\n return [\"--log-level\", log_level]\n\n def log_file(self):\n # Old deprecated name \"log_file\"\n path = scout_config.value(\"log_file\")\n if path is None:\n path = scout_config.value(\"core_agent_log_file\")\n\n if path is not None:\n return [\"--log-file\", path]\n else:\n return []\n\n def config_file(self):\n # Old deprecated name \"config_file\"\n path = scout_config.value(\"config_file\")\n if path is None:\n path = scout_config.value(\"core_agent_config_file\")\n\n if path is not None:\n return [\"--config-file\", path]\n else:\n return []\n\n def verify(self):\n manifest = CoreAgentManifest(self.core_agent_dir + \"/manifest.json\")\n if not manifest.is_valid():\n logger.debug(\n \"Core Agent verification failed: CoreAgentManifest is not valid.\"\n )\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n bin_path = self.core_agent_dir + \"/\" + manifest.bin_name\n if sha256_digest(bin_path) == manifest.sha256:\n self.core_agent_bin_path = bin_path\n self.core_agent_bin_version = manifest.bin_version\n return True\n else:\n logger.debug(\"Core Agent verification failed: SHA mismatch.\")\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n\nclass CoreAgentDownloader(object):\n def __init__(self, download_destination, core_agent_full_name):\n self.stale_download_secs = 120\n self.destination = download_destination\n self.core_agent_full_name = core_agent_full_name\n self.package_location = self.destination + \"/{}.tgz\".format(\n self.core_agent_full_name\n )\n self.download_lock_path = self.destination + \"/download.lock\"\n self.download_lock_fd = None\n\n def download(self):\n self.create_core_agent_dir()\n self.obtain_download_lock()\n if self.download_lock_fd is not None:\n try:\n downloaded = self.download_package()\n if downloaded:\n self.untar()\n except (OSError, HTTPError):\n logger.exception(\"Exception raised while downloading Core Agent\")\n finally:\n self.release_download_lock()\n\n def create_core_agent_dir(self):\n try:\n os.makedirs(self.destination, scout_config.core_agent_permissions())\n except OSError:\n pass\n\n def obtain_download_lock(self):\n self.clean_stale_download_lock()\n try:\n self.download_lock_fd = os.open(\n self.download_lock_path,\n os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK,\n )\n except OSError as exc:\n logger.debug(\n \"Could not obtain download lock on %s\",\n self.download_lock_path,\n exc_info=exc,\n )\n self.download_lock_fd = None\n\n def clean_stale_download_lock(self):\n try:\n delta = time.time() - os.stat(self.download_lock_path).st_ctime\n if delta > self.stale_download_secs:\n logger.debug(\"Clearing stale download lock file.\")\n os.unlink(self.download_lock_path)\n except OSError:\n pass\n\n def release_download_lock(self):\n if self.download_lock_fd is not None:\n os.unlink(self.download_lock_path)\n os.close(self.download_lock_fd)\n\n def download_package(self):\n full_url = self.full_url()\n logger.debug(\"Downloading: %s to %s\", full_url, self.package_location)\n http = urllib3_cert_pool_manager()\n response = http.request(\n \"GET\", full_url, preload_content=False, timeout=10.0, retries=3\n )\n try:\n if response.status != 200:\n return False\n with open(self.package_location, \"wb\") as fp:\n for chunk in response.stream():\n fp.write(chunk)\n finally:\n response.release_conn()\n return True\n\n def untar(self):\n t = tarfile.open(self.package_location, \"r\")\n t.extractall(self.destination)\n\n def full_url(self):\n return \"{root_url}/{core_agent_full_name}.tgz\".format(\n root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name\n )\n\n def root_url(self):\n return scout_config.value(\"download_url\")\n\n\nclass CoreAgentManifest(object):\n def __init__(self, path):\n self.manifest_path = path\n self.bin_name = None\n self.bin_version = None\n self.sha256 = None\n self.valid = False\n try:\n self.parse()\n # noqa for this issue: https://github.com/PyCQA/flake8-bugbear/issues/110\n except (ValueError, TypeError, OSError, IOError) as exc: # noqa: B014\n logger.debug(\"Error parsing Core Agent Manifest\", exc_info=exc)\n\n def parse(self):\n logger.debug(\"Parsing Core Agent manifest path: %s\", self.manifest_path)\n with open(self.manifest_path) as manifest_file:\n self.raw = manifest_file.read()\n self.json = json.loads(self.raw)\n self.version = self.json[\"version\"]\n self.bin_version = self.json[\"core_agent_version\"]\n self.bin_name = self.json[\"core_agent_binary\"]\n self.sha256 = self.json[\"core_agent_binary_sha256\"]\n self.valid = True\n logger.debug(\"Core Agent manifest json: %s\", self.json)\n\n def is_valid(self):\n return self.valid\n\n\ndef sha256_digest(filename, block_size=65536):\n try:\n sha256 = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(block_size), b\"\"):\n sha256.update(block)\n return sha256.hexdigest()\n except OSError as exc:\n logger.debug(\"Error on digest\", exc_info=exc)\n return None\n", "path": "src/scout_apm/core/core_agent_manager.py"}]}
2,990
244
gh_patches_debug_32234
rasdani/github-patches
git_diff
modin-project__modin-2252
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [REFACTOR]: remove dead code in modin/pandas/index folder <!-- General questions should be asked on the mailing list modin-dev@googlegroups.com. Before submitting an issue, please fill out the following form. --> ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: - **Modin installed from (source or binary)**: - **Modin version**: - **Python version**: - **Exact command to reproduce**: <!-- You can obtain the Modin version with python -c "import modin; print(modin.__version__)" --> ### Describe the problem <!-- Describe the problem clearly here. --> ### Source code / logs <!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `modin/pandas/index/partitioned_index.py` Content: ``` 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 13 14 15 class PartitionedIndex(object): 16 17 _index_lengths_cache = None 18 19 def _get_partition_lengths(self): 20 if self._index_lengths_cache is None: 21 self._index_lengths_cache = [ 22 obj.apply(len).get() for obj in self.index_partitions[:0] 23 ] 24 return self._index_lengths_cache 25 26 def _set_partition_lengths(self, new_value): 27 self._partition_length_cache = new_value 28 29 index_lengths = property(_get_partition_lengths, _set_partition_lengths) 30 31 def __getitem__(self, key): 32 cls = type(self) 33 return cls(self.index_partitions[key]) 34 35 36 class RayPartitionedIndex(PartitionedIndex): 37 def __init__(self, index_partitions): 38 self.index_partitions = index_partitions 39 ``` Path: `modin/pandas/index/__init__.py` Content: ``` 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 13 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/modin/pandas/index/__init__.py b/modin/pandas/index/__init__.py deleted file mode 100644 --- a/modin/pandas/index/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Licensed to Modin Development Team under one or more contributor license agreements. -# See the NOTICE file distributed with this work for additional information regarding -# copyright ownership. The Modin Development Team licenses this file to you under the -# Apache License, Version 2.0 (the "License"); you may not use this file except in -# compliance with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. diff --git a/modin/pandas/index/partitioned_index.py b/modin/pandas/index/partitioned_index.py deleted file mode 100644 --- a/modin/pandas/index/partitioned_index.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to Modin Development Team under one or more contributor license agreements. -# See the NOTICE file distributed with this work for additional information regarding -# copyright ownership. The Modin Development Team licenses this file to you under the -# Apache License, Version 2.0 (the "License"); you may not use this file except in -# compliance with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. - - -class PartitionedIndex(object): - - _index_lengths_cache = None - - def _get_partition_lengths(self): - if self._index_lengths_cache is None: - self._index_lengths_cache = [ - obj.apply(len).get() for obj in self.index_partitions[:0] - ] - return self._index_lengths_cache - - def _set_partition_lengths(self, new_value): - self._partition_length_cache = new_value - - index_lengths = property(_get_partition_lengths, _set_partition_lengths) - - def __getitem__(self, key): - cls = type(self) - return cls(self.index_partitions[key]) - - -class RayPartitionedIndex(PartitionedIndex): - def __init__(self, index_partitions): - self.index_partitions = index_partitions
{"golden_diff": "diff --git a/modin/pandas/index/__init__.py b/modin/pandas/index/__init__.py\ndeleted file mode 100644\n--- a/modin/pandas/index/__init__.py\n+++ /dev/null\n@@ -1,12 +0,0 @@\n-# Licensed to Modin Development Team under one or more contributor license agreements.\n-# See the NOTICE file distributed with this work for additional information regarding\n-# copyright ownership. The Modin Development Team licenses this file to you under the\n-# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n-# compliance with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software distributed under\n-# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n-# ANY KIND, either express or implied. See the License for the specific language\n-# governing permissions and limitations under the License.\ndiff --git a/modin/pandas/index/partitioned_index.py b/modin/pandas/index/partitioned_index.py\ndeleted file mode 100644\n--- a/modin/pandas/index/partitioned_index.py\n+++ /dev/null\n@@ -1,38 +0,0 @@\n-# Licensed to Modin Development Team under one or more contributor license agreements.\n-# See the NOTICE file distributed with this work for additional information regarding\n-# copyright ownership. The Modin Development Team licenses this file to you under the\n-# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n-# compliance with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software distributed under\n-# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n-# ANY KIND, either express or implied. See the License for the specific language\n-# governing permissions and limitations under the License.\n-\n-\n-class PartitionedIndex(object):\n-\n- _index_lengths_cache = None\n-\n- def _get_partition_lengths(self):\n- if self._index_lengths_cache is None:\n- self._index_lengths_cache = [\n- obj.apply(len).get() for obj in self.index_partitions[:0]\n- ]\n- return self._index_lengths_cache\n-\n- def _set_partition_lengths(self, new_value):\n- self._partition_length_cache = new_value\n-\n- index_lengths = property(_get_partition_lengths, _set_partition_lengths)\n-\n- def __getitem__(self, key):\n- cls = type(self)\n- return cls(self.index_partitions[key])\n-\n-\n-class RayPartitionedIndex(PartitionedIndex):\n- def __init__(self, index_partitions):\n- self.index_partitions = index_partitions\n", "issue": "[REFACTOR]: remove dead code in modin/pandas/index folder\n<!--\r\nGeneral questions should be asked on the mailing list modin-dev@googlegroups.com.\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **Modin installed from (source or binary)**:\r\n- **Modin version**:\r\n- **Python version**:\r\n- **Exact command to reproduce**:\r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\nclass PartitionedIndex(object):\n\n _index_lengths_cache = None\n\n def _get_partition_lengths(self):\n if self._index_lengths_cache is None:\n self._index_lengths_cache = [\n obj.apply(len).get() for obj in self.index_partitions[:0]\n ]\n return self._index_lengths_cache\n\n def _set_partition_lengths(self, new_value):\n self._partition_length_cache = new_value\n\n index_lengths = property(_get_partition_lengths, _set_partition_lengths)\n\n def __getitem__(self, key):\n cls = type(self)\n return cls(self.index_partitions[key])\n\n\nclass RayPartitionedIndex(PartitionedIndex):\n def __init__(self, index_partitions):\n self.index_partitions = index_partitions\n", "path": "modin/pandas/index/partitioned_index.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n", "path": "modin/pandas/index/__init__.py"}], "after_files": [{"content": null, "path": "modin/pandas/index/partitioned_index.py"}, {"content": null, "path": "modin/pandas/index/__init__.py"}]}
1,072
659
gh_patches_debug_2951
rasdani/github-patches
git_diff
e2nIEE__pandapower-1738
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- plotting.geo convert_gis_to_geodata leads to issue if run after convert_geodata_to_gis ```python import pandapower.plotting.geo as geo import pandapower.networks as pn net = pn.mv_oberrhein() geo.convert_geodata_to_gis(net) geo.convert_gis_to_geodata(net) ``` results in `AttributeError: 'Series' object has no attribute 'geometry'` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pandapower/plotting/geo.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics 4 # and Energy System Technology (IEE), Kassel. All rights reserved. 5 6 import sys 7 from numpy import array, setdiff1d 8 9 from pandapower.auxiliary import soft_dependency_error 10 11 try: 12 from shapely.geometry import Point, LineString 13 shapely_INSTALLED = True 14 except ImportError: 15 shapely_INSTALLED = False 16 17 try: 18 from geopandas import GeoDataFrame, GeoSeries 19 geopandas_INSTALLED = True 20 except ImportError: 21 geopandas_INSTALLED = False 22 23 try: 24 from pyproj import Proj, transform 25 pyproj_INSTALLED = True 26 except ImportError: 27 pyproj_INSTALLED = False 28 29 30 def _node_geometries_from_geodata(node_geo, epsg=31467): 31 """ 32 Creates a geopandas geodataframe from a given dataframe of with node coordinates as x and y 33 values. 34 35 :param node_geo: The dataframe containing the node coordinates (x and y values) 36 :type node_geo: pandas.dataframe 37 :param epsg: The epsg projection of the node coordinates 38 :type epsg: int, default 31467 (= Gauss-Krüger Zone 3) 39 :return: node_geodata - a geodataframe containing the node_geo and Points in the geometry column 40 """ 41 missing_packages = array(["shapely", "geopandas"])[~array([ 42 shapely_INSTALLED, geopandas_INSTALLED])] 43 if len(missing_packages): 44 soft_dependency_error(str(sys._getframe().f_code.co_name)+"()", missing_packages) 45 geoms = [Point(x, y) for x, y in node_geo[["x", "y"]].values] 46 return GeoDataFrame(node_geo, crs=f"epsg:{epsg}", geometry=geoms, index=node_geo.index) 47 48 49 def _branch_geometries_from_geodata(branch_geo, epsg=31467): 50 missing_packages = array(["shapely", "geopandas"])[~array([ 51 shapely_INSTALLED, geopandas_INSTALLED])] 52 if len(missing_packages): 53 soft_dependency_error(str(sys._getframe().f_code.co_name)+"()", missing_packages) 54 geoms = GeoSeries([LineString(x) for x in branch_geo.coords.values], index=branch_geo.index, 55 crs=f"epsg:{epsg}") 56 return GeoDataFrame(branch_geo, crs=f"epsg:{epsg}", geometry=geoms, index=branch_geo.index) 57 58 59 def _transform_node_geometry_to_geodata(node_geo): 60 """ 61 Create x and y values from geodataframe 62 63 :param node_geo: The dataframe containing the node geometries (as shapely points) 64 :type node_geo: geopandas.GeoDataFrame 65 :return: bus_geo - The given geodataframe with x and y values 66 """ 67 node_geo["x"] = [p.x for p in node_geo.geometry] 68 node_geo["y"] = [p.y for p in node_geo.geometry] 69 return node_geo 70 71 72 def _transform_branch_geometry_to_coords(branch_geo): 73 """ 74 Create coords entries from geodataframe geometries 75 76 :param branch_geo: The dataframe containing the branch geometries (as shapely LineStrings) 77 :type branch_geo: geopandas.GeoDataFrame 78 :return: branch_geo - The given geodataframe with coords 79 """ 80 branch_geo["coords"] = branch_geo["coords"].geometry.apply(lambda x: list(x.coords)) 81 return branch_geo 82 83 84 def _convert_xy_epsg(x, y, epsg_in=4326, epsg_out=31467): 85 """ 86 Converts the given x and y coordinates according to the defined epsg projections. 87 88 :param x: x-values of coordinates 89 :type x: iterable 90 :param y: y-values of coordinates 91 :type y: iterable 92 :param epsg_in: current epsg projection 93 :type epsg_in: int, default 4326 (= WGS84) 94 :param epsg_out: epsg projection to be transformed to 95 :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3) 96 :return: transformed_coords - x and y values in new coordinate system 97 """ 98 if not pyproj_INSTALLED: 99 soft_dependency_error(str(sys._getframe().f_code.co_name)+"()", "pyproj") 100 in_proj = Proj(init='epsg:%i' % epsg_in) 101 out_proj = Proj(init='epsg:%i' % epsg_out) 102 return transform(in_proj, out_proj, x, y) 103 104 105 def convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True): 106 """ 107 Extracts information on bus and line geodata from the geometries of a geopandas geodataframe. 108 109 :param net: The net for which to convert the geodata 110 :type net: pandapowerNet 111 :param node_geodata: flag if to extract x and y values for bus geodata 112 :type node_geodata: bool, default True 113 :param branch_geodata: flag if to extract coordinates values for line geodata 114 :type branch_geodata: bool, default True 115 :return: No output. 116 """ 117 if node_geodata: 118 _transform_node_geometry_to_geodata(net.bus_geodata) 119 if branch_geodata: 120 _transform_branch_geometry_to_coords(net.line_geodata) 121 122 123 def convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True): 124 """ 125 Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective 126 geometries. 127 128 :param net: The net for which to convert the geodata 129 :type net: pandapowerNet 130 :param epsg: current epsg projection 131 :type epsg: int, default 4326 (= WGS84) 132 :param node_geodata: flag if to transform the bus geodata table 133 :type node_geodata: bool, default True 134 :param branch_geodata: flag if to transform the line geodata table 135 :type branch_geodata: bool, default True 136 :return: No output. 137 """ 138 if node_geodata: 139 net["bus_geodata"] = _node_geometries_from_geodata(net["bus_geodata"], epsg) 140 if branch_geodata: 141 net["line_geodata"] = _branch_geometries_from_geodata(net["line_geodata"], epsg) 142 net["gis_epsg_code"] = epsg 143 144 145 def convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467): 146 """ 147 Converts bus geodata in net from epsg_in to epsg_out 148 149 :param net: The pandapower network 150 :type net: pandapowerNet 151 :param epsg_in: current epsg projection 152 :type epsg_in: int, default 4326 (= WGS84) 153 :param epsg_out: epsg projection to be transformed to 154 :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3) 155 :return: net - the given pandapower network (no copy!) 156 """ 157 net['bus_geodata'].loc[:, "x"], net['bus_geodata'].loc[:, "y"] = _convert_xy_epsg( 158 net['bus_geodata'].loc[:, "x"], net['bus_geodata'].loc[:, "y"], epsg_in, epsg_out) 159 return net 160 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pandapower/plotting/geo.py b/pandapower/plotting/geo.py --- a/pandapower/plotting/geo.py +++ b/pandapower/plotting/geo.py @@ -77,7 +77,7 @@ :type branch_geo: geopandas.GeoDataFrame :return: branch_geo - The given geodataframe with coords """ - branch_geo["coords"] = branch_geo["coords"].geometry.apply(lambda x: list(x.coords)) + branch_geo["coords"] = branch_geo.geometry.apply(lambda x: list(x.coords)) return branch_geo
{"golden_diff": "diff --git a/pandapower/plotting/geo.py b/pandapower/plotting/geo.py\n--- a/pandapower/plotting/geo.py\n+++ b/pandapower/plotting/geo.py\n@@ -77,7 +77,7 @@\n :type branch_geo: geopandas.GeoDataFrame\n :return: branch_geo - The given geodataframe with coords\n \"\"\"\n- branch_geo[\"coords\"] = branch_geo[\"coords\"].geometry.apply(lambda x: list(x.coords))\n+ branch_geo[\"coords\"] = branch_geo.geometry.apply(lambda x: list(x.coords))\n return branch_geo\n", "issue": "plotting.geo convert_gis_to_geodata leads to issue if run after convert_geodata_to_gis\n```python\r\nimport pandapower.plotting.geo as geo\r\nimport pandapower.networks as pn\r\n\r\nnet = pn.mv_oberrhein()\r\n\r\ngeo.convert_geodata_to_gis(net)\r\ngeo.convert_gis_to_geodata(net)\r\n```\r\n\r\nresults in `AttributeError: 'Series' object has no attribute 'geometry'`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nimport sys\nfrom numpy import array, setdiff1d\n\nfrom pandapower.auxiliary import soft_dependency_error\n\ntry:\n from shapely.geometry import Point, LineString\n shapely_INSTALLED = True\nexcept ImportError:\n shapely_INSTALLED = False\n\ntry:\n from geopandas import GeoDataFrame, GeoSeries\n geopandas_INSTALLED = True\nexcept ImportError:\n geopandas_INSTALLED = False\n\ntry:\n from pyproj import Proj, transform\n pyproj_INSTALLED = True\nexcept ImportError:\n pyproj_INSTALLED = False\n\n\ndef _node_geometries_from_geodata(node_geo, epsg=31467):\n \"\"\"\n Creates a geopandas geodataframe from a given dataframe of with node coordinates as x and y\n values.\n\n :param node_geo: The dataframe containing the node coordinates (x and y values)\n :type node_geo: pandas.dataframe\n :param epsg: The epsg projection of the node coordinates\n :type epsg: int, default 31467 (= Gauss-Kr\u00fcger Zone 3)\n :return: node_geodata - a geodataframe containing the node_geo and Points in the geometry column\n \"\"\"\n missing_packages = array([\"shapely\", \"geopandas\"])[~array([\n shapely_INSTALLED, geopandas_INSTALLED])]\n if len(missing_packages):\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", missing_packages)\n geoms = [Point(x, y) for x, y in node_geo[[\"x\", \"y\"]].values]\n return GeoDataFrame(node_geo, crs=f\"epsg:{epsg}\", geometry=geoms, index=node_geo.index)\n\n\ndef _branch_geometries_from_geodata(branch_geo, epsg=31467):\n missing_packages = array([\"shapely\", \"geopandas\"])[~array([\n shapely_INSTALLED, geopandas_INSTALLED])]\n if len(missing_packages):\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", missing_packages)\n geoms = GeoSeries([LineString(x) for x in branch_geo.coords.values], index=branch_geo.index,\n crs=f\"epsg:{epsg}\")\n return GeoDataFrame(branch_geo, crs=f\"epsg:{epsg}\", geometry=geoms, index=branch_geo.index)\n\n\ndef _transform_node_geometry_to_geodata(node_geo):\n \"\"\"\n Create x and y values from geodataframe\n\n :param node_geo: The dataframe containing the node geometries (as shapely points)\n :type node_geo: geopandas.GeoDataFrame\n :return: bus_geo - The given geodataframe with x and y values\n \"\"\"\n node_geo[\"x\"] = [p.x for p in node_geo.geometry]\n node_geo[\"y\"] = [p.y for p in node_geo.geometry]\n return node_geo\n\n\ndef _transform_branch_geometry_to_coords(branch_geo):\n \"\"\"\n Create coords entries from geodataframe geometries\n\n :param branch_geo: The dataframe containing the branch geometries (as shapely LineStrings)\n :type branch_geo: geopandas.GeoDataFrame\n :return: branch_geo - The given geodataframe with coords\n \"\"\"\n branch_geo[\"coords\"] = branch_geo[\"coords\"].geometry.apply(lambda x: list(x.coords))\n return branch_geo\n\n\ndef _convert_xy_epsg(x, y, epsg_in=4326, epsg_out=31467):\n \"\"\"\n Converts the given x and y coordinates according to the defined epsg projections.\n\n :param x: x-values of coordinates\n :type x: iterable\n :param y: y-values of coordinates\n :type y: iterable\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Kr\u00fcger Zone 3)\n :return: transformed_coords - x and y values in new coordinate system\n \"\"\"\n if not pyproj_INSTALLED:\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"pyproj\")\n in_proj = Proj(init='epsg:%i' % epsg_in)\n out_proj = Proj(init='epsg:%i' % epsg_out)\n return transform(in_proj, out_proj, x, y)\n\n\ndef convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True):\n \"\"\"\n Extracts information on bus and line geodata from the geometries of a geopandas geodataframe.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param node_geodata: flag if to extract x and y values for bus geodata\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to extract coordinates values for line geodata\n :type branch_geodata: bool, default True\n :return: No output.\n \"\"\"\n if node_geodata:\n _transform_node_geometry_to_geodata(net.bus_geodata)\n if branch_geodata:\n _transform_branch_geometry_to_coords(net.line_geodata)\n\n\ndef convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True):\n \"\"\"\n Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective\n geometries.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param epsg: current epsg projection\n :type epsg: int, default 4326 (= WGS84)\n :param node_geodata: flag if to transform the bus geodata table\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to transform the line geodata table\n :type branch_geodata: bool, default True\n :return: No output.\n \"\"\"\n if node_geodata:\n net[\"bus_geodata\"] = _node_geometries_from_geodata(net[\"bus_geodata\"], epsg)\n if branch_geodata:\n net[\"line_geodata\"] = _branch_geometries_from_geodata(net[\"line_geodata\"], epsg)\n net[\"gis_epsg_code\"] = epsg\n\n\ndef convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467):\n \"\"\"\n Converts bus geodata in net from epsg_in to epsg_out\n\n :param net: The pandapower network\n :type net: pandapowerNet\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Kr\u00fcger Zone 3)\n :return: net - the given pandapower network (no copy!)\n \"\"\"\n net['bus_geodata'].loc[:, \"x\"], net['bus_geodata'].loc[:, \"y\"] = _convert_xy_epsg(\n net['bus_geodata'].loc[:, \"x\"], net['bus_geodata'].loc[:, \"y\"], epsg_in, epsg_out)\n return net\n", "path": "pandapower/plotting/geo.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nimport sys\nfrom numpy import array, setdiff1d\n\nfrom pandapower.auxiliary import soft_dependency_error\n\ntry:\n from shapely.geometry import Point, LineString\n shapely_INSTALLED = True\nexcept ImportError:\n shapely_INSTALLED = False\n\ntry:\n from geopandas import GeoDataFrame, GeoSeries\n geopandas_INSTALLED = True\nexcept ImportError:\n geopandas_INSTALLED = False\n\ntry:\n from pyproj import Proj, transform\n pyproj_INSTALLED = True\nexcept ImportError:\n pyproj_INSTALLED = False\n\n\ndef _node_geometries_from_geodata(node_geo, epsg=31467):\n \"\"\"\n Creates a geopandas geodataframe from a given dataframe of with node coordinates as x and y\n values.\n\n :param node_geo: The dataframe containing the node coordinates (x and y values)\n :type node_geo: pandas.dataframe\n :param epsg: The epsg projection of the node coordinates\n :type epsg: int, default 31467 (= Gauss-Kr\u00fcger Zone 3)\n :return: node_geodata - a geodataframe containing the node_geo and Points in the geometry column\n \"\"\"\n missing_packages = array([\"shapely\", \"geopandas\"])[~array([\n shapely_INSTALLED, geopandas_INSTALLED])]\n if len(missing_packages):\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", missing_packages)\n geoms = [Point(x, y) for x, y in node_geo[[\"x\", \"y\"]].values]\n return GeoDataFrame(node_geo, crs=f\"epsg:{epsg}\", geometry=geoms, index=node_geo.index)\n\n\ndef _branch_geometries_from_geodata(branch_geo, epsg=31467):\n missing_packages = array([\"shapely\", \"geopandas\"])[~array([\n shapely_INSTALLED, geopandas_INSTALLED])]\n if len(missing_packages):\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", missing_packages)\n geoms = GeoSeries([LineString(x) for x in branch_geo.coords.values], index=branch_geo.index,\n crs=f\"epsg:{epsg}\")\n return GeoDataFrame(branch_geo, crs=f\"epsg:{epsg}\", geometry=geoms, index=branch_geo.index)\n\n\ndef _transform_node_geometry_to_geodata(node_geo):\n \"\"\"\n Create x and y values from geodataframe\n\n :param node_geo: The dataframe containing the node geometries (as shapely points)\n :type node_geo: geopandas.GeoDataFrame\n :return: bus_geo - The given geodataframe with x and y values\n \"\"\"\n node_geo[\"x\"] = [p.x for p in node_geo.geometry]\n node_geo[\"y\"] = [p.y for p in node_geo.geometry]\n return node_geo\n\n\ndef _transform_branch_geometry_to_coords(branch_geo):\n \"\"\"\n Create coords entries from geodataframe geometries\n\n :param branch_geo: The dataframe containing the branch geometries (as shapely LineStrings)\n :type branch_geo: geopandas.GeoDataFrame\n :return: branch_geo - The given geodataframe with coords\n \"\"\"\n branch_geo[\"coords\"] = branch_geo.geometry.apply(lambda x: list(x.coords))\n return branch_geo\n\n\ndef _convert_xy_epsg(x, y, epsg_in=4326, epsg_out=31467):\n \"\"\"\n Converts the given x and y coordinates according to the defined epsg projections.\n\n :param x: x-values of coordinates\n :type x: iterable\n :param y: y-values of coordinates\n :type y: iterable\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Kr\u00fcger Zone 3)\n :return: transformed_coords - x and y values in new coordinate system\n \"\"\"\n if not pyproj_INSTALLED:\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"pyproj\")\n in_proj = Proj(init='epsg:%i' % epsg_in)\n out_proj = Proj(init='epsg:%i' % epsg_out)\n return transform(in_proj, out_proj, x, y)\n\n\ndef convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True):\n \"\"\"\n Extracts information on bus and line geodata from the geometries of a geopandas geodataframe.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param node_geodata: flag if to extract x and y values for bus geodata\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to extract coordinates values for line geodata\n :type branch_geodata: bool, default True\n :return: No output.\n \"\"\"\n if node_geodata:\n _transform_node_geometry_to_geodata(net.bus_geodata)\n if branch_geodata:\n _transform_branch_geometry_to_coords(net.line_geodata)\n\n\ndef convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True):\n \"\"\"\n Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective\n geometries.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param epsg: current epsg projection\n :type epsg: int, default 4326 (= WGS84)\n :param node_geodata: flag if to transform the bus geodata table\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to transform the line geodata table\n :type branch_geodata: bool, default True\n :return: No output.\n \"\"\"\n if node_geodata:\n net[\"bus_geodata\"] = _node_geometries_from_geodata(net[\"bus_geodata\"], epsg)\n if branch_geodata:\n net[\"line_geodata\"] = _branch_geometries_from_geodata(net[\"line_geodata\"], epsg)\n net[\"gis_epsg_code\"] = epsg\n\n\ndef convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467):\n \"\"\"\n Converts bus geodata in net from epsg_in to epsg_out\n\n :param net: The pandapower network\n :type net: pandapowerNet\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Kr\u00fcger Zone 3)\n :return: net - the given pandapower network (no copy!)\n \"\"\"\n net['bus_geodata'].loc[:, \"x\"], net['bus_geodata'].loc[:, \"y\"] = _convert_xy_epsg(\n net['bus_geodata'].loc[:, \"x\"], net['bus_geodata'].loc[:, \"y\"], epsg_in, epsg_out)\n return net\n", "path": "pandapower/plotting/geo.py"}]}
2,449
140
gh_patches_debug_9364
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-2258
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Azure VM - We are not getting power state The VM data we are getting back does not tell you if the VM is running or not. I think perhaps you have to tell the `list_all` api what you want - we want `instanceview` https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/instanceview Not sure how this happens via SDK. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/c7n_azure/c7n_azure/resources/vm.py` Content: ``` 1 # Copyright 2018 Capital One Services, LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from c7n_azure.query import QueryResourceManager 16 from c7n_azure.provider import resources 17 from c7n.filters.core import ValueFilter, type_schema 18 19 @resources.register('vm') 20 class VirtualMachine(QueryResourceManager): 21 22 class resource_type(object): 23 service = 'azure.mgmt.compute' 24 client = 'ComputeManagementClient' 25 enum_spec = ('virtual_machines', 'list_all') 26 id = 'id' 27 name = 'name' 28 default_report_fields = ( 29 'name', 30 'location', 31 'resourceGroup', 32 'properties.hardwareProfile.vmSize', 33 ) 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/c7n_azure/c7n_azure/resources/vm.py b/tools/c7n_azure/c7n_azure/resources/vm.py --- a/tools/c7n_azure/c7n_azure/resources/vm.py +++ b/tools/c7n_azure/c7n_azure/resources/vm.py @@ -31,3 +31,15 @@ 'resourceGroup', 'properties.hardwareProfile.vmSize', ) + +@VirtualMachine.filter_registry.register('instance-view') +class InstanceViewFilter(ValueFilter): + schema = type_schema('instance-view', rinherit=ValueFilter.schema) + + def __call__(self, i): + if 'instanceView' not in i: + client = self.manager.get_client() + instance = client.virtual_machines.get(i['resourceGroup'], i['name'], expand='instanceview').instance_view + i['instanceView'] = instance.serialize() + + return super(InstanceViewFilter, self).__call__(i['instanceView'])
{"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/resources/vm.py b/tools/c7n_azure/c7n_azure/resources/vm.py\n--- a/tools/c7n_azure/c7n_azure/resources/vm.py\n+++ b/tools/c7n_azure/c7n_azure/resources/vm.py\n@@ -31,3 +31,15 @@\n 'resourceGroup',\n 'properties.hardwareProfile.vmSize',\n )\n+\n+@VirtualMachine.filter_registry.register('instance-view')\n+class InstanceViewFilter(ValueFilter):\n+ schema = type_schema('instance-view', rinherit=ValueFilter.schema)\n+\n+ def __call__(self, i):\n+ if 'instanceView' not in i:\n+ client = self.manager.get_client()\n+ instance = client.virtual_machines.get(i['resourceGroup'], i['name'], expand='instanceview').instance_view\n+ i['instanceView'] = instance.serialize()\n+\n+ return super(InstanceViewFilter, self).__call__(i['instanceView'])\n", "issue": "Azure VM - We are not getting power state\nThe VM data we are getting back does not tell you if the VM is running or not.\r\n\r\nI think perhaps you have to tell the `list_all` api what you want - we want `instanceview`\r\n\r\nhttps://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/instanceview\r\n\r\nNot sure how this happens via SDK.\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n_azure.query import QueryResourceManager\nfrom c7n_azure.provider import resources\nfrom c7n.filters.core import ValueFilter, type_schema\n\n@resources.register('vm')\nclass VirtualMachine(QueryResourceManager):\n\n class resource_type(object):\n service = 'azure.mgmt.compute'\n client = 'ComputeManagementClient'\n enum_spec = ('virtual_machines', 'list_all')\n id = 'id'\n name = 'name'\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'properties.hardwareProfile.vmSize',\n )\n", "path": "tools/c7n_azure/c7n_azure/resources/vm.py"}], "after_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n_azure.query import QueryResourceManager\nfrom c7n_azure.provider import resources\nfrom c7n.filters.core import ValueFilter, type_schema\n\n@resources.register('vm')\nclass VirtualMachine(QueryResourceManager):\n\n class resource_type(object):\n service = 'azure.mgmt.compute'\n client = 'ComputeManagementClient'\n enum_spec = ('virtual_machines', 'list_all')\n id = 'id'\n name = 'name'\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'properties.hardwareProfile.vmSize',\n )\n\n@VirtualMachine.filter_registry.register('instance-view')\nclass InstanceViewFilter(ValueFilter):\n schema = type_schema('instance-view', rinherit=ValueFilter.schema)\n\n def __call__(self, i):\n if 'instanceView' not in i:\n client = self.manager.get_client()\n instance = client.virtual_machines.get(i['resourceGroup'], i['name'], expand='instanceview').instance_view\n i['instanceView'] = instance.serialize()\n\n return super(InstanceViewFilter, self).__call__(i['instanceView'])\n", "path": "tools/c7n_azure/c7n_azure/resources/vm.py"}]}
666
223