Columns:
  repository_name        stringlengths   7 - 107
  function_path          stringlengths   4 - 190
  function_identifier    stringlengths   1 - 236
  language               stringclasses   1 value
  function               stringlengths   9 - 647k
  docstring              stringlengths   5 - 488k
  function_url           stringlengths   71 - 285
  context                stringlengths   0 - 2.51M
  license                stringclasses   5 values
gaa-uam/scikit-fda
tests/test_pandas_fdatagrid.py
data_for_grouping
python
def data_for_grouping() -> NoReturn: raise NotImplementedError
Return data for factorization, grouping, and unique tests. Expected to be like [B, B, NA, NA, A, A, B, C] where A < B < C and NA is missing.
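For reference, pandas' extension-array test suite expects this fixture to follow the [B, B, NA, NA, A, A, B, C] pattern; here it raises NotImplementedError because FDataGrid values have no total ordering. A minimal sketch of what an implementing fixture typically looks like for an orderable extension dtype (the Float64 masked dtype is used purely for illustration, not taken from the source):

import pandas as pd
import pytest

@pytest.fixture
def data_for_grouping():
    # B, B, NA, NA, A, A, B, C with A < B < C and NA missing
    a, b, c = 1.0, 2.0, 3.0
    return pd.array([b, b, None, None, a, a, b, c], dtype="Float64")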
https://github.com/gaa-uam/scikit-fda/blob/1a6fc2c01e39871e09fd2ec6d0b14d378d6b069f/tests/test_pandas_fdatagrid.py#L166-L173
from __future__ import annotations from typing import Any, Callable, Generator, NoReturn, Union import numpy as np import pandas import pytest from pandas import Series from pandas.api.extensions import ExtensionArray, ExtensionDtype from pandas.tests.extension import base import skfda from skfda.representation.grid import FDataGrid @pytest.fixture def dtype() -> ExtensionDtype: return skfda.representation.grid.FDataGridDType( grid_points=[ np.arange(10), np.arange(10) / 10, ], dim_codomain=3, ) @pytest.fixture def data() -> ExtensionArray: data_matrix = np.arange(1, 100 * 10 * 10 * 3 + 1).reshape(100, 10, 10, 3) grid_points = [ np.arange(10), np.arange(10) / 10, ] return skfda.FDataGrid(data_matrix, grid_points=grid_points) @pytest.fixture def data_for_twos() -> ExtensionArray: data_matrix = np.full( 100 * 10 * 10 * 3, fill_value=2, ).reshape(100, 10, 10, 3) grid_points = [ np.arange(10), np.arange(10) / 10, ] return skfda.FDataGrid(data_matrix, grid_points=grid_points) @pytest.fixture def data_missing() -> ExtensionArray: data_matrix = np.arange( 2 * 10 * 10 * 3, dtype=np.float_, ).reshape(2, 10, 10, 3) data_matrix[0, ...] = np.NaN grid_points = [ np.arange(10), np.arange(10) / 10, ] return skfda.FDataGrid(data_matrix, grid_points=grid_points) @pytest.fixture(params=["data", "data_missing"]) def all_data( request, data: ExtensionArray, data_missing: ExtensionArray, ) -> ExtensionArray: if request.param == "data": return data elif request.param == "data_missing": return data_missing @pytest.fixture def data_repeated( data: ExtensionArray, ) -> Callable[[int], Generator[ExtensionArray, None, None]]: def gen(count: int) -> Generator[ExtensionArray, None, None]: yield from ( data for _ in range(count) ) return gen @pytest.fixture def data_for_sorting() -> NoReturn: raise NotImplementedError @pytest.fixture def data_missing_for_sorting() -> NoReturn: raise NotImplementedError @pytest.fixture def na_cmp() -> Callable[..., bool]: def isna( x: Union[pandas.NA, FDataGrid], y: Union[pandas.NA, FDataGrid], ) -> bool: return ( (x is pandas.NA or all(x.isna())) and (y is pandas.NA or all(y.isna())) ) return isna @pytest.fixture def na_value() -> pandas.NA: return pandas.NA @pytest.fixture
BSD 3-Clause New or Revised License
redlure/redlure-console
app/campaign.py
campaign
python
def campaign(workspace_id, campaign_id):
    if not validate_workspace(workspace_id):
        return 'workspace does not exist', 404
    campaign = Campaign.query.filter_by(id=campaign_id, workspace_id=workspace_id).first()
    if campaign is None:
        return 'campaign does not exist', 404
    if request.method == 'GET':
        schema = CampaignSchema()
        campaign_data = schema.dump(campaign)
        return jsonify(campaign_data)
    elif request.method == 'DELETE':
        if campaign.status == 'Active':
            campaign.kill()
        if campaign.status == 'Scheduled':
            campaign.remove_job()
        app.logger.info(f'Deleted campaign {campaign.name} (ID: {campaign.id}) - Deleted by {current_user.username} - Client IP address {request.remote_addr}')
        db.session.delete(campaign)
        update_workspace_ts(Workspace.query.filter_by(id=workspace_id).first())
        db.session.commit()
        return 'campaign deleted', 204
    elif request.method == 'PUT':
        name = request.form.get('Name')
        email_name = request.form.get('Email_Name')
        profile_name = request.form.get('Profile_Name')
        list_name = request.form.get('List_Name')
        domain_name = request.form.get('Domain_Name')
        server_alias = request.form.get('Server_Alias')
        port = request.form.get('Port')
        ssl = request.form.get('SSL')
        redirect_url = request.form.get('Redirect_URL')
        same_campaign = Campaign.query.filter_by(name=name).first()
        if same_campaign is not None and str(same_campaign.id) != campaign_id:
            return json.dumps({'success': False}), 200, {'ContentType': 'application/json'}
        ssl_bool = convert_to_bool(ssl)
        if type(ssl_bool) != bool:
            return 'ssl must be either true or false', 400
        email = Email.query.filter_by(name=email_name, workspace_id=workspace_id).first()
        profile = Profile.query.filter_by(name=profile_name, workspace_id=workspace_id).first()
        targetlist = List.query.filter_by(name=list_name, workspace_id=workspace_id).first()
        domain = Domain.query.filter_by(domain=domain_name).first()
        server = Server.query.filter_by(alias=server_alias).first()
        makeup = validate_campaign_makeup(email, page, profile, targetlist, domain, server)
        if makeup:
            return makeup
        campaign.name = name
        campaign.email_id = email.id
        campaign.profile_id = profile.id
        campaign.list_id = targetlist.id
        campaign.domain_id = domain.id
        campaign.server_id = server.id
        campaign.port = port
        campaign.ssl = ssl_bool
        campaign.redirect_url = redirect_url
        update_workspace_ts(Workspace.query.filter_by(id=workspace_id).first())
        db.session.commit()
        return 'campaign updated'
For GET requests, return the given campaign. For DELETE requests, delete the given campaign. For PUT requests, update the given campaign.
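A hedged sketch of how a client could exercise this endpoint over HTTP; the host, workspace/campaign IDs, session handling, and form values below are illustrative assumptions, not part of the source:

import requests

base = 'https://console.example.com'   # hypothetical console address
s = requests.Session()                  # assumes an already-authenticated session

r = s.get(f'{base}/workspaces/1/campaigns/7')      # GET: returns the campaign as JSON
r = s.delete(f'{base}/workspaces/1/campaigns/7')   # DELETE: removes the campaign (204)
r = s.put(f'{base}/workspaces/1/campaigns/7', data={
    'Name': 'Q3 awareness test',        # field names mirror the request.form keys above
    'Email_Name': 'baseline-email',
    'Profile_Name': 'smtp-profile',
    'List_Name': 'targets',
    'Domain_Name': 'phish.example.com',
    'Server_Alias': 'worker-1',
    'Port': '443',
    'SSL': 'true',
    'Redirect_URL': 'https://example.com',
})                                      # PUT: updates the campaign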
https://github.com/redlure/redlure-console/blob/e59dabaea240976bffe645148a0eb95d1075143d/app/campaign.py#L319-L394
from app import app, db, sched from marshmallow import Schema, fields, post_dump from datetime import datetime, timedelta from flask import request, jsonify from flask_mail import Message from flask_login import login_required, current_user import json import html2text import requests import string from magic import Magic from app.cipher import decrypt from app.workspace import Workspace, validate_workspace, update_workspace_ts from app.email import Email, EmailSchema from app.domain import Domain, DomainSchema from app.server import Server, ServerSchema from app.list import List, ListSchema from app.page import Page, PageSchema from app.profile import Profile, ProfileSchema from app.apikey import APIKey from app.functions import user_login_required, convert_to_bool class Form(db.Model): id = db.Column(db.Integer, primary_key=True) event_id = db.Column(db.Integer, db.ForeignKey('event.id')) data = db.Column(db.String(128)) class FormSchema(Schema): id = fields.Number() event_id = fields.Number() data = fields.Dict() @post_dump def serialize_form(self, data, **kwargs): decrypted_data = decrypt(data['data']).decode() data['data'] = json.loads(decrypted_data) return data class Event(db.Model): id = db.Column(db.Integer, primary_key=True) result_id = db.Column(db.Integer, db.ForeignKey('result.id'), nullable=True) ip_address = db.Column(db.String(32)) user_agent = db.Column(db.String(128)) action = db.Column(db.String(32)) time = db.Column(db.DateTime) form = db.relationship('Form', backref='event', uselist=False, lazy=True, cascade='all,delete') class EventSchema(Schema): id = fields.Number() result_id = fields.Number() ip_address = fields.Str() user_agent = fields.Str() action = fields.Str() time = fields.DateTime(format='%m-%d-%y %H:%M:%S') form = fields.Nested(FormSchema, strict=True) class Campaignpages(db.Model): campaign_id = db.Column(db.Integer, db.ForeignKey('campaign.id'), primary_key=True) page_id = db.Column(db.Integer, db.ForeignKey('page.id'), primary_key=True) index = db.Column(db.Integer) class CampaignpagesSchema(Schema): index = fields.Number() page = fields.Nested(PageSchema, strict=True) class Campaign(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64), nullable=False) workspace_id = db.Column(db.Integer, db.ForeignKey('workspace.id'), nullable=False) email_id = db.Column(db.Integer, db.ForeignKey('email.id'), nullable=True) redirect_url = db.Column(db.String(64), nullable=True) profile_id = db.Column(db.Integer, db.ForeignKey('profile.id'), nullable=True) list_id = db.Column(db.Integer, db.ForeignKey('list.id'), nullable=True) domain_id = db.Column(db.Integer, db.ForeignKey('domain.id'), nullable=True) server_id = db.Column(db.Integer, db.ForeignKey('server.id'), nullable=True) port = db.Column(db.Integer, nullable=False) ssl = db.Column(db.Boolean, nullable=False) results = db.relationship('Result', backref='campaign', lazy=True, cascade='all,delete') created_at = db.Column(db.DateTime, default=datetime.now) updated_at = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now) status = db.Column(db.String(32), nullable=False) start_time = db.Column(db.DateTime, nullable=True, default='') end_time = db.Column(db.DateTime, nullable=True, default=datetime(1, 1, 1, 0, 0, 0)) send_interval = db.Column(db.Integer, default=0) batch_size = db.Column(db.Integer) payload_url = db.Column(db.String(64)) payload_file = db.Column(db.String(64)) attachment = db.Column(db.LargeBinary, nullable=True) attachment_name = 
db.Column(db.String(64), nullable=True) pages = db.relationship('Campaignpages', backref='campaign', cascade='all, delete-orphan') def __init__(self, **kwargs): self.status = 'Inactive' self.__dict__.update(kwargs) def start_worker(self): schema = WorkerCampaignSchema() data = schema.dump(self) params = {'key': APIKey.query.first().key} r = requests.post(f'https://{self.server.ip}:{self.server.port}/campaigns/start', json=data, params=params, verify=False) return r.json() def cast(self): url = Campaignpages.query.filter_by(campaign_id=self.id, index=0).first().page.url base_url = 'https://%s' % self.domain.domain if self.ssl else 'http://%s' % self.domain.domain mail = self.profile.get_mailer() job_id = str(self.id) current_jobs = sched.get_jobs() if not self.batch_size: self.batch_size = len(self.list.targets) if not self.send_interval: self.send_interval = 0 db.session.commit() try: if self.start_time < datetime.now(): sched.add_job(func=self.run_campaign, trigger='interval', minutes=int(self.send_interval), id=job_id, start_date=datetime.now() + timedelta(0,8), replace_existing=True, args=[mail, base_url, url]) else: sched.add_job(func=self.run_campaign, trigger='interval', minutes=int(self.send_interval), id=job_id, start_date=self.start_time, replace_existing=True, args=[mail, base_url, url]) except Exception: app.logger.exception(f'Error scheduling campaign {self.name} (ID: {self.id})') else: app.logger.info(f'Scheduled campaign {self.name} (ID: {self.id}) to start at {self.start_time} - Sending {len(self.list.targets)} emails in batches of {self.batch_size} every {self.send_interval} minutes') self.status = 'Scheduled' db.session.commit() def run_campaign(self, mail, base_url, url): with app.app_context(): unsent_results = [x for x in Campaign.query.filter_by(id=self.id).first().results if x.status == 'Scheduled'] campaign = Campaign.query.filter_by(id=self.id).first() job_id = str(self.id) if self is None: sched.remove_job(job_id) app.logger.info(f'Campaign ID {job_id} does not exist - Campaign will not start, scheduled job will be removed') return if campaign.status == 'Scheduled': worker_response = self.start_worker() if not worker_response['success']: msg = worker_response['msg'] campaign.status = msg db.session.commit() app.logger.error(f'Failed to start campaign {self.name} (ID: {self.id}) - Worker web server failed to start on server {self.server.alias} (IP: {self.server.ip}) - Reason: {msg}') sched.remove_job(job_id) return else: app.logger.info(f'Campaign {self.name} (ID: {self.id}) successfully started web server on {self.server.alias} (IP: {self.server.ip})') campaign.status = 'Active' db.session.commit() for _ in range(int(self.batch_size)): if unsent_results: result = unsent_results.pop() recipient = result.person msg = Message(subject=self.email.subject, sender=self.profile.from_address, recipients=[recipient.email]) msg.html = self.email.prep_html(base_url=base_url, target=recipient, result=result, url=url) msg.body = html2text.html2text(msg.html.decode()) if self.attachment: mime = Magic(mime=True) mimetype = mime.from_buffer(self.attachment) msg.attach(self.attachment_name, mimetype, self.attachment) status = '' ts = datetime.now().strftime('%y%m%d.%H%M%S') domain = app.config['MAIL_USERNAME'].split('@')[1] msg.msgId = f'<{ts}@{domain}>' try: mail.send(msg) except Exception as e: status = 'Error' app.logger.exception(f'Error sending email to {recipient.email} for {self.name} (ID: {self.id}) - {e}') else: status = 'Sent' app.logger.info(f'Email succesflly sent to 
{recipient.email} for campaign {self.name} (ID: {self.id})') result.status = status event = Event(action=status, time=datetime.now(), ip_address='N/A') result.events.append(event) db.session.commit() else: sched.remove_job(job_id=job_id) app.logger.info(f'Finished sending emails for campaign {self.name} (ID: {self.id})') return return def kill(self): payload = {'id': self.id, 'port': self.port} params = {'key': APIKey.query.first().key} r = requests.post('https://%s:%d/campaigns/kill' % (self.server.ip, self.server.port), data=payload, params=params, verify=False) if r.status_code == 200: self.remove_job() self.end_time = datetime.now() self.status = 'Complete' db.session.commit() return r.status_code def remove_job(self): try: sched.remove_job(str(self.id)) except: pass return class CampaignSchema(Schema): id = fields.Number() name = fields.Str() workspace_id = fields.Number() email = fields.Nested(EmailSchema, strict=True) pages = fields.Nested(CampaignpagesSchema, strict=True, many=True) redirect_url = fields.Str() profile = fields.Nested(ProfileSchema, strict=True) list = fields.Nested(ListSchema, strict=True) domain = fields.Nested(DomainSchema, strict=True) server = fields.Nested(ServerSchema, strict=True) port = fields.Number() ssl = fields.Boolean() created_at = fields.DateTime(format='%m-%d-%y %H:%M') updated_at = fields.DateTime(format='%m-%d-%y %H:%M') status = fields.Str() payload_url = fields.Str() start_time = fields.DateTime(format='%m-%d-%y %H:%M') payload_file = fields.Str() attachment_name = fields.Str() class WorkerCampaignSchema(Schema): id = fields.Number() name = fields.Str() pages = fields.Nested(CampaignpagesSchema, strict=True, many=True) redirect_url = fields.Str() domain = fields.Nested(DomainSchema, strict=True) server = fields.Nested(ServerSchema, strict=True) port = fields.Number() ssl = fields.Boolean() payload_url = fields.Str() payload_file = fields.Str() @post_dump def order_pages(self, data, **kwargs): data['pages'].sort(key=lambda x: x['index']) return data @app.route('/workspaces/<workspace_id>/campaigns/<campaign_id>', methods=['GET', 'DELETE', 'PUT']) @login_required @user_login_required
BSD 3-Clause New or Revised License
cisco-en-programmability/dnacentersdk
dnacentersdk/api/authentication.py
Authentication.authentication_api
python
def authentication_api(self, username, password, encoded_auth=None):
    temp_url = '/dna/system/api/v1/auth/token'
    self._endpoint_url = urllib.parse.urljoin(self._base_url, temp_url)
    if encoded_auth is not None:
        check_type(encoded_auth, basestring, may_be_none=False)
        if isinstance(encoded_auth, str):
            encoded_auth = bytes(encoded_auth, 'utf-8')
        response = requests.post(self._endpoint_url,
                                 data=None,
                                 headers={'authorization': b'Basic ' + encoded_auth},
                                 **self._request_kwargs)
    else:
        check_type(username, basestring, may_be_none=False)
        check_type(password, basestring, may_be_none=False)
        response = requests.post(self._endpoint_url,
                                 data=None,
                                 auth=(username, password),
                                 **self._request_kwargs)
    check_response_code(response, EXPECTED_RESPONSE_CODE['POST'])
    json_data = extract_and_parse_json(response)
    return self._object_factory('bpm_ac8ae94c4e69a09d', json_data)
Exchange basic auth data for an Access Token (x-auth-token) that can be used to invoke the APIs.

Args:
    username(basestring): HTTP Basic Auth username.
    password(basestring): HTTP Basic Auth password.
    encoded_auth(basestring): HTTP Basic Auth base64 encoded string.

Returns:
    AccessToken: An AccessToken object with the access token provided by the DNA Center cloud.

Raises:
    TypeError: If the parameter types are incorrect.
    ApiError: If the DNA Center cloud returns an error.
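A minimal usage sketch; in practice this is reached through the DNACenterAPI wrapper rather than by calling authentication_api directly, and the URL and credentials below are placeholders:

from dnacentersdk import DNACenterAPI

api = DNACenterAPI(base_url='https://dnac.example.com',
                   username='admin',
                   password='secret',
                   verify=False)
print(api.access_token)   # token obtained via authentication_api under the hood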
https://github.com/cisco-en-programmability/dnacentersdk/blob/ef2adde6113e7a6acd28a287007eb470fa39d31f/dnacentersdk/api/authentication.py#L140-L182
from __future__ import ( absolute_import, division, print_function, unicode_literals, ) from future import standard_library standard_library.install_aliases() from builtins import * import urllib.parse from past.builtins import basestring import requests from ..response_codes import EXPECTED_RESPONSE_CODE from ..utils import ( check_response_code, check_type, dict_from_items_with_values, extract_and_parse_json, validate_base_url, ) __author__ = "" __author_email__ = "" __copyright__ = "Copyright (c) 2019-2021 Cisco Systems." __license__ = "MIT" class Authentication(object): def __init__(self, base_url, object_factory, single_request_timeout=None, verify=True): check_type(base_url, basestring, may_be_none=False) check_type(single_request_timeout, int) check_type(verify, (bool, basestring), may_be_none=False) super(Authentication, self).__init__() self._base_url = str(validate_base_url(base_url)) self._single_request_timeout = single_request_timeout self._verify = verify self._request_kwargs = {"timeout": single_request_timeout, "verify": verify} self._object_factory = object_factory if verify is False: requests.packages.urllib3.disable_warnings() @property def verify(self): return self._verify @property def base_url(self): return self._base_url @property def single_request_timeout(self): return self._single_request_timeout @verify.setter def verify(self, value): check_type(value, (bool, basestring), may_be_none=False) self._verify = value self._request_kwargs = {"timeout": self._single_request_timeout, "verify": self._verify} @base_url.setter def base_url(self, value): check_type(value, basestring, may_be_none=False) self._base_url = str(validate_base_url(value)) @single_request_timeout.setter def single_request_timeout(self, value): check_type(value, int) assert value is None or value > 0 self._single_request_timeout = value self._request_kwargs = {"timeout": self._single_request_timeout, "verify": self._verify}
MIT License
inducer/loopy
loopy/transform/iname.py
untag_inames
python
def untag_inames(kernel, iname_to_untag, tag_type):
    from loopy.kernel.data import filter_iname_tags_by_type
    tags_to_remove = filter_iname_tags_by_type(
        kernel.inames[iname_to_untag].tags, tag_type)
    new_inames = kernel.inames.copy()
    new_inames[iname_to_untag] = kernel.inames[iname_to_untag].without_tags(
        tags_to_remove, verify_existence=False)
    return kernel.copy(inames=new_inames)
Remove tags on *iname_to_untag* which match *tag_type*.

:arg iname_to_untag: iname as string.
:arg tag_type: a subclass of :class:`pytools.tag.Tag`, for example a subclass of :class:`loopy.kernel.data.InameImplementationTag`.

.. versionadded:: 2018.1
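A hedged usage sketch (the kernel, the "l.0" tag, and the subsequent untag are illustrative):

import loopy as lp
from loopy.kernel.data import InameImplementationTag

knl = lp.make_kernel("{[i]: 0<=i<16}", "out[i] = 2*a[i]")
knl = lp.tag_inames(knl, {"i": "l.0"})                    # give 'i' an implementation tag
knl = lp.untag_inames(knl, "i", InameImplementationTag)   # strip it again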
https://github.com/inducer/loopy/blob/3e687c78fb8f4701b2c117a17ec75be768eef2d1/loopy/transform/iname.py#L668-L685
__copyright__ = "Copyright (C) 2012 Andreas Kloeckner" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import islpy as isl from islpy import dim_type from loopy.symbolic import ( RuleAwareIdentityMapper, RuleAwareSubstitutionMapper, SubstitutionRuleMappingContext) from loopy.diagnostic import LoopyError from loopy.translation_unit import (TranslationUnit, for_each_kernel) from loopy.kernel import LoopKernel from loopy.kernel.function_interface import CallableKernel __doc__ = """ .. currentmodule:: loopy .. autofunction:: split_iname .. autofunction:: chunk_iname .. autofunction:: join_inames .. autofunction:: untag_inames .. autofunction:: tag_inames .. autofunction:: duplicate_inames .. autofunction:: get_iname_duplication_options .. autofunction:: has_schedulable_iname_nesting .. autofunction:: prioritize_loops .. autofunction:: rename_iname .. autofunction:: remove_unused_inames .. autofunction:: split_reduction_inward .. autofunction:: split_reduction_outward .. autofunction:: affine_map_inames .. autofunction:: find_unused_axis_tag .. autofunction:: make_reduction_inames_unique .. autofunction:: add_inames_to_insn .. autofunction:: map_domain .. autofunction:: add_inames_for_unused_hw_axes """ @for_each_kernel def set_loop_priority(kernel, loop_priority): from warnings import warn warn("set_loop_priority is deprecated. Use prioritize_loops instead. 
" "Attention: A call to set_loop_priority will overwrite any previously " "set priorities!", DeprecationWarning, stacklevel=2) if isinstance(loop_priority, str): loop_priority = tuple(s.strip() for s in loop_priority.split(",") if s.strip()) loop_priority = tuple(loop_priority) return kernel.copy(loop_priority=frozenset([loop_priority])) @for_each_kernel def prioritize_loops(kernel, loop_priority): assert isinstance(kernel, LoopKernel) if isinstance(loop_priority, str): loop_priority = tuple(s.strip() for s in loop_priority.split(",") if s.strip()) loop_priority = tuple(loop_priority) return kernel.copy(loop_priority=kernel.loop_priority.union([loop_priority])) class _InameSplitter(RuleAwareIdentityMapper): def __init__(self, rule_mapping_context, within, iname_to_split, outer_iname, inner_iname, replacement_index): super().__init__(rule_mapping_context) self.within = within self.iname_to_split = iname_to_split self.outer_iname = outer_iname self.inner_iname = inner_iname self.replacement_index = replacement_index def map_reduction(self, expr, expn_state): if (self.iname_to_split in expr.inames and self.iname_to_split not in expn_state.arg_context and self.within( expn_state.kernel, expn_state.instruction)): new_inames = list(expr.inames) new_inames.remove(self.iname_to_split) new_inames.extend([self.outer_iname, self.inner_iname]) from loopy.symbolic import Reduction return Reduction(expr.operation, tuple(new_inames), self.rec(expr.expr, expn_state), expr.allow_simultaneous) else: return super().map_reduction(expr, expn_state) def map_variable(self, expr, expn_state): if (expr.name == self.iname_to_split and self.iname_to_split not in expn_state.arg_context and self.within( expn_state.kernel, expn_state.instruction)): return self.replacement_index else: return super().map_variable(expr, expn_state) def _split_iname_in_set(s, iname_to_split, inner_iname, outer_iname, fixed_length, fixed_length_is_inner): var_dict = s.get_var_dict() if iname_to_split not in var_dict: return s orig_dim_type, _ = var_dict[iname_to_split] from pytools import generate_unique_names for dup_iname_to_split in generate_unique_names(f"dup_{iname_to_split}"): if dup_iname_to_split not in var_dict: break from loopy.isl_helpers import duplicate_axes s = duplicate_axes(s, (iname_to_split,), (dup_iname_to_split,)) outer_var_nr = s.dim(orig_dim_type) inner_var_nr = s.dim(orig_dim_type)+1 s = s.add_dims(orig_dim_type, 2) s = s.set_dim_name(orig_dim_type, outer_var_nr, outer_iname) s = s.set_dim_name(orig_dim_type, inner_var_nr, inner_iname) from loopy.isl_helpers import make_slab if fixed_length_is_inner: fixed_iname, var_length_iname = inner_iname, outer_iname else: fixed_iname, var_length_iname = outer_iname, inner_iname space = s.get_space() s = s & ( make_slab(space, fixed_iname, 0, fixed_length) .add_constraint(isl.Constraint.eq_from_names( space, { dup_iname_to_split: 1, fixed_iname: -1, var_length_iname: -fixed_length}))) dup_iname_dim_type, dup_name_idx = space.get_var_dict()[dup_iname_to_split] s = s.project_out(dup_iname_dim_type, dup_name_idx, 1) return s def _split_iname_backend(kernel, iname_to_split, fixed_length, fixed_length_is_inner, make_new_loop_index, outer_iname=None, inner_iname=None, outer_tag=None, inner_tag=None, slabs=(0, 0), do_tagged_check=True, within=None): from loopy.match import parse_match within = parse_match(within) if not any(within(kernel, insn) for insn in kernel.instructions): return kernel from loopy.kernel.data import InameImplementationTag existing_tags = [tag for tag in 
kernel.iname_tags(iname_to_split) if isinstance(tag, InameImplementationTag)] from loopy.kernel.data import ForceSequentialTag, filter_iname_tags_by_type if (do_tagged_check and existing_tags and not filter_iname_tags_by_type(existing_tags, ForceSequentialTag)): raise LoopyError(f"cannot split already tagged iname '{iname_to_split}'") if iname_to_split not in kernel.all_inames(): raise ValueError( f"cannot split loop for unknown variable '{iname_to_split}'") applied_iname_rewrites = kernel.applied_iname_rewrites[:] vng = kernel.get_var_name_generator() if outer_iname is None: outer_iname = vng(iname_to_split+"_outer") if inner_iname is None: inner_iname = vng(iname_to_split+"_inner") new_domains = [ _split_iname_in_set(dom, iname_to_split, inner_iname, outer_iname, fixed_length, fixed_length_is_inner) for dom in kernel.domains] from pymbolic import var inner = var(inner_iname) outer = var(outer_iname) new_loop_index = make_new_loop_index(inner, outer) subst_map = {var(iname_to_split): new_loop_index} applied_iname_rewrites.append(subst_map) new_insns = [] for insn in kernel.instructions: if iname_to_split in insn.within_inames and ( within(kernel, insn)): new_within_inames = ( (insn.within_inames.copy() - frozenset([iname_to_split])) | frozenset([outer_iname, inner_iname])) insn = insn.copy( within_inames=new_within_inames) new_insns.append(insn) iname_slab_increments = kernel.iname_slab_increments.copy() iname_slab_increments[outer_iname] = slabs new_priorities = [] for prio in kernel.loop_priority: new_prio = () for prio_iname in prio: if prio_iname == iname_to_split: new_prio = new_prio + (outer_iname, inner_iname) else: new_prio = new_prio + (prio_iname,) new_priorities.append(new_prio) kernel = kernel.copy( domains=new_domains, iname_slab_increments=iname_slab_increments, instructions=new_insns, applied_iname_rewrites=applied_iname_rewrites, loop_priority=frozenset(new_priorities)) rule_mapping_context = SubstitutionRuleMappingContext( kernel.substitutions, vng) ins = _InameSplitter(rule_mapping_context, within, iname_to_split, outer_iname, inner_iname, new_loop_index) from loopy.kernel.instruction import MultiAssignmentBase def check_insn_has_iname(kernel, insn, *args): return (not isinstance(insn, MultiAssignmentBase) or iname_to_split in insn.dependency_names() or iname_to_split in insn.reduction_inames()) kernel = ins.map_kernel(kernel, within=check_insn_has_iname, map_tvs=False, map_args=False) kernel = rule_mapping_context.finish_kernel(kernel) for existing_tag in existing_tags: kernel = tag_inames(kernel, {outer_iname: existing_tag, inner_iname: existing_tag}) kernel = tag_inames(kernel, {outer_iname: outer_tag, inner_iname: inner_tag}) kernel = remove_unused_inames(kernel, [iname_to_split]) return kernel @for_each_kernel def split_iname(kernel, split_iname, inner_length, *, outer_iname=None, inner_iname=None, outer_tag=None, inner_tag=None, slabs=(0, 0), do_tagged_check=True, within=None): assert isinstance(kernel, LoopKernel) def make_new_loop_index(inner, outer): return inner + outer*inner_length return _split_iname_backend(kernel, split_iname, fixed_length=inner_length, fixed_length_is_inner=True, make_new_loop_index=make_new_loop_index, outer_iname=outer_iname, inner_iname=inner_iname, outer_tag=outer_tag, inner_tag=inner_tag, slabs=slabs, do_tagged_check=do_tagged_check, within=within) @for_each_kernel def chunk_iname(kernel, split_iname, num_chunks, outer_iname=None, inner_iname=None, outer_tag=None, inner_tag=None, slabs=(0, 0), do_tagged_check=True, within=None): size 
= kernel.get_iname_bounds(split_iname).size k0 = isl.Aff.zero_on_domain(size.domain().space) chunk_ceil = size.div(k0+num_chunks).ceil() chunk_floor = size.div(k0+num_chunks).floor() chunk_diff = chunk_ceil - chunk_floor chunk_mod = size.mod_val(num_chunks) from loopy.symbolic import pw_aff_to_expr from pymbolic.primitives import Min def make_new_loop_index(inner, outer): if 0: return ( inner + pw_aff_to_expr(chunk_floor) * outer + pw_aff_to_expr(chunk_diff) * Min( (outer, pw_aff_to_expr(chunk_mod)))) else: return ( inner + pw_aff_to_expr(chunk_ceil) * Min( (outer, pw_aff_to_expr(chunk_mod))) + pw_aff_to_expr(chunk_floor) * ( outer - Min((outer, pw_aff_to_expr(chunk_mod))))) for dom in kernel.domains: var_dict = dom.get_var_dict() if split_iname not in var_dict: continue dt, idx = var_dict[split_iname] assert dt == dim_type.set aff_zero = isl.Aff.zero_on_domain(dom.space) aff_split_iname = aff_zero.set_coefficient_val(dim_type.in_, idx, 1) aligned_size = isl.align_spaces(size, aff_zero) box_dom = ( dom .eliminate(dt, idx, 1) & aff_zero.le_set(aff_split_iname) & aff_split_iname.lt_set(aligned_size) ) if not ( box_dom <= dom and dom <= box_dom): raise LoopyError("domain '%s' is not box-shape about iname " "'%s', cannot use chunk_iname()" % (dom, split_iname)) return _split_iname_backend(kernel, split_iname, fixed_length=num_chunks, fixed_length_is_inner=False, make_new_loop_index=make_new_loop_index, outer_iname=outer_iname, inner_iname=inner_iname, outer_tag=outer_tag, inner_tag=inner_tag, slabs=slabs, do_tagged_check=do_tagged_check, within=within) class _InameJoiner(RuleAwareSubstitutionMapper): def __init__(self, rule_mapping_context, within, subst_func, joined_inames, new_iname): super().__init__(rule_mapping_context, subst_func, within) self.joined_inames = set(joined_inames) self.new_iname = new_iname def map_reduction(self, expr, expn_state): expr_inames = set(expr.inames) overlap = (self.joined_inames & expr_inames - set(expn_state.arg_context)) if overlap and self.within( expn_state.kernel, expn_state.instruction, expn_state.stack): if overlap != expr_inames: raise LoopyError( "Cannot join inames '%s' if there is a reduction " "that does not use all of the inames being joined. 
" "(Found one with just '%s'.)" % ( ", ".join(self.joined_inames), ", ".join(expr_inames))) new_inames = expr_inames - self.joined_inames new_inames.add(self.new_iname) from loopy.symbolic import Reduction return Reduction(expr.operation, tuple(new_inames), self.rec(expr.expr, expn_state), expr.allow_simultaneous) else: return super().map_reduction(expr, expn_state) @for_each_kernel def join_inames(kernel, inames, new_iname=None, tag=None, within=None): from loopy.match import parse_match within = parse_match(within) if not any(within(kernel, insn) for insn in kernel.instructions): return kernel inames = inames[::-1] if new_iname is None: new_iname = kernel.get_var_name_generator()("_and_".join(inames)) from loopy.kernel.tools import DomainChanger domch = DomainChanger(kernel, frozenset(inames)) for iname in inames: if kernel.get_home_domain_index(iname) != domch.leaf_domain_index: raise LoopyError("iname '%s' is not 'at home' in the " "join's leaf domain" % iname) new_domain = domch.domain new_dim_idx = new_domain.dim(dim_type.set) new_domain = new_domain.add_dims(dim_type.set, 1) new_domain = new_domain.set_dim_name(dim_type.set, new_dim_idx, new_iname) joint_aff = zero = isl.Aff.zero_on_domain(new_domain.space) subst_dict = {} base_divisor = 1 from pymbolic import var for i, iname in enumerate(inames): iname_dt, iname_idx = zero.get_space().get_var_dict()[iname] iname_aff = zero.add_coefficient_val(iname_dt, iname_idx, 1) joint_aff = joint_aff + base_divisor*iname_aff bounds = kernel.get_iname_bounds(iname, constants_only=True) from loopy.isl_helpers import ( static_max_of_pw_aff, static_value_of_pw_aff) from loopy.symbolic import pw_aff_to_expr length = int(pw_aff_to_expr( static_max_of_pw_aff(bounds.size, constants_only=True))) try: lower_bound_aff = static_value_of_pw_aff( bounds.lower_bound_pw_aff.coalesce(), constants_only=False) except Exception as e: raise type(e)("while finding lower bound of '%s': " % iname) my_val = var(new_iname) // base_divisor if i+1 < len(inames): my_val %= length my_val += pw_aff_to_expr(lower_bound_aff) subst_dict[iname] = my_val base_divisor *= length from loopy.isl_helpers import iname_rel_aff new_domain = new_domain.add_constraint( isl.Constraint.equality_from_aff( iname_rel_aff(new_domain.get_space(), new_iname, "==", joint_aff))) for iname in inames: iname_to_dim = new_domain.get_space().get_var_dict() iname_dt, iname_idx = iname_to_dim[iname] if within is None: new_domain = new_domain.project_out(iname_dt, iname_idx, 1) def subst_within_inames(fid): result = set() for iname in fid: if iname in inames: result.add(new_iname) else: result.add(iname) return frozenset(result) new_insns = [ insn.copy( within_inames=subst_within_inames(insn.within_inames)) if within(kernel, insn) else insn for insn in kernel.instructions] kernel = (kernel .copy( instructions=new_insns, domains=domch.get_domains_with(new_domain), applied_iname_rewrites=kernel.applied_iname_rewrites + [subst_dict] )) from loopy.match import parse_stack_match within = parse_stack_match(within) from pymbolic.mapper.substitutor import make_subst_func rule_mapping_context = SubstitutionRuleMappingContext( kernel.substitutions, kernel.get_var_name_generator()) ijoin = _InameJoiner(rule_mapping_context, within, make_subst_func(subst_dict), inames, new_iname) kernel = rule_mapping_context.finish_kernel( ijoin.map_kernel(kernel)) if tag is not None: kernel = tag_inames(kernel, {new_iname: tag}) return remove_unused_inames(kernel, inames)
MIT License
ithinksw/philo
philo/contrib/penfield/models.py
BlogView.get_tag_queryset
python
def get_tag_queryset(self, obj): return obj.entry_tags
Returns the default :class:`QuerySet` of :class:`.Tag`\ s for the :class:`BlogView`'s :meth:`get_entries_by_tag` and :meth:`tag_archive_view`.
https://github.com/ithinksw/philo/blob/8a772dd4761e3a4b926358d6ebf87c9fc7033ba5/philo/contrib/penfield/models.py#L209-L211
from datetime import date, datetime from django.conf import settings from django.conf.urls.defaults import url, patterns, include from django.db import models from django.http import Http404, HttpResponse from taggit.managers import TaggableManager from taggit.models import Tag, TaggedItem from philo.contrib.winer.models import FeedView from philo.exceptions import ViewCanNotProvideSubpath from philo.models import Entity, Page, register_value_model from philo.models.fields import TemplateField from philo.utils import paginate class Blog(Entity): title = models.CharField(max_length=255) slug = models.SlugField(max_length=255) def __unicode__(self): return self.title @property def entry_tags(self): entry_pks = list(self.entries.values_list('pk', flat=True)) kwargs = { '%s__object_id__in' % TaggedItem.tag_relname(): entry_pks } return TaggedItem.tags_for(BlogEntry).filter(**kwargs) @property def entry_dates(self): dates = {'year': self.entries.dates('date', 'year', order='DESC'), 'month': self.entries.dates('date', 'month', order='DESC'), 'day': self.entries.dates('date', 'day', order='DESC')} return dates register_value_model(Blog) class BlogEntry(Entity): title = models.CharField(max_length=255) slug = models.SlugField(max_length=255) blog = models.ForeignKey(Blog, related_name='entries', blank=True, null=True) author = models.ForeignKey(getattr(settings, 'PHILO_PERSON_MODULE', 'auth.User'), related_name='blogentries') date = models.DateTimeField(default=None) content = TemplateField() excerpt = TemplateField(blank=True, null=True) tags = TaggableManager() def save(self, *args, **kwargs): if self.date is None: self.date = datetime.now() super(BlogEntry, self).save(*args, **kwargs) def __unicode__(self): return self.title class Meta: ordering = ['-date'] verbose_name_plural = "blog entries" get_latest_by = "date" register_value_model(BlogEntry) class BlogView(FeedView): ENTRY_PERMALINK_STYLE_CHOICES = ( ('D', 'Year, month, and day'), ('M', 'Year and month'), ('Y', 'Year'), ('B', 'Custom base'), ('N', 'No base') ) blog = models.ForeignKey(Blog, related_name='blogviews') index_page = models.ForeignKey(Page, related_name='blog_index_related') entry_page = models.ForeignKey(Page, related_name='blog_entry_related') entry_archive_page = models.ForeignKey(Page, related_name='blog_entry_archive_related', null=True, blank=True) tag_page = models.ForeignKey(Page, related_name='blog_tag_related') tag_archive_page = models.ForeignKey(Page, related_name='blog_tag_archive_related', null=True, blank=True) entries_per_page = models.IntegerField(blank=True, null=True) entry_permalink_style = models.CharField(max_length=1, choices=ENTRY_PERMALINK_STYLE_CHOICES) entry_permalink_base = models.CharField(max_length=255, blank=False, default='entries') tag_permalink_base = models.CharField(max_length=255, blank=False, default='tags') item_context_var = 'entries' def __unicode__(self): return u'BlogView for %s' % self.blog.title def get_reverse_params(self, obj): if isinstance(obj, BlogEntry): if obj.blog_id == self.blog_id: kwargs = {'slug': obj.slug} if self.entry_permalink_style in 'DMY': kwargs.update({'year': str(obj.date.year).zfill(4)}) if self.entry_permalink_style in 'DM': kwargs.update({'month': str(obj.date.month).zfill(2)}) if self.entry_permalink_style == 'D': kwargs.update({'day': str(obj.date.day).zfill(2)}) return self.entry_view, [], kwargs elif isinstance(obj, Tag) or (isinstance(obj, models.query.QuerySet) and obj.model == Tag and obj): if isinstance(obj, Tag): obj = [obj] slugs = [tag.slug for 
tag in obj if tag in self.get_tag_queryset(self.blog)] if slugs: return 'entries_by_tag', [], {'tag_slugs': "/".join(slugs)} elif isinstance(obj, (date, datetime)): kwargs = { 'year': str(obj.year).zfill(4), 'month': str(obj.month).zfill(2), 'day': str(obj.day).zfill(2) } return 'entries_by_day', [], kwargs raise ViewCanNotProvideSubpath @property def urlpatterns(self): urlpatterns = self.feed_patterns(r'^', 'get_entries', 'index_page', 'index') + self.feed_patterns(r'^%s/(?P<tag_slugs>[-\w]+[-+/\w]*)' % self.tag_permalink_base, 'get_entries', 'tag_page', 'entries_by_tag') if self.tag_archive_page_id: urlpatterns += patterns('', url((r'^%s$' % self.tag_permalink_base), self.tag_archive_view, name='tag_archive') ) if self.entry_archive_page_id: if self.entry_permalink_style in 'DMY': urlpatterns += self.feed_patterns(r'^(?P<year>\d{4})', 'get_entries', 'entry_archive_page', 'entries_by_year') if self.entry_permalink_style in 'DM': urlpatterns += self.feed_patterns(r'^(?P<year>\d{4})/(?P<month>\d{2})', 'get_entries', 'entry_archive_page', 'entries_by_month') if self.entry_permalink_style == 'D': urlpatterns += self.feed_patterns(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})', 'get_entries', 'entry_archive_page', 'entries_by_day') if self.entry_permalink_style == 'D': urlpatterns += patterns('', url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<slug>[-\w]+)$', self.entry_view) ) elif self.entry_permalink_style == 'M': urlpatterns += patterns('', url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<slug>[-\w]+)$', self.entry_view) ) elif self.entry_permalink_style == 'Y': urlpatterns += patterns('', url(r'^(?P<year>\d{4})/(?P<slug>[-\w]+)$', self.entry_view) ) elif self.entry_permalink_style == 'B': urlpatterns += patterns('', url((r'^%s/(?P<slug>[-\w]+)$' % self.entry_permalink_base), self.entry_view) ) else: urlpatterns += patterns('', url(r'^(?P<slug>[-\w]+)$', self.entry_view) ) return urlpatterns def get_entry_queryset(self, obj): return obj.entries.filter(date__lte=datetime.now())
ISC License
upkoding/upkoding
projects/models.py
UserProject.requirements_to_progress
python
def requirements_to_progress(requirements):
    reqs_progress = sum(
        map(lambda r: 1 if 'complete' in r else 0, requirements))
    reqs_progress_percent = (reqs_progress / len(requirements)) * 100.0
    return float(format(reqs_progress_percent, '.2f'))
Returns progress in percent.
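A worked example of the computation above (a requirement counts as done when it carries a 'complete' key, regardless of its value):

requirements = [
    {'title': 'Setup repo', 'complete': True},
    {'title': 'Write tests'},
    {'title': 'Deploy demo'},
]
UserProject.requirements_to_progress(requirements)   # -> 33.33 (1 of 3, as a percentage)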
https://github.com/upkoding/upkoding/blob/f9fb1095e8e53dc156213ac590154a9759ae6899/projects/models.py#L348-L355
from datetime import timedelta from django.conf import settings from django.contrib.postgres.indexes import GinIndex from django.contrib.postgres.search import SearchVectorField from django.db import models from django.db.models.deletion import CASCADE from django.template.defaultfilters import slugify, time from django.urls import reverse from django.utils.timezone import now from sorl.thumbnail import ImageField from stream_django.activity import Activity, create_model_reference from account.models import User from codeblocks.models import CodeBlock from .managers import ProjectManager, PROJECT_SEARCH_VECTORS def project_cover_path(instance, filename): ts = int(now().timestamp()) return 'projects/cover/{}-{}'.format(ts, filename) def project_image_path(instance, filename): ts = int(now().timestamp()) return 'projects/images/{}-{}'.format(ts, filename) class Project(models.Model): STATUS_DRAFT = 0 STATUS_PENDING = 1 STATUS_ACTIVE = 2 STATUS_DELETED = 3 STATUS_ARCHIVED = 4 STATUSES = [ (STATUS_DRAFT, 'Draft'), (STATUS_PENDING, 'Pending'), (STATUS_ACTIVE, 'Active'), (STATUS_DELETED, 'Deleted'), (STATUS_ARCHIVED, 'Archived'), ] LEVEL_EASY = 1 LEVEL_MEDIUM = 2 LEVEL_HARD = 3 LEVEL_PROJECT = 99 LEVELS = [ (LEVEL_EASY, 'easy'), (LEVEL_MEDIUM, 'medium'), (LEVEL_HARD, 'hard'), (LEVEL_PROJECT, 'project'), ] POINT_EASY = 1 POINT_MEDIUM = 5 POINT_HARD = 10 user = models.ForeignKey( User, on_delete=models.SET_NULL, null=True, related_name='projects') level = models.PositiveIntegerField(choices=LEVELS, default=LEVEL_EASY) slug = models.SlugField(max_length=150, blank=True) title = models.CharField('Judul', max_length=100) description_short = models.CharField( 'Deskripsi Pendek', max_length=100, default='') description = models.TextField('Deskripsi') codeblock = models.OneToOneField( CodeBlock, on_delete=models.SET_NULL, blank=True, null=True) requirements = models.JSONField('Requirements', blank=True, null=True) cover = ImageField( upload_to=project_cover_path, blank=True, null=True ) point = models.IntegerField(default=0) tags = models.CharField('Tags', max_length=50, blank=True, default='') is_featured = models.BooleanField(default=False) is_premium = models.BooleanField(default=False) status = models.PositiveSmallIntegerField( 'Status', choices=STATUSES, default=STATUS_DRAFT) require_demo_url = models.BooleanField(default=False) require_sourcecode_url = models.BooleanField(default=False) taken_count = models.IntegerField(default=0) completed_count = models.IntegerField(default=0) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) search_vector = SearchVectorField(null=True, blank=True) objects = ProjectManager() class Meta: indexes = [ models.Index(fields=['slug'], name='project_slug_idx'), models.Index(fields=['status'], name='project_status_idx'), models.Index(fields=['level'], name='project_level_idx'), models.Index(fields=['is_premium'], name='project_is_premium_idx'), GinIndex(fields=['search_vector'], name='project_search_vector_idx'), ] ordering = ['-pk'] def __str__(self, *args, **kwargs): return self.title def get_level_color(self): colors = { self.LEVEL_PROJECT: 'dark', self.LEVEL_EASY: 'success', self.LEVEL_MEDIUM: 'warning', self.LEVEL_HARD: 'danger', } return colors.get(self.level) def save(self, *args, **kwargs): if not self.slug: self.slug = slugify(self.title) if self.tags: self.tags = ','.join([ tag.strip().lower() for tag in self.tags.split(',')]) if self.level != self.LEVEL_PROJECT: if self.level == self.LEVEL_EASY: self.point = 
self.POINT_EASY elif self.level == self.LEVEL_MEDIUM: self.point = self.POINT_MEDIUM elif self.level == self.LEVEL_HARD: self.point = self.POINT_HARD if self.pk: self.search_vector = PROJECT_SEARCH_VECTORS super().save(*args, **kwargs) def is_active(self): return self.status == self.STATUS_ACTIVE def is_archived(self): return self.status == self.STATUS_ARCHIVED def has_codeblock(self): return self.codeblock_id is not None def get_absolute_url(self): return reverse('projects:detail', args=[self.slug, str(self.pk)]) def get_point_display(self): return '{}{}'.format(self.point, settings.POINT_UNIT) def inc_taken_count(self): self.taken_count = models.F('taken_count') + 1 self.save() def inc_completed_count(self): self.completed_count = models.F('completed_count') + 1 self.save() def dec_completed_count(self): self.completed_count = models.F('completed_count') - 1 self.save() def has_codeblock(self): return self.codeblock_id is not None def assign_to(self, user): obj, created = UserProject.objects.get_or_create( user=user, project=self, defaults={ 'requirements': self.requirements, 'point': self.point, 'require_demo_url': self.require_demo_url, 'require_sourcecode_url': self.require_sourcecode_url, }) if created: user_codeblock = None if self.codeblock: user_codeblock = self.codeblock user_codeblock.pk = None user_codeblock.save() obj.codeblock = user_codeblock obj.save() UserProjectParticipant.objects.get_or_create( user_project=obj, user=self.user) UserProjectParticipant.objects.get_or_create( user_project=obj, user=user) self.inc_taken_count() return obj, created class ProjectImage(models.Model): project = models.ForeignKey( Project, on_delete=models.CASCADE, related_name='images') title = models.CharField(max_length=250, blank=True, default='') order = models.SmallIntegerField(default=0) image = ImageField( upload_to=project_image_path, ) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) class UserProject(models.Model): STATUS_IN_PROGRESS = 0 STATUS_PENDING_REVIEW = 1 STATUS_COMPLETE = 2 STATUS_INCOMPLETE = 3 STATUSES = [ (STATUS_IN_PROGRESS, 'In Progress'), (STATUS_PENDING_REVIEW, 'Pending Review'), (STATUS_COMPLETE, 'Complete'), (STATUS_INCOMPLETE, 'Incomplete'), ] user = models.ForeignKey( User, on_delete=models.CASCADE, related_name='user_projects') project = models.ForeignKey( Project, on_delete=models.CASCADE, related_name='user_projects') codeblock = models.OneToOneField( CodeBlock, on_delete=models.SET_NULL, blank=True, null=True) requirements = models.JSONField('Requirements', blank=True, null=True) requirements_completed_percent = models.DecimalField( default=0.0, decimal_places=2, max_digits=5) requirements_completed_percent_max = models.DecimalField( default=0.0, decimal_places=2, max_digits=5) point = models.IntegerField(default=0) status = models.PositiveSmallIntegerField( 'Status', choices=STATUSES, default=STATUS_IN_PROGRESS) demo_url = models.CharField( 'URL demo proyek', max_length=250, blank=True, default='') sourcecode_url = models.CharField( 'URL kode sumber proyek', max_length=250, blank=True, default='') note = models.TextField('Catatan', blank=True, default='') require_demo_url = models.BooleanField(default=False) require_sourcecode_url = models.BooleanField(default=False) likes_count = models.IntegerField(default=0) comments_count = models.IntegerField(default=0) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) class Meta: indexes = [ models.Index(fields=['user', 
'project'], name='user_project_idx'), models.Index(fields=['status'], name='user_project_status_idx'), ] constraints = [ models.UniqueConstraint( fields=['user', 'project'], name='unique_user_project') ] ordering = ['-pk'] def __str__(self): return '{} - {} ({})'.format(self.user.username, self.project.slug, self.point) def get_absolute_url(self): return reverse('projects:detail_user', args=[self.project.slug, self.project.pk, self.user.username]) def get_project_url(self): return self.project.get_absolute_url() def get_point_display(self): return '{}{}'.format(self.point, settings.POINT_UNIT) def get_color_class(self): if self.is_pending_review() or self.is_incomplete(): return 'warning' if self.is_complete(): return 'success' return 'primary' def approvable_by(self, user): if self.is_complete(): return False if user.is_staff and self.user != user: return True return False @staticmethod
MIT License
mjhoptics/ray-optics
src/rayoptics/raytr/opticalspec.py
OpticalSpecs.__setitem__
python
def __setitem__(self, key, value): self._submodels[key] = value
Provide mapping interface to submodels.
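The mapping interface in use, given an existing OpticalSpecs instance osp (how osp is obtained from the optical model is outside this snippet, and the FocusRange value is illustrative):

osp['focus'] = FocusRange(0.5)   # replace the focus submodel via __setitem__
fr = osp['focus']                # read it back via __getitem__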
https://github.com/mjhoptics/ray-optics/blob/3b2c9ab9100dd9e0cc9c52c33655dc69286ad40e/src/rayoptics/raytr/opticalspec.py#L61-L63
import math import numpy as np from rayoptics.parax.firstorder import compute_first_order, list_parax_trace from rayoptics.raytr.trace import aim_chief_ray from rayoptics.optical import model_enums import rayoptics.optical.model_constants as mc from opticalglass.spectral_lines import get_wavelength import rayoptics.util.colour_system as cs from rayoptics.util import colors srgb = cs.cs_srgb class OpticalSpecs: do_aiming_default = True def __init__(self, opt_model, specsheet=None, **kwargs): self.opt_model = opt_model self._submodels = {} self['wvls'] = WvlSpec(**kwargs) self['pupil'] = PupilSpec(self) self['fov'] = FieldSpec(self) self['focus'] = FocusRange(0.0) self.parax_data = None self.do_aiming = OpticalSpecs.do_aiming_default if specsheet: self.set_from_specsheet(specsheet) def __getitem__(self, key): return self._submodels[key]
BSD 3-Clause New or Revised License
arizvisa/syringe
lib/ptypes/utils.py
padding.fill
python
def fill(cls, amount, source):
    iterable = itertools.islice(source, amount)
    return bytes(bytearray(iterable))
Returns a bytearray of ``amount`` elements, from the specified ``source``
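A quick usage sketch with the padding sources defined later in this module (the import path is assumed from the file layout):

from ptypes import utils

utils.padding.fill(4, utils.padding.source.zero())         # -> b'\x00\x00\x00\x00'
utils.padding.fill(6, utils.padding.source.repeat(b'AB'))  # -> b'ABABAB'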
https://github.com/arizvisa/syringe/blob/9f144e7623f4332c51c8fdc74a3a639d6bcdf76c/lib/ptypes/utils.py#L83-L86
import sys, math, random import functools, operator, itertools, types izip_longest = itertools.izip_longest if sys.version_info.major < 3 else itertools.zip_longest string_types = (str, unicode) if sys.version_info.major < 3 else (str,) text_types = (unicode,) if sys.version_info.major < 3 else (str,) iterbytes = functools.partial(itertools.imap, ord) if sys.version_info.major < 3 else iter def strdup(string, terminator='\0'): count = len(list(itertools.takewhile(lambda item: item not in terminator, string))) return string[:count] def indent(string, tabsize=4, char=' ', newline='\n'): indent = char * tabsize strings = [(indent + item) for item in string.split(newline)] return newline.join(strings) class assign(object): magical = { 'source' : '__source__', } def __init__(self, *objects, **attributes): self.objects = objects self.attributes = { self.magical.get(attribute, attribute) : value for attribute, value in attributes.items() } def __enter__(self): objects, attributes = self.objects, self.attributes self.states = tuple({attribute : getattr(item, attribute) for attribute in attributes.keys()} for item in objects) [item.__update__(attributes) for item in objects] return objects def __exit__(self, exc_type, exc_value, traceback): [item.__update__(attributes) for item, attributes in zip(self.objects, self.states)] return class padding: class source: def __bytesdecorator__(method): def closure(*args, **kwargs): iterable = method(*args, **kwargs) return (bytes(bytearray([item])) for item in iterable) return closure if sys.version_info.major < 3 else method @classmethod @__bytesdecorator__ def repeat(cls, value): iterable = iterbytes(value) return itertools.cycle(iterable) @classmethod @__bytesdecorator__ def iterable(cls, iterable): return iterbytes(iterable) @classmethod def file(cls, file): return itertools.starmap(file.read, itertools.repeat([1])) @classmethod @__bytesdecorator__ def prng(cls, seed=None): random.seed(seed) return itertools.starmap(random.randint, itertools.repeat([0, 0xff])) @classmethod @__bytesdecorator__ def zero(cls): return iterbytes(cls.repeat(b'\x00')) @classmethod
BSD 2-Clause Simplified License
pavlov99/json-rpc
jsonrpc/backend/flask.py
JSONRPCAPI.jsonrpc_map
python
def jsonrpc_map(self):
    result = "<h1>JSON-RPC map</h1><pre>{0}</pre>".format("\n\n".join([
        "{0}: {1}".format(fname, f.__doc__)
        for fname, f in self.dispatcher.items()
    ]))
    return Response(result)
Map of json-rpc available calls. :return str:
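A small wiring sketch showing how the map view gets exposed; the Flask app and the example method are illustrative:

from flask import Flask
from jsonrpc.backend.flask import JSONRPCAPI

app = Flask(__name__)
api = JSONRPCAPI()

@api.dispatcher.add_method
def ping():
    """Health-check method."""
    return 'pong'

app.register_blueprint(api.as_blueprint(), url_prefix='/api')
# GET /api/map now renders the "<h1>JSON-RPC map</h1>..." page listing ping and its docstring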
https://github.com/pavlov99/json-rpc/blob/00b24a9e811d9ca89ec3b1570195c881687bbef4/jsonrpc/backend/flask.py#L63-L73
from __future__ import absolute_import import copy import json import logging import time from uuid import uuid4 from flask import Blueprint, request, Response from ..exceptions import JSONRPCInvalidRequestException from ..jsonrpc import JSONRPCRequest from ..manager import JSONRPCResponseManager from ..utils import DatetimeDecimalEncoder from ..dispatcher import Dispatcher logger = logging.getLogger(__name__) class JSONRPCAPI(object): def __init__(self, dispatcher=None, check_content_type=True): self.dispatcher = dispatcher if dispatcher is not None else Dispatcher() self.check_content_type = check_content_type def as_blueprint(self, name=None): blueprint = Blueprint(name if name else str(uuid4()), __name__) blueprint.add_url_rule( '/', view_func=self.jsonrpc, methods=['POST']) blueprint.add_url_rule( '/map', view_func=self.jsonrpc_map, methods=['GET']) return blueprint def as_view(self): return self.jsonrpc def jsonrpc(self): request_str = self._get_request_str() try: jsonrpc_request = JSONRPCRequest.from_json(request_str) except (TypeError, ValueError, JSONRPCInvalidRequestException): response = JSONRPCResponseManager.handle( request_str, self.dispatcher) else: response = JSONRPCResponseManager.handle_request( jsonrpc_request, self.dispatcher) if response: response.serialize = self._serialize response = response.json return Response(response, content_type="application/json")
MIT License
crdoconnor/commandlib
commandlib/command.py
Command.piped
python
def piped(self): return PipedCommand(self)
Return PipedCommand object.

This is what you use if you want to do limited piped interactions with the command:

* Pipe a file handle in or out of the command.
* Pipe in a string.
* Run and get the output of the command.
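A usage sketch along the lines the docstring describes; the PipedCommand method names used here (from_string, output) are assumptions about its API, which is defined in commandlib.piped:

from commandlib import Command

tr = Command('tr', 'a-z', 'A-Z')
shouted = tr.piped.from_string('hello world').output()   # assumed PipedCommand methods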
https://github.com/crdoconnor/commandlib/blob/dee9cf7bbef0f27f9ec3e8e919f78773e5e48339/commandlib/command.py#L190-L201
from commandlib.exceptions import CommandError from commandlib.utils import _check_directory from commandlib.piped import PipedCommand from os import chdir, getcwd import subprocess import copy import sys import os def _type_check_command(command): if type(command) != Command: raise CommandError("Command must be of type commandlib.Command") def run(command): _type_check_command(command) command.run() class DirectoryContextManager(object): def __init__(self, directory): self.directory = directory def __enter__(self): self._previous_directory = getcwd() if self.directory is not None: chdir(self.directory) def __exit__(self, type, value, traceback): if self.directory is not None: chdir(self._previous_directory) class Command(object): def __init__(self, *args): self._arguments = [str(arg) for arg in args] self._directory = None self._env = {} self._env_drop = [] self._shell = None self._paths = [] self._trailing_args = [] self._ignore_errors = False @property def arguments(self): return self._arguments + self._trailing_args @property def env(self): env_vars = os.environ.copy() env_vars.update(self._env) new_path = ":".join( self._paths + [env_vars["PATH"]] if "PATH" in env_vars else [] + self._paths ) env_vars["PATH"] = new_path for env_var in self._env_drop: if env_var in env_vars: del env_vars[env_var] return env_vars def ignore_errors(self): new_command = copy.deepcopy(self) new_command._ignore_errors = True return new_command @property def directory(self): return self._directory def __call__(self, *arguments): arguments = [str(arg) for arg in arguments] new_command = copy.deepcopy(self) new_command._arguments.extend(arguments) return new_command def with_env(self, **environment_variables): new_env_vars = { str(var): str(val) for var, val in environment_variables.items() } new_command = copy.deepcopy(self) new_command._env.update(new_env_vars) return new_command def without_env(self, environment_variable): new_command = copy.deepcopy(self) new_command._env_drop.append(str(environment_variable)) return new_command def in_dir(self, directory): new_command = copy.deepcopy(self) new_command._directory = str(directory) return new_command def with_shell(self): new_command = copy.deepcopy(self) new_command._shell = True return new_command def with_trailing_args(self, *arguments): new_command = copy.deepcopy(self) new_command._trailing_args = [str(arg) for arg in arguments] return new_command def with_path(self, path): new_command = copy.deepcopy(self) new_command._paths.append(str(path)) return new_command def interact(self): import icommandlib return icommandlib.ICommand(self) @property
MIT License
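A usage sketch for the ``Command`` object above. The ``ls`` invocation and the ``/tmp`` directory are illustrative; ``run()`` lives in the part of the class not shown here, and the ``.output()`` call is an assumption about ``PipedCommand``, whose methods are only described in the docstring.

# Illustrative use of the Command builder above; each chained call returns a
# new Command object, so the base command can be reused.
from commandlib import Command   # assumes the package re-exports Command

ls = Command("ls")("-la").in_dir("/tmp").with_env(LC_ALL="C")
ls.run()                         # raises CommandError on a nonzero exit
listing = ls.piped.output()      # assumed PipedCommand helper for capturing stdout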
stanfordvl/taskonomy
code/lib/models/resnet.py
_get_variable
python
def _get_variable(name, shape, initializer, weight_decay=0.0, dtype='float', trainable=True): if weight_decay > 0: regularizer = tf.contrib.layers.l2_regularizer(weight_decay) else: regularizer = None collections = [tf.GraphKeys.VARIABLES, RESNET_VARIABLES] with tf.variable_scope(tf.get_variable_scope(), reuse=False): return tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype, regularizer=regularizer, collections=collections, trainable=trainable)
A little wrapper around tf.get_variable to do weight decay and add the variable to the resnet variables collection.
https://github.com/stanfordvl/taskonomy/blob/d486b5ecb7718531669a35d4fe3022a19c2bb377/code/lib/models/resnet.py#L233-L253
import skimage.io import skimage.transform import tensorflow as tf from tensorflow.python.ops import control_flow_ops from tensorflow.python.training import moving_averages from models.resnet_config import Config import datetime import numpy as np import os import time MOVING_AVERAGE_DECAY = 0.9997 BN_DECAY = MOVING_AVERAGE_DECAY BN_EPSILON = 0.001 CONV_WEIGHT_DECAY = 0.00004 CONV_WEIGHT_STDDEV = 0.1 FC_WEIGHT_DECAY = 0.00004 FC_WEIGHT_STDDEV = 0.01 RESNET_VARIABLES = 'resnet_variables' UPDATE_OPS_COLLECTION = 'resnet_update_ops' IMAGENET_MEAN_BGR = [103.062623801, 115.902882574, 123.151630838, ] tf.app.flags.DEFINE_integer('input_size', 224, "input image size") activation = tf.nn.relu def inference(x, is_training, num_classes=1000, num_blocks=[3, 8, 36, 3], use_bias=False, bottleneck=True): c = Config() c['bottleneck'] = bottleneck c['is_training'] = tf.convert_to_tensor(is_training, dtype='bool', name='is_training') c['ksize'] = 3 c['stride'] = 1 c['use_bias'] = use_bias c['fc_units_out'] = num_classes c['num_blocks'] = num_blocks c['stack_stride'] = 2 with tf.variable_scope('scale1'): c['conv_filters_out'] = 64 c['ksize'] = 7 c['stride'] = 2 x = conv(x, c) x = bn(x, c) x = activation(x) with tf.variable_scope('scale2'): x = _max_pool(x, ksize=3, stride=2) c['num_blocks'] = num_blocks[0] c['stack_stride'] = 1 c['block_filters_internal'] = 64 x = stack(x, c) with tf.variable_scope('scale3'): c['num_blocks'] = num_blocks[1] c['block_filters_internal'] = 128 assert c['stack_stride'] == 2 x = stack(x, c) with tf.variable_scope('scale4'): c['num_blocks'] = num_blocks[2] c['block_filters_internal'] = 256 x = stack(x, c) with tf.variable_scope('scale5'): c['num_blocks'] = num_blocks[3] c['block_filters_internal'] = 512 x = stack(x, c) x = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool") if num_classes != None: with tf.variable_scope('fc'): x = fc(x, c) return x def _imagenet_preprocess(rgb): red, green, blue = tf.split(3, 3, rgb * 255.0) bgr = tf.concat(3, [blue, green, red]) bgr -= IMAGENET_MEAN_BGR return bgr def loss(logits, labels): cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels) cross_entropy_mean = tf.reduce_mean(cross_entropy) regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) loss_ = tf.add_n([cross_entropy_mean] + regularization_losses) tf.scalar_summary('loss', loss_) return loss_ def stack(x, c): for n in range(c['num_blocks']): s = c['stack_stride'] if n == 0 else 1 c['block_stride'] = s with tf.variable_scope('block%d' % (n + 1)): x = block(x, c) return x def block(x, c): filters_in = x.get_shape()[-1] m = 4 if c['bottleneck'] else 1 filters_out = m * c['block_filters_internal'] shortcut = x c['conv_filters_out'] = c['block_filters_internal'] if c['bottleneck']: with tf.variable_scope('a'): c['ksize'] = 1 c['stride'] = c['block_stride'] x = conv(x, c) x = bn(x, c) x = activation(x) with tf.variable_scope('b'): x = conv(x, c) x = bn(x, c) x = activation(x) with tf.variable_scope('c'): c['conv_filters_out'] = filters_out c['ksize'] = 1 assert c['stride'] == 1 x = conv(x, c) x = bn(x, c) else: with tf.variable_scope('A'): c['stride'] = c['block_stride'] assert c['ksize'] == 3 x = conv(x, c) x = bn(x, c) x = activation(x) with tf.variable_scope('B'): c['conv_filters_out'] = filters_out assert c['ksize'] == 3 assert c['stride'] == 1 x = conv(x, c) x = bn(x, c) with tf.variable_scope('shortcut'): if filters_out != filters_in or c['block_stride'] != 1: c['ksize'] = 1 c['stride'] = c['block_stride'] c['conv_filters_out'] 
= filters_out shortcut = conv(shortcut, c) shortcut = bn(shortcut, c) return activation(x + shortcut) def bn(x, c): x_shape = x.get_shape() params_shape = x_shape[-1:] if c['use_bias']: bias = _get_variable('bias', params_shape, initializer=tf.zeros_initializer) return x + bias axis = list(range(len(x_shape) - 1)) beta = tf.get_variable('beta', params_shape, initializer=tf.zeros_initializer) gamma = tf.get_variable('gamma', params_shape, initializer=tf.ones_initializer) moving_mean = tf.get_variable('moving_mean', params_shape, initializer=tf.zeros_initializer, trainable=False) moving_variance = tf.get_variable('moving_variance', params_shape, initializer=tf.ones_initializer, trainable=False) mean, variance = tf.nn.moments(x, axis) mean, variance = (moving_mean, moving_variance) x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON) return x def fc(x, c): num_units_in = x.get_shape()[1] num_units_out = c['fc_units_out'] weights_initializer = tf.truncated_normal_initializer( stddev=FC_WEIGHT_STDDEV) weights = _get_variable('weights', shape=[num_units_in, num_units_out], initializer=weights_initializer, weight_decay=FC_WEIGHT_STDDEV) biases = _get_variable('biases', shape=[num_units_out], initializer=tf.zeros_initializer) x = tf.nn.xw_plus_b(x, weights, biases) return x
MIT License
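A brief sketch of how the weight-decay regularizers registered by ``_get_variable`` feed into training, assuming the TF 0.x/1.x-era API this module targets; the placeholder shapes and optimizer settings are illustrative.

# Sketch (TF 0.x/1.x-era API): the L2 penalties created via _get_variable end
# up in tf.GraphKeys.REGULARIZATION_LOSSES and are summed inside loss() above.
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
labels = tf.placeholder(tf.int64, [None])

logits = inference(images, is_training=True, num_classes=1000)
total_loss = loss(logits, labels)  # cross-entropy + weight-decay terms
train_op = tf.train.MomentumOptimizer(0.1, 0.9).minimize(total_loss)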
supercodepoet/django-merlin
src/merlin/wizards/session.py
SessionWizard.process_show_form
python
def process_show_form(self, request, step, form): pass
Hook for providing extra context that can be used in the template that renders the current form. :param request: A ``HttpRequest`` object that carries along with it the session used to access the wizard state. :param step: The current :class:`Step` that is being processed. :param form: The Django ``Form`` object that is being processed.
https://github.com/supercodepoet/django-merlin/blob/7129d58638c78dcc56a0e2f2952220b201c3afd5/src/merlin/wizards/session.py#L393-L408
from functools import wraps from django.http import * from django.shortcuts import render_to_response from django.template.context import RequestContext from merlin.wizards import MissingStepException, MissingSlugException from merlin.wizards.utils import * def modifies_session(func): @wraps(func) def wrapper(self, request, *args, **kwargs): result = func(self, request, *args, **kwargs) request.session.modified = True return result return wrapper class SessionWizard(object): def __init__(self, steps): if not isinstance(steps, list): raise TypeError('steps must be an instance of or subclass of list') if [step for step in steps if not isinstance(step, Step)]: raise TypeError('All steps must be an instance of Step') slugs = set([step.slug for step in steps]) if len(slugs) != len(steps): raise ValueError('Step slugs must be unique.') clazz = self.__class__ self.id = '%s.%s' % (clazz.__module__, clazz.__name__,) self.base_steps = steps def __call__(self, request, *args, **kwargs): self._init_wizard(request) slug = kwargs.get('slug', None) if not slug: raise MissingSlugException("Slug not found.") step = self.get_step(request, slug) if not step: if slug == 'cancel': self.cancel(request) redirect = request.REQUEST.get('rd', '/') return HttpResponseRedirect(redirect) raise MissingStepException("Step for slug %s not found." % slug) method_name = 'process_%s' % request.method method = getattr(self, method_name) return method(request, step) def _init_wizard(self, request): if self.id not in request.session: request.session[self.id] = WizardState( steps=self.base_steps[:], current_step=self.base_steps[0], form_data={}) self.initialize(request, request.session[self.id]) def _get_state(self, request): return request.session[self.id] def _show_form(self, request, step, form): context = self.process_show_form(request, step, form) return self.render_form(request, step, form, { 'current_step': step, 'form': form, 'previous_step': self.get_before(request, step), 'next_step': self.get_after(request, step), 'url_base': self._get_URL_base(request, step), 'extra_context': context }) def _set_current_step(self, request, step): self._get_state(request).current_step = step return step def _get_URL_base(self, request, step): index = request.path.rfind(step.slug) return request.path[:index] def process_GET(self, request, step): form_data = self.get_cleaned_data(request, step) if form_data: form = step.form(form_data) else: form = step.form() return self._show_form(request, step, form) def process_POST(self, request, step): form = step.form(request.POST) if not form.is_valid(): return self._show_form(request, step, form) self.set_cleaned_data(request, step, form.cleaned_data) self.process_step(request, step, form) next_step = self.get_after(request, step) if next_step: url_base = self._get_URL_base(request, step) return HttpResponseRedirect(urljoin(url_base, next_step.slug)) else: return self.done(request) def get_steps(self, request): return self._get_state(request).steps def get_step(self, request, slug): steps = self.get_steps(request) try: return [step for step in steps if step.slug == slug][0] except IndexError: return None def get_before(self, request, step): steps = self.get_steps(request) index = steps.index(step) if index > 0: return steps[index - 1] else: return None def get_after(self, request, step): steps = self.get_steps(request) index = steps.index(step) try: return steps[index + 1] except IndexError: return None @modifies_session def remove_step(self, request, step): steps = self.get_steps(request) if 
step in steps: steps.remove(step) @modifies_session def insert_before(self, request, current_step, step): steps = self.get_steps(request) if step not in steps: index = steps.index(current_step) steps.insert(index, step) @modifies_session def insert_after(self, request, current_step, step): steps = self.get_steps(request) if step not in steps: index = steps.index(current_step) + 1 steps.insert(index, step) def get_cleaned_data(self, request, step): return self._get_state(request).form_data.get(step.slug, None) @modifies_session def set_cleaned_data(self, request, step, data): self._get_state(request).form_data[step.slug] = data def get_form_data(self, request): return request.session[self.id]['form_data'] def clear(self, request): del request.session[self.id] def initialize(self, request, wizard_state): pass def cancel(self, request): self.clear(request)
BSD 3-Clause New or Revised License
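A sketch of plugging into the ``process_show_form`` hook documented above. The ``Step(slug, form)`` constructor signature and the ``NameForm``/``EmailForm`` classes are assumptions made for illustration.

# Hypothetical wizard subclass; NameForm/EmailForm are placeholder Django forms.
from django.http import HttpResponseRedirect
from merlin.wizards.session import SessionWizard
from merlin.wizards.utils import Step

class SignupWizard(SessionWizard):
    def process_show_form(self, request, step, form):
        # returned dict is exposed as 'extra_context' when the form is rendered
        return {'step_title': step.slug.replace('-', ' ').title()}

    def done(self, request):
        data = self.get_form_data(request)   # everything collected so far
        self.clear(request)
        return HttpResponseRedirect('/thanks/')

wizard = SignupWizard([Step('name', NameForm), Step('email', EmailForm)])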
lenskit/lkpy
lenskit/metrics/topn.py
precision
python
def precision(recs, truth, k=None): if k is not None: recs = recs.iloc[:k] nrecs = len(recs) if nrecs == 0: return None ngood = recs['item'].isin(truth.index).sum() return ngood / nrecs
Compute recommendation precision. This is computed as: .. math:: \\frac{|L \\cap I_u^{\\mathrm{test}}|}{|L|} In the uncommon case that ``k`` is specified and ``len(recs) < k``, this metric uses ``len(recs)`` as the denominator.
https://github.com/lenskit/lkpy/blob/91060fce6ec765a8016c74846292326c3e08fa1c/lenskit/metrics/topn.py#L20-L38
import logging import numpy as np import pandas as pd _log = logging.getLogger(__name__) def bulk_impl(metric): def wrap(impl): metric.bulk_score = impl return impl return wrap
MIT License
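A small worked example of the metric above, using hand-made pandas frames; the item ids are arbitrary.

# Tiny worked example: one of the four recommended items appears in the truth.
import pandas as pd
from lenskit.metrics.topn import precision   # module path from the record above

recs = pd.DataFrame({'item': [10, 20, 30, 40]})
truth = pd.DataFrame({'rating': [4.0, 3.5]},
                     index=pd.Index([20, 50], name='item'))

precision(recs, truth)        # -> 0.25  (1 relevant out of 4 recommended)
precision(recs, truth, k=2)   # -> 0.5   (only the top-2 list is scored)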
faab5/errortools
errortools/errortools.py
expand
python
def expand(point, idx, rnge): x = np.repeat(point, len(rnge)).reshape(len(point), len(rnge)).T x[:, idx] = rnge return x
Expand a numpy array and replace the values at a specified index by a range of values :param point: the data point to be expanded :param idx: the index of the column to be replaced by the values in rnge :param rnge: the values to replace in the data point
https://github.com/faab5/errortools/blob/b3d8972d884207aa8ab89dac98c23fd5f46a0c65/errortools/errortools.py#L186-L196
import numpy as np import iminuit from matplotlib.backends.backend_pdf import PdfPages import matplotlib.pyplot as plt import scipy def estimate_errors_sampling(fnc, X, p, cvr_mtx, n_samples='auto', return_covariance=False, *args, **kwargs): if n_samples == 'auto': n_pars = p.shape[0] n_samples = 1000 if n_pars < 10 else 10000 if n_pars < 100 else 100000 if n_pars < 1000 else 100*n_pars sampled_parameters = np.random.multivariate_normal(p, cvr_mtx, n_samples).T sampled_function = np.array([fnc(X, q, *args, **kwargs) for q in sampled_parameters]).T reference_function = np.tile(fnc(X, p, *args, **kwargs), (n_samples, 1)).T function_variation = sampled_function - reference_function if return_covariance == True: covar = np.dot(function_variation, function_variation.T) / n_samples return covar else: error = np.sqrt(np.mean(np.square(function_variation), axis=1)) return error def estimate_errors_linear(grad, cvr_mtx, return_covariance=False): grad = np.atleast_2d(grad) cvr_mtx = np.atleast_2d(cvr_mtx) if cvr_mtx.ndim > 2: raise NotImplementedError("Multidimensional cases not implemented") if cvr_mtx.shape[0] != cvr_mtx.shape[1]: raise ValueError("Covariance matrix not square") if cvr_mtx.shape[0] != grad.shape[1]: raise ValueError("Shape mismatch between gradients and covariance matrix") if return_covariance == True: covar = np.dot(grad, np.dot(cvr_mtx, grad.T)) return covar else: error = np.sqrt(np.abs([np.dot(g, np.dot(cvr_mtx, g)) for g in grad])) return error def report_loss_versus_approximation(model, X, y, l1, l2, features, pdf=None, pdf_name = "report.pdf"): X_bias = np.concatenate((X, np.ones((X.shape[0], 1))), axis=1) f0 = model.negativeLogPosterior(model.parameters, X_bias, y, l1, l2) if pdf == None: pdf = PdfPages(pdf_name) for p in range(0, model.parameters.shape[0]): fig, ax = plt.subplots(1, 1, figsize=(8,4)) param_minimum = model.minuit.get_param_states()[p]['value'] weights = np.linspace(param_minimum - 1, param_minimum + 1, 100) params = model.parameters.copy() loss = [] approx = [] for w in weights: params[p] = w loss.append(model.negativeLogPosterior(params, X_bias, y, l1, l2)) parabolic_approx = params - model.parameters approx.append(f0 + 0.5 * np.array([np.dot(parabolic_approx, np.dot(scipy.linalg.inv(model.cvr_mtx), parabolic_approx))])) col_ind = p % 2 row_ind = p // 2 ax.plot(weights, loss, '--', color='red', alpha=0.5, label="original") ax.plot(weights, approx, '-', color='orange', alpha=0.5, label="parabolic approximation") ax.set_xlabel(features[p]) ax.set_title("logloss") ax.grid() ax.legend() pdf.savefig(fig) return pdf def report_parameter_error(model, features, pdf=None, pdf_name = "report.pdf"): fig, ax = plt.subplots(1, 1, figsize=(8,4)) ax.errorbar(x=np.arange(model.parameters.shape[0]), y=model.parameters, yerr=np.sqrt(np.diag(model.cvr_mtx)), fmt='o', color='red', alpha=0.6, markersize=10, barsabove=True, capsize=10, label='fitted parameter value') ax.grid() ax.xaxis.set_ticks(np.arange(model.parameters.shape[0])) ax.xaxis.set_ticklabels(features + ['bias']) ax.set_xlabel("Parameters") ax.set_ylabel("Fitted parameter value") if pdf == None: pdf = PdfPages(pdf_name) pdf.savefig(fig) return pdf def report_correlation_matrix(model, features, pdf=None, pdf_name = "report.pdf"): fig, ax = plt.subplots(1, 1, figsize=(8,4)) ax.axis('off') corr_matrix = model.minuit.np_matrix(correlation=True) ax.table(cellText=corr_matrix, rowLabels=features, colLabels=features, loc='center') ax.set_title("Correlation matrix") if pdf == None: pdf = PdfPages(pdf_name) 
pdf.savefig(fig) return pdf
MIT License
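A quick illustration of ``expand`` with concrete numbers; the values are arbitrary.

# expand() keeps every coordinate fixed except the chosen column, which sweeps
# over the supplied range, which is handy for 1-D profile scans around a fit point.
import numpy as np
from errortools.errortools import expand   # module path from the record above

point = np.array([1.0, 2.0, 3.0])
grid = expand(point, idx=1, rnge=np.linspace(0.0, 1.0, 3))
# grid == [[1.0, 0.0, 3.0],
#          [1.0, 0.5, 3.0],
#          [1.0, 1.0, 3.0]]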
acutronicrobotics/gym-gazebo2
gym_gazebo2/envs/MARA/mara_collision_orient.py
MARACollisionOrientEnv.step
python
def step(self, action): self.iterator+=1 self._pub.publish(ut_mara.getTrajectoryMessage( action[:self.numJoints], self.environment['jointOrder'], self.velocity)) self.ros_clock = rclpy.clock.Clock().now().nanoseconds obs = self.take_observation() rewardDist = ut_math.rmseFunc(obs[self.numJoints:(self.numJoints+3)]) rewardOrientation = 2 * np.arccos(abs(obs[self.numJoints+3])) collided = self.collision() reward = ut_math.computeReward(rewardDist, rewardOrientation, collision = collided) done = bool(self.iterator == self.max_episode_steps) self.buffer_dist_rewards.append(rewardDist) self.buffer_tot_rewards.append(reward) info = {} if self.iterator % self.max_episode_steps == 0: max_dist_tgt = max(self.buffer_dist_rewards) mean_dist_tgt = np.mean(self.buffer_dist_rewards) min_dist_tgt = min(self.buffer_dist_rewards) max_tot_rew = max(self.buffer_tot_rewards) mean_tot_rew = np.mean(self.buffer_tot_rewards) min_tot_rew = min(self.buffer_tot_rewards) num_coll = self.collided info = {"infos":{"ep_dist_max": max_dist_tgt,"ep_dist_mean": mean_dist_tgt,"ep_dist_min": min_dist_tgt, "ep_rew_max": max_tot_rew,"ep_rew_mean": mean_tot_rew,"ep_rew_min": min_tot_rew,"num_coll": num_coll}} self.buffer_dist_rewards = [] self.buffer_tot_rewards = [] self.collided = 0 return obs, reward, done, info
Implement the environment step abstraction. Execute action and returns: - action - observation - reward - done (status)
https://github.com/acutronicrobotics/gym-gazebo2/blob/545e203e07895927fd7aae66596d83d97ee89fe5/gym_gazebo2/envs/MARA/mara_collision_orient.py#L290-L339
import gym gym.logger.set_level(40) import time import numpy as np import copy import os import psutil import signal import sys import math import transforms3d as tf3d from gym import utils, spaces from gym_gazebo2.utils import ut_generic, ut_launch, ut_mara, ut_math, ut_gazebo, tree_urdf, general_utils from gym.utils import seeding from gazebo_msgs.srv import SpawnEntity from multiprocessing import Process import argparse import rclpy from rclpy.qos import QoSProfile, qos_profile_sensor_data from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint from control_msgs.msg import JointTrajectoryControllerState from gazebo_msgs.msg import ContactState from std_msgs.msg import String from std_srvs.srv import Empty from geometry_msgs.msg import Pose from ros2pkg.api import get_prefix_path from builtin_interfaces.msg import Duration from PyKDL import ChainJntToJacSolver class MARACollisionOrientEnv(gym.Env): def __init__(self): args = ut_generic.getArgsParserMARA().parse_args() self.gzclient = args.gzclient self.realSpeed = args.realSpeed self.velocity = args.velocity self.multiInstance = args.multiInstance self.port = args.port if self.realSpeed: urdf = "reinforcement_learning/mara_robot_run.urdf" urdfPath = get_prefix_path("mara_description") + "/share/mara_description/urdf/" + urdf else: urdf = "reinforcement_learning/mara_robot_train.urdf" urdfPath = get_prefix_path("mara_description") + "/share/mara_description/urdf/" + urdf self.launch_subp = ut_launch.startLaunchServiceProcess( ut_launch.generateLaunchDescriptionMara( self.gzclient, self.realSpeed, self.multiInstance, self.port, urdfPath)) rclpy.init(args=None) self.node = rclpy.create_node(self.__class__.__name__) self._observation_msg = None self.max_episode_steps = 1024 self.iterator = 0 self.reset_jnts = True self._collision_msg = None self.targetPosition = np.asarray([-0.40028, 0.095615, 0.72466]) self.target_orientation = np.asarray([0., 0.7071068, 0.7071068, 0.]) EE_POINTS = np.asmatrix([[0, 0, 0]]) EE_VELOCITIES = np.asmatrix([[0, 0, 0]]) INITIAL_JOINTS = np.array([0., 0., 0., 0., 0., 0.]) JOINT_PUBLISHER = '/mara_controller/command' JOINT_SUBSCRIBER = '/mara_controller/state' MOTOR1_JOINT = 'motor1' MOTOR2_JOINT = 'motor2' MOTOR3_JOINT = 'motor3' MOTOR4_JOINT = 'motor4' MOTOR5_JOINT = 'motor5' MOTOR6_JOINT = 'motor6' EE_LINK = 'ee_link' WORLD = 'world' BASE = 'base_robot' MARA_MOTOR1_LINK = 'motor1_link' MARA_MOTOR2_LINK = 'motor2_link' MARA_MOTOR3_LINK = 'motor3_link' MARA_MOTOR4_LINK = 'motor4_link' MARA_MOTOR5_LINK = 'motor5_link' MARA_MOTOR6_LINK = 'motor6_link' EE_LINK = 'ee_link' JOINT_ORDER = [MOTOR1_JOINT,MOTOR2_JOINT, MOTOR3_JOINT, MOTOR4_JOINT, MOTOR5_JOINT, MOTOR6_JOINT] LINK_NAMES = [ WORLD, BASE, MARA_MOTOR1_LINK, MARA_MOTOR2_LINK, MARA_MOTOR3_LINK, MARA_MOTOR4_LINK, MARA_MOTOR5_LINK, MARA_MOTOR6_LINK, EE_LINK] reset_condition = { 'initial_positions': INITIAL_JOINTS, 'initial_velocities': [] } m_jointOrder = copy.deepcopy(JOINT_ORDER) m_linkNames = copy.deepcopy(LINK_NAMES) self.environment = { 'jointOrder': m_jointOrder, 'linkNames': m_linkNames, 'reset_conditions': reset_condition, 'tree_path': urdfPath, 'end_effector_points': EE_POINTS, } self._pub = self.node.create_publisher(JointTrajectory, JOINT_PUBLISHER, qos_profile=qos_profile_sensor_data) self._sub = self.node.create_subscription(JointTrajectoryControllerState, JOINT_SUBSCRIBER, self.observation_callback, qos_profile=qos_profile_sensor_data) self._sub_coll = self.node.create_subscription(ContactState, '/gazebo_contacts', 
self.collision_callback, qos_profile=qos_profile_sensor_data) self.reset_sim = self.node.create_client(Empty, '/reset_simulation') _, self.ur_tree = tree_urdf.treeFromFile(self.environment['tree_path']) self.mara_chain = self.ur_tree.getChain(self.environment['linkNames'][0], self.environment['linkNames'][-1]) self.numJoints = self.mara_chain.getNrOfJoints() self.jacSolver = ChainJntToJacSolver(self.mara_chain) self.obs_dim = self.numJoints + 10 low = -np.pi * np.ones(self.numJoints) high = np.pi * np.ones(self.numJoints) self.action_space = spaces.Box(low, high) high = np.inf*np.ones(self.obs_dim) low = -high self.observation_space = spaces.Box(low, high) spawn_cli = self.node.create_client(SpawnEntity, '/spawn_entity') while not spawn_cli.wait_for_service(timeout_sec=1.0): self.node.get_logger().info('/spawn_entity service not available, waiting again...') modelXml = ut_gazebo.getTargetSdf() pose = Pose() pose.position.x = self.targetPosition[0] pose.position.y = self.targetPosition[1] pose.position.z = self.targetPosition[2] pose.orientation.x = self.target_orientation[1] pose.orientation.y= self.target_orientation[2] pose.orientation.z = self.target_orientation[3] pose.orientation.w = self.target_orientation[0] self.spawn_request = SpawnEntity.Request() self.spawn_request.name = "target" self.spawn_request.xml = modelXml self.spawn_request.robot_namespace = "" self.spawn_request.initial_pose = pose self.spawn_request.reference_frame = "world" target_future = spawn_cli.call_async(self.spawn_request) rclpy.spin_until_future_complete(self.node, target_future) self.seed() self.buffer_dist_rewards = [] self.buffer_tot_rewards = [] self.collided = 0 def observation_callback(self, message): self._observation_msg = message def collision_callback(self, message): collision_messages = ["mara::base_robot::base_robot_collision", "ground_plane::link::collision"] if message.collision1_name != message.collision2_name: if not ((message.collision1_name in collision_messages) and (message.collision2_name in collision_messages)): self._collision_msg = message def set_episode_size(self, episode_size): self.max_episode_steps = episode_size def take_observation(self): rclpy.spin_once(self.node) obs_message = self._observation_msg while obs_message is None or int(str(obs_message.header.stamp.sec)+(str(obs_message.header.stamp.nanosec))) < self.ros_clock: rclpy.spin_once(self.node) obs_message = self._observation_msg lastObservations = ut_mara.processObservations(obs_message, self.environment) self._observation_msg = None ee_link_jacobians = ut_mara.getJacobians(lastObservations, self.numJoints, self.jacSolver) if self.environment['linkNames'][-1] is None: print("End link is empty!!") return None else: translation, rot = general_utils.forwardKinematics(self.mara_chain, self.environment['linkNames'], lastObservations[:self.numJoints], baseLink=self.environment['linkNames'][0], endLink=self.environment['linkNames'][-1]) current_quaternion = tf3d.quaternions.mat2quat(rot) quat_error = tf3d.quaternions.qmult(current_quaternion, tf3d.quaternions.qconjugate(self.target_orientation)) current_eePos_tgt = np.ndarray.flatten(general_utils.getEePoints(self.environment['end_effector_points'], translation, rot).T) eePos_points = current_eePos_tgt - self.targetPosition eeVelocities = ut_mara.getEePointsVelocities(ee_link_jacobians, self.environment['end_effector_points'], rot, lastObservations) state = np.r_[np.reshape(lastObservations, -1), np.reshape(eePos_points, -1), np.reshape(quat_error, -1), np.reshape(eeVelocities, 
-1),] return state def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def collision(self): if self._collision_msg is not None: while not self.reset_sim.wait_for_service(timeout_sec=1.0): self.node.get_logger().info('/reset_simulation service not available, waiting again...') reset_future = self.reset_sim.call_async(Empty.Request()) rclpy.spin_until_future_complete(self.node, reset_future) self._collision_msg = None self.collided += 1 return True else: return False
Apache License 2.0
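A sketch of the usual interaction loop with the environment above. It assumes a working ROS 2 + Gazebo MARA installation (the constructor launches Gazebo itself) and the standard ``gym.Env`` ``reset()`` that the class is expected to provide outside this excerpt.

# Illustrative rollout loop; actions are random joint targets in [-pi, pi].
env = MARACollisionOrientEnv()
obs = env.reset()               # reset() assumed from the gym.Env contract
for _ in range(env.max_episode_steps):
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    if done:
        break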
apache/bloodhound
bloodhound_dashboard/bhdashboard/tests/__init__.py
EnvironmentStub.disable_component
python
def disable_component(self, clsdef): if trac_version < trac_tags[0]: try: self.enabled_components.remove(clsdef) except ValueError: self.log.warning("Component %s was not enabled", clsdef) else: raise NotImplementedError("TODO: Disable components in Trac>=0.13")
r"""Disable a plugin temporarily at testing time.
https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_dashboard/bhdashboard/tests/__init__.py#L56-L66
__metaclass__ = type import sys import trac.test from trac.core import ComponentMeta from trac.db.api import _parse_db_str, DatabaseManager from trac.mimeview.api import Context from trac.util.compat import set from bhdashboard.util import trac_version, trac_tags class EnvironmentStub(trac.test.EnvironmentStub): @property def _abs_href(self): return self.abs_href def enable_component(self, clsdef): if trac_version < trac_tags[0]: if clsdef not in self.enabled_components: self.enabled_components.append(clsdef) else: raise NotImplementedError("TODO: Enable components in Trac>=0.13")
Apache License 2.0
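A hypothetical test fixture built on the stub above (Trac < 0.13 code path); the component name is illustrative.

# Hypothetical fixture: enable a plugin for one test, then put it back.
env = EnvironmentStub(enable=['trac.*'])          # enable kwarg from trac.test
env.enable_component('bhdashboard.web_ui.DashboardModule')
try:
    pass  # ... exercise the component against env ...
finally:
    env.disable_component('bhdashboard.web_ui.DashboardModule')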
asweigart/pygetwindow
src/pygetwindow/_pygetwindow_macos.py
MacOSWindow.title
python
def title(self): raise NotImplementedError
Returns the window title as a string.
https://github.com/asweigart/pygetwindow/blob/c5f3070324609e682d082ed53122a36002a3e293/src/pygetwindow/_pygetwindow_macos.py#L166-L168
import Quartz import pygetwindow def getAllTitles(): windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID) return ['%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')) for win in windows] def getActiveWindow(): windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID) for win in windows: if win['kCGWindowLayer'] == 0: return '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')) raise Exception('Could not find an active window.') def getWindowsAt(x, y): windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID) matches = [] for win in windows: w = win['kCGWindowBounds'] if pygetwindow.pointInRect(x, y, w['X'], w['Y'], w['Width'], w['Height']): matches.append('%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, ''))) return matches def activate(): pass def getWindowGeometry(title): windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID) for win in windows: if title in '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')): w = win['kCGWindowBounds'] return (w['X'], w['Y'], w['Width'], w['Height']) def isVisible(title): windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID) for win in windows: if title in '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, '')): return win['kCGWindowAlpha'] != 0.0 def isMinimized(): pass class MacOSWindow(): def __init__(self, hWnd): self._hWnd = hWnd def _onRead(attrName): r = self._getWindowRect(_hWnd) self._rect._left = r.left self._rect._top = r.top self._rect._width = r.right - r.left self._rect._height = r.bottom - r.top def _onChange(oldBox, newBox): self.moveTo(newBox.left, newBox.top) self.resizeTo(newBox.width, newBox.height) r = self._getWindowRect(_hWnd) self._rect = pyrect.Rect(r.left, r.top, r.right - r.left, r.bottom - r.top, onChange=_onChange, onRead=_onRead) def __str__(self): r = self._getWindowRect(_hWnd) width = r.right - r.left height = r.bottom - r.top return '<%s left="%s", top="%s", width="%s", height="%s", title="%s">' % (self.__class__.__name__, r.left, r.top, width, height, self.title) def __repr__(self): return '%s(hWnd=%s)' % (self.__class__.__name__, self._hWnd) def __eq__(self, other): return isinstance(other, MacOSWindow) and self._hWnd == other._hWnd def close(self): raise NotImplementedError def minimize(self): raise NotImplementedError def maximize(self): raise NotImplementedError def restore(self): raise NotImplementedError def activate(self): raise NotImplementedError def resizeRel(self, widthOffset, heightOffset): raise NotImplementedError def resizeTo(self, newWidth, newHeight): raise NotImplementedError def moveRel(self, xOffset, yOffset): raise NotImplementedError def moveTo(self, newLeft, newTop): raise NotImplementedError @property def isMinimized(self): raise NotImplementedError @property def isMaximized(self): raise NotImplementedError @property def isActive(self): raise NotImplementedError @property
BSD 3-Clause New or Revised License
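A usage sketch for the module-level helpers defined above (macOS only, requires the pyobjc Quartz bindings); the window title queried is an arbitrary example.

# Illustrative calls against the Quartz-backed helpers above.
print(getAllTitles())                 # every on-screen window as "Owner Title"
print(getActiveWindow())              # the frontmost (layer 0) window
print(getWindowsAt(100, 200))         # windows whose bounds contain (100, 200)
print(getWindowGeometry("Terminal"))  # (x, y, width, height) of a matching window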
google/apitools
apitools/base/py/util.py
AcceptableMimeType
python
def AcceptableMimeType(accept_patterns, mime_type): if '/' not in mime_type: raise exceptions.InvalidUserInputError( 'Invalid MIME type: "%s"' % mime_type) unsupported_patterns = [p for p in accept_patterns if ';' in p] if unsupported_patterns: raise exceptions.GeneratedClientError( 'MIME patterns with parameter unsupported: "%s"' % ', '.join( unsupported_patterns)) def MimeTypeMatches(pattern, mime_type): if pattern == '*': pattern = '*/*' return all(accept in ('*', provided) for accept, provided in zip(pattern.split('/'), mime_type.split('/'))) return any(MimeTypeMatches(pattern, mime_type) for pattern in accept_patterns)
Return True iff mime_type is acceptable for one of accept_patterns. Note that this function assumes that all patterns in accept_patterns will be simple types of the form "type/subtype", where one or both of these can be "*". We do not support parameters (i.e. "; q=") in patterns. Args: accept_patterns: list of acceptable MIME types. mime_type: the mime type we would like to match. Returns: Whether or not mime_type matches (at least) one of these patterns.
https://github.com/google/apitools/blob/31cad2d904f356872d2965687e84b2d87ee2cdd3/apitools/base/py/util.py#L166-L199
import os import random import six from six.moves import http_client import six.moves.urllib.error as urllib_error import six.moves.urllib.parse as urllib_parse import six.moves.urllib.request as urllib_request from apitools.base.protorpclite import messages from apitools.base.py import encoding_helper as encoding from apitools.base.py import exceptions if six.PY3: from collections.abc import Iterable else: from collections import Iterable __all__ = [ 'DetectGae', 'DetectGce', ] _RESERVED_URI_CHARS = r":/?#[]@!$&'()*+,;=" def DetectGae(): server_software = os.environ.get('SERVER_SOFTWARE', '') return (server_software.startswith('Development/') or server_software.startswith('Google App Engine/')) def DetectGce(): metadata_url = 'http://{}'.format( os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal')) try: o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open( urllib_request.Request( metadata_url, headers={'Metadata-Flavor': 'Google'})) except urllib_error.URLError: return False return (o.getcode() == http_client.OK and o.headers.get('metadata-flavor') == 'Google') def NormalizeScopes(scope_spec): if isinstance(scope_spec, six.string_types): scope_spec = six.ensure_str(scope_spec) return set(scope_spec.split(' ')) elif isinstance(scope_spec, Iterable): scope_spec = [six.ensure_str(x) for x in scope_spec] return set(scope_spec) raise exceptions.TypecheckError( 'NormalizeScopes expected string or iterable, found %s' % ( type(scope_spec),)) def Typecheck(arg, arg_type, msg=None): if not isinstance(arg, arg_type): if msg is None: if isinstance(arg_type, tuple): msg = 'Type of arg is "%s", not one of %r' % ( type(arg), arg_type) else: msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type) raise exceptions.TypecheckError(msg) return arg def ExpandRelativePath(method_config, params, relative_path=None): path = relative_path or method_config.relative_path or '' for param in method_config.path_params: param_template = '{%s}' % param reserved_chars = '' reserved_template = '{+%s}' % param if reserved_template in path: reserved_chars = _RESERVED_URI_CHARS path = path.replace(reserved_template, param_template) if param_template not in path: raise exceptions.InvalidUserInputError( 'Missing path parameter %s' % param) try: value = params[param] except KeyError: raise exceptions.InvalidUserInputError( 'Request missing required parameter %s' % param) if value is None: raise exceptions.InvalidUserInputError( 'Request missing required parameter %s' % param) try: if not isinstance(value, six.string_types): value = str(value) path = path.replace(param_template, urllib_parse.quote(value.encode('utf_8'), reserved_chars)) except TypeError as e: raise exceptions.InvalidUserInputError( 'Error setting required parameter %s to value %s: %s' % ( param, value, e)) return path def CalculateWaitForRetry(retry_attempt, max_wait=60): wait_time = 2 ** retry_attempt max_jitter = wait_time / 4.0 wait_time += random.uniform(-max_jitter, max_jitter) return max(1, min(wait_time, max_wait))
Apache License 2.0
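A few illustrative calls to ``AcceptableMimeType``; the patterns show the wildcard handling and the parameter restriction described in the docstring.

# Wildcards match per component; patterns carrying parameters are rejected.
AcceptableMimeType(['*/*'], 'image/png')            # True
AcceptableMimeType(['image/*'], 'image/png')        # True
AcceptableMimeType(['text/plain'], 'image/png')     # False
AcceptableMimeType(['text/*; q=0.9'], 'image/png')  # raises GeneratedClientError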
cloudtools/stacker
stacker/blueprints/base.py
Blueprint.to_json
python
def to_json(self, variables=None): variables_to_resolve = [] if variables: for key, value in variables.items(): variables_to_resolve.append(Variable(key, value)) for k in self.get_parameter_definitions(): if not variables or k not in variables: variables_to_resolve.append(Variable(k, 'unused_value')) self.resolve_variables(variables_to_resolve) return self.render_template()[1]
Render the blueprint and return the template in json form. Args: variables (dict): Optional dictionary providing/overriding variable values. Returns: str: the rendered CFN JSON template
https://github.com/cloudtools/stacker/blob/f563a6f5a23550c7a668a1500bcea2b4e94f5bbf/stacker/blueprints/base.py#L492-L515
from __future__ import print_function from __future__ import division from __future__ import absolute_import from builtins import str from past.builtins import basestring from builtins import object import copy import hashlib import logging import string from stacker.util import read_value_from_path from stacker.variables import Variable from troposphere import ( Output, Parameter, Ref, Template, ) from ..exceptions import ( MissingVariable, UnresolvedVariable, UnresolvedVariables, ValidatorError, VariableTypeRequired, InvalidUserdataPlaceholder ) from .variables.types import ( CFNType, TroposphereType, ) logger = logging.getLogger(__name__) PARAMETER_PROPERTIES = { "default": "Default", "description": "Description", "no_echo": "NoEcho", "allowed_values": "AllowedValues", "allowed_pattern": "AllowedPattern", "max_length": "MaxLength", "min_length": "MinLength", "max_value": "MaxValue", "min_value": "MinValue", "constraint_description": "ConstraintDescription" } class CFNParameter(object): def __init__(self, name, value): acceptable_types = [basestring, bool, list, int] acceptable = False for acceptable_type in acceptable_types: if isinstance(value, acceptable_type): acceptable = True if acceptable_type == bool: logger.debug("Converting parameter %s boolean '%s' " "to string.", name, value) value = str(value).lower() break if acceptable_type == int: logger.debug("Converting parameter %s integer '%s' " "to string.", name, value) value = str(value) break if not acceptable: raise ValueError( "CFNParameter (%s) value must be one of %s got: %s" % ( name, "str, int, bool, or list", value)) self.name = name self.value = value def __repr__(self): return "CFNParameter({}: {})".format(self.name, self.value) def to_parameter_value(self): return self.value @property def ref(self): return Ref(self.name) def build_parameter(name, properties): p = Parameter(name, Type=properties.get("type")) for name, attr in PARAMETER_PROPERTIES.items(): if name in properties: setattr(p, attr, properties[name]) return p def validate_variable_type(var_name, var_type, value): if isinstance(var_type, CFNType): value = CFNParameter(name=var_name, value=value) elif isinstance(var_type, TroposphereType): try: value = var_type.create(value) except Exception as exc: name = "{}.create".format(var_type.resource_name) raise ValidatorError(var_name, name, value, exc) else: if not isinstance(value, var_type): raise ValueError( "Value for variable %s must be of type %s. Actual " "type: %s." % (var_name, var_type, type(value)) ) return value def validate_allowed_values(allowed_values, value): if not allowed_values or isinstance(value, CFNParameter): return True return value in allowed_values def resolve_variable(var_name, var_def, provided_variable, blueprint_name): try: var_type = var_def["type"] except KeyError: raise VariableTypeRequired(blueprint_name, var_name) if provided_variable: if not provided_variable.resolved: raise UnresolvedVariable(blueprint_name, provided_variable) value = provided_variable.value else: try: value = var_def["default"] except KeyError: raise MissingVariable(blueprint_name, var_name) validator = var_def.get("validator", lambda v: v) try: value = validator(value) except Exception as exc: raise ValidatorError(var_name, validator.__name__, value, exc) value = validate_variable_type(var_name, var_type, value) allowed_values = var_def.get("allowed_values") if not validate_allowed_values(allowed_values, value): message = ( "Invalid value passed to '%s' in blueprint: %s. 
Got: '%s', " "expected one of %s" ) % (var_name, blueprint_name, value, allowed_values) raise ValueError(message) return value def parse_user_data(variables, raw_user_data, blueprint_name): variable_values = {} for key, value in variables.items(): if type(value) is CFNParameter: variable_values[key] = value.to_parameter_value() else: variable_values[key] = value template = string.Template(raw_user_data) res = "" try: res = template.substitute(variable_values) except ValueError as exp: raise InvalidUserdataPlaceholder(blueprint_name, exp.args[0]) except KeyError as key: raise MissingVariable(blueprint_name, key) return res class Blueprint(object): def __init__(self, name, context, mappings=None, description=None): self.name = name self.context = context self.mappings = mappings self.outputs = {} self.reset_template() self.resolved_variables = None self.description = description if hasattr(self, "PARAMETERS") or hasattr(self, "LOCAL_PARAMETERS"): raise AttributeError("DEPRECATION WARNING: Blueprint %s uses " "deprecated PARAMETERS or " "LOCAL_PARAMETERS, rather than VARIABLES. " "Please update your blueprints. See https://" "stacker.readthedocs.io/en/latest/blueprints." "html#variables for aditional information." % name) def get_parameter_definitions(self): output = {} for var_name, attrs in self.defined_variables().items(): var_type = attrs.get("type") if isinstance(var_type, CFNType): cfn_attrs = copy.deepcopy(attrs) cfn_attrs["type"] = var_type.parameter_type output[var_name] = cfn_attrs return output def get_output_definitions(self): return {k: output.to_dict() for k, output in self.template.outputs.items()} def get_required_parameter_definitions(self): required = {} for name, attrs in self.get_parameter_definitions().items(): if "Default" not in attrs: required[name] = attrs return required def get_parameter_values(self): variables = self.get_variables() output = {} for key, value in variables.items(): try: output[key] = value.to_parameter_value() except AttributeError: continue return output def setup_parameters(self): t = self.template parameters = self.get_parameter_definitions() if not parameters: logger.debug("No parameters defined.") return for name, attrs in parameters.items(): p = build_parameter(name, attrs) t.add_parameter(p) def defined_variables(self): return copy.deepcopy(getattr(self, "VARIABLES", {})) def get_variables(self): if self.resolved_variables is None: raise UnresolvedVariables(self.name) return self.resolved_variables def get_cfn_parameters(self): variables = self.get_variables() output = {} for key, value in variables.items(): if hasattr(value, "to_parameter_value"): output[key] = value.to_parameter_value() return output def resolve_variables(self, provided_variables): self.resolved_variables = {} defined_variables = self.defined_variables() variable_dict = dict((var.name, var) for var in provided_variables) for var_name, var_def in defined_variables.items(): value = resolve_variable( var_name, var_def, variable_dict.get(var_name), self.name ) self.resolved_variables[var_name] = value def import_mappings(self): if not self.mappings: return for name, mapping in self.mappings.items(): logger.debug("Adding mapping %s.", name) self.template.add_mapping(name, mapping) def reset_template(self): self.template = Template() self._rendered = None self._version = None def render_template(self): self.import_mappings() self.create_template() if self.description: self.set_template_description(self.description) self.setup_parameters() rendered = 
self.template.to_json(indent=self.context.template_indent) version = hashlib.md5(rendered.encode()).hexdigest()[:8] return (version, rendered)
BSD 2-Clause Simplified License
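A minimal Blueprint sketch against the hooks used by ``render_template`` and ``to_json`` above. It assumes a stacker ``Context`` instance is available as ``context`` and that ``CFNString`` lives in ``stacker.blueprints.variables.types``; the bucket resource is illustrative.

# Hypothetical blueprint: one CFN parameter, one troposphere resource.
from troposphere import s3
from stacker.blueprints.base import Blueprint
from stacker.blueprints.variables.types import CFNString  # assumed location

class BucketBlueprint(Blueprint):
    VARIABLES = {
        "BucketName": {"type": CFNString, "description": "Name of the bucket"},
    }

    def create_template(self):
        self.template.add_resource(s3.Bucket("MyBucket"))

print(BucketBlueprint("bucket", context).to_json({"BucketName": "demo"}))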
protothis/python-synology
src/synology_dsm/api/surveillance_station/camera.py
SynoCamera.name
python
def name(self): return self._data["name"]
Return name of the camera.
https://github.com/protothis/python-synology/blob/645b818be2013231ac126c6962d2f9092a5c3aae/src/synology_dsm/api/surveillance_station/camera.py#L31-L33
from .const import MOTION_DETECTION_DISABLED from .const import RECORDING_STATUS class SynoCamera: def __init__(self, data, live_view_data=None): self._data = data self.live_view = SynoCameraLiveView(live_view_data) self._motion_detection_enabled = None def update(self, data): self._data = data def update_motion_detection(self, data): self._motion_detection_enabled = ( MOTION_DETECTION_DISABLED != data["MDParam"]["source"] ) @property def id(self): return self._data["id"] @property
MIT License
ungarj/s2reader
s2reader/s2reader.py
SentinelGranule.cloudmask
python
def cloudmask(self): polys = list(self._get_mask(mask_type="MSK_CLOUDS")) return MultiPolygon([ poly["geometry"] for poly in polys if poly["attributes"]["maskType"] == "OPAQUE" ]).buffer(0)
Return cloudmask as a shapely geometry.
https://github.com/ungarj/s2reader/blob/376fd7ee1d15cce0849709c149d694663a7bc0ef/s2reader/s2reader.py#L342-L349
import os import pyproj import numpy as np import re import zipfile import warnings from lxml.etree import parse, fromstring from shapely.geometry import Polygon, MultiPolygon, box from shapely.ops import transform from functools import partial from cached_property import cached_property from itertools import chain from .exceptions import S2ReaderIOError, S2ReaderMetadataError def open(safe_file): if os.path.isdir(safe_file) or os.path.isfile(safe_file): return SentinelDataSet(safe_file) else: raise IOError("file not found: %s" % safe_file) BAND_IDS = [ "01", "02", "03", "04", "05", "06", "07", "08", "8A", "09", "10", "11", "12" ] class SentinelDataSet(object): def __init__(self, path): filename, extension = os.path.splitext(os.path.normpath(path)) if extension not in [".SAFE", ".ZIP", ".zip"]: raise IOError("only .SAFE folders or zipped .SAFE folders allowed") self.is_zip = True if extension in [".ZIP", ".zip"] else False self.path = os.path.normpath(path) if self.is_zip: self._zipfile = zipfile.ZipFile(self.path, 'r') self._zip_root = os.path.basename(filename) if self._zip_root not in self._zipfile.namelist(): if not filename.endswith(".SAFE"): self._zip_root = os.path.basename(filename) + ".SAFE/" else: self._zip_root = os.path.basename(filename) + "/" if self._zip_root not in self._zipfile.namelist(): raise S2ReaderIOError("unknown zipfile structure") self.manifest_safe_path = os.path.join( self._zip_root, "manifest.safe") else: self._zipfile = None self._zip_root = None self.manifest_safe_path = os.path.join(self.path, "manifest.safe") if ( not os.path.isfile(self.manifest_safe_path) and self.manifest_safe_path not in self._zipfile.namelist() ): raise S2ReaderIOError( "manifest.safe not found: %s" % self.manifest_safe_path ) @cached_property def _product_metadata(self): if self.is_zip: return fromstring(self._zipfile.read(self.product_metadata_path)) else: return parse(self.product_metadata_path) @cached_property def _manifest_safe(self): if self.is_zip: return fromstring(self._zipfile.read(self.manifest_safe_path)) else: return parse(self.manifest_safe_path) @cached_property def product_metadata_path(self): data_object_section = self._manifest_safe.find("dataObjectSection") for data_object in data_object_section: if data_object.attrib.get("ID") == "S2_Level-1C_Product_Metadata": relpath = os.path.relpath( data_object.iter("fileLocation").next().attrib["href"]) try: if self.is_zip: abspath = os.path.join(self._zip_root, relpath) assert abspath in self._zipfile.namelist() else: abspath = os.path.join(self.path, relpath) assert os.path.isfile(abspath) except AssertionError: raise S2ReaderIOError( "S2_Level-1C_product_metadata_path not found: %s \ " % abspath ) return abspath @cached_property def product_start_time(self): for element in self._product_metadata.iter("Product_Info"): return element.find("PRODUCT_START_TIME").text @cached_property def product_stop_time(self): for element in self._product_metadata.iter("Product_Info"): return element.find("PRODUCT_STOP_TIME").text @cached_property def generation_time(self): for element in self._product_metadata.iter("Product_Info"): return element.findtext("GENERATION_TIME") @cached_property def processing_level(self): for element in self._product_metadata.iter("Product_Info"): return element.findtext("PROCESSING_LEVEL") @cached_property def product_type(self): for element in self._product_metadata.iter("Product_Info"): return element.findtext("PRODUCT_TYPE") @cached_property def spacecraft_name(self): for element in 
self._product_metadata.iter("Datatake"): return element.findtext("SPACECRAFT_NAME") @cached_property def sensing_orbit_number(self): for element in self._product_metadata.iter("Datatake"): return element.findtext("SENSING_ORBIT_NUMBER") @cached_property def sensing_orbit_direction(self): for element in self._product_metadata.iter("Datatake"): return element.findtext("SENSING_ORBIT_DIRECTION") @cached_property def product_format(self): for element in self._product_metadata.iter("Query_Options"): return element.findtext("PRODUCT_FORMAT") @cached_property def footprint(self): product_footprint = self._product_metadata.iter("Product_Footprint") for element in product_footprint: global_footprint = None for global_footprint in element.iter("Global_Footprint"): coords = global_footprint.findtext("EXT_POS_LIST").split() return _polygon_from_coords(coords) @cached_property def granules(self): for element in self._product_metadata.iter("Product_Info"): product_organisation = element.find("Product_Organisation") if self.product_format == 'SAFE': return [ SentinelGranule(_id.find("Granules"), self) for _id in product_organisation.findall("Granule_List") ] elif self.product_format == 'SAFE_COMPACT': return [ SentinelGranuleCompact(_id.find("Granule"), self) for _id in product_organisation.findall("Granule_List") ] else: raise Exception( "PRODUCT_FORMAT not recognized in metadata file, found: '" + str(self.safe_format) + "' accepted are 'SAFE' and 'SAFE_COMPACT'" ) def granule_paths(self, band_id): band_id = str(band_id).zfill(2) try: assert isinstance(band_id, str) assert band_id in BAND_IDS except AssertionError: raise AttributeError( "band ID not valid: %s" % band_id ) return [ granule.band_path(band_id) for granule in self.granules ] def __enter__(self): return self def __exit__(self, t, v, tb): try: self._zipfile.close() except AttributeError: pass class SentinelGranule(object): def __init__(self, granule, dataset): self.dataset = dataset if self.dataset.is_zip: granules_path = os.path.join(self.dataset._zip_root, "GRANULE") else: granules_path = os.path.join(dataset.path, "GRANULE") self.granule_identifier = granule.attrib["granuleIdentifier"] self.granule_path = os.path.join( granules_path, self.granule_identifier) self.datastrip_identifier = granule.attrib["datastripIdentifier"] @cached_property def _metadata(self): if self.dataset.is_zip: return fromstring(self.dataset._zipfile.read(self.metadata_path)) else: return parse(self.metadata_path) @cached_property def _nsmap(self): if self.dataset.is_zip: root = self._metadata else: root = self._metadata.getroot() return { k: v for k, v in root.nsmap.iteritems() if k } @cached_property def srid(self): tile_geocoding = self._metadata.iter("Tile_Geocoding").next() return tile_geocoding.findtext("HORIZONTAL_CS_CODE") @cached_property def metadata_path(self): xml_name = _granule_identifier_to_xml_name(self.granule_identifier) metadata_path = os.path.join(self.granule_path, xml_name) try: assert os.path.isfile(metadata_path) or metadata_path in self.dataset._zipfile.namelist() except AssertionError: raise S2ReaderIOError( "Granule metadata XML does not exist:", metadata_path) return metadata_path @cached_property def pvi_path(self): return _pvi_path(self) @cached_property def tci_path(self): tci_paths = [ path for path in self.dataset._product_metadata.xpath( ".//Granule[@granuleIdentifier='%s']/IMAGE_FILE/text()" % self.granule_identifier ) if path.endswith('TCI') ] try: tci_path = tci_paths[0] except IndexError: return None return os.path.join( 
self.dataset._zip_root if self.dataset.is_zip else self.dataset.path, tci_path ) + '.jp2' @cached_property def cloud_percent(self): image_content_qi = self._metadata.findtext( ( """n1:Quality_Indicators_Info/Image_Content_QI/""" """CLOUDY_PIXEL_PERCENTAGE""" ), namespaces=self._nsmap) return float(image_content_qi) @cached_property def footprint(self): tile_geocoding = self._metadata.iter("Tile_Geocoding").next() resolution = 10 searchstring = ".//*[@resolution='%s']" % resolution size, geoposition = tile_geocoding.findall(searchstring) nrows, ncols = (int(i.text) for i in size) ulx, uly, xdim, ydim = (int(i.text) for i in geoposition) lrx = ulx + nrows * resolution lry = uly - ncols * resolution utm_footprint = box(ulx, lry, lrx, uly) project = partial( pyproj.transform, pyproj.Proj(init=self.srid), pyproj.Proj(init='EPSG:4326') ) footprint = transform(project, utm_footprint).buffer(0) return footprint @cached_property
MIT License
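A usage sketch for the ``cloudmask`` property above; the ``.SAFE`` path is a placeholder for a real Sentinel-2 Level-1C product.

# Iterate the granules of a product and inspect their opaque-cloud geometry.
import s2reader

with s2reader.open("S2A_MSIL1C_example.SAFE") as dataset:
    for granule in dataset.granules:
        clouds = granule.cloudmask            # shapely MultiPolygon
        print(granule.granule_identifier, clouds.area)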
nschloe/tikzplotlib
src/tikzplotlib/_save.py
get_tikz_code
python
def get_tikz_code( figure="gcf", filepath: str | pathlib.Path | None = None, axis_width: str | None = None, axis_height: str | None = None, textsize: float = 10.0, tex_relative_path_to_data: str | None = None, externalize_tables: bool = False, override_externals: bool = False, externals_search_path: str | None = None, strict: bool = False, wrap: bool = True, add_axis_environment: bool = True, extra_axis_parameters: list | set | None = None, extra_groupstyle_parameters: dict = {}, extra_tikzpicture_parameters: list | set | None = None, extra_lines_start: list | set | None = None, dpi: int | None = None, show_info: bool = False, include_disclaimer: bool = True, standalone: bool = False, float_format: str = ".15g", table_row_sep: str = "\n", flavor: str = "latex", ): if figure == "gcf": figure = plt.gcf() data = {} data["axis width"] = axis_width data["axis height"] = axis_height data["rel data path"] = tex_relative_path_to_data data["externalize tables"] = externalize_tables data["override externals"] = override_externals data["externals search path"] = externals_search_path if filepath: filepath = pathlib.Path(filepath) data["output dir"] = filepath.parent data["base name"] = filepath.stem else: directory = tempfile.mkdtemp() data["output dir"] = pathlib.Path(directory) data["base name"] = "tmp" data["strict"] = strict data["tikz libs"] = set() data["pgfplots libs"] = set() data["font size"] = textsize data["custom colors"] = {} data["legend colors"] = [] data["add axis environment"] = add_axis_environment data["show_info"] = show_info data["rectangle_legends"] = set() if extra_axis_parameters: data["extra axis options [base]"] = set(extra_axis_parameters).copy() else: data["extra axis options [base]"] = set() data["extra groupstyle options [base]"] = extra_groupstyle_parameters if dpi: data["dpi"] = dpi else: savefig_dpi = mpl.rcParams["savefig.dpi"] data["dpi"] = ( savefig_dpi if isinstance(savefig_dpi, int) else mpl.rcParams["figure.dpi"] ) data["float format"] = float_format data["table_row_sep"] = table_row_sep try: data["flavor"] = Flavors[flavor.lower()] except KeyError: raise ValueError( f"Unsupported TeX flavor {flavor!r}. " f"Please choose from {', '.join(map(repr, Flavors))}" ) if show_info: _print_pgfplot_libs_message(data) data, content = _recurse(data, figure) if "is_in_groupplot_env" in data and data["is_in_groupplot_env"]: content.extend(data["flavor"].end("groupplot") + "\n\n") code = """""" if include_disclaimer: disclaimer = f"This file was created with tikzplotlib v{__version__}." code += _tex_comment(disclaimer) if wrap and add_axis_environment: code += data["flavor"].start("tikzpicture") if extra_tikzpicture_parameters: code += "[\n" + ",\n".join(extra_tikzpicture_parameters) + "\n]" code += "\n" if extra_lines_start: code += "\n".join(extra_lines_start) + "\n" code += "\n" coldefs = _get_color_definitions(data) if coldefs: code += "\n".join(coldefs) + "\n\n" code += "".join(content) if wrap and add_axis_environment: code += data["flavor"].end("tikzpicture") + "\n" if standalone: code = data["flavor"].standalone(code) return code
Main function. Here, the recursion into the image starts and the contents are picked up. The actual file gets written in this routine. :param figure: either a Figure object or 'gcf' (default). :param axis_width: If not ``None``, this will be used as figure width within the TikZ/PGFPlots output. If ``axis_height`` is not given, ``tikzplotlib`` will try to preserve the original width/height ratio. Note that ``axis_width`` can be a string literal, such as ``'\\axis_width'``. :type axis_width: str :param axis_height: If not ``None``, this will be used as figure height within the TikZ/PGFPlots output. If ``axis_width`` is not given, ``tikzplotlib`` will try to preserve the original width/height ratio. Note that ``axis_height`` can be a string literal, such as ``'\\axis_height'``. :type axis_height: str :param textsize: The text size (in pt) that the target LaTeX document is using. Default is 10.0. :type textsize: float :param tex_relative_path_to_data: In some cases, the TikZ file will have to refer to another file, e.g., a PNG for image plots. When ``\\input`` into a regular LaTeX document, the additional file is looked for in a folder relative to the LaTeX file, not the TikZ file. This argument optionally sets the relative path from the LaTeX file to the data. :type tex_relative_path_to_data: str :param externalize_tables: Whether or not to externalize plot data tables into dat files. :type externalize_tables: bool :param override_externals: Whether or not to override existing external files (such as dat or images) with conflicting names (the alternative is to choose other names). :type override_externals: bool :param strict: Whether or not to strictly stick to matplotlib's appearance. This influences, for example, whether tick marks are set exactly as in the matplotlib plot, or if TikZ/PGFPlots can decide where to put the ticks. :type strict: bool :param wrap: Whether ``'\\begin{tikzpicture}'``/``'\\starttikzpicture'`` and ``'\\end{tikzpicture}'``/``'\\stoptikzpicture'`` will be written. One might need to provide custom arguments to the environment (e.g. scale=). Default is ``True``. :type wrap: bool :param add_axis_environment: Whether ``'\\begin{axis}[...]'``/``'\\startaxis[...]'`` and ``'\\end{axis}'``/``'\\stopaxis'`` will be written. One needs to set the environment in the document. If ``False`` additionally sets ``wrap=False``. Default is ``True``. :type add_axis_environment: bool :param extra_axis_parameters: Extra axis options to be passed (as a list or set) to pgfplots. Default is ``None``. :type extra_axis_parameters: a list or set of strings for the pgfplots axes. :param extra_tikzpicture_parameters: Extra tikzpicture options to be passed (as a set) to pgfplots. :type extra_tikzpicture_parameters: a set of strings for the pgfplots tikzpicture. :param dpi: The resolution in dots per inch of the rendered image in case of QuadMesh plots. If ``None`` it will default to the value ``savefig.dpi`` from matplotlib.rcParams. Default is ``None``. :type dpi: int :param show_info: Show extra info on the command line. Default is ``False``. :type show_info: bool :param include_disclaimer: Include tikzplotlib disclaimer in the output. Set ``False`` to make tests reproducible. Default is ``True``. :type include_disclaimer: bool :param standalone: Include wrapper code for a standalone LaTeX file. :type standalone: bool :param float_format: Format for float entities. Default is ```".15g"```. :type float_format: str :param table_row_sep: Row separator for table data. Default is ```"\\n"```.
:type table_row_sep: str :param flavor: TeX flavor of the output code. Supported are ``"latex"`` and ``"context"``. Default is ``"latex"``. :type flavor: str :returns: The TikZ/PGFPlots code as a string. The following optional attributes of matplotlib's objects are recognized and handled: - axes.Axes._tikzplotlib_anchors This attribute can be set to a list of ((x,y), anchor_name) tuples. Invisible nodes at the respective location will be created which can be referenced from outside the axis environment.
https://github.com/nschloe/tikzplotlib/blob/162ad22ffcb44ccce9b9ee45f3496389d004b91c/src/tikzplotlib/_save.py#L19-L247
from __future__ import annotations import enum import pathlib import tempfile import warnings import matplotlib as mpl import matplotlib.pyplot as plt from . import _axes from . import _image as img from . import _legend, _line2d, _patch, _path from . import _quadmesh as qmsh from . import _text from .__about__ import __version__
MIT License
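A minimal usage sketch for get_tikz_code, assuming tikzplotlib re-exports it at the package level (its usual public entry point); the axis width and plotted values are illustrative only.

import matplotlib.pyplot as plt
import tikzplotlib  # assumed to expose get_tikz_code at package level

plt.plot([0, 1, 2], [0, 1, 4], label="y = x^2")
plt.legend()
# Returns the TikZ/PGFPlots code as a string instead of writing a file.
code = tikzplotlib.get_tikz_code(axis_width="8cm", standalone=True)
print(code.splitlines()[0])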
mattvonrocketstein/smash
smashlib/ipy3x/nbformat/v4/rwbase.py
split_lines
python
def split_lines(nb): for cell in nb.cells: source = cell.get('source', None) if isinstance(source, string_types): cell['source'] = source.splitlines(True) if cell.cell_type == 'code': for output in cell.outputs: if output.output_type in {'execute_result', 'display_data'}: for key, value in output.data.items(): if key != 'application/json' and isinstance(value, string_types): output.data[key] = value.splitlines(True) elif output.output_type == 'stream': if isinstance(output.text, string_types): output.text = output.text.splitlines(True) return nb
Split likely multiline text into lists of strings, making file output friendlier to line-based VCS. ``rejoin_lines(nb)`` will reverse the effects of ``split_lines(nb)``. Used when writing JSON files.
https://github.com/mattvonrocketstein/smash/blob/98acdc27ab72ca80d9a7f63a54c0d52f126a8009/smashlib/ipy3x/nbformat/v4/rwbase.py#L35-L57
from IPython.utils.py3compat import string_types, cast_unicode_py2 def rejoin_lines(nb): for cell in nb.cells: if 'source' in cell and isinstance(cell.source, list): cell.source = ''.join(cell.source) if cell.get('cell_type', None) == 'code': for output in cell.get('outputs', []): output_type = output.get('output_type', '') if output_type in {'execute_result', 'display_data'}: for key, value in output.get('data', {}).items(): if key != 'application/json' and isinstance(value, list): output.data[key] = ''.join(value) elif output_type: if isinstance(output.get('text', ''), list): output.text = ''.join(output.text) return nb
MIT License
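A small sketch of what split_lines does, shown against the public nbformat package as a stand-in for this vendored module; the import path is an assumption.

from nbformat.v4 import new_notebook, new_code_cell
from nbformat.v4.rwbase import split_lines  # assumed equivalent of the vendored helper

nb = new_notebook(cells=[new_code_cell(source="x = 1\nprint(x)\n")])
split_lines(nb)  # mutates the notebook in place and also returns it
print(nb.cells[0].source)  # ['x = 1\n', 'print(x)\n']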
rosshamish/classtime
classtime/brain/remote_db/remotedb_factory.py
RemoteDatabaseFactory.build
python
def build(institution): config_file = os.path.join(classtime.brain.institutions.CONFIG_FOLDER_PATH, '{institution}.json'.format(institution=institution)) with open(config_file, 'r') as config: config = json.loads(config.read()) course_db = None db_type = config.get('type') if db_type == 'ldap': course_db = RemoteLDAPDatabase(server=config.get('server'), basedn=config.get('basedn')) for name, params in config.get('saved_searches').items(): attrs = [attr.encode('ascii') for attr in params.get('attrs')] course_db.save_search(name=name, search_flt=params.get('search_flt'), attrs=attrs) return course_db
Builds an object which implements AbstractRemoteDatabase, based on config info stored in `classtime/brain/institutions/<institution>.json` Config info should be valid JSON which specifies all information required to create the type of AbstractRemoteDatabase that the specified institution uses
https://github.com/rosshamish/classtime/blob/42b704def7be2893c73cf4682d78a20524d27cc7/classtime/brain/remote_db/remotedb_factory.py#L10-L37
import os import json from .ldapdb import RemoteLDAPDatabase import classtime.brain.institutions class RemoteDatabaseFactory(object): @staticmethod
MIT License
criteo/autofaiss
autofaiss/metrics/benchmark.py
benchmark_index
python
def benchmark_index( indices_dict, gt_test, test_points, vectors_size_in_bytes, save_path=None, speed_dict=None, size_dict=None ): perfect_index_label = "perfect index" if perfect_index_label not in indices_dict: indices_dict[perfect_index_label] = None if speed_dict: speed_dict[perfect_index_label] = vectors_size_in_bytes k_max = gt_test.shape[1] plt.figure(figsize=(16, 8)) k_values = np.arange(0, k_max + 1) avg_one_recall_at_r = {} avg_r_recall_at_r = {} timout_s = 5.0 comp_size = vectors_size_in_bytes for index_key in tq(list(sorted(indices_dict.keys()))): if index_key not in indices_dict: continue index = indices_dict[index_key] if index_key == "Flat" or (index is None): y_r_recall_at_r = np.arange(1, k_max + 1) y_one_recall_at_r = np.ones(k_max) tot = 1 else: y_r_recall_at_r = np.zeros(k_max) y_one_recall_at_r = np.zeros(k_max) tot = 0 start_time = time.time() for i, item in enumerate(test_points): y_r_recall_at_r += np.array(r_recall_at_r_single(item, gt_test[i], index, k_max)) y_one_recall_at_r += np.array(one_recall_at_r_single(item, gt_test[i], index, k_max)) tot += 1 if time.time() - start_time > timout_s and tot > 150: break avg_r_recall_at_r[index_key] = y_r_recall_at_r / tot avg_one_recall_at_r[index_key] = y_one_recall_at_r / tot info_string = {index_key: "" for index_key in indices_dict} initial_size_string = cast_bytes_to_memory_string(comp_size) for index_key in indices_dict: if index_key in speed_dict: info_string[index_key] += f"avg speed: {format_speed_ms_per_query(speed_dict[index_key])}, " if index_key in size_dict: info_string[index_key] += ( f"(Size: {cast_bytes_to_memory_string(size_dict[index_key])} " f"({(100*size_dict[index_key]/comp_size):.1f}% of {initial_size_string})" ) plt.subplot(121) for index_key in sorted(indices_dict.keys()): if index_key not in indices_dict: continue label = f"{index_key:<30} Index, {info_string[index_key]}" plt.plot(k_values, np.concatenate(([0], avg_r_recall_at_r[index_key])), label=label) plt.xlabel("k, number of nearests items") plt.ylabel("k-recall@k") plt.vlines(40, 0, k_max) plt.legend() plt.tight_layout() plt.subplot(122) for index_key in sorted(indices_dict.keys()): if index_key not in indices_dict: continue label = f"{index_key:<30} Index, {info_string[index_key]}" plt.plot(k_values, np.concatenate(([0], 100 * avg_one_recall_at_r[index_key])), label=label) plt.xlabel("k, number of nearests items") plt.ylabel("1-Recall@k") plt.vlines(100, 0, k_max) plt.legend() plt.tight_layout() if save_path: plt.savefig(save_path) plt.show()
Compute recall curves for the indices.
https://github.com/criteo/autofaiss/blob/2bb606b6a3580d41d7b86302ea7d94f3c18fe2e3/autofaiss/metrics/benchmark.py#L35-L135
import time import numpy as np from matplotlib import pyplot as plt from tqdm import tqdm as tq from autofaiss.indices.index_utils import format_speed_ms_per_query, get_index_size, speed_test_ms_per_query from autofaiss.metrics.recalls import r_recall_at_r_single, one_recall_at_r_single from autofaiss.utils.cast import cast_bytes_to_memory_string def avg_speed_dict_ms_per_query(indices_dict, vectors, k_closest: int = 40, timeout_s: float = 5): speed_dict = {} for index_key in indices_dict: speed = speed_test_ms_per_query(indices_dict[index_key], vectors, k_closest, timeout_s) speed_dict[index_key] = speed return speed_dict def index_sizes_in_bytes_dict(indices_dict): size_dict = {} for index_key in indices_dict: size_dict[index_key] = get_index_size(indices_dict[index_key]) return size_dict
Apache License 2.0
tektoncd/experimental
sdk/python/tekton_pipeline/models/v1alpha1_pipeline_resource_spec.py
V1alpha1PipelineResourceSpec.params
python
def params(self): return self._params
Gets the params of this V1alpha1PipelineResourceSpec. # noqa: E501 :return: The params of this V1alpha1PipelineResourceSpec. # noqa: E501 :rtype: list[V1alpha1ResourceParam]
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/v1alpha1_pipeline_resource_spec.py#L106-L113
import pprint import re import six from tekton_pipeline.configuration import Configuration class V1alpha1PipelineResourceSpec(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'description': 'str', 'params': 'list[V1alpha1ResourceParam]', 'secrets': 'list[V1alpha1SecretParam]', 'type': 'str' } attribute_map = { 'description': 'description', 'params': 'params', 'secrets': 'secrets', 'type': 'type' } def __init__(self, description=None, params=None, secrets=None, type='', local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._description = None self._params = None self._secrets = None self._type = None self.discriminator = None if description is not None: self.description = description self.params = params if secrets is not None: self.secrets = secrets self.type = type @property def description(self): return self._description @description.setter def description(self, description): self._description = description @property
Apache License 2.0
timothycrosley/quickpython
quickpython/examples/eightpuzzle.py
EightPuzzle.game
python
def game(self): pass
starts the game
https://github.com/timothycrosley/quickpython/blob/adbf79947f9c9e094b5a32bf72c056bfcc7c623d/quickpython/examples/eightpuzzle.py#L41-L46
__authors__ = [("shineydev", "contact@shiney.dev")] __maintainers__ = [("shineydev", "contact@shiney.dev")] __version_info__ = (0, 0, 1, "alpha", 0) __version__ = "{0}.{1}.{2}{3}{4}".format( *[str(n)[0] if (i == 3) else str(n) for (i, n) in enumerate(__version_info__)] ) import os import pyfiglet class EightPuzzle: def __init__(self): pass
MIT License
tensorflow/tensor2tensor
tensor2tensor/data_generators/algorithmic_math.py
random_expr
python
def random_expr(depth, vlist, ops): if not depth: return str(vlist[random.randrange(len(vlist))]) max_depth_side = random.randrange(2) other_side_depth = random.randrange(depth) left = random_expr(depth - 1 if max_depth_side else other_side_depth, vlist, ops) right = random_expr(depth - 1 if not max_depth_side else other_side_depth, vlist, ops) op = ops[random.randrange(len(ops))] return ExprNode(left, right, op)
Generate a random expression tree. Args: depth: At least one leaf will be this many levels down from the top. vlist: A list of chars. These chars are randomly selected as leaf values. ops: A list of ExprOp instances. Returns: An ExprNode instance which is the root of the generated expression tree.
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/data_generators/algorithmic_math.py#L132-L155
from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import namedtuple import random import six from six.moves import range import sympy class ExprOp(object): def __init__(self, symbol, precedence, associative=False): self.symbol = symbol self.precedence = precedence self.associative = associative def __str__(self): return self.symbol def __eq__(self, other): return isinstance(other, ExprOp) and self.symbol == other.symbol class ExprNode(object): def __init__(self, left, right, op): self.left = left self.right = right self.op = op left_depth = left.depth if isinstance(left, ExprNode) else 0 right_depth = right.depth if isinstance(right, ExprNode) else 0 self.depth = max(left_depth, right_depth) + 1 def __str__(self): left_str = str(self.left) right_str = str(self.right) left_use_parens = (isinstance(self.left, ExprNode) and self.left.op.precedence < self.op.precedence) right_use_parens = (isinstance(self.right, ExprNode) and self.right.op.precedence <= self.op.precedence and not (self.op.associative and self.right.op == self.op)) left_final = "(" + left_str + ")" if left_use_parens else left_str right_final = "(" + right_str + ")" if right_use_parens else right_str return left_final + str(self.op) + right_final def is_in(self, expr): if expr == self: return True is_in_left = is_in_expr(self.left, expr) is_in_right = is_in_expr(self.right, expr) return is_in_left or is_in_right def is_in_expr(expr, find): return expr == find or (isinstance(expr, ExprNode) and expr.is_in(find)) def random_expr_with_required_var(depth, required_var, optional_list, ops): if not depth: if required_var: return required_var return str(optional_list[random.randrange(len(optional_list))]) max_depth_side = random.randrange(2) other_side_depth = random.randrange(depth) required_var_side = random.randrange(2) left = random_expr_with_required_var( depth - 1 if max_depth_side else other_side_depth, required_var if required_var_side else None, optional_list, ops) right = random_expr_with_required_var( depth - 1 if not max_depth_side else other_side_depth, required_var if not required_var_side else None, optional_list, ops) op = ops[random.randrange(len(ops))] return ExprNode(left, right, op)
Apache License 2.0
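A hedged usage sketch for random_expr; the import path follows the file location above and the operator list is illustrative.

import random
from tensor2tensor.data_generators.algorithmic_math import ExprOp, random_expr  # assumed import path

random.seed(0)
ops = [ExprOp("+", precedence=0, associative=True),
       ExprOp("*", precedence=1, associative=True)]
expr = random_expr(depth=3, vlist=["a", "b", "c"], ops=ops)
print(str(expr))  # e.g. "a*(b+c)+a" -- the exact shape depends on the RNG state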
willmcgugan/textual
src/textual/layout.py
Layout.get_widget_at
python
def get_widget_at(self, x: int, y: int) -> tuple[Widget, Region]: for widget, cropped_region, region in self: if widget.is_visual and cropped_region.contains(x, y): return widget, region raise NoWidget(f"No widget under screen coordinate ({x}, {y})")
Get the widget under the given screen coordinate, raising ``NoWidget`` if there is none.
https://github.com/willmcgugan/textual/blob/f574294beb733df0cfd17993401713edf5ed7fca/src/textual/layout.py#L189-L194
from __future__ import annotations from abc import ABC, abstractmethod, abstractmethod from dataclasses import dataclass from itertools import chain from operator import itemgetter import sys from typing import Iterable, Iterator, NamedTuple, TYPE_CHECKING from rich import segment import rich.repr from rich.control import Control from rich.console import Console, ConsoleOptions, RenderResult, RenderableType from rich.segment import Segment, SegmentLines from rich.style import Style from . import log, panic from ._loop import loop_last from .layout_map import LayoutMap from ._profile import timer from ._lines import crop_lines from ._types import Lines from .geometry import clamp, Region, Offset, Size PY38 = sys.version_info >= (3, 8) if TYPE_CHECKING: from .widget import Widget from .view import View class NoWidget(Exception): pass class OrderedRegion(NamedTuple): region: Region order: tuple[int, int] class ReflowResult(NamedTuple): hidden: set[Widget] shown: set[Widget] resized: set[Widget] class WidgetPlacement(NamedTuple): region: Region widget: Widget | None = None order: tuple[int, ...] = () @rich.repr.auto class LayoutUpdate: def __init__(self, lines: Lines, region: Region) -> None: self.lines = lines self.region = region def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult: yield Control.home().segment x = self.region.x new_line = Segment.line() move_to = Control.move_to for last, (y, line) in loop_last(enumerate(self.lines, self.region.y)): yield move_to(x, y).segment yield from line if not last: yield new_line def __rich_repr__(self) -> rich.repr.Result: x, y, width, height = self.region yield "x", x yield "y", y yield "width", width yield "height", height class Layout(ABC): def __init__(self) -> None: self._layout_map: LayoutMap | None = None self.width = 0 self.height = 0 self.regions: dict[Widget, tuple[Region, Region]] = {} self._cuts: list[list[int]] | None = None self._require_update: bool = True self.background = "" def check_update(self) -> bool: return self._require_update def require_update(self) -> None: self._require_update = True self.reset() self._layout_map = None def reset_update(self) -> None: self._require_update = False def reset(self) -> None: self._cuts = None def reflow(self, view: View, size: Size) -> ReflowResult: self.reset() self.width = size.width self.height = size.height map = LayoutMap(size) map.add_widget(view, size.region, (), size.region) self._require_update = False old_widgets = set() if self.map is None else set(self.map.keys()) new_widgets = set(map.keys()) shown_widgets = new_widgets - old_widgets hidden_widgets = old_widgets - new_widgets self._layout_map = map new_renders = { widget: (region, clip) for widget, (region, _order, clip) in map.items() } self.regions = new_renders resized_widgets = { widget for widget, (region, *_) in map.items() if widget in old_widgets and widget.size != region.size } return ReflowResult( hidden=hidden_widgets, shown=shown_widgets, resized=resized_widgets ) @abstractmethod def get_widgets(self) -> Iterable[Widget]: ... 
@abstractmethod def arrange(self, size: Size, scroll: Offset) -> Iterable[WidgetPlacement]: async def mount_all(self, view: "View") -> None: await view.mount(*self.get_widgets()) @property def map(self) -> LayoutMap | None: return self._layout_map def __iter__(self) -> Iterator[tuple[Widget, Region, Region]]: if self.map is not None: layers = sorted( self.map.widgets.items(), key=lambda item: item[1].order, reverse=True ) for widget, (region, order, clip) in layers: yield widget, region.intersection(clip), region def get_offset(self, widget: Widget) -> Offset: try: return self.map[widget].region.origin except KeyError: raise NoWidget("Widget is not in layout")
MIT License
adirockzz95/piwho
piwho/recognition.py
SpeakerRecognizer.identify_speaker
python
def identify_speaker(self, audiofile=None): if audiofile is None: if( len(self.get_speakers()) < 2 ): raise IndexError("Insufficient speakers in database.") expand = os.path.join(self.dirpath, '') try: newest = max(glob.iglob((expand + '*.wav')), key=os.path.getctime) except ValueError: logging.error('No wav file found in path: ' + expand) raise if not self._is_good_wave(newest): self._convert_file(newest) self.last_recognized_file = os.path.basename(newest) name = self._start_subprocess('java ' + config.JAVA_MEM + '-jar ' + config.SPEAKER_RECOG_JAR + ' --ident ' + newest + self.feature) self.scores = name[4:] if not self.debug: try: names = [] names.append(str((name[2].split()[2]).decode('utf-8'))) names.append(str((name[3].split()[3]).decode('utf-8'))) return names except IndexError: logging.error('MARF execution failed.' ' Please set debug=True to print error' ' info.', exc_info=True) else: data = (''.join(str(x) for x in name)) fi = data.encode('ascii', 'ignore').decode('unicode_escape') print(re.sub("[b']", '', fi)) else: expand = os.path.expanduser(audiofile) if not self._is_good_wave(expand): self._convert_file(expand) name = self._start_subprocess('java ' + config.JAVA_MEM + '-jar ' + config.SPEAKER_RECOG_JAR + ' --ident ' + expand + self.feature) self.scores = name[4:] if not self.debug: try: names = [] names.append(str((name[2].split()[2]).decode('utf-8'))) names.append(str((name[3].split()[3]).decode('utf-8'))) return names except IndexError: logging.error('MARF execution failed.' ' Please set debug=True to print error' ' info.', exc_info=True) else: data = (''.join(str(x) for x in name)) fi = data.encode('ascii', 'ignore').decode('unicode_escape') print(re.sub("[b']", '', fi))
Identify the speaker in the audio wav file against the speakers.txt database and the trained model. It will always return a speaker name from the database, since MARF is a closed-set framework. You can use the function 'get_speaker_scores()' to print the likelihood score for each speaker. If the audio file path is not given, the most recently added file in the directory 'dirpath' will be used for identification. :param str audiofile: audio file for the speaker identification :return: the identified speaker :Raises: ValueError, IndexError
https://github.com/adirockzz95/piwho/blob/27956d27d86dbf2db2f382c2240c9fe7e84f6205/piwho/recognition.py#L165-L252
import os import subprocess import signal import glob import shlex import time import logging import wave import audioop import re import multiprocessing as mp from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler from . import config class SpeakerRecognizer(object): def __init__(self, dirpath=None): if dirpath is None: self.dirpath = os.getcwd() else: self.dirpath = os.path.abspath(dirpath) self.debug = False self.last_trained_file = None self.last_recognized_file = None self.speaker_name = None self.filepath = None self.scores = [] self.feature = ' -endp -lpc -cheb' def train_new_data(self, filepath=None, speakername=None): if speakername is None and self.speaker_name is None: raise ValueError('field speaker_name cannot be None.') if speakername is None: speakername = self.speaker_name if filepath is not None and os.path.isdir(filepath): lists = os.listdir(filepath) self.filepath = filepath for wav in lists: if wav.endswith('.wav'): fullpath = os.path.realpath( os.path.join(self.filepath, wav)) if not self._is_good_wave(fullpath): self._convert_file(fullpath) self.last_trained_file = wav self._create_entry(speakername, self.last_trained_file) reply = self._start_subprocess('java ' + config.JAVA_MEM + '-jar ' + config.SPEAKER_RECOG_JAR + ' --train ' + filepath + self.feature) if filepath is not None and os.path.isfile(os.path.abspath(filepath)): expand = os.path.expanduser(filepath) self.last_trained_file = os.path.basename(expand) if not self._is_good_wave(expand): self._convert_file(expand) self._create_entry(speakername, self.last_trained_file) reply = self._start_subprocess('java ' + config.JAVA_MEM + '-jar ' + config.SPEAKER_RECOG_JAR + ' --single-train ' + expand + self.feature) elif filepath is None: expand = os.path.join(self.dirpath, '') try: newest = max(glob.iglob((expand + '*.wav')), key=os.path.getctime) except ValueError: logging.error('No wav file found in path: ' + (expand)) raise if self.last_trained_file == os.path.basename(newest): return elif not self._is_good_wave(newest): self._convert_file(newest) self.last_trained_file = os.path.basename(newest) self._create_entry(speakername, self.last_trained_file) reply = self._start_subprocess('java ' + config.JAVA_MEM + '-jar ' + config.SPEAKER_RECOG_JAR + ' --single-train ' + newest + self.feature) if self.debug: data = (''.join(str(x) for x in reply)) fi = data.encode('ascii', 'ignore').decode('unicode_escape') print(re.sub("[b']", '', fi))
MIT License
gidgidonihah/github-review-slack-notifier
app/github.py
GithubWebhookPayloadParser.get_assignee_username
python
def get_assignee_username(self): return self._data.get('assignee', {}).get('login')
Parse and retrieve the assignee's username.
https://github.com/gidgidonihah/github-review-slack-notifier/blob/d9438faaf6d1a85ad918570fa74f587e49abe43a/app/github.py#L70-L72
import copy import os import requests from werkzeug.exceptions import BadRequest IGNORED_USERS = os.environ.get('IGNORED_USERS', '').split(',') def is_valid_pull_request(data): is_valid_request = _validate_pull_request(data) is_valid_action = data.get('action') == 'review_requested' or data.get('action') == 'assigned' return is_valid_request and is_valid_action def _validate_pull_request(data): if 'action' not in data: raise BadRequest('no event supplied') if 'pull_request' not in data or 'html_url' not in data.get('pull_request'): raise BadRequest('payload.pull_request.html_url missing') if data.get('sender', {}).get('login') in IGNORED_USERS: return False return True def get_recipient_github_username_by_action(data): payload_parser = GithubWebhookPayloadParser(data) if data.get('action') == 'review_requested': username = payload_parser.get_request_reviewer_username() elif data.get('action') == 'assigned': username = payload_parser.get_assignee_username() else: raise BadRequest('Github username not found') return username def lookup_github_full_name(gh_username): url = 'https://api.github.com/users/{}'.format(gh_username) request = requests.get(url, auth=(os.environ.get('GITHUB_API_USER', ''), os.environ.get('GITHUB_API_TOKEN', ''))) user = request.json() return user.get('name', '') class GithubWebhookPayloadParser: def __init__(self, data=None): if data is None: data = {} self._data = copy.deepcopy(data) def get_request_reviewer_username(self): return self._data.get('requested_reviewer', {}).get('login')
MIT License
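A short sketch of how the parser is used; the payload mirrors GitHub's "assigned" webhook event and the import path is taken from the file path above.

from app.github import GithubWebhookPayloadParser  # path assumed from the repo layout

payload = {
    "action": "assigned",
    "assignee": {"login": "octocat"},
    "pull_request": {"html_url": "https://github.com/org/repo/pull/1"},
}
parser = GithubWebhookPayloadParser(payload)
print(parser.get_assignee_username())  # "octocat"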
olitheolix/aiokubernetes
aiokubernetes/models/v1_config_map_volume_source.py
V1ConfigMapVolumeSource.optional
python
def optional(self, optional): self._optional = optional
Sets the optional of this V1ConfigMapVolumeSource. Specify whether the ConfigMap or its keys must be defined # noqa: E501 :param optional: The optional of this V1ConfigMapVolumeSource. # noqa: E501 :type: bool
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1_config_map_volume_source.py#L147-L156
import pprint import re from aiokubernetes.models.v1_key_to_path import V1KeyToPath class V1ConfigMapVolumeSource(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'default_mode': 'int', 'items': 'list[V1KeyToPath]', 'name': 'str', 'optional': 'bool' } attribute_map = { 'default_mode': 'defaultMode', 'items': 'items', 'name': 'name', 'optional': 'optional' } def __init__(self, default_mode=None, items=None, name=None, optional=None): self._default_mode = None self._items = None self._name = None self._optional = None self.discriminator = None if default_mode is not None: self.default_mode = default_mode if items is not None: self.items = items if name is not None: self.name = name if optional is not None: self.optional = optional @property def default_mode(self): return self._default_mode @default_mode.setter def default_mode(self, default_mode): self._default_mode = default_mode @property def items(self): return self._items @items.setter def items(self, items): self._items = items @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def optional(self): return self._optional @optional.setter
Apache License 2.0
binary-signal/mass-apk-installer
mass_apk/adb.py
Adb.state
python
def state(self) -> ConnectionState: return self._update_state()
Get the connection state of the adb server. If the adb server reports the `device` state, a phone is connected.
https://github.com/binary-signal/mass-apk-installer/blob/9472f791a7c0ac9aa3c1ec12ccb8dc3fdc354448/mass_apk/adb.py#L74-L80
from typing import List, Union import logging import os from pathlib import Path import subprocess from enum import Enum, unique from mass_apk import pkg_root, runtime_platform from mass_apk.exceptions import MassApkError from mass_apk.helpers import Platform log = logging.getLogger(__name__) class AdbError(MassApkError): class AdbApkExists(AdbError): class Adb(object): @unique class ConnectionState(Enum): CONNECTED = True DISCONNECTED = False @classmethod def _get_adb_path(cls) -> os.PathLike: if runtime_platform == Platform.OSX: path = os.path.join(pkg_root, "bin", "osx", "adb") elif runtime_platform == Platform.WIN: path = os.path.join(pkg_root, "bin", "win", "adb.exe") elif runtime_platform == Platform.LINUX: path = os.path.join(pkg_root, "bin", "linux", "adb") else: raise RuntimeError("Unsupported runtime platform") return Path(path) def __init__(self, auto_start: bool = False): self._path = self._get_adb_path() self._state = self.__class__.ConnectionState.DISCONNECTED if auto_start: self.start_server() @property def path(self): return self._path @property
BSD 3-Clause New or Revised License
jittor/jittor
python/jittor/linalg.py
pinv
python
def pinv(x): def forward_code(np, data): a = data["inputs"][0] m_a = data["outputs"][0] t_a = np.linalg.pinv(a) np.copyto(m_a, t_a) def backward_code(np, data): def T(x): return np.swapaxes(x, -1, -2) _dot = partial(np.einsum, '...ij,...jk->...ik') dout = data["dout"] out = data["outputs"][0] inp = data["inputs"][0] lmx = data["f_outputs"] mx = lmx[0] t = T( -_dot(_dot(mx, T(dout)), mx) + _dot(_dot(_dot(mx, T(mx)), dout), np.eye(inp.shape[-2]) - _dot(inp, mx)) + _dot(_dot(_dot(np.eye(mx.shape[-2]) - _dot(mx, inp), dout), T(mx)), mx) ) np.copyto(out, t) sw = list(x.shape[:-2]) + [x.shape[-1]] + [x.shape[-2]] lmx = jt.numpy_code( [sw], [x.dtype], [x], forward_code, [backward_code], ) mx = lmx[0] return mx
Calculate the pseudo-inverse of x. :param x: (..., M, N) :return: the pseudo-inverse of x, with shape (..., N, M)
https://github.com/jittor/jittor/blob/5c2fb6bfceec779050f3d17917c22f9b7f50c5c0/python/jittor/linalg.py#L179-L215
import jittor as jt from functools import partial def svd(x): def forward_code(np, data): a = data["inputs"][0] u, s, v = data["outputs"] tu, ts, tv = np.linalg.svd(a, full_matrices=0) np.copyto(u, tu) np.copyto(s, ts) np.copyto(v, tv) def backward_code(np, data): def T(x): return np.swapaxes(x, -1, -2) _dot = partial(np.einsum, '...ij,...jk->...ik') dout = data["dout"] out = data["outputs"][0] inp = data["inputs"][0] out_index = data["out_index"] u, s, v = data["f_outputs"] v = T(v) m, n = inp.shape[-2:] k = np.min((m, n)) i = np.reshape(np.eye(k), np.concatenate((np.ones(inp.ndim - 2, dtype=int), (k, k)))) if out_index == 0: f = 1 / (s[..., np.newaxis, :] ** 2 - s[..., :, np.newaxis] ** 2 + i) gu = dout utgu = _dot(T(u), gu) t = (f * (utgu - T(utgu))) * s[..., np.newaxis, :] t = _dot(_dot(u, t), T(v)) if m > n: i_minus_uut = (np.reshape(np.eye(m), np.concatenate((np.ones(inp.ndim - 2, dtype=int), (m, m)))) - _dot(u, np.conj(T(u)))) t = t + T(_dot(_dot(v / s[..., np.newaxis, :], T(gu)), i_minus_uut)) np.copyto(out, t) elif out_index == 1: gs = dout t = i * gs[..., :, np.newaxis] t = _dot(_dot(u, t), T(v)) np.copyto(out, t) elif out_index == 2: f = 1 / (s[..., np.newaxis, :] ** 2 - s[..., :, np.newaxis] ** 2 + i) gv = dout vtgv = _dot(T(v), gv) t = s[..., :, np.newaxis] * (f * (vtgv - T(vtgv))) t = _dot(_dot(u, t), T(v)) if m < n: i_minus_vvt = (np.reshape(np.eye(n), np.concatenate((np.ones(inp.ndim - 2, dtype=int), (n, n)))) - _dot(v, np.conj(T(v)))) t = t + T(_dot(_dot(u / s[..., np.newaxis, :], T(gv)), i_minus_vvt)) np.copyto(out, t) m, n = x.shape[-2:] k = min(m, n) s1 = list(x.shape) s1[-1] = k s2 = list(x.shape) s2[-2] = k s3 = list(x.shape)[:-2] s3.append(k) u, s, v = jt.numpy_code( [s1, s3, s2], [x.dtype, x.dtype, x.dtype], [x], forward_code, [backward_code], ) return u, s, v def eigh(x): def forward_code(np, data): a = data["inputs"][0] w, v = data["outputs"] tw, tv = np.linalg.eigh(a, UPLO='L') np.copyto(w, tw) np.copyto(v, tv) def backward_code(np, data): def T(x): return np.swapaxes(x, -1, -2) _dot = partial(np.einsum, '...ij,...jk->...ik') dout = data["dout"] out = data["outputs"][0] inp = data["inputs"][0] out_index = data["out_index"] w, v = data["f_outputs"] k = int(inp.shape[-1]) w_repeated = np.repeat(w[..., np.newaxis], k, axis=-1) if out_index == 0: t = _dot(v * dout[..., np.newaxis, :], T(v)) np.copyto(out, t) elif out_index == 1: if np.any(dout): off_diag = np.ones((k, k)) - np.eye(k) F = off_diag / (T(w_repeated) - w_repeated + np.eye(k)) t = _dot(_dot(v, F * _dot(T(v), dout)), T(v)) np.copyto(out, t) sw = x.shape[:-2] + x.shape[-1:] sv = x.shape w, v = jt.numpy_code( [sw, sv], [x.dtype, x.dtype], [x], forward_code, [backward_code], ) return w, v def inv(x): def forward_code(np, data): a = data["inputs"][0] m_a = data["outputs"][0] t_a = np.linalg.inv(a) np.copyto(m_a, t_a) def backward_code(np, data): def T(x): return np.swapaxes(x, -1, -2) _dot = partial(np.einsum, '...ij,...jk->...ik') dout = data["dout"] out = data["outputs"][0] lmx = data["f_outputs"] mx = lmx[0] t = -_dot(_dot(T(mx), dout), T(mx)) np.copyto(out, t) lmx = jt.numpy_code( [x.shape], [x.dtype], [x], forward_code, [backward_code], ) mx = lmx[0] return mx
Apache License 2.0
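A minimal sketch of pinv, assuming it is importable as jittor.linalg (which matches the file location python/jittor/linalg.py); the input shape is arbitrary.

import jittor as jt
from jittor import linalg  # assumed import path

x = jt.random((3, 4))
x_pinv = linalg.pinv(x)    # pseudo-inverse with shape (4, 3)
print(x_pinv.shape)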
blacklight/platypush
platypush/plugins/media/plex/__init__.py
MediaPlexPlugin.forward
python
def forward(self, client): return self.client(client).stepForward()
Step playback forward on a client
https://github.com/blacklight/platypush/blob/a5f1dc2638d7c6308325e0ca39dc7d5e262836aa/platypush/plugins/media/plex/__init__.py#L208-L213
import urllib.parse from platypush.context import get_plugin from platypush.plugins import Plugin, action class MediaPlexPlugin(Plugin): def __init__(self, server, username, password, **kwargs): from plexapi.myplex import MyPlexAccount super().__init__(**kwargs) self.resource = MyPlexAccount(username, password).resource(server) self._plex = None @property def plex(self): if not self._plex: self._plex = self.resource.connect() return self._plex @action def get_clients(self): return [{ 'device': c.device, 'device_class': c.deviceClass, 'local': c.local, 'model': c.model, 'platform': c.platform, 'platform_version': c.platformVersion, 'product': c.product, 'state': c.state, 'title': c.title, 'version': c.version, } for c in self.plex.clients()] def _get_client(self, name): return self.plex.client(name) @action def search(self, section=None, title=None, **kwargs): ret = [] library = self.plex.library if section: library = library.section(section) if title or kwargs: items = library.search(title, **kwargs) else: items = library.all() for item in items: ret.append(self._flatten_item(item)) return ret @action def playlists(self): return [ { 'title': pl.title, 'duration': pl.duration, 'summary': pl.summary, 'viewed_at': pl.viewedAt, 'items': [self._flatten_item(item) for item in pl.items()], } for pl in self.plex.playlists() ] @action def history(self): return [ self._flatten_item(item) for item in self.plex.history() ] @staticmethod def get_chromecast(chromecast): from ..lib.plexcast import PlexController hndl = PlexController() hndl.namespace = 'urn:x-cast:com.google.cast.sse' cast = get_plugin('media.chromecast').get_chromecast(chromecast) cast.register_handler(hndl) return cast, hndl @action def play(self, client=None, chromecast=None, **kwargs): if not client and not chromecast: raise RuntimeError('No client nor chromecast specified') if client: client = self.plex.client(client) elif chromecast: (chromecast, handler) = self.get_chromecast(chromecast) if not kwargs: if client: return client.play() elif chromecast: return handler.play() if 'section' in kwargs: library = self.plex.library.section(kwargs.pop('section')) else: library = self.plex.library results = library.search(**kwargs) if not results: self.logger.info('No results for {}'.format(kwargs)) return item = results[0] self.logger.info('Playing {} on {}'.format(item.title, client or chromecast)) if client: return client.playMedia(item) elif chromecast: return handler.play_media(item, self.plex) @action def pause(self, client): return self.client(client).pause() @action def stop(self, client): return self.client(client).stop() @action def seek(self, client, offset): return self.client(client).seekTo(offset) @action
MIT License
romeodespres/reapy
reapy/__init__.py
is_inside_reaper
python
def is_inside_reaper(): inside = hasattr(sys.modules["__main__"], "obj") if not inside: return False else: try: return machines.get_selected_machine_host() is None except NameError: return True
Return whether ``reapy`` is imported from inside REAPER. If ``reapy`` is run from inside a REAPER instance but currently controls another REAPER instance on a slave machine (with ``reapy.connect``), return False.
https://github.com/romeodespres/reapy/blob/730627cee6f39fc26d6ebc8a3df0112e5921cd9f/reapy/__init__.py#L4-L21
import sys
MIT License
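A trivial usage sketch: the helper is defined in reapy/__init__.py, so it is called straight off the package.

import reapy

if reapy.is_inside_reaper():
    print("Running from REAPER's embedded Python interpreter")
else:
    print("Running as an external script")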
mlenzen/collections-extended
collections_extended/indexed_dict.py
IndexedDict.copy
python
def copy(self): ret = IndexedDict() ret._dict = self._dict.copy() ret._list = list(self._list) return ret
Return a shallow copy.
https://github.com/mlenzen/collections-extended/blob/e3d3e29ee7a15d9af21561587c2a0beda16b010b/collections_extended/indexed_dict.py#L280-L285
from collections.abc import MutableMapping from ._util import deprecation_warning from .sentinel import NOT_SET __all__ = ('IndexedDict', ) KEY_AND_INDEX_ERROR = TypeError( "Specifying both `key` and `index` is not allowed") KEY_EQ_INDEX_ERROR = TypeError( "Exactly one of `key` and `index` must be specified") class IndexedDict(MutableMapping): def __init__(self, iterable=None, **kwargs): self._dict = {} self._list = [] self.update(iterable or [], **kwargs) def clear(self): self._dict = {} self._list = [] def get(self, key=NOT_SET, index=NOT_SET, default=NOT_SET, d=NOT_SET): if d is not NOT_SET: if default is not NOT_SET: raise ValueError('Specified default and d') deprecation_warning( "IndexedDict.pop parameter 'd' has been renamed to 'default'" ) default = d if default is NOT_SET: default = None if index is NOT_SET and key is not NOT_SET: try: index, value = self._dict[key] except KeyError: return default else: return value elif index is not NOT_SET and key is NOT_SET: try: key, value = self._list[index] except IndexError: return default else: return value else: raise KEY_EQ_INDEX_ERROR def pop(self, key=NOT_SET, index=NOT_SET, default=NOT_SET, d=NOT_SET): if d is not NOT_SET: if default is not NOT_SET: raise ValueError('Specified default and d') deprecation_warning( "IndexedDict.pop parameter 'd' has been renamed to 'default'" ) default = d has_default = default is not NOT_SET if index is NOT_SET and key is not NOT_SET: index, value = self._pop_key(key, has_default) elif key is NOT_SET: key, index, value = self._pop_index(index, has_default) else: raise KEY_AND_INDEX_ERROR if index is None: return default else: self._fix_indices_after_delete(index) return value def _pop_key(self, key, has_default): try: index, value = self._dict.pop(key) except KeyError: if has_default: return None, None else: raise key2, value2 = self._list.pop(index) assert key is key2 assert value is value2 return index, value def _pop_index(self, index, has_default): try: if index is NOT_SET: index = len(self._list) - 1 key, value = self._list.pop() else: key, value = self._list.pop(index) if index < 0: index += len(self._list) + 1 except IndexError: if has_default: return None, None, None else: raise index2, value2 = self._dict.pop(key) assert index == index2 assert value is value2 return key, index, value def fast_pop(self, key=NOT_SET, index=NOT_SET): if index is NOT_SET and key is not NOT_SET: index, popped_value = self._dict.pop(key) elif key is NOT_SET: if index is NOT_SET: index = len(self._list) - 1 key, popped_value2 = self._list[-1] else: key, popped_value2 = self._list[index] if index < 0: index += len(self._list) index2, popped_value = self._dict.pop(key) assert index == index2 else: raise KEY_AND_INDEX_ERROR if key == self._list[-1][0]: _, popped_value2 = self._list.pop() assert popped_value is popped_value2 return popped_value, len(self._list), key, popped_value else: self._list[index] = self._list[-1] moved_key, moved_value = self._list.pop() self._dict[moved_key] = (index, moved_value) return popped_value, index, moved_key, moved_value def popitem(self, last=NOT_SET, *, key=NOT_SET, index=NOT_SET): if not self: raise KeyError('IndexedDict is empty') if sum(x is not NOT_SET for x in (last, key, index)) > 1: raise ValueError( "Cannot specify more than one of key, index and last" ) if key is not NOT_SET: index, value = self._pop_key(key=key, has_default=False) else: if last is not NOT_SET: index = -1 if last else 0 if index is NOT_SET: index = -1 key, index, value = self._pop_index(index, 
has_default=False) self._fix_indices_after_delete(starting_index=index) return key, value def move_to_end(self, key=NOT_SET, index=NOT_SET, last=True): if index is NOT_SET and key is not NOT_SET: index, value = self._dict[key] elif index is not NOT_SET and key is NOT_SET: key, value = self._list[index] if index < 0: index += len(self._list) else: raise KEY_EQ_INDEX_ERROR if last: index_range = range(len(self._list) - 1, index - 1, -1) self._dict[key] = (len(self._list) - 1, value) else: index_range = range(index + 1) self._dict[key] = (0, value) previous = (key, value) for i in index_range: self._dict[previous[0]] = i, previous[1] previous, self._list[i] = self._list[i], previous
Apache License 2.0
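A minimal sketch of copy; IndexedDict is assumed to be exported at the collections_extended package level.

from collections_extended import IndexedDict  # assumed package-level export

d = IndexedDict([("a", 1), ("b", 2)])
d2 = d.copy()
d2["c"] = 3
print(list(d))   # ['a', 'b'] -- the original is untouched
print(list(d2))  # ['a', 'b', 'c']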
catalyst-cooperative/pudl
src/pudl/helpers.py
simplify_strings
python
def simplify_strings(df, columns): out_df = df.copy() for col in columns: if col in out_df.columns: out_df.loc[out_df[col].notnull(), col] = ( out_df.loc[out_df[col].notnull(), col] .astype(str) .str.replace(r"[\x00-\x1f\x7f-\x9f]", "", regex=True) .str.strip() .str.lower() .str.replace(r'\s+', ' ', regex=True) ) return out_df
Simplify the strings contained in a set of dataframe columns. Performs several operations to simplify strings for comparison and parsing purposes. These include removing Unicode control characters, stripping leading and trailing whitespace, using lowercase characters, and compacting all internal whitespace to a single space. Leaves null values unaltered. Casts other values with astype(str). Args: df (pandas.DataFrame): DataFrame whose columns are being cleaned up. columns (iterable): The labels of the string columns to be simplified. Returns: pandas.DataFrame: The whole DataFrame that was passed in, with the string columns cleaned up.
https://github.com/catalyst-cooperative/pudl/blob/6a75069b90219a2da55262737b92fe0a024c4fb8/src/pudl/helpers.py#L354-L385
import itertools import logging import pathlib import re import shutil from functools import partial from io import BytesIO import addfips import numpy as np import pandas as pd import requests import sqlalchemy as sa import timezonefinder import pudl from pudl import constants as pc logger = logging.getLogger(__name__) sum_na = partial(pd.Series.sum, skipna=False) TZ_FINDER = timezonefinder.TimezoneFinder() def download_zip_url(url, save_path, chunk_size=128): headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5', 'DNT': '1', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1', } r = requests.get(url, stream=True, headers=headers) with save_path.open(mode='wb') as fd: for chunk in r.iter_content(chunk_size=chunk_size): fd.write(chunk) def add_fips_ids(df, state_col="state", county_col="county", vintage=2015): df = df.astype({ state_col: pd.StringDtype(), county_col: pd.StringDtype(), }) af = addfips.AddFIPS(vintage=vintage) df["state_id_fips"] = df.apply( lambda x: (af.get_state_fips(state=x[state_col]) if pd.notnull(x[state_col]) else pd.NA), axis=1) logger.info( f"Assigned state FIPS codes for " f"{len(df[df.state_id_fips.notnull()])/len(df):.2%} of records." ) df["county_id_fips"] = df.apply( lambda x: (af.get_county_fips(state=x[state_col], county=x[county_col]) if pd.notnull(x[county_col]) else pd.NA), axis=1) df = df.astype({ "county_id_fips": pd.StringDtype(), "state_id_fips": pd.StringDtype(), }) logger.info( f"Assigned county FIPS codes for " f"{len(df[df.county_id_fips.notnull()])/len(df):.2%} of records." ) return df def clean_eia_counties(df, fixes, state_col="state", county_col="county"): df = df.copy() df[county_col] = ( df[county_col].str.strip() .str.replace(r"\s+", " ", regex=True) .str.replace(r"^St ", "St. ", regex=True) .str.replace(r"^Ste ", "Ste. 
", regex=True) .str.replace("Kent & New Castle", "Kent, New Castle") .str.replace("Borough, Kodiak Island", "Kodiak Island Borough") .str.replace(r",$", "", regex=True).str.split(',') ) df = df.explode(county_col) df[county_col] = df[county_col].str.strip() df.loc[(df[state_col] == "WY") & (df[county_col] == "Yellowstone"), state_col] = "MT" for fix in fixes.itertuples(): state_mask = df[state_col] == fix.state county_mask = df[county_col] == fix.eia_county df.loc[state_mask & county_mask, county_col] = fix.fips_county return df def oob_to_nan(df, cols, lb=None, ub=None): out_df = df.copy() for col in cols: out_df.loc[:, col] = pd.to_numeric(out_df[col], errors="coerce") if lb is not None: out_df.loc[out_df[col] < lb, col] = np.nan if ub is not None: out_df.loc[out_df[col] > ub, col] = np.nan return out_df def prep_dir(dir_path, clobber=False): dir_path = pathlib.Path(dir_path) if dir_path.exists(): if clobber: shutil.rmtree(dir_path) else: raise FileExistsError( f'{dir_path} exists and clobber is {clobber}') dir_path.mkdir(parents=True) return dir_path def is_doi(doi): doi_regex = re.compile( r'(doi:\s*|(?:https?://)?(?:dx\.)?doi\.org/)?(10\.\d+(.\d+)*/.+)$', re.IGNORECASE | re.UNICODE) return bool(re.match(doi_regex, doi)) def clean_merge_asof( left, right, left_on="report_date", right_on="report_date", by={}, ): if left_on not in left.columns: raise ValueError(f"Left dataframe has no column {left_on}.") if right_on not in right.columns: raise ValueError(f"Right dataframe has no {right_on}.") missing_left_cols = [col for col in by if col not in left.columns] if missing_left_cols: raise ValueError(f"Left dataframe is missing {missing_left_cols}.") missing_right_cols = [col for col in by if col not in right.columns] if missing_right_cols: raise ValueError(f"Left dataframe is missing {missing_right_cols}.") def cleanup(df, on, by): df = df.astype(get_pudl_dtypes(by)) df.loc[:, on] = pd.to_datetime(df[on]) df = df.sort_values([on] + list(by.keys())) return df return pd.merge_asof( cleanup(df=left, on=left_on, by=by), cleanup(df=right, on=right_on, by=by), left_on=left_on, right_on=right_on, by=list(by.keys()), tolerance=pd.Timedelta("365 days") ) def get_pudl_dtype(col, data_source): return pudl.constants.column_dtypes[data_source][col] def get_pudl_dtypes(col_source_dict): return { col: get_pudl_dtype(col, col_source_dict[col]) for col in col_source_dict } def organize_cols(df, cols): data_cols = sorted([c for c in df.columns.tolist() if c not in cols]) organized_cols = cols + data_cols return df[organized_cols]
MIT License
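A usage sketch for simplify_strings; the import path follows src/pudl/helpers.py and the column name is made up.

import numpy as np
import pandas as pd
from pudl.helpers import simplify_strings  # assumed import path

df = pd.DataFrame({"plant_name": ["  Big  River  PLANT ", np.nan]})
out = simplify_strings(df, columns=["plant_name"])
print(out["plant_name"].tolist())  # ['big river plant', nan]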
viralogic/py-enumerable
py_linq/py_linq.py
Enumerable.min
python
def min(self, func=lambda x: x): if not self.any(): raise NoElementsError(u"Iterable contains no elements") return func(min(self, key=func))
Returns the min value of data elements :param func: lambda expression to transform data :return: minimum value
https://github.com/viralogic/py-enumerable/blob/7c450550e71f758e8b5fcc8f92d75f1d8908c122/py_linq/py_linq.py#L97-L105
import itertools import json import io from queue import LifoQueue from six import string_types try: from itertools import imap as map from itertools import ifilter as filter from itertools import izip as zip except ImportError: pass from builtins import range from .core import Key, OrderingDirection, RepeatableIterable from .decorators import deprecated from .exceptions import ( NoElementsError, NoMatchingElement, NullArgumentError, MoreThanOneMatchingElement, ) class Enumerable(object): def __init__(self, data=None): self._iterable = RepeatableIterable(data) def __iter__(self): return iter(self._iterable) def next(self): return next(self._iterable) def __next__(self): return self.next() def __getitem__(self, n): for i, e in enumerate(self): if i == n: return e def __len__(self): return len(self._iterable) def __repr__(self): return list(self).__repr__() def to_list(self): return [x for x in self] def count(self, predicate=None): if predicate is not None: return sum(1 for element in self.where(predicate)) return sum(1 for element in self) def select(self, func=lambda x: x): return SelectEnumerable(Enumerable(iter(self)), func) def sum(self, func=lambda x: x): return sum(func(x) for x in self)
MIT License
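A small sketch of min; Enumerable is assumed to be importable from the py_linq package, mirroring its documented usage.

from py_linq import Enumerable  # assumed package-level export

print(Enumerable([5, 4, 9]).min())                              # 4
print(Enumerable([{"x": 3}, {"x": 1}]).min(lambda p: p["x"]))   # 1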
scikit-hep/pyhf
src/pyhf/tensor/jax_backend.py
jax_backend.erf
python
def erf(self, tensor_in): return special.erf(tensor_in)
The error function of complex argument. Example: >>> import pyhf >>> pyhf.set_backend("jax") >>> a = pyhf.tensorlib.astensor([-2., -1., 0., 1., 2.]) >>> pyhf.tensorlib.erf(a) DeviceArray([-0.99532227, -0.84270079, 0. , 0.84270079, 0.99532227], dtype=float64) Args: tensor_in (:obj:`tensor`): The input tensor object Returns: JAX ndarray: The values of the error function at the given points.
https://github.com/scikit-hep/pyhf/blob/7ecaa63f6673108d23e4bd984e32504a150cfbb8/src/pyhf/tensor/jax_backend.py#L93-L112
from jax.config import config config.update('jax_enable_x64', True) import jax.numpy as jnp from jax.scipy.special import gammaln, xlogy from jax.scipy import special from jax.scipy.stats import norm import numpy as np import scipy.stats as osp_stats import logging log = logging.getLogger(__name__) class _BasicPoisson: def __init__(self, rate): self.rate = rate def sample(self, sample_shape): return jnp.asarray( osp_stats.poisson(self.rate).rvs(size=sample_shape + self.rate.shape), dtype=jnp.float64, ) def log_prob(self, value): tensorlib = jax_backend() return tensorlib.poisson_logpdf(value, self.rate) class _BasicNormal: def __init__(self, loc, scale): self.loc = loc self.scale = scale def sample(self, sample_shape): return jnp.asarray( osp_stats.norm(self.loc, self.scale).rvs( size=sample_shape + self.loc.shape ), dtype=jnp.float64, ) def log_prob(self, value): tensorlib = jax_backend() return tensorlib.normal_logpdf(value, self.loc, self.scale) class jax_backend: __slots__ = ['name', 'precision', 'dtypemap', 'default_do_grad'] def __init__(self, **kwargs): self.name = 'jax' self.precision = kwargs.get('precision', '64b') self.dtypemap = { 'float': jnp.float64 if self.precision == '64b' else jnp.float32, 'int': jnp.int64 if self.precision == '64b' else jnp.int32, 'bool': jnp.bool_, } self.default_do_grad = True def _setup(self): def clip(self, tensor_in, min_value, max_value): return jnp.clip(tensor_in, min_value, max_value)
Apache License 2.0
zayfod/pycozmo
pycozmo/util.py
Matrix44.in_row_order
python
def in_row_order(self) -> Tuple[float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float]: return self.m00, self.m01, self.m02, self.m03, self.m10, self.m11, self.m12, self.m13, self.m20, self.m21, self.m22, self.m23, self.m30, self.m31, self.m32, self.m33
Returns the contents of the matrix in row order.
https://github.com/zayfod/pycozmo/blob/1b6dcd9b869a3784f1d8b02e820bb033f95fd13a/pycozmo/util.py#L367-L377
from typing import Optional, Tuple import os import pathlib import math import time from . import exception __all__ = [ 'Angle', 'Distance', 'Speed', 'Vector3', 'angle_z_to_quaternion', 'Matrix44', 'Quaternion', 'Pose', 'FPSTimer', 'hex_dump', 'hex_load', 'frange', 'get_pycozmo_dir', 'get_cozmo_asset_dir', 'check_assets', 'get_cozmo_anim_dir', ] class Angle: __slots__ = '_radians' def __init__(self, radians: Optional[float] = None, degrees: Optional[float] = None): if radians is None and degrees is None: raise ValueError("Expected either the degrees or radians keyword argument") if radians and degrees: raise ValueError("Expected either the degrees or radians keyword argument, not both") if degrees is not None: radians = degrees * math.pi / 180 self._radians = float(radians) def __repr__(self): return "<%s %.2f radians (%.2f degrees)>" % (self.__class__.__name__, self.radians, self.degrees) def __add__(self, other): if not isinstance(other, Angle): raise TypeError("Unsupported type for + expected Angle") return Angle(radians=self.radians + other.radians) def __sub__(self, other): if not isinstance(other, Angle): raise TypeError("Unsupported type for - expected Angle") return Angle(radians=self.radians - other.radians) def __mul__(self, other): if not isinstance(other, (int, float)): raise TypeError("Unsupported type for * expected number") return Angle(radians=self.radians * other) def __truediv__(self, other): if not isinstance(other, (int, float)): raise TypeError("Unsupported type for / expected number") return Angle(radians=self.radians / other) def _cmp_int(self, other): if not isinstance(other, Angle): raise TypeError("Unsupported type for comparison expected Angle") return self.radians - other.radians def __eq__(self, other): return self._cmp_int(other) == 0 def __ne__(self, other): return self._cmp_int(other) != 0 def __gt__(self, other): return self._cmp_int(other) > 0 def __lt__(self, other): return self._cmp_int(other) < 0 def __ge__(self, other): return self._cmp_int(other) >= 0 def __le__(self, other): return self._cmp_int(other) <= 0 @property def radians(self) -> float: return self._radians @property def degrees(self) -> float: return self._radians / math.pi * 180 @property def abs_value(self): return Angle(radians=abs(self._radians)) class Distance: __slots__ = '_mm' def __init__(self, mm: Optional[float] = None, inches: Optional[float] = None): if mm is None and inches is None: raise ValueError("Expected either the mm or inches keyword argument") if mm and inches: raise ValueError("Expected either the mm or inches keyword argument, not both") if inches is not None: mm = inches * 25.4 self._mm = mm def __repr__(self): return "<%s %.2f mm (%.2f inches)>" % (self.__class__.__name__, self.mm, self.inches) def __add__(self, other): if not isinstance(other, Distance): raise TypeError("Unsupported operand for + expected Distance") return Distance(mm=self.mm + other.mm) def __sub__(self, other): if not isinstance(other, Distance): raise TypeError("Unsupported operand for - expected Distance") return Distance(mm=self.mm - other.mm) def __mul__(self, other): if not isinstance(other, (int, float)): raise TypeError("Unsupported operand for * expected number") return Distance(mm=self.mm * other) def __truediv__(self, other): if not isinstance(other, (int, float)): raise TypeError("Unsupported operand for / expected number") return Distance(mm=self.mm / other) @property def mm(self) -> float: return self._mm @property def inches(self) -> float: return self._mm / 25.4 class Speed: 
__slots__ = '_mmps' def __init__(self, mmps: float): self._mmps = mmps def __repr__(self): return "<%s %.2f mmps>" % (self.__class__.__name__, self.mmps) def __add__(self, other): if not isinstance(other, Speed): raise TypeError("Unsupported operand for + expected Speed") return Speed(mmps=self.mmps + other.mmps) def __sub__(self, other): if not isinstance(other, Speed): raise TypeError("Unsupported operand for - expected Speed") return Speed(mmps=self.mmps - other.mmps) def __mul__(self, other): if not isinstance(other, (int, float)): raise TypeError("Unsupported operand for * expected number") return Speed(mmps=self.mmps * other) def __truediv__(self, other): if not isinstance(other, (int, float)): raise TypeError("Unsupported operand for / expected number") return Speed(mmps=self.mmps / other) @property def mmps(self) -> float: return self._mmps class Vector3: __slots__ = ('_x', '_y', '_z') def __init__(self, x: float, y: float, z: float): self._x = x self._y = y self._z = z def set_to(self, rhs): self._x = rhs.x self._y = rhs.y self._z = rhs.z @property def x(self) -> float: return self._x @property def y(self) -> float: return self._y @property def z(self) -> float: return self._z @property def x_y_z(self) -> Tuple[float, float, float]: return self._x, self._y, self._z def __repr__(self): return "<%s x: %.2f y: %.2f z: %.2f>" % (self.__class__.__name__, self.x, self.y, self.z) def __add__(self, other): if not isinstance(other, Vector3): raise TypeError("Unsupported operand for + expected Vector3") return Vector3(self.x + other.x, self.y + other.y, self.z + other.z) def __sub__(self, other): if not isinstance(other, Vector3): raise TypeError("Unsupported operand for - expected Vector3") return Vector3(self.x - other.x, self.y - other.y, self.z - other.z) def __mul__(self, other): if not isinstance(other, (int, float)): raise TypeError("Unsupported operand for * expected number") return Vector3(self.x * other, self.y * other, self.z * other) def __truediv__(self, other): if not isinstance(other, (int, float)): raise TypeError("Unsupported operand for / expected number") return Vector3(self.x / other, self.y / other, self.z / other) def angle_z_to_quaternion(angle_z: Angle) -> Tuple[float, float, float, float]: q0 = math.cos(angle_z.radians / 2) q1 = 0 q2 = 0 q3 = math.sin(angle_z.radians / 2) return q0, q1, q2, q3 class Matrix44: __slots__ = ('m00', 'm10', 'm20', 'm30', 'm01', 'm11', 'm21', 'm31', 'm02', 'm12', 'm22', 'm32', 'm03', 'm13', 'm23', 'm33') def __init__(self, m00, m10, m20, m30, m01, m11, m21, m31, m02, m12, m22, m32, m03, m13, m23, m33): self.m00 = m00 self.m10 = m10 self.m20 = m20 self.m30 = m30 self.m01 = m01 self.m11 = m11 self.m21 = m21 self.m31 = m31 self.m02 = m02 self.m12 = m12 self.m22 = m22 self.m32 = m32 self.m03 = m03 self.m13 = m13 self.m23 = m23 self.m33 = m33 def __repr__(self): return ("<%s: " "%.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f " "%.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f>" % (self.__class__.__name__, *self.in_row_order)) @property def tabulated_string(self): return ("%.1f\t%.1f\t%.1f\t%.1f\n" "%.1f\t%.1f\t%.1f\t%.1f\n" "%.1f\t%.1f\t%.1f\t%.1f\n" "%.1f\t%.1f\t%.1f\t%.1f" % self.in_row_order) @property
MIT License
arxiv-vanity/arxiv-vanity
arxiv_vanity/storage.py
storage_delete_path
python
def storage_delete_path(storage, root_path): for path in storage_walk(storage, root_path): storage.delete(path)
Recursive delete for Django storage.
https://github.com/arxiv-vanity/arxiv-vanity/blob/25c6a79c63ed3d00e8dcde4bd9dad25b78c61a8c/arxiv_vanity/storage.py#L18-L23
import os


def storage_walk(storage, cur_dir=""):
    dirs, files = storage.listdir(cur_dir)
    for directory in dirs:
        new_dir = os.path.join(cur_dir, directory)
        for path in storage_walk(storage, cur_dir=new_dir):
            yield path
    for fname in files:
        path = os.path.join(cur_dir, fname)
        yield path
Apache License 2.0
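A quick usage sketch for storage_delete_path; the storage backend and the prefix below are illustrative assumptions, not values taken from the project:

from django.core.files.storage import default_storage

# Recursively delete every file stored under the given prefix on the configured backend.
storage_delete_path(default_storage, "renders/1234/")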
tellapart/taba
src/taba/server/model/client_storage.py
ClientStorageManager.RemoveClient
python
def RemoveClient(self, client):
    self.index_manager.RemoveValue(client)
Delete a Client Name from the mapping.
(Note: CIDs will never be recycled, even after they are deleted).

Args:
  client - Client Name to remove from the mapping.
https://github.com/tellapart/taba/blob/0254e76348d247ab957ff547df9662a69cab4c9c/src/taba/server/model/client_storage.py#L116-L123
import logging from taba.server.storage import util from taba.server.storage.double_index_storage import DoubleIndexStorageManager KEY_PREFIX = 'C' CACHE_TTL_SEC = 3600 LOG = logging.getLogger(__name__) class ClientStorageManager(object): def __init__(self, engine): self.index_manager = DoubleIndexStorageManager( engine=engine, key_prefix=KEY_PREFIX, cache_ttl=CACHE_TTL_SEC) def GetCids(self, clients, create_if_new=False): op = util.StrictOp('retrieving CIDs for Client Names', self.index_manager.GetIdsForValues, clients, create_if_new) return op.response_value def GetClients(self, cids): op = util.StrictOp('retrieving Client Names for CIDs', self.index_manager.GetValuesForIds, cids) return op.response_value def GetAllClients(self): op = util.StrictOp('retrieving all Client Names', self.index_manager.GetAllValues) return op.response_value def GetAllCids(self): op = util.StrictOp('retrieving all CIDs', self.index_manager.GetAllIds) return op.response_value def GetClientCidMap(self): op = util.StrictOp('retrieving total Client => CID map', self.index_manager.GetAllValueIdMap) return op.response_value def AddClient(self, client): self.index_manager.AddValue(client)
Apache License 2.0
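A hedged usage sketch for RemoveClient; it assumes `engine` is an already-configured Taba storage engine instance, and the client name is illustrative:

manager = ClientStorageManager(engine)
cids = manager.GetCids(["client-a"], create_if_new=True)  # allocate a CID for the client
manager.RemoveClient("client-a")  # drop the Client Name -> CID mapping; the CID is never reused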
ebranlard/pydatview
pydatview/GUIPlotPanel.py
PlotPanel.setPD_PDF
python
def setPD_PDF(self, PD, c):
    nBins = self.pdfPanel.scBins.GetValue()
    bSmooth = self.pdfPanel.cbSmooth.GetValue()
    nBins_out = PD.toPDF(nBins, bSmooth)
    if nBins_out != nBins:
        self.pdfPanel.scBins.SetValue(nBins)
Convert plot data to PDF data based on GUI options
https://github.com/ebranlard/pydatview/blob/3516ffaff601c122d62ffc94abd842958354ece8/pydatview/GUIPlotPanel.py#L704-L711
import os import numpy as np import wx import wx.lib.buttons as buttons import dateutil import matplotlib matplotlib.use('wxAgg') from matplotlib import rc as matplotlib_rc try: from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas except Exception as e: print('') print('Error: problem importing `matplotlib.backends.backend_wx`.') import platform if platform.system()=='Darwin': print('') print('pyDatView help:') print(' This is a typical issue on MacOS, most likely you are') print(' using the native MacOS python with the native matplolib') print(' library, which is incompatible with `wxPython`.') print('') print(' You can solve this by either:') print(' - using python3, and pip3 e.g. installing it with brew') print(' - using a virtual environment with python 2 or 3') print(' - using anaconda with python 2 or 3'); print('') import sys sys.exit(1) else: raise e from matplotlib.figure import Figure from matplotlib.pyplot import rcParams as pyplot_rc from matplotlib import font_manager from pandas.plotting import register_matplotlib_converters import gc from .common import * from .plotdata import PlotData, compareMultiplePD from .GUICommon import * from .GUIToolBox import MyMultiCursor, MyNavigationToolbar2Wx, TBAddTool, TBAddCheckTool from .GUIMeasure import GUIMeasure from . import icons font = {'size' : 8} matplotlib_rc('font', **font) pyplot_rc['agg.path.chunksize'] = 20000 class PDFCtrlPanel(wx.Panel): def __init__(self, parent): super(PDFCtrlPanel,self).__init__(parent) self.parent = parent lb = wx.StaticText( self, -1, 'Number of bins:') self.scBins = wx.SpinCtrl(self, value='50',size=wx.Size(70,-1)) self.scBins.SetRange(3, 10000) self.cbSmooth = wx.CheckBox(self, -1, 'Smooth',(10,10)) self.cbSmooth.SetValue(False) dummy_sizer = wx.BoxSizer(wx.HORIZONTAL) dummy_sizer.Add(lb ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(self.scBins ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(self.cbSmooth ,0, flag = wx.CENTER|wx.LEFT,border = 6) self.SetSizer(dummy_sizer) self.Bind(wx.EVT_TEXT , self.onPDFOptionChange, self.scBins) self.Bind(wx.EVT_CHECKBOX, self.onPDFOptionChange) self.Hide() def onPDFOptionChange(self,event=None): self.parent.load_and_draw(); class MinMaxPanel(wx.Panel): def __init__(self, parent): super(MinMaxPanel,self).__init__(parent) self.parent = parent self.cbxMinMax = wx.CheckBox(self, -1, 'xMinMax',(10,10)) self.cbyMinMax = wx.CheckBox(self, -1, 'yMinMax',(10,10)) self.cbxMinMax.SetValue(False) self.cbyMinMax.SetValue(True) dummy_sizer = wx.BoxSizer(wx.HORIZONTAL) dummy_sizer.Add(self.cbxMinMax ,0, flag=wx.CENTER|wx.LEFT, border = 1) dummy_sizer.Add(self.cbyMinMax ,0, flag=wx.CENTER|wx.LEFT, border = 1) self.SetSizer(dummy_sizer) self.Bind(wx.EVT_CHECKBOX, self.onMinMaxChange) self.Hide() def onMinMaxChange(self,event=None): self.parent.load_and_draw(); class CompCtrlPanel(wx.Panel): def __init__(self, parent): super(CompCtrlPanel,self).__init__(parent) self.parent = parent lblList = ['Relative', '|Relative|','Ratio','Absolute','Y-Y'] self.rbType = wx.RadioBox(self, label = 'Type', choices = lblList, majorDimension = 1, style = wx.RA_SPECIFY_ROWS) dummy_sizer = wx.BoxSizer(wx.HORIZONTAL) dummy_sizer.Add(self.rbType ,0, flag = wx.CENTER|wx.LEFT,border = 1) self.SetSizer(dummy_sizer) self.rbType.Bind(wx.EVT_RADIOBOX,self.onTypeChange) self.Hide() def onTypeChange(self,e): self.parent.load_and_draw(); class SpectralCtrlPanel(wx.Panel): def __init__(self, parent): super(SpectralCtrlPanel,self).__init__(parent) self.parent = parent 
lb = wx.StaticText( self, -1, 'Type:') self.cbType = wx.ComboBox(self, choices=['PSD','f x PSD','Amplitude'] , style=wx.CB_READONLY) self.cbType.SetSelection(0) lbAveraging = wx.StaticText( self, -1, 'Avg.:') self.cbAveraging = wx.ComboBox(self, choices=['None','Welch'] , style=wx.CB_READONLY) self.cbAveraging.SetSelection(1) self.lbAveragingMethod = wx.StaticText( self, -1, 'Window:') self.cbAveragingMethod = wx.ComboBox(self, choices=['Hamming','Hann','Rectangular'] , style=wx.CB_READONLY) self.cbAveragingMethod.SetSelection(0) self.lbP2 = wx.StaticText( self, -1, '2^n:') self.scP2 = wx.SpinCtrl(self, value='11',size=wx.Size(40,-1)) self.lbWinLength = wx.StaticText( self, -1, '(2048) ') self.scP2.SetRange(3, 19) lbMaxFreq = wx.StaticText( self, -1, 'Xlim:') self.tMaxFreq = wx.TextCtrl(self,size = (30,-1),style=wx.TE_PROCESS_ENTER) self.tMaxFreq.SetValue("-1") self.cbDetrend = wx.CheckBox(self, -1, 'Detrend',(10,10)) lbX = wx.StaticText( self, -1, 'x:') self.cbTypeX = wx.ComboBox(self, choices=['1/x','2pi/x','x'] , style=wx.CB_READONLY) self.cbTypeX.SetSelection(0) dummy_sizer = wx.BoxSizer(wx.HORIZONTAL) dummy_sizer.Add(lb ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(self.cbType ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(lbAveraging ,0, flag = wx.CENTER|wx.LEFT,border = 6) dummy_sizer.Add(self.cbAveraging ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(self.lbAveragingMethod,0, flag = wx.CENTER|wx.LEFT,border = 6) dummy_sizer.Add(self.cbAveragingMethod,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(self.lbP2 ,0, flag = wx.CENTER|wx.LEFT,border = 6) dummy_sizer.Add(self.scP2 ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(self.lbWinLength ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(lbMaxFreq ,0, flag = wx.CENTER|wx.LEFT,border = 6) dummy_sizer.Add(self.tMaxFreq ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(lbX ,0, flag = wx.CENTER|wx.LEFT,border = 6) dummy_sizer.Add(self.cbTypeX ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(self.cbDetrend ,0, flag = wx.CENTER|wx.LEFT,border = 7) self.SetSizer(dummy_sizer) self.Bind(wx.EVT_COMBOBOX ,self.onSpecCtrlChange) self.Bind(wx.EVT_TEXT ,self.onP2ChangeText ,self.scP2 ) self.Bind(wx.EVT_TEXT_ENTER,self.onXlimChange ,self.tMaxFreq ) self.Bind(wx.EVT_CHECKBOX ,self.onDetrendChange ,self.cbDetrend) self.Hide() def onXlimChange(self,event=None): self.parent.redraw_same_data(); def onSpecCtrlChange(self,event=None): self.parent.load_and_draw() def onDetrendChange(self,event=None): self.parent.load_and_draw() def onP2ChangeText(self,event=None): nExp=self.scP2.GetValue() self.updateP2(nExp) self.parent.load_and_draw() def updateP2(self,P2): self.lbWinLength.SetLabel("({})".format(2**P2)) class PlotTypePanel(wx.Panel): def __init__(self, parent): super(PlotTypePanel,self).__init__(parent) self.parent = parent self.cbRegular = wx.RadioButton(self, -1, 'Regular',style=wx.RB_GROUP) self.cbPDF = wx.RadioButton(self, -1, 'PDF' , ) self.cbFFT = wx.RadioButton(self, -1, 'FFT' , ) self.cbMinMax = wx.RadioButton(self, -1, 'MinMax' , ) self.cbCompare = wx.RadioButton(self, -1, 'Compare', ) self.cbRegular.SetValue(True) self.Bind(wx.EVT_RADIOBUTTON, self.pdf_select , self.cbPDF ) self.Bind(wx.EVT_RADIOBUTTON, self.fft_select , self.cbFFT ) self.Bind(wx.EVT_RADIOBUTTON, self.minmax_select , self.cbMinMax ) self.Bind(wx.EVT_RADIOBUTTON, self.compare_select, self.cbCompare) self.Bind(wx.EVT_RADIOBUTTON, self.regular_select, self.cbRegular) cb_sizer = wx.FlexGridSizer(rows=5, cols=1, 
hgap=0, vgap=0) cb_sizer.Add(self.cbRegular , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbPDF , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbFFT , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbMinMax , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbCompare , 0, flag=wx.ALL, border=1) self.SetSizer(cb_sizer) def plotType(self): plotType='Regular' if self.cbMinMax.GetValue(): plotType='MinMax' elif self.cbPDF.GetValue(): plotType='PDF' elif self.cbFFT.GetValue(): plotType='FFT' elif self.cbCompare.GetValue(): plotType='Compare' return plotType def regular_select(self, event=None): self.clear_measures() self.parent.cbLogY.SetValue(False) self.parent.spcPanel.Hide(); self.parent.pdfPanel.Hide(); self.parent.cmpPanel.Hide(); self.parent.mmxPanel.Hide(); self.parent.slEsth.Hide(); self.parent.plotsizer.Layout() self.parent.load_and_draw() def compare_select(self, event=None): self.clear_measures() self.parent.cbLogY.SetValue(False) self.parent.show_hide(self.parent.cmpPanel, self.cbCompare.GetValue()) self.parent.spcPanel.Hide(); self.parent.pdfPanel.Hide(); self.parent.mmxPanel.Hide(); self.parent.plotsizer.Layout() self.parent.load_and_draw() def fft_select(self, event=None): self.clear_measures() self.parent.show_hide(self.parent.spcPanel, self.cbFFT.GetValue()) self.parent.cbLogY.SetValue(self.cbFFT.GetValue()) self.parent.pdfPanel.Hide(); self.parent.mmxPanel.Hide(); self.parent.plotsizer.Layout() self.parent.load_and_draw() def pdf_select(self, event=None): self.clear_measures() self.parent.cbLogX.SetValue(False) self.parent.cbLogY.SetValue(False) self.parent.show_hide(self.parent.pdfPanel, self.cbPDF.GetValue()) self.parent.spcPanel.Hide(); self.parent.cmpPanel.Hide(); self.parent.mmxPanel.Hide(); self.parent.plotsizer.Layout() self.parent.load_and_draw() def minmax_select(self, event): self.clear_measures() self.parent.cbLogY.SetValue(False) self.parent.show_hide(self.parent.mmxPanel, self.cbMinMax.GetValue()) self.parent.spcPanel.Hide(); self.parent.pdfPanel.Hide(); self.parent.cmpPanel.Hide(); self.parent.plotsizer.Layout() self.parent.load_and_draw() def clear_measures(self): self.parent.rightMeasure.clear() self.parent.leftMeasure.clear() self.parent.lbDeltaX.SetLabel('') self.parent.lbDeltaY.SetLabel('') class EstheticsPanel(wx.Panel): def __init__(self, parent): wx.Panel.__init__(self, parent) self.parent=parent lbFont = wx.StaticText( self, -1, 'Font:') self.cbFont = wx.ComboBox(self, choices=['6','7','8','9','10','11','12','13','14','15','16','17','18'] , style=wx.CB_READONLY) self.cbFont.SetSelection(2) lbLegend = wx.StaticText( self, -1, 'Legend:') self.cbLegend = wx.ComboBox(self, choices=['None','Upper right','Upper left','Lower left','Lower right','Right','Center left','Center right','Lower center','Upper center','Center'] , style=wx.CB_READONLY) self.cbLegend.SetSelection(1) lbLgdFont = wx.StaticText( self, -1, 'Legend font:') self.cbLgdFont = wx.ComboBox(self, choices=['6','7','8','9','10','11','12','13','14','15','16','17','18'] , style=wx.CB_READONLY) self.cbLgdFont.SetSelection(2) lbLW = wx.StaticText( self, -1, 'Line width:') self.cbLW = wx.ComboBox(self, choices=['0.5','1.0','1.5','2.0','2.5','3.0'] , style=wx.CB_READONLY) self.cbLW.SetSelection(2) lbMS = wx.StaticText( self, -1, 'Marker size:') self.cbMS= wx.ComboBox(self, choices=['0.5','1','2','3','4','5','6','7','8'] , style=wx.CB_READONLY) self.cbMS.SetSelection(2) dummy_sizer = wx.WrapSizer(orient=wx.HORIZONTAL) dummy_sizer.Add(lbFont ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(self.cbFont ,0, 
flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(lbLW ,0, flag = wx.CENTER|wx.LEFT,border = 5) dummy_sizer.Add(self.cbLW ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(lbMS ,0, flag = wx.CENTER|wx.LEFT,border = 5) dummy_sizer.Add(self.cbMS ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(lbLegend ,0, flag = wx.CENTER|wx.LEFT,border = 5) dummy_sizer.Add(self.cbLegend ,0, flag = wx.CENTER|wx.LEFT,border = 1) dummy_sizer.Add(lbLgdFont ,0, flag = wx.CENTER|wx.LEFT,border = 5) dummy_sizer.Add(self.cbLgdFont ,0, flag = wx.CENTER|wx.LEFT,border = 1) self.SetSizer(dummy_sizer) self.Hide() self.Bind(wx.EVT_COMBOBOX ,self.onAnyEsthOptionChange) self.cbFont.Bind(wx.EVT_COMBOBOX ,self.onFontOptionChange) def onAnyEsthOptionChange(self,event=None): self.parent.redraw_same_data() def onFontOptionChange(self,event=None): matplotlib_rc('font', **{'size':int(self.cbFont.Value) }) self.onAnyEsthOptionChange() class PlotPanel(wx.Panel): def __init__(self, parent, selPanel,infoPanel=None, mainframe=None): super(PlotPanel,self).__init__(parent) font = parent.GetFont() font.SetPointSize(font.GetPointSize()-1) self.SetFont(font) self.specialFont=None try: pyplot_path = matplotlib.get_data_path() except: pyplot_path = pyplot_rc['datapath'] CH_F_PATHS = [ os.path.join(pyplot_path, 'fonts/ttf/SimHei.ttf'), os.path.join(os.path.dirname(__file__),'../SimHei.ttf')] for fpath in CH_F_PATHS: if os.path.exists(fpath): fontP = font_manager.FontProperties(fname=fpath) fontP.set_size(font.GetPointSize()) self.specialFont=fontP break self.selPanel = selPanel self.selMode = '' self.infoPanel=infoPanel self.infoPanel.setPlotMatrixCallbacks(self._onPlotMatrixLeftClick, self._onPlotMatrixRightClick) self.parent = parent self.mainframe= mainframe self.plotData = [] self.plotDataOptions=dict() if self.selPanel is not None: bg=self.selPanel.BackgroundColour self.SetBackgroundColour(bg) self.leftMeasure = GUIMeasure(1, 'firebrick') self.rightMeasure = GUIMeasure(2, 'darkgreen') self.xlim_prev = [[0, 1]] self.ylim_prev = [[0, 1]] self.fig = Figure(facecolor="white", figsize=(1, 1)) register_matplotlib_converters() self.canvas = FigureCanvas(self, -1, self.fig) self.canvas.mpl_connect('motion_notify_event', self.onMouseMove) self.canvas.mpl_connect('button_press_event', self.onMouseClick) self.canvas.mpl_connect('button_release_event', self.onMouseRelease) self.canvas.mpl_connect('draw_event', self.onDraw) self.clickLocation = (None, 0, 0) self.navTBTop = MyNavigationToolbar2Wx(self.canvas, ['Home', 'Pan']) self.navTBBottom = MyNavigationToolbar2Wx(self.canvas, ['Subplots', 'Save']) TBAddCheckTool(self.navTBBottom,'', icons.chart.GetBitmap(), self.onEsthToggle) self.esthToggle=False self.navTBBottom.Realize() self.toolbar_sizer = wx.BoxSizer(wx.VERTICAL) self.toolbar_sizer.Add(self.navTBTop) self.toolbar_sizer.Add(self.navTBBottom) self.toolSizer= wx.BoxSizer(wx.VERTICAL) self.pltTypePanel= PlotTypePanel(self); self.spcPanel = SpectralCtrlPanel(self) self.pdfPanel = PDFCtrlPanel(self) self.cmpPanel = CompCtrlPanel(self) self.mmxPanel = MinMaxPanel(self) self.esthPanel = EstheticsPanel(self) self.ctrlPanel= wx.Panel(self) self.cbCurveType = wx.ComboBox(self.ctrlPanel, choices=['Plain','LS','Markers','Mix'] , style=wx.CB_READONLY) self.cbCurveType.SetSelection(1) self.cbSub = wx.CheckBox(self.ctrlPanel, -1, 'Subplot',(10,10)) self.cbLogX = wx.CheckBox(self.ctrlPanel, -1, 'Log-x',(10,10)) self.cbLogY = wx.CheckBox(self.ctrlPanel, -1, 'Log-y',(10,10)) self.cbSync = wx.CheckBox(self.ctrlPanel, -1, 'Sync-x',(10,10)) 
self.cbXHair = wx.CheckBox(self.ctrlPanel, -1, 'CrossHair',(10,10)) self.cbPlotMatrix = wx.CheckBox(self.ctrlPanel, -1, 'Matrix',(10,10)) self.cbAutoScale = wx.CheckBox(self.ctrlPanel, -1, 'AutoScale',(10,10)) self.cbGrid = wx.CheckBox(self.ctrlPanel, -1, 'Grid',(10,10)) self.cbStepPlot = wx.CheckBox(self.ctrlPanel, -1, 'StepPlot',(10,10)) self.cbMeasure = wx.CheckBox(self.ctrlPanel, -1, 'Measure',(10,10)) self.cbSync.SetValue(True) self.cbXHair.SetValue(True) self.cbAutoScale.SetValue(True) self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbSub ) self.Bind(wx.EVT_COMBOBOX, self.redraw_event , self.cbCurveType) self.Bind(wx.EVT_CHECKBOX, self.log_select , self.cbLogX ) self.Bind(wx.EVT_CHECKBOX, self.log_select , self.cbLogY ) self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbSync ) self.Bind(wx.EVT_CHECKBOX, self.crosshair_event , self.cbXHair ) self.Bind(wx.EVT_CHECKBOX, self.plot_matrix_select, self.cbPlotMatrix ) self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbAutoScale ) self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbGrid ) self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbStepPlot ) self.Bind(wx.EVT_CHECKBOX, self.measure_select , self.cbMeasure ) self.Bind(wx.EVT_CHECKBOX, self.measure_select , self.cbMeasure ) cb_sizer = wx.FlexGridSizer(rows=4, cols=3, hgap=0, vgap=0) cb_sizer.Add(self.cbCurveType , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbSub , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbAutoScale , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbLogX , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbLogY , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbStepPlot , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbXHair , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbGrid , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbSync , 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbPlotMatrix, 0, flag=wx.ALL, border=1) cb_sizer.Add(self.cbMeasure , 0, flag=wx.ALL, border=1) self.ctrlPanel.SetSizer(cb_sizer) crossHairPanel= wx.Panel(self) self.lbCrossHairX = wx.StaticText(crossHairPanel, -1, 'x = ... ') self.lbCrossHairY = wx.StaticText(crossHairPanel, -1, 'y = ... 
') self.lbDeltaX = wx.StaticText(crossHairPanel, -1, ' ') self.lbDeltaY = wx.StaticText(crossHairPanel, -1, ' ') self.lbCrossHairX.SetFont(getMonoFont(self)) self.lbCrossHairY.SetFont(getMonoFont(self)) self.lbDeltaX.SetFont(getMonoFont(self)) self.lbDeltaY.SetFont(getMonoFont(self)) cbCH = wx.FlexGridSizer(rows=4, cols=1, hgap=0, vgap=0) cbCH.Add(self.lbCrossHairX , 0, flag=wx.ALL, border=1) cbCH.Add(self.lbCrossHairY , 0, flag=wx.ALL, border=1) cbCH.Add(self.lbDeltaX , 0, flag=wx.ALL, border=1) cbCH.Add(self.lbDeltaY , 0, flag=wx.ALL, border=1) crossHairPanel.SetSizer(cbCH) row_sizer = wx.BoxSizer(wx.HORIZONTAL) sl2 = wx.StaticLine(self, -1, size=wx.Size(1,-1), style=wx.LI_VERTICAL) sl3 = wx.StaticLine(self, -1, size=wx.Size(1,-1), style=wx.LI_VERTICAL) sl4 = wx.StaticLine(self, -1, size=wx.Size(1,-1), style=wx.LI_VERTICAL) row_sizer.Add(self.pltTypePanel , 0 , flag=wx.LEFT|wx.RIGHT|wx.CENTER , border=1) row_sizer.Add(sl2 , 0 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=0) row_sizer.Add(self.toolbar_sizer, 0 , flag=wx.LEFT|wx.RIGHT|wx.CENTER , border=1) row_sizer.Add(sl3 , 0 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=0) row_sizer.Add(self.ctrlPanel , 1 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=0) row_sizer.Add(sl4 , 0 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=0) row_sizer.Add(crossHairPanel , 0 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=1) plotsizer = wx.BoxSizer(wx.VERTICAL) self.slCtrl = wx.StaticLine(self, -1, size=wx.Size(-1,1), style=wx.LI_HORIZONTAL) self.slCtrl.Hide() self.slEsth = wx.StaticLine(self, -1, size=wx.Size(-1,1), style=wx.LI_HORIZONTAL) self.slEsth.Hide() sl1 = wx.StaticLine(self, -1, size=wx.Size(-1,1), style=wx.LI_HORIZONTAL) plotsizer.Add(self.toolSizer,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10) plotsizer.Add(self.canvas ,1,flag = wx.EXPAND,border = 5 ) plotsizer.Add(sl1 ,0,flag = wx.EXPAND,border = 0) plotsizer.Add(self.spcPanel ,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10) plotsizer.Add(self.pdfPanel ,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10) plotsizer.Add(self.cmpPanel ,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10) plotsizer.Add(self.mmxPanel ,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10) plotsizer.Add(self.slEsth ,0,flag = wx.EXPAND,border = 0) plotsizer.Add(self.esthPanel,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10) plotsizer.Add(self.slCtrl ,0,flag = wx.EXPAND,border = 0) plotsizer.Add(row_sizer ,0,flag = wx.EXPAND|wx.NORTH ,border = 2) self.show_hide(self.spcPanel, self.pltTypePanel.cbFFT.GetValue()) self.show_hide(self.cmpPanel, self.pltTypePanel.cbCompare.GetValue()) self.show_hide(self.pdfPanel, self.pltTypePanel.cbPDF.GetValue()) self.show_hide(self.mmxPanel, self.pltTypePanel.cbMinMax.GetValue()) self.SetSizer(plotsizer) self.plotsizer=plotsizer; self.set_subplot_spacing(init=True) def onEsthToggle(self,event): self.esthToggle=not self.esthToggle if self.esthToggle: self.slCtrl.Show() self.esthPanel.Show() else: self.slCtrl.Hide() self.esthPanel.Hide() self.plotsizer.Layout() event.Skip() def set_subplot_spacing(self, init=False): if init: bottom = 0.12 left = 0.12 else: if self.Size[1]<300: bottom=0.20 elif self.Size[1]<350: bottom=0.18 elif self.Size[1]<430: bottom=0.16 elif self.Size[1]<600: bottom=0.13 elif self.Size[1]<800: bottom=0.09 else: bottom=0.07 if self.Size[0]<300: left=0.22 elif self.Size[0]<450: left=0.20 elif self.Size[0]<950: left=0.12 else: left=0.06 if self.cbPlotMatrix.GetValue(): 
self.fig.subplots_adjust(top=0.97,bottom=bottom,left=left,right=0.98-left) else: self.fig.subplots_adjust(top=0.97,bottom=bottom,left=left,right=0.98) def plot_matrix_select(self, event): self.infoPanel.togglePlotMatrix(self.cbPlotMatrix.GetValue()) self.redraw_same_data() def measure_select(self, event): if self.cbMeasure.IsChecked(): self.cbAutoScale.SetValue(False) self.redraw_same_data() def redraw_event(self, event): self.redraw_same_data() def log_select(self, event): if self.pltTypePanel.cbPDF.GetValue(): self.cbLogX.SetValue(False) self.cbLogY.SetValue(False) else: self.redraw_same_data() def crosshair_event(self, event): try: self.multiCursors.vertOn =self.cbXHair.GetValue() self.multiCursors.horizOn=self.cbXHair.GetValue() self.multiCursors._update() except: pass def show_hide(self,panel,bShow): if bShow: panel.Show() self.slEsth.Show() else: self.slEsth.Hide() panel.Hide() @property def sharex(self): return self.cbSync.IsChecked() and (not self.pltTypePanel.cbPDF.GetValue()) def set_subplots(self,nPlots): self.set_subplot_spacing() for ax in self.fig.axes: self.fig.delaxes(ax) sharex=None for i in range(nPlots): if i==0: ax=self.fig.add_subplot(nPlots,1,i+1) if self.sharex: sharex=ax else: ax=self.fig.add_subplot(nPlots,1,i+1,sharex=sharex) def onMouseMove(self, event): if event.inaxes: x, y = event.xdata, event.ydata self.lbCrossHairX.SetLabel('x =' + self.formatLabelValue(x)) self.lbCrossHairY.SetLabel('y =' + self.formatLabelValue(y)) def onMouseClick(self, event): self.clickLocation = (event.inaxes, event.xdata, event.ydata) def onMouseRelease(self, event): if self.cbMeasure.GetValue(): for ax, ax_idx in zip(self.fig.axes, range(len(self.fig.axes))): if event.inaxes == ax: x, y = event.xdata, event.ydata if self.clickLocation != (ax, x, y): self.cbAutoScale.SetValue(False) return if event.button == 1: self.infoPanel.setMeasurements((x, y), None) self.leftMeasure.set(ax_idx, x, y) self.leftMeasure.plot(ax, ax_idx) elif event.button == 3: self.infoPanel.setMeasurements(None, (x, y)) self.rightMeasure.set(ax_idx, x, y) self.rightMeasure.plot(ax, ax_idx) else: return if self.cbAutoScale.IsChecked() is False: self._restore_limits() if self.leftMeasure.axis_idx == self.rightMeasure.axis_idx and self.leftMeasure.axis_idx != -1: self.lbDeltaX.SetLabel('dx=' + self.formatLabelValue(self.rightMeasure.x - self.leftMeasure.x)) self.lbDeltaY.SetLabel('dy=' + self.formatLabelValue(self.rightMeasure.y - self.leftMeasure.y)) else: self.lbDeltaX.SetLabel('') self.lbDeltaY.SetLabel('') return def onDraw(self, event): self._store_limits() def formatLabelValue(self, value): try: if abs(value)<1000 and abs(value)>1e-4: s = '{:10.5f}'.format(value) else: s = '{:10.3e}'.format(value) except TypeError: s = ' ' return s def removeTools(self,event=None,Layout=True): try: self.toolPanel.destroy() except: pass try: self.toolSizer.Clear(delete_windows=True) except: if hasattr(self,'toolPanel'): self.toolSizer.Remove(self.toolPanel) self.toolPanel.Destroy() del self.toolPanel self.toolSizer.Clear() if Layout: self.plotsizer.Layout() def showTool(self,toolName=''): from .GUITools import TOOLS self.Freeze() self.removeTools(Layout=False) if toolName in TOOLS.keys(): self.toolPanel=TOOLS[toolName](self) else: raise Exception('Unknown tool {}'.format(toolName)) self.toolSizer.Add(self.toolPanel, 0, wx.EXPAND|wx.ALL, 5) self.plotsizer.Layout() self.Thaw()
MIT License
pypa/pipenv
pipenv/vendor/shellingham/posix/__init__.py
get_shell
python
def get_shell(pid=None, max_depth=10):
    pid = str(pid or os.getpid())
    mapping = _get_process_mapping()
    for proc_args in _iter_process_args(mapping, pid, max_depth):
        shell = _get_shell(*proc_args)
        if shell:
            return shell
    return None
Get the shell that the supplied pid or os.getpid() is running in.
https://github.com/pypa/pipenv/blob/9378cb515189d11841a4de49a5ac3c01fca509ec/pipenv/vendor/shellingham/posix/__init__.py#L82-L90
import os import re from .._core import SHELL_NAMES, ShellDetectionFailure from . import proc, ps def _get_process_mapping(): for impl in (proc, ps): try: mapping = impl.get_process_mapping() except EnvironmentError: continue return mapping raise ShellDetectionFailure("compatible proc fs or ps utility is required") def _iter_process_args(mapping, pid, max_depth): for _ in range(max_depth): try: proc = mapping[pid] except KeyError: break if proc.args: yield proc.args pid = proc.ppid def _get_login_shell(proc_cmd): login_shell = os.environ.get("SHELL", "") if login_shell: proc_cmd = login_shell else: proc_cmd = proc_cmd[1:] return (os.path.basename(proc_cmd).lower(), proc_cmd) _INTERPRETER_SHELL_NAMES = [ (re.compile(r"^python(\d+(\.\d+)?)?$"), {"xonsh"}), ] def _get_interpreter_shell(proc_name, proc_args): for pattern, shell_names in _INTERPRETER_SHELL_NAMES: if not pattern.match(proc_name): continue for arg in proc_args: name = os.path.basename(arg).lower() if os.path.isfile(arg) and name in shell_names: return (name, arg) return None def _get_shell(cmd, *args): if cmd.startswith("-"): return _get_login_shell(cmd) name = os.path.basename(cmd).lower() if name in SHELL_NAMES: return (name, cmd) shell = _get_interpreter_shell(name, args) if shell: return shell return None
MIT License
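A minimal usage sketch for get_shell; the printed values depend entirely on the environment the process runs in:

shell = get_shell()
if shell is not None:
    name, path = shell  # e.g. ("bash", "/bin/bash") on a typical Linux login shell
    print("running under %s at %s" % (name, path))
else:
    print("shell could not be detected")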
decstionback/aaai_2020_commonsenseqa
hubconfs/gpt_hubconf.py
openAIGPTDoubleHeadsModel
python
def openAIGPTDoubleHeadsModel(*args, **kwargs):
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(*args, **kwargs)
    return model
OpenAIGPTDoubleHeadsModel is the OpenAI GPT Transformer model with the tied
(pre-trained) language modeling head and a multiple choice classification head
(only initialized, not pre-trained).

Example:
    # Load the tokenizer
    >>> import torch
    >>> tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'openAIGPTTokenizer', 'openai-gpt')

    # Prepare tokenized input
    >>> text1 = "Who was Jim Henson ? Jim Henson was a puppeteer"
    >>> text2 = "Who was Jim Henson ? Jim Henson was a mysterious young man"
    >>> tokenized_text1 = tokenizer.tokenize(text1)
    >>> tokenized_text2 = tokenizer.tokenize(text2)
    >>> indexed_tokens1 = tokenizer.convert_tokens_to_ids(tokenized_text1)
    >>> indexed_tokens2 = tokenizer.convert_tokens_to_ids(tokenized_text2)
    >>> tokens_tensor = torch.tensor([[indexed_tokens1, indexed_tokens2]])
    >>> mc_token_ids = torch.LongTensor([[len(tokenized_text1)-1, len(tokenized_text2)-1]])

    # Load openAIGPTDoubleHeadsModel
    >>> model = torch.hub.load('huggingface/pytorch-transformers', 'openAIGPTDoubleHeadsModel', 'openai-gpt')
    >>> model.eval()

    # Predict hidden states features for each layer
    >>> with torch.no_grad():
            lm_logits, multiple_choice_logits = model(tokens_tensor, mc_token_ids)
https://github.com/decstionback/aaai_2020_commonsenseqa/blob/7f7b46329333671253c2def93bc2d08fb4bcc79d/hubconfs/gpt_hubconf.py#L156-L186
from pytorch_transformers.tokenization_openai import OpenAIGPTTokenizer from pytorch_transformers.modeling_openai import ( OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel ) specific_dependencies = ['spacy', 'ftfy'] gpt_docstring = """ OpenAI GPT use a single embedding matrix to store the word and special embeddings. Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]... Special tokens need to be trained during the fine-tuning if you use them. The number of special embeddings can be controled using the `set_num_special_tokens(num_special_tokens)` function. The embeddings are ordered as follow in the token embeddings matrice: [0, ---------------------- ... -> word embeddings config.vocab_size - 1, ______________________ config.vocab_size, ... -> special embeddings config.vocab_size + config.n_special - 1] ______________________ where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is: total_tokens_embeddings = config.vocab_size + config.n_special You should use the associate indices to index the embeddings. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `openai-gpt` - a path or url to a pretrained model archive containing: . `openai_gpt_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance - a path or url to a pretrained model archive containing: . `openai-gpt-config.json` a configuration file for the model . a series of NumPy files containing OpenAI TensorFlow trained weights from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of pre-trained models *inputs, **kwargs: additional input for the specific OpenAI-GPT class """ def _append_from_pretrained_docstring(docstr): def docstring_decorator(fn): fn.__doc__ = fn.__doc__ + docstr return fn return docstring_decorator def openAIGPTTokenizer(*args, **kwargs): tokenizer = OpenAIGPTTokenizer.from_pretrained(*args, **kwargs) return tokenizer @_append_from_pretrained_docstring(gpt_docstring) def openAIGPTModel(*args, **kwargs): model = OpenAIGPTModel.from_pretrained(*args, **kwargs) return model @_append_from_pretrained_docstring(gpt_docstring) def openAIGPTLMHeadModel(*args, **kwargs): model = OpenAIGPTLMHeadModel.from_pretrained(*args, **kwargs) return model @_append_from_pretrained_docstring(gpt_docstring)
Apache License 2.0
pypa/pipenv
pipenv/patched/notpip/_internal/operations/prepare.py
_get_prepared_distribution
python
def _get_prepared_distribution(
    req,
    req_tracker,
    finder,
    build_isolation,
):
    abstract_dist = make_distribution_for_install_requirement(req)
    with req_tracker.track(req):
        abstract_dist.prepare_distribution_metadata(finder, build_isolation)
    return abstract_dist
Prepare a distribution for installation.
https://github.com/pypa/pipenv/blob/9378cb515189d11841a4de49a5ac3c01fca509ec/pipenv/patched/notpip/_internal/operations/prepare.py#L84-L96
import logging import mimetypes import os import shutil import sys from pipenv.patched.notpip._vendor import requests from pipenv.patched.notpip._vendor.six import PY2 from pipenv.patched.notpip._internal.distributions import ( make_distribution_for_install_requirement, ) from pipenv.patched.notpip._internal.distributions.installed import InstalledDistribution from pipenv.patched.notpip._internal.exceptions import ( DirectoryUrlHashUnsupported, HashMismatch, HashUnpinned, InstallationError, PreviousBuildDirError, VcsHashUnsupported, ) from pipenv.patched.notpip._internal.utils.filesystem import copy2_fixed from pipenv.patched.notpip._internal.utils.hashes import MissingHashes from pipenv.patched.notpip._internal.utils.logging import indent_log from pipenv.patched.notpip._internal.utils.marker_files import write_delete_marker_file from pipenv.patched.notpip._internal.utils.misc import ( ask_path_exists, backup_dir, display_path, hide_url, path_to_display, rmtree, ) from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING from pipenv.patched.notpip._internal.utils.unpacking import unpack_file from pipenv.patched.notpip._internal.vcs import vcs if MYPY_CHECK_RUNNING: from typing import ( Callable, List, Optional, Tuple, ) from mypy_extensions import TypedDict from pipenv.patched.notpip._internal.distributions import AbstractDistribution from pipenv.patched.notpip._internal.index.package_finder import PackageFinder from pipenv.patched.notpip._internal.models.link import Link from pipenv.patched.notpip._internal.network.download import Downloader from pipenv.patched.notpip._internal.req.req_install import InstallRequirement from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker from pipenv.patched.notpip._internal.utils.hashes import Hashes if PY2: CopytreeKwargs = TypedDict( 'CopytreeKwargs', { 'ignore': Callable[[str, List[str]], List[str]], 'symlinks': bool, }, total=False, ) else: CopytreeKwargs = TypedDict( 'CopytreeKwargs', { 'copy_function': Callable[[str, str], None], 'ignore': Callable[[str, List[str]], List[str]], 'ignore_dangling_symlinks': bool, 'symlinks': bool, }, total=False, ) logger = logging.getLogger(__name__)
MIT License
roberttlange/mle-logging
mle_logging/save/model_log.py
ModelLog.save_top_k_model
python
def save_top_k_model(self, model, clock_to_track, stats_to_track):
    score = stats_to_track[self.top_k_metric_name].to_numpy()[-1]
    time = clock_to_track[self.ckpt_time_to_track].to_numpy()[-1]
    if len(self.top_k_performance) < self.save_top_k_ckpt:
        ckpt_path = (
            self.top_k_model_save_fname
            + str(len(self.top_k_performance))
            + self.model_fname_ext
        )
        save_model_ckpt(model, ckpt_path, self.model_type)
        self.top_k_performance.append(score)
        self.top_k_storage_time.append(time)
        self.top_k_ckpt_list.append(ckpt_path)
        self.stored_top_k = True
        return
    if not self.top_k_minimize_metric:
        top_k_scores = [-1 * s for s in self.top_k_performance]
        score_to_eval = -1 * score
    else:
        top_k_scores = [s for s in self.top_k_performance]
        score_to_eval = score
    if max(top_k_scores) > score_to_eval:
        id_to_replace = np.argmax(top_k_scores)
        self.top_k_performance[id_to_replace] = score
        self.top_k_storage_time[id_to_replace] = time
        ckpt_path = (
            self.top_k_model_save_fname
            + str(id_to_replace)
            + self.model_fname_ext
        )
        save_model_ckpt(model, ckpt_path, self.model_type)
        self.stored_top_k = True
Store top-k checkpoints by performance.
https://github.com/roberttlange/mle-logging/blob/9089bf2d3e084489a22fbc2dd9ab76201c721c28/mle_logging/save/model_log.py#L137-L173
import os import numpy as np from typing import Union, List from ..utils import save_pkl_object from ..load import load_log class ModelLog(object): def __init__( self, experiment_dir: str = "/", seed_id: str = "no_seed_provided", model_type: str = "no-model-type-provided", ckpt_time_to_track: Union[str, None] = None, save_every_k_ckpt: Union[int, None] = None, save_top_k_ckpt: Union[int, None] = None, top_k_metric_name: Union[str, None] = None, top_k_minimize_metric: Union[bool, None] = None, reload: bool = False, ): self.experiment_dir = experiment_dir assert model_type in [ "torch", "tensorflow", "jax", "sklearn", "numpy", "no-model-type", ] self.model_type = model_type self.save_every_k_ckpt = save_every_k_ckpt self.save_top_k_ckpt = save_top_k_ckpt self.ckpt_time_to_track = ckpt_time_to_track self.top_k_metric_name = top_k_metric_name self.top_k_minimize_metric = top_k_minimize_metric self.ckpt_dir = os.path.join(self.experiment_dir, "models/") self.final_model_save_fname = os.path.join( self.ckpt_dir, "final", "final_" + seed_id ) self.init_model_save_fname = os.path.join( self.ckpt_dir, "init", "init_" + seed_id ) self.init_model_saved = False if self.save_every_k_ckpt is not None: self.every_k_ckpt_list: List[str] = [] self.every_k_dir = os.path.join(self.experiment_dir, "models/every_k/") self.every_k_model_save_fname = os.path.join( self.every_k_dir, "every_k_" + seed_id + "_k_" ) if self.save_top_k_ckpt is not None: self.top_k_ckpt_list: List[str] = [] self.top_k_dir = os.path.join(self.experiment_dir, "models/top_k/") self.top_k_model_save_fname = os.path.join( self.top_k_dir, "top_k_" + seed_id + "_top_" ) if self.model_type in ["torch", "tensorflow", "jax", "sklearn", "numpy"]: if self.model_type in ["torch", "tensorflow"]: self.model_fname_ext = ".pt" elif self.model_type in ["jax", "sklearn", "numpy"]: self.model_fname_ext = ".pkl" self.final_model_save_fname += self.model_fname_ext self.init_model_save_fname += self.model_fname_ext if reload: self.reload() else: self.model_save_counter = 0 if self.save_every_k_ckpt is not None: self.every_k_storage_time: List[int] = [] if self.save_top_k_ckpt is not None: self.top_k_performance: List[float] = [] self.top_k_storage_time: List[int] = [] def setup_model_ckpt_dir(self): os.makedirs(self.ckpt_dir, exist_ok=True) if self.save_every_k_ckpt is not None: os.makedirs(self.every_k_dir, exist_ok=True) if self.save_top_k_ckpt is not None: os.makedirs(self.top_k_dir, exist_ok=True) def save(self, model, clock_to_track, stats_to_track): self.model_save_counter += 1 if self.model_save_counter == 1: os.makedirs(os.path.join(self.ckpt_dir, "final"), exist_ok=True) self.stored_every_k = False self.stored_top_k = False if self.model_save_counter == 1: self.setup_model_ckpt_dir() self.save_final_model(model) if self.save_every_k_ckpt is not None: self.save_every_k_model(model, clock_to_track) if self.save_top_k_ckpt is not None: self.save_top_k_model(model, clock_to_track, stats_to_track) def save_init_model(self, model): os.makedirs(os.path.join(self.ckpt_dir, "init"), exist_ok=True) save_model_ckpt(model, self.init_model_save_fname, self.model_type) self.init_model_saved = True def save_final_model(self, model): save_model_ckpt(model, self.final_model_save_fname, self.model_type) def save_every_k_model(self, model, clock_to_track): if self.model_save_counter % self.save_every_k_ckpt == 0: ckpt_path = ( self.every_k_model_save_fname + str(self.model_save_counter) + self.model_fname_ext ) save_model_ckpt(model, ckpt_path, self.model_type) time 
= clock_to_track[self.ckpt_time_to_track].to_numpy()[-1] self.every_k_storage_time.append(time) self.every_k_ckpt_list.append(ckpt_path) self.stored_every_k = True
MIT License
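A hedged sketch of how save_top_k_model gets exercised through ModelLog.save; the experiment directory, metric name, DataFrame columns, and model object are illustrative assumptions:

import pandas as pd

model_log = ModelLog(
    experiment_dir="experiments/run_0/",
    seed_id="seed_1",
    model_type="numpy",
    ckpt_time_to_track="num_updates",
    save_top_k_ckpt=3,
    top_k_metric_name="test_loss",
    top_k_minimize_metric=True,
)
clock = pd.DataFrame({"num_updates": [100]})
stats = pd.DataFrame({"test_loss": [0.42]})
# save() dispatches to save_top_k_model() because save_top_k_ckpt was set.
model_log.save({"weights": [1.0, 2.0]}, clock_to_track=clock, stats_to_track=stats)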
iffix/machin
machin/frame/transition.py
TransitionBase.__init__
python
def __init__(
    self,
    major_attr: Iterable[str],
    sub_attr: Iterable[str],
    custom_attr: Iterable[str],
    major_data: Iterable[Dict[str, t.Tensor]],
    sub_data: Iterable[Union[Scalar, t.Tensor]],
    custom_data: Iterable[Any],
):
    self._major_attr = list(major_attr)
    self._sub_attr = list(sub_attr)
    self._custom_attr = list(custom_attr)
    self._keys = self._major_attr + self._sub_attr + self._custom_attr
    self._length = len(self._keys)
    self._batch_size = None

    for attr, data in zip(
        chain(major_attr, sub_attr, custom_attr),
        chain(major_data, sub_data, custom_data),
    ):
        object.__setattr__(self, attr, data)

    self._inited = True
    self._detach()
Note:
    Major attributes store things like state, action, next_states, etc.
    They are usually **concatenated by their dictionary keys** during
    sampling, and passed as keyword arguments to actors, critics, etc.

    Sub attributes store things like terminal states, reward, etc.
    They are usually **concatenated directly** during sampling, and used
    in different algorithms.

    Custom attributes store not concatenatable values, usually user
    specified states, used in models or as special arguments in different
    algorithms. They will be collected together as a list during sampling,
    **no further concatenation is performed**.

Args:
    major_attr: A list of major attribute names.
    sub_attr: A list of sub attribute names.
    custom_attr: A list of custom attribute names.
    major_data: Data of major attributes.
    sub_data: Data of sub attributes.
    custom_data: Data of custom attributes.
https://github.com/iffix/machin/blob/7fa986b1bafdefff117d6ff73d14644a5488de9d/machin/frame/transition.py#L16-L62
from typing import Union, Dict, Iterable, Any, NewType
from itertools import chain
import torch as t
import numpy as np

Scalar = NewType("Scalar", Union[int, float, bool])


class TransitionBase:
    _inited = False
MIT License
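A hedged construction sketch for TransitionBase; the attribute names, tensor shapes, and values are illustrative assumptions:

import torch as t

transition = TransitionBase(
    major_attr=["state", "next_state"],
    sub_attr=["reward", "terminal"],
    custom_attr=["info"],
    major_data=[{"obs": t.zeros(1, 4)}, {"obs": t.ones(1, 4)}],
    sub_data=[1.0, False],
    custom_data=[{"episode_step": 0}],
)
# Each attribute becomes accessible on the instance, e.g. transition.reward == 1.0.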
fancompute/neuroptica
neuroptica/nonlinearities.py
ComplexNonlinearity.__init__
python
def __init__(self, N, holomorphic=False, mode="condensed"):
    super().__init__(N)
    self.holomorphic = holomorphic
    self.mode = mode
Initialize the nonlinearity
:param N: dimensionality of the nonlinear function
:param holomorphic: whether the function is holomorphic
:param mode: for nonholomorphic functions, can be "full", "condensed", or "polar". Full requires that you
specify 4 derivatives for d{Re,Im}/d{Re,Im}, condensed requires only df/d{Re,Im}, and polar takes Z=re^iphi
https://github.com/fancompute/neuroptica/blob/7bc3c152f2713780b88e701744b0541175b12111/neuroptica/nonlinearities.py#L45-L55
import numpy as np

from neuroptica.settings import NP_COMPLEX


class Nonlinearity:

    def __init__(self, N):
        self.N = N

    def forward_pass(self, X: np.ndarray) -> np.ndarray:
        raise NotImplementedError('forward_pass() must be overridden in child class!')

    def backward_pass(self, gamma: np.ndarray, Z: np.ndarray) -> np.ndarray:
        raise NotImplementedError('backward_pass() must be overridden in child class!')

    def __repr__(self):
        return type(self).__name__ + '(N={})'.format(self.N)


class ComplexNonlinearity(Nonlinearity):
MIT License
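A toy subclass sketch (not part of the library) showing how the constructor flags would typically be used when defining a holomorphic nonlinearity; the class name and function are illustrative:

import numpy as np

class SquareNonlinearity(ComplexNonlinearity):
    # Toy example: f(Z) = Z ** 2, which is holomorphic, so no separate Re/Im derivatives are needed.
    def __init__(self, N):
        super().__init__(N, holomorphic=True)

    def forward_pass(self, X: np.ndarray) -> np.ndarray:
        return X ** 2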
directgroup/direct
direct/data/mri_transforms.py
CropAndMask.__init__
python
def __init__(
    self,
    crop,
    use_seed=True,
    forward_operator=T.fft2,
    backward_operator=T.ifft2,
    image_space_center_crop=False,
    random_crop_sampler_type="uniform",
):
    super().__init__()
    self.logger = logging.getLogger(type(self).__name__)

    self.use_seed = use_seed
    self.image_space_center_crop = image_space_center_crop

    self.crop = crop
    self.crop_func = None
    self.random_crop_sampler_type = random_crop_sampler_type
    if self.crop:
        if self.image_space_center_crop:
            self.crop_func = T.complex_center_crop
        else:
            self.crop_func = functools.partial(T.complex_random_crop, sampler=self.random_crop_sampler_type)

    self.forward_operator = forward_operator
    self.backward_operator = backward_operator
    self.image_space_center_crop = image_space_center_crop
Parameters
----------
crop : tuple or None
    Size to crop input_image to.
mask_func : direct.common.subsample.MaskFunc
    A function which creates a mask of the appropriate shape.
use_seed : bool
    If true, a pseudo-random number based on the filename is computed so that every slice of the volume get
    the same mask every time.
forward_operator : callable
    The __call__ operator, e.g. some form of FFT (centered or uncentered).
backward_operator : callable
    The backward operator, e.g. some form of inverse FFT (centered or uncentered).
image_space_center_crop : bool
    If set, the crop in the data will be taken in the center
random_crop_sampler_type : str
    If "uniform" the random cropping will be done by uniformly sampling `crop`, as opposed to `gaussian` which
    will sample from a gaussian distribution.
https://github.com/directgroup/direct/blob/961989bfac0177988de04e8a3ff563db850575e2/direct/data/mri_transforms.py#L111-L158
import functools import logging import warnings from typing import Any, Callable, Dict, Iterable, Optional import numpy as np import torch import torch.nn as nn from direct.data import transforms as T from direct.utils import DirectModule, DirectTransform from direct.utils.asserts import assert_complex logger = logging.getLogger(__name__) class Compose(DirectModule): def __init__(self, transforms: Iterable) -> None: super().__init__() self.transforms = transforms def __call__(self, sample): for transform in self.transforms: sample = transform(sample) return sample def __repr__(self): repr_string = self.__class__.__name__ + "(" for transform in self.transforms: repr_string += "\n" repr_string += f" {transform}" repr_string += "\n)" return repr_string class RandomFlip(DirectTransform): def __call__(self): raise NotImplementedError class CreateSamplingMask(DirectModule): def __init__(self, mask_func, shape=None, use_seed=True, return_acs=False): super().__init__() self.mask_func = mask_func self.shape = shape self.use_seed = use_seed self.return_acs = return_acs def __call__(self, sample): if not self.shape: shape = sample["kspace"].shape[1:] elif any(_ is None for _ in self.shape): kspace_shape = list(sample["kspace"].shape[1:-1]) shape = tuple(_ if _ else kspace_shape[idx] for idx, _ in enumerate(self.shape)) + (2,) else: shape = self.shape + (2,) seed = None if not self.use_seed else tuple(map(ord, str(sample["filename"]))) sampling_mask = self.mask_func(shape, seed, return_acs=False) if sample.get("padding_left", 0) > 0 or sample.get("padding_right", 0) > 0: if sample["kspace"].shape[2] != shape[-2]: raise ValueError( "Currently only support for the `width` axis to be at the 2th position when padding. " + "When padding in left or right is present, you cannot crop in the phase-encoding direction!" ) padding_left = sample["padding_left"] padding_right = sample["padding_right"] sampling_mask[:, :, :padding_left, :] = 0 sampling_mask[:, :, padding_right:, :] = 0 sample["sampling_mask"] = sampling_mask if self.return_acs: kspace_shape = sample["kspace"].shape[1:] sample["acs_mask"] = self.mask_func(kspace_shape, seed, return_acs=True) return sample class CropAndMask(DirectModule):
Apache License 2.0
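A minimal instantiation sketch; the crop size is an illustrative assumption and the remaining arguments simply restate the documented defaults:

from direct.data import transforms as T

crop_and_mask = CropAndMask(
    crop=(128, 128),
    use_seed=True,
    forward_operator=T.fft2,
    backward_operator=T.ifft2,
    image_space_center_crop=False,
    random_crop_sampler_type="uniform",
)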
thibaultgroueix/atlasnet
training/trainer.py
Trainer.__init__
python
def __init__(self, opt):
    super(Trainer, self).__init__(opt)
    self.dataset_train = None
    self.opt.training_media_path = os.path.join(self.opt.dir_name, "training_media")
    if not opt.demo and not os.path.exists(self.opt.training_media_path):
        os.mkdir(self.opt.training_media_path)

    self.flags = EasyDict()
    self.flags.media_count = 0
    self.flags.add_log = True
    self.flags.build_website = False
    self.flags.get_closer_neighbourg = False
    self.flags.compute_clustering_errors = False
    self.display = EasyDict({"recons": []})
    self.colormap = mesh_processor.ColorMap()
Main Atlasnet class inheriting from the other main modules.
It implements all functions related to train and evaluate for an epoch.
Author : Thibault Groueix 01.11.2019
:param opt:
https://github.com/thibaultgroueix/atlasnet/blob/0d09516c877de5c25a53a6852aa0a74da5bc7cc0/training/trainer.py#L17-L39
import torch
import os
import auxiliary.html_report as html_report
import numpy as np
from easydict import EasyDict
import pymesh
from training.trainer_abstract import TrainerAbstract
import dataset.mesh_processor as mesh_processor
from training.trainer_iteration import TrainerIteration
from model.trainer_model import TrainerModel
from dataset.trainer_dataset import TrainerDataset
from training.trainer_loss import TrainerLoss


class Trainer(TrainerAbstract, TrainerLoss, TrainerIteration, TrainerDataset, TrainerModel):
MIT License
erigones/esdc-ce
core/management/commands/_base.py
DanubeCloudCommand.local_username
python
def local_username(self):
    if self._local_username is None:
        self._local_username = getpass.getuser()

    return self._local_username
Used by the command_prompt property
https://github.com/erigones/esdc-ce/blob/f83a62d0d430e3c8f9aac23d958583b0efce4312/core/management/commands/_base.py#L145-L149
from __future__ import absolute_import from __future__ import print_function import os import getpass from optparse import Option from subprocess import Popen, PIPE, STDOUT from contextlib import contextmanager from django.core.management.base import BaseCommand, CommandError from django.core.management import call_command from django.utils.six.moves import input from django.conf import settings from ._color import no_color, shell_color @contextmanager def lcd(dirpath): prev_cwd = os.getcwd() dirpath = dirpath.replace(' ', '\ ') if not dirpath.startswith('/') and not dirpath.startswith('~'): new_cwd = os.path.join(os.path.abspath(os.getcwd()), dirpath) else: new_cwd = dirpath os.chdir(new_cwd) try: yield finally: os.chdir(prev_cwd) CommandOption = Option class DanubeCloudCommand(BaseCommand): settings = settings DEFAULT_BRANCH = 'master' PROJECT_DIR = settings.PROJECT_DIR PROJECT_NAME = 'esdc-ce' CTLSH = os.path.join(PROJECT_DIR, 'bin', 'ctl.sh') cmd_sha = 'git log --pretty=oneline -1 | cut -d " " -f 1' cmd_tag = 'git symbolic-ref -q --short HEAD || git describe --tags --exact-match' default_verbosity = 1 verbose = False strip_newline = False colors = shell_color options = () option_list = BaseCommand.option_list + ( CommandOption('--no-newline', action='store_true', dest='no_newline', default=False, help='Strip newlines from output'), ) _local_username = None def __init__(self, **kwargs): if self.options: self.option_list = self.__class__.option_list + self.options super(DanubeCloudCommand, self).__init__(**kwargs) def get_version(self): from core.version import __version__ return 'Danube Cloud %s' % __version__ def get_git_version(self): with lcd(self.PROJECT_DIR): _tag = self.local(self.cmd_tag, capture=True).strip().split('/')[-1] _sha = self.local(self.cmd_sha, capture=True).strip() return _tag, _sha def execute(self, *args, **options): self.verbose = int(options.get('verbosity', self.default_verbosity)) >= self.default_verbosity self.strip_newline = options.pop('no_newline', False) if options.get('no_color'): options['no_color'] = True self.colors = no_color return super(DanubeCloudCommand, self).execute(*args, **options) @staticmethod def confirm(question, default='yes'): valid = {"yes": True, "y": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError("invalid default answer: '%s'" % default) while True: print(question + prompt, end='') choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: print("Please respond with 'yes' or 'no' (or 'y' or 'n').") @staticmethod def _path(*args): return os.path.join(*args) @staticmethod def _path_exists(basepath, *args): return os.path.exists(os.path.join(basepath, *args)) @property
Apache License 2.0
midnighter/structurizr-python
src/structurizr/view/dynamic_view.py
DynamicView.relationship_views
python
def relationship_views(self) -> Iterable[RelationshipView]:
    return sorted(self._relationship_views, key=attrgetter("order"))
Return the relationship views in order of their sequence number.

Sorting uses "version number" style ordering, so 1 < 1.1 < 2 < 10.
https://github.com/midnighter/structurizr-python/blob/9d482a5ad5a4a867b0b6e798ced137c5f1e1ac25/src/structurizr/view/dynamic_view.py#L199-L204
from contextlib import contextmanager from operator import attrgetter from typing import Iterable, Optional, Tuple, Union from pydantic import Field from ..mixin.model_ref_mixin import ModelRefMixin from ..model import Component, Container, Element, Person, Relationship, SoftwareSystem from ..model.static_structure_element import StaticStructureElement from .relationship_view import RelationshipView from .sequence_number import SequenceNumber from .view import View, ViewIO __all__ = ("DynamicView", "DynamicViewIO") class DynamicViewIO(ViewIO): element_id: Optional[str] = Field(default=None, alias="elementId") class DynamicView(ModelRefMixin, View): def __init__( self, *, element: Optional[Union[Container, SoftwareSystem]] = None, **kwargs, ) -> None: if "software_system" in kwargs: raise ValueError( "Software system must be specified through the 'element' argument for " "DynamicViews" ) super().__init__(**kwargs) self.element = element self.element_id = self.element.id if self.element else None self.sequence_number = SequenceNumber() def add( self, source: Element, destination: Element, description: Optional[str] = None, *, technology: Optional[str] = None, ) -> RelationshipView: self.check_element_can_be_added(source) self.check_element_can_be_added(destination) relationship, response = self._find_relationship( source, description, destination, technology ) if relationship is None: if technology: raise ValueError( f"A relationship between {source.name} and " f"{destination.name} with technology " f"'{technology}' does not exist in the model." ) else: raise ValueError( f"A relationship between {source.name} and " f"{destination.name} does not exist in " "the model." ) self._add_element(source, False) self._add_element(destination, False) return self._add_relationship( relationship, description=description or relationship.description, order=self.sequence_number.get_next(), response=response, ) @contextmanager def subsequence(self): try: self.sequence_number.start_subsequence() yield self finally: self.sequence_number.end_subsequence() @contextmanager def parallel_sequence(self, *, continue_numbering: bool = False): try: self.sequence_number.start_parallel_sequence() yield self finally: self.sequence_number.end_parallel_sequence(continue_numbering) @property
Apache License 2.0
devopshq/teamcity
dohq_teamcity/api/vcs_root_instance_api.py
VcsRootInstanceApi.__get_metadata_with_http_info
python
def __get_metadata_with_http_info(self, path, vcs_root_instance_locator, **kwargs):
    all_params = ['path', 'vcs_root_instance_locator', 'fields']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_metadata" % key
            )
        params[key] = val
    del params['kwargs']
    if ('path' not in params or params['path'] is None):
        raise ValueError("Missing the required parameter `path` when calling `get_metadata`")
    if ('vcs_root_instance_locator' not in params or params['vcs_root_instance_locator'] is None):
        raise ValueError("Missing the required parameter `vcs_root_instance_locator` when calling `get_metadata`")

    if 'path' in params and not re.search('(\/.*)?', params['path']):
        raise ValueError("Invalid value for parameter `path` when calling `get_metadata`, must conform to the pattern `/(\/.*)?/`")

    collection_formats = {}

    path_params = {}
    if 'path' in params:
        if isinstance(params['path'], TeamCityObject):
            path_params['path'] = params['path'].locator_id
        else:
            path_params['path'] = params['path']
    if 'vcs_root_instance_locator' in params:
        if isinstance(params['vcs_root_instance_locator'], TeamCityObject):
            path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'].locator_id
        else:
            path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator']

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    auth_settings = []

    return self.api_client.call_api(
        '/app/rest/vcs-root-instances/{vcsRootInstanceLocator}/files/latest/metadata{path}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='File',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
get_metadata  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_metadata_with_http_info(path, vcs_root_instance_locator, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str path: (required)
:param str vcs_root_instance_locator: (required)
:param str fields:
:return: File
         If the method is called asynchronously,
         returns the request thread.
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/api/vcs_root_instance_api.py#L978-L1062
from __future__ import absolute_import from dohq_teamcity.custom.base_model import TeamCityObject import re import six from dohq_teamcity.models.entries import Entries from dohq_teamcity.models.file import File from dohq_teamcity.models.files import Files from dohq_teamcity.models.properties import Properties from dohq_teamcity.models.vcs_root_instance import VcsRootInstance from dohq_teamcity.models.vcs_root_instances import VcsRootInstances class VcsRootInstanceApi(object): base_name = 'VcsRootInstance' def __init__(self, api_client=None): self.api_client = api_client def delete_instance_field(self, vcs_root_instance_locator, field, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_instance_field_with_http_info(vcs_root_instance_locator, field, **kwargs) else: (data) = self.__delete_instance_field_with_http_info(vcs_root_instance_locator, field, **kwargs) return data def delete_repository_state(self, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_repository_state_with_http_info(vcs_root_instance_locator, **kwargs) else: (data) = self.__delete_repository_state_with_http_info(vcs_root_instance_locator, **kwargs) return data def get_children(self, path, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_children_with_http_info(path, vcs_root_instance_locator, **kwargs) else: (data) = self.__get_children_with_http_info(path, vcs_root_instance_locator, **kwargs) return data def get_children_alias(self, path, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_children_alias_with_http_info(path, vcs_root_instance_locator, **kwargs) else: (data) = self.__get_children_alias_with_http_info(path, vcs_root_instance_locator, **kwargs) return data def get_content(self, path, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_content_with_http_info(path, vcs_root_instance_locator, **kwargs) else: (data) = self.__get_content_with_http_info(path, vcs_root_instance_locator, **kwargs) return data def get_content_alias(self, path, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_content_alias_with_http_info(path, vcs_root_instance_locator, **kwargs) else: (data) = self.__get_content_alias_with_http_info(path, vcs_root_instance_locator, **kwargs) return data def get_metadata(self, path, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_metadata_with_http_info(path, vcs_root_instance_locator, **kwargs) else: (data) = self.__get_metadata_with_http_info(path, vcs_root_instance_locator, **kwargs) return data def get_repository_state(self, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_repository_state_with_http_info(vcs_root_instance_locator, **kwargs) else: (data) = self.__get_repository_state_with_http_info(vcs_root_instance_locator, **kwargs) return data def get_repository_state_creation_date(self, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_repository_state_creation_date_with_http_info(vcs_root_instance_locator, **kwargs) else: (data) = 
self.__get_repository_state_creation_date_with_http_info(vcs_root_instance_locator, **kwargs) return data def get_root(self, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_root_with_http_info(vcs_root_instance_locator, **kwargs) else: (data) = self.__get_root_with_http_info(vcs_root_instance_locator, **kwargs) return data def get_zipped(self, path, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_zipped_with_http_info(path, vcs_root_instance_locator, **kwargs) else: (data) = self.__get_zipped_with_http_info(path, vcs_root_instance_locator, **kwargs) return data def schedule_checking_for_changes(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__schedule_checking_for_changes_with_http_info(**kwargs) else: (data) = self.__schedule_checking_for_changes_with_http_info(**kwargs) return data def schedule_checking_for_changes_0(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__schedule_checking_for_changes_0_with_http_info(**kwargs) else: (data) = self.__schedule_checking_for_changes_0_with_http_info(**kwargs) return data def serve_instance(self, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__serve_instance_with_http_info(vcs_root_instance_locator, **kwargs) else: (data) = self.__serve_instance_with_http_info(vcs_root_instance_locator, **kwargs) return data def serve_instance_field(self, vcs_root_instance_locator, field, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__serve_instance_field_with_http_info(vcs_root_instance_locator, field, **kwargs) else: (data) = self.__serve_instance_field_with_http_info(vcs_root_instance_locator, field, **kwargs) return data def serve_instances(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__serve_instances_with_http_info(**kwargs) else: (data) = self.__serve_instances_with_http_info(**kwargs) return data def serve_root_instance_properties(self, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__serve_root_instance_properties_with_http_info(vcs_root_instance_locator, **kwargs) else: (data) = self.__serve_root_instance_properties_with_http_info(vcs_root_instance_locator, **kwargs) return data def set_instance_field(self, vcs_root_instance_locator, field, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__set_instance_field_with_http_info(vcs_root_instance_locator, field, **kwargs) else: (data) = self.__set_instance_field_with_http_info(vcs_root_instance_locator, field, **kwargs) return data def set_repository_state(self, vcs_root_instance_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__set_repository_state_with_http_info(vcs_root_instance_locator, **kwargs) else: (data) = self.__set_repository_state_with_http_info(vcs_root_instance_locator, **kwargs) return data def __delete_instance_field_with_http_info(self, vcs_root_instance_locator, field, **kwargs): all_params = ['vcs_root_instance_locator', 'field'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in 
six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_instance_field" % key ) params[key] = val del params['kwargs'] if ('vcs_root_instance_locator' not in params or params['vcs_root_instance_locator'] is None): raise ValueError("Missing the required parameter `vcs_root_instance_locator` when calling `delete_instance_field`") if ('field' not in params or params['field'] is None): raise ValueError("Missing the required parameter `field` when calling `delete_instance_field`") collection_formats = {} path_params = {} if 'vcs_root_instance_locator' in params: if isinstance(params['vcs_root_instance_locator'], TeamCityObject): path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'].locator_id else: path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'] if 'field' in params: if isinstance(params['field'], TeamCityObject): path_params['field'] = params['field'].locator_id else: path_params['field'] = params['field'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api( '/app/rest/vcs-root-instances/{vcsRootInstanceLocator}/{field}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def __delete_repository_state_with_http_info(self, vcs_root_instance_locator, **kwargs): all_params = ['vcs_root_instance_locator'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_repository_state" % key ) params[key] = val del params['kwargs'] if ('vcs_root_instance_locator' not in params or params['vcs_root_instance_locator'] is None): raise ValueError("Missing the required parameter `vcs_root_instance_locator` when calling `delete_repository_state`") collection_formats = {} path_params = {} if 'vcs_root_instance_locator' in params: if isinstance(params['vcs_root_instance_locator'], TeamCityObject): path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'].locator_id else: path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api( '/app/rest/vcs-root-instances/{vcsRootInstanceLocator}/repositoryState', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def __get_children_with_http_info(self, path, vcs_root_instance_locator, **kwargs): all_params = ['path', 'vcs_root_instance_locator', 'base_path', 'locator', 'fields'] all_params.append('async_req') 
all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_children" % key ) params[key] = val del params['kwargs'] if ('path' not in params or params['path'] is None): raise ValueError("Missing the required parameter `path` when calling `get_children`") if ('vcs_root_instance_locator' not in params or params['vcs_root_instance_locator'] is None): raise ValueError("Missing the required parameter `vcs_root_instance_locator` when calling `get_children`") if 'path' in params and not re.search('(\/.*)?', params['path']): raise ValueError("Invalid value for parameter `path` when calling `get_children`, must conform to the pattern `/(\/.*)?/`") collection_formats = {} path_params = {} if 'path' in params: if isinstance(params['path'], TeamCityObject): path_params['path'] = params['path'].locator_id else: path_params['path'] = params['path'] if 'vcs_root_instance_locator' in params: if isinstance(params['vcs_root_instance_locator'], TeamCityObject): path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'].locator_id else: path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'] query_params = [] if 'base_path' in params: query_params.append(('basePath', params['base_path'])) if 'locator' in params: query_params.append(('locator', params['locator'])) if 'fields' in params: query_params.append(('fields', params['fields'])) header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api( '/app/rest/vcs-root-instances/{vcsRootInstanceLocator}/files/latest/children{path}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Files', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def __get_children_alias_with_http_info(self, path, vcs_root_instance_locator, **kwargs): all_params = ['path', 'vcs_root_instance_locator', 'base_path', 'locator', 'fields'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_children_alias" % key ) params[key] = val del params['kwargs'] if ('path' not in params or params['path'] is None): raise ValueError("Missing the required parameter `path` when calling `get_children_alias`") if ('vcs_root_instance_locator' not in params or params['vcs_root_instance_locator'] is None): raise ValueError("Missing the required parameter `vcs_root_instance_locator` when calling `get_children_alias`") if 'path' in params and not re.search('(.*)?', params['path']): raise ValueError("Invalid value for parameter `path` when calling `get_children_alias`, must conform to the pattern `/(.*)?/`") collection_formats = {} path_params = {} if 'path' in params: if isinstance(params['path'], TeamCityObject): path_params['path'] = params['path'].locator_id else: path_params['path'] = params['path'] if 'vcs_root_instance_locator' 
in params: if isinstance(params['vcs_root_instance_locator'], TeamCityObject): path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'].locator_id else: path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'] query_params = [] if 'base_path' in params: query_params.append(('basePath', params['base_path'])) if 'locator' in params: query_params.append(('locator', params['locator'])) if 'fields' in params: query_params.append(('fields', params['fields'])) header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api( '/app/rest/vcs-root-instances/{vcsRootInstanceLocator}/files/latest/{path}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Files', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def __get_content_with_http_info(self, path, vcs_root_instance_locator, **kwargs): all_params = ['path', 'vcs_root_instance_locator', 'response_builder'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_content" % key ) params[key] = val del params['kwargs'] if ('path' not in params or params['path'] is None): raise ValueError("Missing the required parameter `path` when calling `get_content`") if ('vcs_root_instance_locator' not in params or params['vcs_root_instance_locator'] is None): raise ValueError("Missing the required parameter `vcs_root_instance_locator` when calling `get_content`") if 'path' in params and not re.search('(\/.*)?', params['path']): raise ValueError("Invalid value for parameter `path` when calling `get_content`, must conform to the pattern `/(\/.*)?/`") collection_formats = {} path_params = {} if 'path' in params: if isinstance(params['path'], TeamCityObject): path_params['path'] = params['path'].locator_id else: path_params['path'] = params['path'] if 'vcs_root_instance_locator' in params: if isinstance(params['vcs_root_instance_locator'], TeamCityObject): path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'].locator_id else: path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'] query_params = [] if 'response_builder' in params: query_params.append(('responseBuilder', params['response_builder'])) header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api( '/app/rest/vcs-root-instances/{vcsRootInstanceLocator}/files/latest/content{path}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def __get_content_alias_with_http_info(self, path, vcs_root_instance_locator, **kwargs): all_params = ['path', 'vcs_root_instance_locator'] all_params.append('async_req') 
all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_content_alias" % key ) params[key] = val del params['kwargs'] if ('path' not in params or params['path'] is None): raise ValueError("Missing the required parameter `path` when calling `get_content_alias`") if ('vcs_root_instance_locator' not in params or params['vcs_root_instance_locator'] is None): raise ValueError("Missing the required parameter `vcs_root_instance_locator` when calling `get_content_alias`") if 'path' in params and not re.search('(\/.*)?', params['path']): raise ValueError("Invalid value for parameter `path` when calling `get_content_alias`, must conform to the pattern `/(\/.*)?/`") collection_formats = {} path_params = {} if 'path' in params: if isinstance(params['path'], TeamCityObject): path_params['path'] = params['path'].locator_id else: path_params['path'] = params['path'] if 'vcs_root_instance_locator' in params: if isinstance(params['vcs_root_instance_locator'], TeamCityObject): path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'].locator_id else: path_params['vcsRootInstanceLocator'] = params['vcs_root_instance_locator'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api( '/app/rest/vcs-root-instances/{vcsRootInstanceLocator}/files/latest/files{path}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
MIT License
cfedermann/appraise
appraise/wmt16/models.py
remove_user_from_hit
python
def remove_user_from_hit(sender, instance, **kwargs):
    user = instance.user
    try:
        hit = instance.item.hit
        LOGGER.debug('Removing user "{0}" from HIT {1}'.format(user, hit))
        hit.users.remove(user)

        from appraise.wmt16.views import _compute_next_task_for_user
        _compute_next_task_for_user(user, hit.project, hit.language_pair)

    except (HIT.DoesNotExist, RankingTask.DoesNotExist):
        pass
Removes user from list of users who have completed corresponding HIT.
https://github.com/cfedermann/appraise/blob/2cce477efd5594699d6e0fa58f6312df60e05394/appraise/wmt16/models.py#L990-L1006
 import logging import uuid from datetime import datetime from xml.etree.ElementTree import fromstring, ParseError, tostring from django.dispatch import receiver from django.contrib.auth.models import User, Group from django.core.urlresolvers import reverse from django.core.validators import RegexValidator from django.db import models from django.template import Context from django.template.loader import get_template from appraise.wmt16.validators import validate_hit_xml, validate_segment_xml from appraise.settings import LOG_LEVEL, LOG_HANDLER from appraise.utils import datetime_to_seconds, AnnotationTask logging.basicConfig(level=LOG_LEVEL) LOGGER = logging.getLogger('appraise.wmt16.models') LOGGER.addHandler(LOG_HANDLER) MAX_USERS_PER_HIT = 1 LANGUAGE_PAIR_CHOICES = ( ('eng2ces', 'English → Czech'), ('eng2deu', 'English → German'), ('eng2fin', 'English → Finnish'), ('eng2rom', 'English → Romanian'), ('eng2rus', 'English → Russian'), ('eng2trk', 'English → Turkish'), ('ces2eng', 'Czech → English'), ('deu2eng', 'German → English'), ('fin2eng', 'Finnish → English'), ('rom2eng', 'Romanian → English'), ('rus2eng', 'Russian → English'), ('trk2eng', 'Turkish → English'), ('eng2bul', 'English → Bulgarian'), ('eng2esn', 'English → Spanish'), ('eng2baq', 'English → Basque'), ('eng2nld', 'English → Dutch'), ('eng2ptb', 'English → Portguese'), ) ISO639_3_TO_NAME_MAPPING = { 'ces': 'Czech', 'cze': 'Czech', 'deu': 'German', 'ger': 'German', 'eng': 'English', 'esn': 'Spanish', 'spa': 'Spanish', 'fra': 'French', 'fre': 'French', 'rus': 'Russian', 'fin': 'Finnish', 'rom': 'Romanian', 'ron': 'Romanian', 'trk': 'Turkish', 'tur': 'Turkish', 'eus': 'Basque', 'baq': 'Basque', 'bul': 'Bulgarian', 'nld': 'Dutch', 'ptb': 'Portguese', } GROUP_HIT_REQUIREMENTS = { 'MSR': 0, 'MTMA': 0, 'Aalto': 100, 'Abu-Matran': 300, 'AFRL-MITLL': 400, 'AMU-UEDIN': 200, 'CMU': 100, 'CUNI': 500, 'JHU': 1600, 'KIT': 300, 'KIT-LIMSI': 100, 'LIMSI': 300, 'LMU-CUNI': 100, 'METAMIND': 100, 'TBTK': 200, 'Cambridge': 100, 'NRC': 100, 'NYU-UMontreal': 400, 'PJATK': 200, 'PROMT': 800, 'QT21/HimL': 100, 'RWTH': 100, 'UEdin': 1900, 'UH': 400, 'USFD': 100, 'UUT': 100, 'YSDA': 200, 'JXNU-IIP': 100, 'UPF': 100, 'ParFDA': 200, 'Berlin': 600, 'Hamburg': 200, 'Prague': 400, 'Amsterdam': 200, 'Saarbrücken': 100, 'Groningen': 300, 'Sofia': 200, 'Donostia': 500, 'Lisbon': 300, } class HIT(models.Model): hit_id = models.CharField( max_length=8, db_index=True, unique=True, editable=False, help_text="Unique identifier for this HIT instance.", verbose_name="HIT identifier" ) block_id = models.IntegerField( db_index=True, help_text="Block ID for this HIT instance.", verbose_name="HIT block identifier" ) hit_xml = models.TextField( help_text="XML source for this HIT instance.", validators=[validate_hit_xml], verbose_name="HIT source XML" ) language_pair = models.CharField( max_length=7, choices=LANGUAGE_PAIR_CHOICES, db_index=True, help_text="Language pair choice for this HIT instance.", verbose_name="Language pair" ) hit_attributes = {} users = models.ManyToManyField( User, blank=True, db_index=True, null=True, help_text="Users who work on this HIT instance." ) active = models.BooleanField( db_index=True, default=True, help_text="Indicates that this HIT instance is still in use.", verbose_name="Active?" ) mturk_only = models.BooleanField( db_index=True, default=False, help_text="Indicates that this HIT instance is ONLY usable via MTurk.", verbose_name="MTurk only?" 
) completed = models.BooleanField( db_index=True, default=False, help_text="Indicates that this HIT instance is completed.", verbose_name="Completed?" ) assigned = models.DateTimeField(blank=True, null=True, editable=False) finished = models.DateTimeField(blank=True, null=True, editable=False) class Meta: ordering = ('id', 'hit_id', 'language_pair', 'block_id') verbose_name = "HIT instance" verbose_name_plural = "HIT instances" def __init__(self, *args, **kwargs): super(HIT, self).__init__(*args, **kwargs) if not self.hit_id: self.hit_id = self.__class__._create_hit_id() self.reload_dynamic_fields() def __unicode__(self): return u'<HIT id="{0}" hit="{1}" block="{2}" language-pair="{3}">' .format(self.id, self.hit_id, self.block_id, self.language_pair) @classmethod def _create_hit_id(cls): new_id = uuid.uuid4().hex[:8] while cls.objects.filter(hit_id=new_id): new_id = uuid.uuid4().hex[:8] return new_id @classmethod def compute_remaining_hits(cls, language_pair=None): hits_qs = cls.objects.filter(active=True, mturk_only=False, completed=False) if language_pair: hits_qs = hits_qs.filter(language_pair=language_pair) available = 0 for hit in hits_qs: if hit.users.count() < MAX_USERS_PER_HIT: available = available + 1 else: hit.completed = True hit.save() return available @classmethod def compute_status_for_user(cls, user, project=None, language_pair=None): hits_qs = cls.objects.filter(users=user) if project: project_instance = Project.objects.filter(id=project.id) if project_instance.exists(): hits_qs = hits_qs.filter(project=project_instance[0]) else: return [0, 0, 0] if language_pair: hits_qs = hits_qs.filter(language_pair=language_pair) _completed_hits = hits_qs.count() _durations = [] for hit in hits_qs: _results = RankingResult.objects.filter(user=user, item__hit=hit) _durations.extend(_results.values_list('duration', flat=True)) _durations = [datetime_to_seconds(d) for d in _durations if d] _total_duration = sum(_durations) _average_duration = _total_duration / float(_completed_hits or 1) current_status = [] current_status.append(_completed_hits) current_status.append(_average_duration) current_status.append(_total_duration) return current_status @classmethod def compute_status_for_group(cls, group, project=None, language_pair=None): combined = [0, 0, 0] for user in group.user_set.all(): _user_status = cls.compute_status_for_user(user, project, language_pair) combined[0] = combined[0] + _user_status[0] combined[1] = combined[1] + _user_status[1] combined[2] = combined[2] + _user_status[2] combined[1] = combined[2] / float(combined[0] or 1) return combined def save(self, *args, **kwargs): if not self.id: self.full_clean() super(HIT, self).save(*args, **kwargs) _tree = fromstring(self.hit_xml.encode("utf-8")) for _child in _tree: new_item = RankingTask(hit=self, item_xml=tostring(_child)) new_item.save() try: related_result = RankingResult.objects.filter(item__hit=self).latest('completion') self.finished = related_result.completion except RankingResult.DoesNotExist: pass super(HIT, self).save(*args, **kwargs) def get_absolute_url(self): hit_handler_view = 'appraise.wmt16.views.hit_handler' kwargs = {'hit_id': self.hit_id} return reverse(hit_handler_view, kwargs=kwargs) def get_status_url(self): status_handler_view = 'appraise.wmt16.views.status_view' kwargs = {'hit_id': self.hit_id} return reverse(status_handler_view, kwargs=kwargs) def reload_dynamic_fields(self): if self.hit_xml: try: _hit_xml = fromstring(self.hit_xml.encode("utf-8")) self.hit_attributes = {} for key, value in 
_hit_xml.attrib.items(): self.hit_attributes[key] = value except (ParseError), msg: self.hit_attributes = {'note': msg} def export_to_xml(self): template = get_template('wmt16/task_result.xml') self.reload_dynamic_fields() _attr = self.hit_attributes.items() attributes = ' '.join(['{}="{}"'.format(k, v) for k, v in _attr]) results = [] for item in RankingTask.objects.filter(hit=self): item.reload_dynamic_fields() try: source_id = item.source[1]["id"] except: source_id = -1 _results = [] for _result in item.rankingresult_set.all(): _results.append(_result.export_to_xml()) results.append((source_id, _results)) context = {'hit_id': self.hit_id, 'attributes': attributes, 'results': results} return template.render(Context(context)) def export_to_apf(self): results = [] for item in RankingTask.objects.filter(hit=self): for _result in item.rankingresult_set.all(): _apf_output = _result.export_to_apf() if _apf_output: results.append(_apf_output) return u"\n".join(results) def compute_agreement_scores(self): _raw = self.export_to_apf() if not _raw: return None else: _raw = _raw.split('\n') _data = [_line.split(',') for _line in _raw] try: _data = [(x[0], x[1], x[2]) for x in _data] except IndexError: return None _task = AnnotationTask(data=_data) try: _alpha = _task.alpha() _kappa = _task.kappa() _pi = _task.pi() _S = _task.S() except ZeroDivisionError, msg: LOGGER.debug(msg) return None return (_alpha, _kappa, _pi, _S) class Project(models.Model): name = models.CharField( blank=False, db_index=True, max_length=100, null=False, unique=True, validators=[RegexValidator(regex=r'[a-zA-Z0-9\-]{1,100}')], ) users = models.ManyToManyField( User, blank=True, db_index=True, null=True, ) HITs = models.ManyToManyField( HIT, blank=True, db_index=True, null=True, ) def __str__(self): return '<project id="{0}" name="{1}" users="{2}" HITs="{3}" />'.format(self.id, self.name, self.users.count(), self.HITs.count()) class RankingTask(models.Model): hit = models.ForeignKey( HIT, db_index=True ) item_xml = models.TextField( help_text="XML source for this RankingTask instance.", validators=[validate_segment_xml], verbose_name="RankingTask source XML" ) attributes = None source = None reference = None translations = None class Meta: ordering = ('id',) verbose_name = "RankingTask instance" verbose_name_plural = "RankingTask instances" def __init__(self, *args, **kwargs): super(RankingTask, self).__init__(*args, **kwargs) self.reload_dynamic_fields() def __unicode__(self): return u'<ranking-task id="{0}">'.format(self.id) def save(self, *args, **kwargs): self.full_clean() super(RankingTask, self).save(*args, **kwargs) def reload_dynamic_fields(self): if self.item_xml: try: _item_xml = fromstring(self.item_xml) self.attributes = _item_xml.attrib _source = _item_xml.find('source') if _source is not None: self.source = (_source.text, _source.attrib) _reference = _item_xml.find('reference') if _reference is not None: self.reference = (_reference.text, _reference.attrib) self.translations = [] for _translation in _item_xml.iterfind('translation'): self.translations.append((_translation.text, _translation.attrib)) except ParseError: self.source = None self.reference = None self.translations = None class RankingResult(models.Model): item = models.ForeignKey( RankingTask, db_index=True ) user = models.ForeignKey( User, db_index=True ) duration = models.TimeField(blank=True, null=True, editable=False) completion = models.DateTimeField(auto_now_add=True, blank=True, null=True, editable=False) def readable_duration(self): return 
'{}'.format(self.duration) raw_result = models.TextField(editable=False, blank=False) results = None systems = 0 class Meta: ordering = ('id',) verbose_name = "RankingResult object" verbose_name_plural = "RankingResult objects" def __init__(self, *args, **kwargs): super(RankingResult, self).__init__(*args, **kwargs) self.reload_dynamic_fields() def __unicode__(self): return u'<ranking-result id="{0}">'.format(self.id) def reload_dynamic_fields(self): if self.raw_result and self.raw_result != 'SKIPPED': try: self.results = self.raw_result.split(',') self.results = [int(x) for x in self.results] self.systems = sum([len(x[1]['system'].split(',')) for x in self.item.translations]) except Exception, msg: self.results = msg def export_to_xml(self): return self.export_to_ranking_xml() def export_to_ranking_xml(self): template = get_template('wmt16/ranking_result.xml') _attr = self.item.attributes.items() attributes = ' '.join(['{}="{}"'.format(k, v) for k, v in _attr]) skipped = self.results is None translations = [] if not skipped: for index, translation in enumerate(self.item.translations): _items = translation[1].items() _attr = ' '.join(['{}="{}"'.format(k, v) for k, v in _items]) _rank = self.results[index] translations.append((_attr, _rank)) context = { 'attributes': attributes, 'duration': '{}'.format(self.duration), 'skipped': skipped, 'translations': translations, 'user': self.user, } return template.render(Context(context)) def export_to_pairwise_csv(self): skipped = self.results is None if skipped: return None try: srcIndex = self.item.source[1]["id"] except: srcIndex = -1 _src_lang = self.item.hit.hit_attributes['source-language'] _trg_lang = self.item.hit.hit_attributes['target-language'] csv_data = [] csv_data.append(ISO639_3_TO_NAME_MAPPING[_src_lang]) csv_data.append(ISO639_3_TO_NAME_MAPPING[_trg_lang]) csv_data.append(srcIndex) csv_data.append(srcIndex) csv_data.append(self.user.username) base_values = csv_data systems = set() for index, translation in enumerate(self.item.translations): name = translation[1]['system'].replace(',', '+') rank = self.results[index] systems.add((name, rank)) csv_output = [] from itertools import combinations for (sysA, sysB) in combinations(systems, 2): expandedA = sysA[0].split('+') expandedB = sysB[0].split('+') for singleA in expandedA: for singleB in expandedB: csv_local = [] csv_local.extend(base_values) csv_local.append(singleA) csv_local.append(str(sysA[1])) csv_local.append(singleB) csv_local.append(str(sysB[1])) csv_local.append(str(self.item.id)) csv_joint = u",".join(csv_local) if not csv_joint in csv_output: csv_output.append(csv_joint) if len(expandedA) > 1: for (singleA1, singleA2) in combinations(expandedA, 2): csv_local = [] csv_local.extend(base_values) csv_local.append(singleA1) csv_local.append(str(sysA[1])) csv_local.append(singleA2) csv_local.append(str(sysA[1])) csv_local.append(str(self.item.id)) csv_joint = u",".join(csv_local) if not csv_joint in csv_output: csv_output.append(csv_joint) if len(expandedB) > 1: for (singleB1, singleB2) in combinations(expandedB, 2): csv_local = [] csv_local.extend(base_values) csv_local.append(singleB1) csv_local.append(str(sysB[1])) csv_local.append(singleB2) csv_local.append(str(sysB[1])) csv_local.append(str(self.item.id)) csv_joint = u",".join(csv_local) if not csv_joint in csv_output: csv_output.append(csv_joint) return u"\n".join(csv_output) def export_to_ranking_csv(self): raise NotImplementedError("not ready yet") ranking_csv_data = [] try: 
ranking_csv_data.append(self.item.source[1]["id"]) except: ranking_csv_data.append(-1) _src_lang = self.item.hit.hit_attributes['source-language'] _trg_lang = self.item.hit.hit_attributes['target-language'] ranking_csv_data.append(ISO639_3_TO_NAME_MAPPING[_src_lang]) ranking_csv_data.append(ISO639_3_TO_NAME_MAPPING[_trg_lang]) ranking_csv_data.append(self.user.username) ranking_csv_data.append(str(datetime_to_seconds(self.duration))) skipped = self.results is None translations = [] if not skipped: for index, translation in enumerate(self.item.translations): _word_count = len(translation[0].split()) _rank = self.results[index] translations.append((_rank, _word_count)) for rank, word_count in translations: ranking_csv_data.append(str(rank)) ranking_csv_data.append(str(word_count)) return u",".join(ranking_csv_data) def export_to_csv(self, expand_multi_systems=False): item = self.item hit = self.item.hit values = [] _src_lang = hit.hit_attributes['source-language'] _trg_lang = hit.hit_attributes['target-language'] _systems = [] for translation in item.translations: _systems.append(translation[1]['system']) values.append(ISO639_3_TO_NAME_MAPPING[_src_lang]) values.append(ISO639_3_TO_NAME_MAPPING[_trg_lang]) values.append(item.source[1]['id']) values.append('-1') values.append(item.source[1]['id']) values.append(self.user.username) base_values = values if not self.results: self.results = [-1] * len(_systems) _system_names = [] _system_ranks = [] for _result_index, _system in enumerate(_systems): if expand_multi_systems: _local_systems = _system.split(',') _local_results = [str(self.results[_result_index])] * len(_local_systems) _system_names.extend(_local_systems) _system_ranks.extend(_local_results) else: _system_names.append(_system.replace(',', '+')) _system_ranks.append(str(self.results[_result_index])) if len(_system_names) % 5 > 0: _missing_systems = 5 - len(_system_names) % 5 for x in range(_missing_systems): _system_names.append('PLACEHOLDER') _system_ranks.append('-1') all_values = [] for _base_index in range(len(_system_names))[::5]: current_values = list(base_values) current_ranks = [] for _current_index in range(len(_system_names))[_base_index:_base_index+5]: current_values.append('-1') current_values.append(str(_system_names[_current_index])) current_ranks.append(_system_ranks[_current_index]) current_values.extend(current_ranks) all_values.append(u",".join(current_values)) return u"\n".join(all_values) def export_to_apf(self): if not self.results: return None item = self.item hit = self.item.hit _systems = [] for translation in item.translations: _systems.append(translation[1]['system']) from itertools import combinations, product results = [] for a, b in combinations(range(5), 2): _c = self.user.username _i = '{0}.{1}.{2}'.format(item.source[1]['id'], a+1, b+1) _individualA = _systems[a].split(',') _individualB = _systems[b].split(',') for _systemA, _systemB in product(_individualA, _individualB): _verdict = '?' 
if self.results[a] > self.results[b]: _verdict = '>' elif self.results[a] < self.results[b]: _verdict = '<' elif self.results[a] == self.results[b]: _verdict = '=' _v = '{0}{1}{2}'.format(str(_systemA), _verdict, str(_systemB)) results.append('{0},{1},{2}'.format(_c, _i, _v)) return u'\n'.join(results) @receiver(models.signals.post_save, sender=RankingResult) def update_user_hit_mappings(sender, instance, created, **kwargs): hit = instance.item.hit user = instance.user results = RankingResult.objects.filter(user=user, item__hit=hit) if len(results) > 2: from appraise.wmt16.views import _compute_next_task_for_user LOGGER.debug('Deleting stale User/HIT mapping {0}->{1}'.format( user, hit)) hit.users.add(user) for project in hit.project_set.all(): UserHITMapping.objects.filter(user=user, project=project, hit=hit).delete() _compute_next_task_for_user(user, project, hit.language_pair) @receiver(models.signals.post_delete, sender=RankingResult)
BSD 3-Clause New or Revised License
zubara/mneflow
mneflow/models.py
LFCNN.get_output_correlations
python
def get_output_correlations(self, y_true):
    corr_to_output = []
    y_true = y_true.numpy()
    flat_feats = self.tc_out.reshape(self.tc_out.shape[0], -1)

    if self.dataset.h_params['target_type'] in ['float', 'signal']:
        for y_ in y_true.T:
            rfocs = np.array([spearmanr(y_, f)[0] for f in flat_feats.T])
            corr_to_output.append(rfocs.reshape(self.out_weights.shape[:-1]))

    elif self.dataset.h_params['target_type'] == 'int':
        y_true = y_true / np.linalg.norm(y_true, ord=1, axis=0)[None, :]
        flat_div = np.linalg.norm(flat_feats, 1, axis=0)[None, :]
        flat_feats = flat_feats / flat_div
        for y_ in y_true.T:
            rfocs = 2. - np.sum(np.abs(flat_feats - y_[:, None]), 0)
            corr_to_output.append(rfocs.reshape(self.out_weights.shape[:-1]))

    corr_to_output = np.dstack(corr_to_output)

    if np.any(np.isnan(corr_to_output)):
        corr_to_output[np.isnan(corr_to_output)] = 0
    return corr_to_output
Computes a similarity metric between each of the extracted features and the target variable. The metric is a Manhattan distance for discrete targets, and Spearman correlation for continuous targets.
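A minimal numeric sketch of the two metrics described above; the toy arrays and variable names here are made up for illustration and are not part of mneflow:

import numpy as np
from scipy.stats import spearmanr

# Toy example: one extracted feature time-course and one target, 5 trials each.
feature = np.array([0.1, 0.4, 0.2, 0.8, 0.6])
target_cont = np.array([1.0, 2.0, 1.5, 3.0, 2.5])   # continuous target
target_disc = np.array([1.0, 0.0, 0.0, 1.0, 0.0])   # one column of a one-hot target

# Continuous ('float'/'signal') targets: Spearman rank correlation.
rho = spearmanr(target_cont, feature)[0]

# Discrete ('int') targets: L1-normalize both vectors and score them by
# 2 minus their Manhattan (L1) distance, mirroring the function above.
f = feature / np.linalg.norm(feature, ord=1)
t = target_disc / np.linalg.norm(target_disc, ord=1)
score = 2.0 - np.abs(f - t).sum()

print(rho, score)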
https://github.com/zubara/mneflow/blob/40200a0bd90e155597e96feb61517e6186fc9670/mneflow/models.py#L721-L753
import tensorflow as tf import numpy as np from mne import channels, evoked, create_info from scipy.signal import freqz, welch from scipy.stats import spearmanr from matplotlib import pyplot as plt from matplotlib import patches as ptch from matplotlib import collections from .layers import LFTConv, VARConv, DeMixing, Dense, TempPooling from tensorflow.keras.layers import SeparableConv2D, Conv2D, DepthwiseConv2D from tensorflow.keras.layers import Flatten, Dropout, BatchNormalization from tensorflow.keras.initializers import Constant from tensorflow.keras import regularizers as k_reg, constraints, layers import csv import os def uniquify(seq): un = [] [un.append(i) for i in seq if not un.count(i)] return un class BaseModel(): def __init__(self, Dataset, specs=dict()): self.specs = specs self.dataset = Dataset self.specs.setdefault('model_path', self.dataset.h_params['savepath']) self.model_path = specs['model_path'] self.input_shape = (self.dataset.h_params['n_seq'], self.dataset.h_params['n_t'], self.dataset.h_params['n_ch']) self.y_shape = Dataset.h_params['y_shape'] self.out_dim = np.prod(self.y_shape) self.inputs = layers.Input(shape=(self.input_shape)) self.rate = specs.setdefault('dropout', 0.0) self.trained = False self.y_pred = self.build_graph() def build(self, optimizer="adam", loss=None, metrics=None, learn_rate=3e-4): self.km = tf.keras.Model(inputs=self.inputs, outputs=self.y_pred) params = {"optimizer": tf.optimizers.get(optimizer).from_config({"learning_rate":learn_rate})} if loss: params["loss"] = tf.keras.losses.get(loss) if metrics: if not isinstance(metrics, list): metrics = [metrics] params["metrics"] = [tf.keras.metrics.get(metric) for metric in metrics] if self.dataset.h_params["target_type"] in ['float', 'signal']: params.setdefault("loss", tf.keras.losses.MeanSquaredError(name='MSE')) params.setdefault("metrics", tf.keras.metrics.MeanAbsoluteError(name="MAE")) elif self.dataset.h_params["target_type"] in ['int']: params.setdefault("loss", tf.nn.softmax_cross_entropy_with_logits) params.setdefault("metrics", tf.keras.metrics.CategoricalAccuracy(name="cat_ACC")) self.km.compile(optimizer=params["optimizer"], loss=params["loss"], metrics=params["metrics"]) print('Input shape:', self.input_shape) print('y_pred:', self.y_pred.shape) print('Initialization complete!') def build_graph(self): flat = Flatten()(self.inputs) self.fc = Dense(size=self.out_dim, nonlin=tf.identity, specs=self.specs) y_pred = self.fc(flat) return y_pred def train(self, n_epochs, eval_step=None, min_delta=1e-6, early_stopping=3, mode='single_fold'): stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=min_delta, patience=early_stopping, restore_best_weights=True) if not eval_step: train_size = self.dataset.h_params['train_size'] eval_step = train_size // self.dataset.h_params['train_batch'] + 1 self.train_params = [n_epochs, eval_step, early_stopping, mode] if mode == 'single_fold': self.t_hist = self.km.fit(self.dataset.train, validation_data=self.dataset.val, epochs=n_epochs, steps_per_epoch=eval_step, shuffle=True, validation_steps=self.dataset.validation_steps, callbacks=[stop_early], verbose=2) self.v_loss, self.v_metric = self.evaluate(self.dataset.val) self.v_loss_sd = 0 self.v_metric_sd = 0 print("Training complete: loss: {}, Metric: {}".format(self.v_loss, self.v_metric)) self.update_log() elif mode == 'cv': n_folds = len(self.dataset.h_params['folds'][0]) print("Running cross-validation with {} folds".format(n_folds)) metrics = [] losses = [] for jj in 
range(n_folds): print("fold:", jj) train, val = self.dataset._build_dataset(self.dataset.h_params['train_paths'], train_batch=self.dataset.training_batch, test_batch=self.dataset.validation_batch, split=True, val_fold_ind=jj) self.t_hist = self.km.fit(train, validation_data=val, epochs=n_epochs, steps_per_epoch=eval_step, shuffle=True, validation_steps=self.dataset.validation_steps, callbacks=[stop_early], verbose=2) loss, metric = self.evaluate(val) losses.append(loss) metrics.append(metric) if jj < n_folds -1: self.shuffle_weights() else: print("Fold: {} Loss: {:.4f}, Metric: {:.4f}".format(jj, loss, metric)) self.cv_losses = losses self.cv_metrics = metrics self.v_loss = np.mean(losses) self.v_metric = np.mean(metrics) self.v_loss_sd = np.std(losses) self.v_metric_sd = np.std(metrics) print("{} with {} folds completed. Loss: {:.4f} +/- {:.4f}. Metric: {:.4f} +/- {:.4f}".format(mode, n_folds, np.mean(losses), np.std(losses), np.mean(metrics), np.std(metrics))) self.update_log() return self.cv_losses, self.cv_metrics elif mode == "loso": n_folds = len(self.dataset.h_params['test_paths']) print("Running leave-one-subject-out CV with {} subject".format(n_folds)) metrics = [] losses = [] for jj in range(n_folds): print("fold:", jj) test_subj = self.dataset.h_params['test_paths'][jj] train_subjs = self.dataset.h_params['train_paths'].copy() train_subjs.pop(jj) train, val = self.dataset._build_dataset(train_subjs, train_batch=self.dataset.training_batch, test_batch=self.dataset.validation_batch, split=True, val_fold_ind=0) self.t_hist = self.km.fit(train, validation_data=val, epochs=n_epochs, steps_per_epoch=eval_step, shuffle=True, validation_steps=self.dataset.validation_steps, callbacks=[stop_early], verbose=2) test = self.dataset._build_dataset(test_subj, test_batch=None, split=False) loss, metric = self.evaluate(test) losses.append(loss) metrics.append(metric) if jj < n_folds -1: self.shuffle_weights() else: self.cv_losses = losses self.cv_metrics = metrics self.v_loss = np.mean(losses) self.v_metric = np.mean(metrics) self.v_loss_sd = np.std(losses) self.v_metric_sd = np.std(metrics) self.update_log() print("{} with {} folds completed. Loss: {:.4f} +/- {:.4f}. 
Metric: {:.4f} +/- {:.4f}".format(mode, n_folds, np.mean(losses), np.std(losses), np.mean(metrics), np.std(metrics))) return self.cv_losses, self.cv_metrics def shuffle_weights(self): print("Re-shuffling weights between folds") weights = self.km.get_weights() weights = [np.random.permutation(w.flat).reshape(w.shape) for w in weights] self.km.set_weights(weights) def plot_hist(self): plt.plot(self.t_hist.history['loss']) plt.plot(self.t_hist.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.show() def update_log(self): appending = os.path.exists(self.model_path + self.scope + '_log.csv') log = dict() log['data_id'] = self.dataset.h_params['data_id'] log['data_path'] = self.dataset.h_params['savepath'] log['y_shape'] = np.prod(self.dataset.h_params['y_shape']) log['fs'] = str(self.dataset.h_params['fs']) log.update(self.specs) log['nepochs'], log['eval_step'], log['early_stopping'], log['mode'] = self.train_params log['v_metric'] = self.v_metric log['v_loss'] = self.v_metric log['v_metric_sd'] = self.v_metric_sd log['v_loss_sd'] = self.v_metric_sd tr_loss, tr_perf = self.evaluate(self.dataset.train) log['tr_metric'] = tr_perf log['tr_loss'] = tr_loss if 'test_paths' in self.dataset.h_params and log['mode'] != 'loso': t_loss, t_metric = self.evaluate(self.dataset.h_params['test_paths']) print("Updating log: test loss: {:.4f} test metric: {:.4f}".format(t_loss, t_metric)) log['test_metric'] = t_metric log['test_loss'] = t_loss else: log['test_metric'] = "NA" log['test_loss'] = "NA" self.log = log with open(self.model_path + self.scope + '_log.csv', 'a') as csv_file: writer = csv.DictWriter(csv_file, fieldnames=self.log.keys()) if not appending: writer.writeheader() writer.writerow(self.log) def save(self): print("Not implemented") def restore(self): print("Not implemented") def predict(self, dataset=None): if not dataset: print("No dataset specified using validation dataset (Default)") dataset = self.dataset.val elif isinstance(dataset, str) or isinstance(dataset, (list, tuple)): dataset = self.dataset._build_dataset(dataset, split=False, test_batch=None, repeat=True) elif not isinstance(dataset, tf.data.Dataset): print("Specify dataset") return None, None y_pred = self.km.predict(dataset, steps=self.dataset.validation_steps) y_true = [row[1] for row in dataset.take(1)][0] y_true = y_true.numpy() return y_true, y_pred def evaluate(self, dataset=False): if not dataset: print("No dataset specified using validation dataset (Default)") dataset = self.dataset.val elif isinstance(dataset, str) or isinstance(dataset, (list, tuple)): dataset = self.dataset._build_dataset(dataset, split=False, test_batch=None, repeat=True) elif not isinstance(dataset, tf.data.Dataset): print("Specify dataset") return None, None losses, metrics = self.km.evaluate(dataset, steps=self.dataset.validation_steps) return losses, metrics class LFCNN(BaseModel): def __init__(self, Dataset, specs=dict()): self.scope = 'lfcnn' specs.setdefault('filter_length', 7) specs.setdefault('n_latent', 32) specs.setdefault('pooling', 2) specs.setdefault('stride', 2) specs.setdefault('padding', 'SAME') specs.setdefault('pool_type', 'max') specs.setdefault('nonlin', tf.nn.relu) specs.setdefault('l1', 3e-4) specs.setdefault('l2', 0) specs.setdefault('l1_scope', ['fc', 'demix', 'lf_conv']) specs.setdefault('l2_scope', []) specs.setdefault('maxnorm_scope', []) super(LFCNN, self).__init__(Dataset, specs) def build_graph(self): self.dmx = 
DeMixing(size=self.specs['n_latent'], nonlin=tf.identity, axis=3, specs=self.specs) self.dmx_out = self.dmx(self.inputs) self.tconv = LFTConv(size=self.specs['n_latent'], nonlin=self.specs['nonlin'], filter_length=self.specs['filter_length'], padding=self.specs['padding'], specs=self.specs ) self.tconv_out = self.tconv(self.dmx_out) self.pool = TempPooling(pooling=self.specs['pooling'], pool_type=self.specs['pool_type'], stride=self.specs['stride'], padding=self.specs['padding'], ) self.pooled = self.pool(self.tconv_out) dropout = Dropout(self.specs['dropout'], noise_shape=None)(self.pooled) self.fin_fc = Dense(size=self.out_dim, nonlin=tf.identity, specs=self.specs) y_pred = self.fin_fc(dropout) return y_pred def _get_spatial_covariance(self, dataset): n1_covs = [] for x, y in dataset.take(5): n1cov = tf.tensordot(x[0,0], x[0,0], axes=[[0], [0]]) n1_covs.append(n1cov) cov = tf.reduce_mean(tf.stack(n1_covs, axis=0), axis=0) return cov def compute_patterns(self, data_path=None, output='patterns'): if not data_path: print("Computing patterns: No path specified, using validation dataset (Default)") ds = self.dataset.val elif isinstance(data_path, str) or isinstance(data_path, (list, tuple)): ds = self.dataset._build_dataset(data_path, split=False, test_batch=None, repeat=True) elif isinstance(data_path, mneflow.data.Dataset): if hasattr(data_path, 'test'): ds = data_path.test else: ds = data_path.val elif isinstance(data_path, tf.data.Dataset): ds = data_path else: raise AttributeError('Specify dataset or data path.') X, y = [row for row in ds.take(1)][0] self.out_w_flat = self.fin_fc.w.numpy() self.out_weights = np.reshape(self.out_w_flat, [-1, self.dmx.size, self.out_dim]) self.out_biases = self.fin_fc.b.numpy() self.feature_relevances = self.get_component_relevances(X, y) tc_out = self.pool(self.tconv(self.dmx(X)).numpy()) X = X - tf.reduce_mean(X, axis=-2, keepdims=True) X = tf.transpose(X, [3, 0, 1, 2]) X = tf.reshape(X, [X.shape[0], -1]) self.dcov = tf.matmul(X, tf.transpose(X)) demx = self.dmx.w.numpy() self.lat_tcs = np.dot(demx.T, X) del X if 'patterns' in output: self.patterns = np.dot(self.dcov, demx) else: self.patterns = demx kern = self.tconv.filters.numpy() self.filters = np.squeeze(kern) self.tc_out = np.squeeze(tc_out) self.corr_to_output = self.get_output_correlations(y) def get_component_relevances(self, X, y): model_weights = self.km.get_weights() base_loss, base_performance = self.km.evaluate(X, y, verbose=0) feature_relevances_loss = [] n_out_t = self.out_weights.shape[0] n_out_y = self.out_weights.shape[-1] zeroweights = np.zeros((n_out_t,)) for i in range(self.specs["n_latent"]): loss_per_class = [] for jj in range(n_out_y): new_weights = self.out_weights.copy() new_bias = self.out_biases.copy() new_weights[:, i, jj] = zeroweights new_bias[jj] = 0 new_weights = np.reshape(new_weights, self.out_w_flat.shape) model_weights[-2] = new_weights model_weights[-1] = new_bias self.km.set_weights(model_weights) loss = self.km.evaluate(X, y, verbose=0)[0] loss_per_class.append(base_loss - loss) feature_relevances_loss.append(np.array(loss_per_class)) self.component_relevance_loss = np.array(feature_relevances_loss)
BSD 3-Clause New or Revised License
nextcord/nextcord
nextcord/abc.py
GuildChannel.changed_roles
python
def changed_roles(self) -> List[Role]:
    ret = []
    g = self.guild
    for overwrite in filter(lambda o: o.is_role(), self._overwrites):
        role = g.get_role(overwrite.id)
        if role is None:
            continue

        role = copy.copy(role)
        role.permissions.handle_overwrite(overwrite.allow, overwrite.deny)
        ret.append(role)
    return ret
List[:class:`~nextcord.Role`]: Returns a list of roles that have been overridden from their default values in the :attr:`~nextcord.Guild.roles` attribute.
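A brief usage sketch; `channel` here is a hypothetical guild channel instance, not something defined in the source:

# Print every role whose permissions are explicitly overridden on this channel.
for role in channel.changed_roles:
    print(role.name, role.permissions)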
https://github.com/nextcord/nextcord/blob/5b2c64cf4fd0e593f032ec6c2465682e9b67f767/nextcord/abc.py#L414-L427
from __future__ import annotations import copy import asyncio from typing import ( Any, Callable, Dict, List, Optional, TYPE_CHECKING, Protocol, Sequence, Tuple, TypeVar, Union, overload, runtime_checkable, ) from .iterators import HistoryIterator from .context_managers import Typing from .enums import ChannelType from .errors import InvalidArgument, ClientException from .mentions import AllowedMentions from .permissions import PermissionOverwrite, Permissions from .role import Role from .invite import Invite from .file import File from .voice_client import VoiceClient, VoiceProtocol from .sticker import GuildSticker, StickerItem from . import utils __all__ = ( 'Snowflake', 'User', 'PrivateChannel', 'GuildChannel', 'Messageable', 'Connectable', ) T = TypeVar('T', bound=VoiceProtocol) if TYPE_CHECKING: from datetime import datetime from .client import Client from .user import ClientUser from .asset import Asset from .state import ConnectionState from .guild import Guild from .member import Member from .channel import CategoryChannel from .embeds import Embed from .message import Message, MessageReference, PartialMessage from .channel import TextChannel, DMChannel, GroupChannel, PartialMessageable from .threads import Thread from .enums import InviteTarget from .ui.view import View from .types.channel import ( PermissionOverwrite as PermissionOverwritePayload, Channel as ChannelPayload, GuildChannel as GuildChannelPayload, OverwriteType, ) PartialMessageableChannel = Union[TextChannel, Thread, DMChannel, PartialMessageable] MessageableChannel = Union[PartialMessageableChannel, GroupChannel] SnowflakeTime = Union["Snowflake", datetime] MISSING = utils.MISSING class _Undefined: def __repr__(self) -> str: return 'see-below' _undefined: Any = _Undefined() @runtime_checkable class Snowflake(Protocol): __slots__ = () id: int @runtime_checkable class User(Snowflake, Protocol): __slots__ = () name: str discriminator: str avatar: Asset bot: bool @property def display_name(self) -> str: raise NotImplementedError @property def mention(self) -> str: raise NotImplementedError @runtime_checkable class PrivateChannel(Snowflake, Protocol): __slots__ = () me: ClientUser class _Overwrites: __slots__ = ('id', 'allow', 'deny', 'type') ROLE = 0 MEMBER = 1 def __init__(self, data: PermissionOverwritePayload): self.id: int = int(data['id']) self.allow: int = int(data.get('allow', 0)) self.deny: int = int(data.get('deny', 0)) self.type: OverwriteType = data['type'] def _asdict(self) -> PermissionOverwritePayload: return { 'id': self.id, 'allow': str(self.allow), 'deny': str(self.deny), 'type': self.type, } def is_role(self) -> bool: return self.type == 0 def is_member(self) -> bool: return self.type == 1 GCH = TypeVar('GCH', bound='GuildChannel') class GuildChannel: __slots__ = () id: int name: str guild: Guild type: ChannelType position: int category_id: Optional[int] _state: ConnectionState _overwrites: List[_Overwrites] if TYPE_CHECKING: def __init__(self, *, state: ConnectionState, guild: Guild, data: Dict[str, Any]): ... 
def __str__(self) -> str: return self.name @property def _sorting_bucket(self) -> int: raise NotImplementedError def _update(self, guild: Guild, data: Dict[str, Any]) -> None: raise NotImplementedError async def _move( self, position: int, parent_id: Optional[Any] = None, lock_permissions: bool = False, *, reason: Optional[str], ) -> None: if position < 0: raise InvalidArgument('Channel position cannot be less than 0.') http = self._state.http bucket = self._sorting_bucket channels: List[GuildChannel] = [c for c in self.guild.channels if c._sorting_bucket == bucket] channels.sort(key=lambda c: c.position) try: channels.remove(self) except ValueError: return else: index = next((i for i, c in enumerate(channels) if c.position >= position), len(channels)) channels.insert(index, self) payload = [] for index, c in enumerate(channels): d: Dict[str, Any] = {'id': c.id, 'position': index} if parent_id is not _undefined and c.id == self.id: d.update(parent_id=parent_id, lock_permissions=lock_permissions) payload.append(d) await http.bulk_channel_update(self.guild.id, payload, reason=reason) async def _edit(self, options: Dict[str, Any], reason: Optional[str]) -> Optional[ChannelPayload]: try: parent = options.pop('category') except KeyError: parent_id = _undefined else: parent_id = parent and parent.id try: options['rate_limit_per_user'] = options.pop('slowmode_delay') except KeyError: pass try: rtc_region = options.pop('rtc_region') except KeyError: pass else: options['rtc_region'] = None if rtc_region is None else str(rtc_region) try: video_quality_mode = options.pop('video_quality_mode') except KeyError: pass else: options['video_quality_mode'] = int(video_quality_mode) lock_permissions = options.pop('sync_permissions', False) try: position = options.pop('position') except KeyError: if parent_id is not _undefined: if lock_permissions: category = self.guild.get_channel(parent_id) if category: options['permission_overwrites'] = [c._asdict() for c in category._overwrites] options['parent_id'] = parent_id elif lock_permissions and self.category_id is not None: category = self.guild.get_channel(self.category_id) if category: options['permission_overwrites'] = [c._asdict() for c in category._overwrites] else: await self._move(position, parent_id=parent_id, lock_permissions=lock_permissions, reason=reason) overwrites = options.get('overwrites', None) if overwrites is not None: perms = [] for target, perm in overwrites.items(): if not isinstance(perm, PermissionOverwrite): raise InvalidArgument(f'Expected PermissionOverwrite received {perm.__class__.__name__}') allow, deny = perm.pair() payload = { 'allow': allow.value, 'deny': deny.value, 'id': target.id, } if isinstance(target, Role): payload['type'] = _Overwrites.ROLE else: payload['type'] = _Overwrites.MEMBER perms.append(payload) options['permission_overwrites'] = perms try: ch_type = options['type'] except KeyError: pass else: if not isinstance(ch_type, ChannelType): raise InvalidArgument('type field must be of type ChannelType') options['type'] = ch_type.value if options: return await self._state.http.edit_channel(self.id, reason=reason, **options) def _fill_overwrites(self, data: GuildChannelPayload) -> None: self._overwrites = [] everyone_index = 0 everyone_id = self.guild.id for index, overridden in enumerate(data.get('permission_overwrites', [])): overwrite = _Overwrites(overridden) self._overwrites.append(overwrite) if overwrite.type == _Overwrites.MEMBER: continue if overwrite.id == everyone_id: everyone_index = index tmp = self._overwrites 
if tmp: tmp[everyone_index], tmp[0] = tmp[0], tmp[everyone_index] @property
MIT License
yangfly/sfd.gluoncv
sfd/data/transform.py
bbox_iob
python
def bbox_iob(bbox_a, bbox_b):
    if bbox_a.shape[1] < 4 or bbox_b.shape[1] < 4:
        raise IndexError("Bounding boxes axis 1 must have at least length 4")
    tl = np.maximum(bbox_a[:, None, :2], bbox_b[:, :2])
    br = np.minimum(bbox_a[:, None, 2:4], bbox_b[:, 2:4]) + 1
    area_i = np.prod(br - tl, axis=2) * (tl < br).all(axis=2)
    area_a = np.prod(bbox_a[:, 2:4] - bbox_a[:, :2] + 1, axis=1)
    return area_i / area_a[:, None]
Calculate Intersection-over-Object (IOB) of two sets of bounding boxes.
Note: this is different from Fast R-CNN's bbox_overlaps and gluon-cv's bbox_iou.

Parameters
----------
bbox_a : numpy.ndarray, object bboxes
    An ndarray with shape :math:`(N, 4)`.
bbox_b : numpy.ndarray, crop bboxes
    An ndarray with shape :math:`(M, 4)`.

Returns
-------
numpy.ndarray
    An ndarray with shape :math:`(N, M)` holding the IOB between each pair
    of bounding boxes in `bbox_a` and `bbox_b`.
https://github.com/yangfly/sfd.gluoncv/blob/163cfe9870b2a82212672a6f47cb882957bdfb63/sfd/data/transform.py#L214-L240
from __future__ import division from __future__ import absolute_import import random import numpy as np import mxnet as mx from gluoncv.data.transforms import bbox as gbbox from gluoncv.data.transforms import image as gimage from gluoncv.data.transforms.experimental.image import random_color_distort __all__ = ['SFDTrainTransform', 'SFDValTransform'] class SFDTrainTransform(object): def __init__(self, width, height, anchors=None, iou_thresh=(0.35, 0.1), topk=6, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), **kwargs): self._width = width self._height = height self._anchors = anchors self._mean = mean self._std = std if anchors is None: return from nn import SFDTargetGenerator self._target_generator = SFDTargetGenerator(iou_thresh, topk, negative_mining_ratio=-1, **kwargs) def __call__(self, src, label): img = random_color_distort(src) h, w, _ = img.shape bbox, crop = random_crop_with_constraints(label, (w, h)) x0, y0, w, h = crop img = mx.image.fixed_crop(img, x0, y0, w, h) h, w, _ = img.shape interp = np.random.randint(0, 5) img = gimage.imresize(img, self._width, self._height, interp=interp) bbox = gbbox.resize(bbox, (w, h), (self._width, self._height)) h, w, _ = img.shape img, flips = gimage.random_flip(img, px=0.5) bbox = gbbox.flip(bbox, (w, h), flip_x=flips[0]) img = mx.nd.image.to_tensor(img) img = mx.nd.image.normalize(img, mean=self._mean, std=self._std) if self._anchors is None: return img, bbox gt_bboxes = mx.nd.array(bbox[:,:4]).expand_dims(0) gt_ids = mx.nd.zeros((1, gt_bboxes.shape[1], 1), dtype=gt_bboxes.dtype) cls_targets, box_targets, _ = self._target_generator( self._anchors, None, gt_bboxes, gt_ids) return img, cls_targets[0], box_targets[0] class SFDValTransform(object): def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)): self._mean = mean self._std = std def __call__(self, src, label): img = mx.nd.image.to_tensor(src) img = mx.nd.image.normalize(img, mean=self._mean, std=self._std) return img, mx.nd.array(label, dtype=img.dtype) def random_crop_with_constraints(bbox, size, min_scale=0.3, max_scale=1, min_object_overlap=0.95, min_aspect_ratio=0.9, max_aspect_ratio=1.1, max_trial=50, eps=1e-5): candidates = [] assert max_scale == 1, "required max_scale=1 but got {}".format(max_scale) mis, mas, mir, mar = min_scale, max_scale, min_aspect_ratio, max_aspect_ratio sample_params = [ [ 1, 1, 1, 1], [ 1, 1, mir, mar], [mis, mas, 1, 1], [mis, mas, mir, mar]] w, h = size for i in range(4): mis, mas, mir, mar = sample_params[i] for _ in range(max_trial): scale = random.uniform(mis, mas) aspect_ratio = random.uniform( max(mir, scale ** 2), min(mar, 1 / (scale ** 2))) if w >= h * aspect_ratio: crop_h = h * scale crop_w = crop_h * aspect_ratio else: crop_w = w * scale crop_h = crop_w / aspect_ratio crop_h, crop_w = int(crop_h), int(crop_w) crop_t = random.randrange(h - crop_h + 1) crop_l = random.randrange(w - crop_w + 1) crop_bb = np.array((crop_l, crop_t, crop_l + crop_w, crop_t + crop_h)) iob = bbox_iob(bbox, crop_bb[np.newaxis]).flatten() iob = iob[iob > 0] if len(iob) >= bbox.shape[0] * 0.75 and iob.min() >= min_object_overlap - eps: if i != 3: candidates.append((crop_l, crop_t, crop_w, crop_h)) else: candidates.extend([(crop_l, crop_t, crop_w, crop_h)]*6) break while candidates: crop = candidates.pop(np.random.randint(0, len(candidates))) new_bbox = gbbox.crop(bbox, crop, allow_outside_center=False) if new_bbox.size < 1: continue new_crop = (crop[0], crop[1], crop[2], crop[3]) return new_bbox, new_crop min_len = int(min(h, w) * 
random.uniform(min_scale, max_scale)) crop_h, crop_w = min_len, min_len for _ in range(max_trial): crop_t = random.randrange(h - crop_h + 1) crop_l = random.randrange(w - crop_w + 1) crop = (crop_l, crop_t, crop_w, crop_h) new_bbox = gbbox.crop(bbox, crop, allow_outside_center=False) if new_bbox.size >= bbox.size * 0.5: return new_bbox, crop return bbox, (0, 0, w, h)
Apache License 2.0
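A minimal usage sketch of bbox_iob (the sample boxes below are hypothetical, assuming the (x1, y1, x2, y2) pixel-coordinate convention used above):

import numpy as np
# two object boxes and one crop box, in (x1, y1, x2, y2) pixel coordinates
objects = np.array([[10, 10, 50, 50], [60, 60, 80, 80]])
crop = np.array([[0, 0, 55, 55]])
iob = bbox_iob(objects, crop)   # shape (2, 1)
# the first object lies fully inside the crop, so its IOB is 1.0;
# the second object does not overlap the crop at all, so its IOB is 0.0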
tatumdmortimer/popgen-stats
ancestralReconstruction.py
make_unaligned_fasta
python
def make_unaligned_fasta(dnaDirectory, groupsDict, coreGenes, genomes, og):
    print "Collecting core genes"
    def make_fasta(group):
        proteins = groupsDict[group]
        out = open(group + '/' + group + '.fasta', 'w')
        records = []
        outgroup_gene = og
        ingroup_genes = []
        for protein in proteins:
            seqID = protein.split('|')[0]
            if seqID in genomes:
                protein = protein.split('|')[1]
                newRec = seqRecordDict[protein]
                newRec.description = ""
                records.append(newRec)
                if og in newRec.id:
                    outgroup_gene = newRec.id
                else:
                    ingroup_genes.append(newRec.id)
        SeqIO.write(records, out, 'fasta')
        return (group, ingroup_genes, outgroup_gene)
    files = listdir_fullpath(dnaDirectory)
    seqRecordDict = {}
    seqIDs = []
    for f in files:
        handle = open(f, 'r')
        for record in SeqIO.parse(handle, 'fasta'):
            seqRecordDict[record.id] = record
    pool = ThreadPool(args.threads)
    seqIDs = pool.map(make_fasta, coreGenes)
    pool.close()
    pool.join()
    return seqIDs
Reads through files in provided directory to find gene sequences that match the proteins in the groups dictionary
https://github.com/tatumdmortimer/popgen-stats/blob/eecdc4b10ea860cfd49e4fd21daa3b93b009350d/ancestralReconstruction.py#L122-L156
import sys, os, argparse, subprocess, shlex, glob from datetime import datetime from multiprocessing import cpu_count from multiprocessing.dummy import Pool as ThreadPool from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from Bio.Alphabet import IUPAC TRANSLATOR_X_PATH = "/opt/PepPrograms/translatorx_vLocal.pl" LAZARUS_PATH = "/opt/PepPrograms/project-lazarus/lazarus.py" RAXML_PATH = "/opt/PepPrograms/standard-RAxML/raxmlHPC-PTHREADS-AVX" class FullPaths(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values))) def listdir_fullpath(d): return [os.path.join(d, f) for f in os.listdir(d)] def is_dir(dirname): if not os.path.isdir(dirname): msg = "{0} is not a directory".format(dirname) raise argparse.ArgumentTypeError(msg) else: return dirname def is_file(filename): if not os.path.isfile(filename): msg = "{0} is not a file".format(filename) raise argparse.ArgumentTypeError(msg) else: return filename def get_args(): parser = argparse.ArgumentParser(description='Ancestral reconstruction of\ core genome') parser.add_argument("groups", help="OrthoMCL groups file", action=FullPaths, type=is_file) parser.add_argument("genomes", help="File listing genomes to be included in\ the analysis- outgroup last", action=FullPaths, type=is_file) parser.add_argument("genes", help="Directory with .fasta files of nucleotide sequences for genomes", action=FullPaths, type=is_dir) parser.add_argument("-t", "--threads", help="Number of threads to use (default: 2)", type=int, default=2, choices=range(2, cpu_count())) return parser.parse_args() def check_paths(): for i in [TRANSLATOR_X_PATH, LAZARUS_PATH, RAXML_PATH]: if not os.path.isfile(i): msg = "{0} does not exist".format(i) print msg sys.exit() def call_with_log(cmd): cmd = cmd.format(**(kvmap)) logfile = open(wd + current_datetime+".log", "a+") logfile.write("Executing command: " + cmd + "\n") logfile.flush() ret = subprocess.call(shlex.split(cmd), stdout=logfile, stderr=logfile) if(ret != 0): print("Pipeline did not complete successfully. \n Command : \n\n" + cmd + "\n\n returned with non-zero code: " + str(ret)) logfile.close() def read_groups_file(inFileName): print "Reading groups file" inFile = open(inFileName, 'r') groupsDict = {} for line in inFile: line = line.strip() entries = line.split(':') groupName = entries[0] groupProteins = entries[1][1:].split(' ') groupsDict[groupName] = groupProteins inFile.close() print len(groupsDict) return groupsDict def get_core_genes(groupsDict, genomes): coreGenes = set() for group in groupsDict: genomeList = [] proteinList = groupsDict[group] for protein in proteinList: ids = protein.split('|') genomeID = ids[0] genomeList.append(genomeID) genomeSet = set(genomeList) if set(genomes.keys()).issubset(genomeSet): if len(genomeList) == len(genomeSet): coreGenes.add(group) print len(coreGenes) return coreGenes
MIT License
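For orientation, a hedged sketch of the input this function ultimately depends on: read_groups_file (in the surrounding module) splits each OrthoMCL groups line on ':' and on spaces, so a hypothetical groups line maps to the groupsDict entries consumed above roughly like this:

# hypothetical OrthoMCL groups line
line = "group0001: genomeA|prot1 genomeB|prot7"
entries = line.split(':')
groupsDict = {entries[0]: entries[1][1:].split(' ')}
# {'group0001': ['genomeA|prot1', 'genomeB|prot7']}
# each protein id is "<genome>|<protein>", which is why make_fasta splits on '|'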
warmdev/sublimeoutline
common.py
hijack_window
python
def hijack_window():
    settings = sublime.load_settings('outline.sublime-settings')
    command = settings.get("outline_hijack_new_window")
    if command:
        if command == "jump_list":
            sublime.set_timeout(lambda: sublime.windows()[-1].run_command("outline_jump_list"), 1)
        else:
            sublime.set_timeout(lambda: sublime.windows()[-1].run_command("outline", {"immediate": True}), 1)
Executed when the plugin loads or a new window opens; allows the file browser (FB) to open automatically in ST3
https://github.com/warmdev/sublimeoutline/blob/81867c52d2447bf03e3a10a269352f6d94408adc/common.py#L129-L139
from __future__ import print_function import re, os, fnmatch, sys, itertools import sublime from sublime import Region from os.path import isdir, join, basename if sublime.platform() == 'windows': import ctypes ST3 = int(sublime.version()) >= 3000 if ST3: MARK_OPTIONS = sublime.DRAW_NO_OUTLINE else: MARK_OPTIONS = 0 OS = sublime.platform() NT = OS == 'windows' LIN = OS == 'linux' OSX = OS == 'osx' RE_FILE = re.compile(r'^(\s*)([^\\//].*)$') PARENT_SYM = u"⠤" def first(seq, pred): return next((item for item in seq if pred(item)), None) def sort_nicely(names): convert = lambda text: int(text) if text.isdigit() else text.lower() alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] names.sort(key=alphanum_key) def print(*args, **kwargs): if not (ST3 or NT): args = (s.encode('utf-8') if isinstance(s, unicode) else str(s) for s in args) else: args = (s if isinstance(s, str if ST3 else unicode) else str(s) for s in args) sep, end = kwargs.get('sep', ' '), kwargs.get('end', '\n') sys.stdout.write(sep.join(s for s in args) + end) def set_proper_scheme(view): if view.settings().get('outline_rename_mode', False): outline_settings = sublime.load_settings('outline-rename-mode.sublime-settings') else: outline_settings = sublime.load_settings('outline.sublime-settings') if view.settings().get('color_scheme') == outline_settings.get('color_scheme'): return view.settings().set('color_scheme', outline_settings.get('color_scheme')) def calc_width(view): width = view.settings().get('outline_width', 0.3) if isinstance(width, float): width -= width//1 elif isinstance(width, int if ST3 else long): wport = view.viewport_extent()[0] width = 1 - round((wport - width) / wport, 2) if width >= 1: width = 0.9 else: sublime.error_message(u'FileBrowser:\n\noutline_width set to ' u'unacceptable type "%s", please change it.\n\n' u'Fallback to default 0.3 for now.' % type(width)) width = 0.3 return width or 0.1 def get_group(groups, nag): if groups <= 4 and nag < 2: group = 1 if nag == 0 else 0 elif groups == 4 and nag >= 2: group = 3 if nag == 2 else 2 else: group = nag - 1 return group def relative_path(rpath): if rpath: rpath = rpath[0] if rpath[~0] != os.sep: rpath = os.path.split(rpath)[0] + os.sep if rpath == os.sep: rpath = '' return rpath
MIT License
gepd/deviot
beginning/check_requirements.py
DeviotCheckRequirementsCommand.check_python
python
def check_python(self):
    logger.debug("check_python")
    global _version
    _version = self.get_python_version()
    if(_version == "0"):
        self.check_symlink()
    if(_version == "0"):
        logger.debug("no python detected")
        translate = I18n().translate
        msg = translate('deviot_need_python')
        btn = translate('button_download_python')
        url = 'https://www.python.org/downloads/'
        open_link = sublime.ok_cancel_dialog(msg, btn)
        if(open_link):
            sublime.run_command('open_url', {'url': url})
        return False
    return True
Python requirement: check whether Python 2 is installed
https://github.com/gepd/deviot/blob/150caea06108369b30210eb287a580fcff4904af/beginning/check_requirements.py#L82-L109
from __future__ import absolute_import import logging import sublime import sublime_plugin from re import sub from threading import Thread from ..api import deviot from ..libraries.I18n import I18n from ..libraries.syntax import Syntax from ..libraries.thread_progress import ThreadProgress _version = '' _symlink = 'python2' logger = deviot.create_logger('Deviot') class DeviotCheckRequirementsCommand(sublime_plugin.WindowCommand): def run(self): self.installed = deviot.get_sysetting('installed', False) if(not self.installed): deviot.set_logger_level('DEBUG') else: deviot.set_logger_level('ERROR') logger.debug("Command executed") thread = Thread(target=self.check) thread.start() ThreadProgress(thread, 'processing', '') def check(self): logger.debug("New thread started") logger.debug("Installed: %s", self.installed) if(bool(self.installed)): return Syntax() if(not self.check_python()): return from . import install_pio if(not self.check_pio()): install_pio.InstallPIO() else: install_pio.already_installed() def get_python_version(self, symlink='python'): logger.debug("get_python_version") version = "0" cmd = [symlink, "--version"] logger.debug("cmd: %s", cmd) out = deviot.run_command(cmd) logger.debug("output: %s", out) if(out[0] == 0): version = sub(r'\D', '', out[1]) logger.debug("return: %s", version) return version
Apache License 2.0
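As an aside, get_python_version (shown in the surrounding module) shells out to `python --version` and strips every non-digit character, so its return value is a bare digit string; a rough illustration with a hypothetical captured output:

from re import sub
# hypothetical (return_code, output) tuple from deviot.run_command
out = (0, "Python 2.7.18")
version = sub(r'\D', '', out[1])   # -> "2718"
# check_python() only tests whether the result is still the sentinel "0",
# i.e. whether any Python interpreter was found at all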
extensiveautomation/extensiveautomation-server
src/ea/serverengine/ProjectsManager.py
instance
python
def instance():
    return PM
Returns the singleton
https://github.com/extensiveautomation/extensiveautomation-server/blob/e4e7f3b76d8b94bf715c4345d29dd2b05bca92e5/src/ea/serverengine/ProjectsManager.py#L461-L465
import os import shutil from ea.serverengine import DbManager from ea.serverengine import UsersManager from ea.libs import Settings, Logger DEFAULT_PRJ_ID = "1" class ProjectsManager(Logger.ClassLogger): def __init__(self, context): self.tb_projects = 'projects' self.repoTests = '%s/%s' % (Settings.getDirExec(), Settings.get('Paths', 'tests')) self.context = context self.__cache = [] self.loadCache() self.info('Deploying folders projects and reserved folders...') self.createDirProjects() def loadCache(self): self.trace("Updating memory cache with projects from database") code, projects_list = self.getProjectsFromDB() if code == self.context.CODE_ERROR: raise Exception("Unable to get projects from database") self.__cache = projects_list self.trace("Projects cache Size=%s" % len(self.__cache)) def cache(self): return self.__cache def createDirProjects(self): self.trace("creating projects folders if missing") code, projects_list = self.getProjectsFromDB() if code != self.context.CODE_OK: return for prj in projects_list: if not os.path.exists("%s/%s" % (self.repoTests, prj["id"])): os.mkdir("%s/%s" % (self.repoTests, prj["id"])) def getProjects(self, user): if user not in UsersManager.instance().cache(): self.error('Get project for Login=%s not found in cache' % (user)) return False if UsersManager.instance().cache()[user]['administrator']: projects_list = [] for p in self.cache(): projects_dict = {} projects_dict['project_id'] = int(p['id']) projects_dict['name'] = p['name'] projects_list.append(projects_dict) else: user_projects = UsersManager.instance().cache()[user]['projects'] projects_list = [] for p in user_projects: projects_dict = {} projects_dict['project_id'] = int(p) projects_dict['name'] = self.getProjectName(prjId=int(p)) projects_list.append(projects_dict) return projects_list def checkProjectsAuthorization(self, user, projectId): if user not in UsersManager.instance().cache(): self.error( 'Check project access for Login=%s not found in cache' % (user)) return False granted = False user_profile = UsersManager.instance().cache()[user] if user_profile['administrator']: granted=True else: for p in user_profile['projects']: if int(p) == int(projectId): granted = True break self.trace('Check project access for Login=%s and ProjectID=%s Result=%s' % (user, projectId, str(granted) ) ) return granted def getDefaultProjectForUser(self, user): pid = 1 found = False for u, profile in UsersManager.instance().cache().items(): if u == user: pid = profile['defaultproject'] found = True break if not found: self.error('no default project returned for User=%s' % user) self.trace('Get default project for User=%s Result=%s' % (user, pid)) return pid def getProjectID(self, name): pid = 0 found = False for p in self.cache(): if p["name"] == name: pid = p["id"] found = True break if not found: self.error('no project id returned with name=%s' % name) self.trace('Get project ID with Name=%s Result=%s ' % (name, pid)) return pid def getProjectName(self, prjId): prjName = 'Common' found = False for p in self.cache(): if int(p["id"]) == int(prjId): prjName = p["name"] found = True break if not found: self.error('no project name returned with id=%s' % prjId) self.trace('Get project name with Id=%s Result=%s' % (prjId, prjName)) return prjName def addProject(self, prjId): self.trace('creating the project %s' % prjId) ret = False try: res = os.path.exists("%s/%s" % (self.repoTests, prjId)) if res: self.trace('project %s already exist' % prjId) ret = False else: try: os.mkdir("%s/%s" % (self.repoTests, prjId)) 
os.mkdir("%s/%s/@Recycle" % (self.repoTests, prjId)) os.mkdir("%s/%s/@Sandbox" % (self.repoTests, prjId)) except Exception as e: self.error( "unable to create the project %s: %s" % (prjId, str(e))) ret = False else: ret = True except Exception as e: self.error("add project error: %s" % str(e)) return ret def delProject(self, prjId): self.trace('deleting the project %s' % prjId) ret = False try: res = os.path.exists("%s/%s" % (self.repoTests, prjId)) if not res: self.trace('project %s does not exist' % prjId) ret = False else: shutil.rmtree("%s/%s" % (self.repoTests, prjId)) ret = True except OSError as e: self.trace(e) ret = False except Exception as e: self.error("del project error: %s" % str(e)) ret = False return ret def addProjectToDB(self, projectName): if not len(projectName): self.error("project name is empty") return (self.context.CODE_ERROR, "project name is empty") sql = """SELECT * FROM `%s` WHERE name=?""" % (self.tb_projects) success, dbRows = DbManager.instance().querySQL(query=sql, arg1=projectName) if not success: self.error("unable to read project's table") return (self.context.CODE_ERROR, "unable to read project's table") if len(dbRows): return (self.context.CODE_ALREADY_EXISTS, "this name (%s) already exists" % projectName) sql = """INSERT INTO `%s`(`name`, `active`) VALUES(?, '1')""" % ( self.tb_projects) success, lastRowId = DbManager.instance().querySQL(query=sql, insertData=True, arg1=projectName) if not success: self.error("unable to insert project") return (self.context.CODE_ERROR, "unable to insert project") added = self.addProject(prjId=int(lastRowId)) if not added: self.error("unable to add project") return (self.context.CODE_ERROR, "unable to add project") self.loadCache() return (self.context.CODE_OK, "%s" % int(lastRowId)) def updateProjectFromDB(self, projectName, projectId): projectId = str(projectId) if int(projectId) == 1: self.error("update the default project not authorized") return (self.context.CODE_ERROR, "update the default project is not authorized") if not len(projectName): self.error("project name is empty") return (self.context.CODE_ERROR, "the project name is empty") sql = """SELECT * FROM `%s` WHERE id=?""" % (self.tb_projects) success, dbRows = DbManager.instance().querySQL(query=sql, arg1=projectId) if not success: self.error("unable to read project id") return (self.context.CODE_ERROR, "unable to read project id") if not len(dbRows): return (self.context.CODE_NOT_FOUND, "this project id does not exist") sql = """SELECT * FROM `%s` WHERE name=?""" % (self.tb_projects) success, dbRows = DbManager.instance().querySQL(query=sql, arg1=projectName) if not success: self.error("unable to read project's table") return (self.context.CODE_ERROR, "unable to read project's table") if len(dbRows): return (self.context.CODE_ALREADY_EXISTS, "this name already exists") sql = """UPDATE `%s` SET name=? 
WHERE id=?""" % (self.tb_projects) success, _ = DbManager.instance().querySQL(query=sql, arg1=projectName, arg2=projectId) if not success: self.error("unable to update project by id") return (self.context.CODE_ERROR, "unable to update project by id") self.loadCache() return (self.context.CODE_OK, "") def delProjectFromDB(self, projectId): projectId = str(projectId) if int(projectId) == 1: self.error("delete the default project not authorized") return (self.context.CODE_ERROR, "delete the default project not authorized") sql = """SELECT * FROM `%s` WHERE id=?""" % (self.tb_projects) success, dbRows = DbManager.instance().querySQL(query=sql, arg1=projectId) if not success: self.error("unable to read project id") return (self.context.CODE_ERROR, "unable to read project id") if not len(dbRows): return (self.context.CODE_NOT_FOUND, "this project id does not exist") sql = """SELECT COUNT(*) as nbrelation FROM `relations-projects` WHERE project_id=?""" success, dbRows = DbManager.instance().querySQL( query=sql, columnName=True, arg1=projectId) if not success: self.error("unable to read project relations") return (self.context.CODE_ERROR, "unable to read project relations") if dbRows[0]["nbrelation"]: msg = "unable to remove project because linked to user(s)=%s" % dbRows[ 0]["nbrelation"] return (self.context.CODE_ERROR, msg) sql = """DELETE FROM `%s` WHERE id=?""" % (self.tb_projects) success, _ = DbManager.instance().querySQL(query=sql, arg1=projectId) if not success: self.error("unable to remove project by id") return (self.context.CODE_ERROR, "unable to remove project by id") deleted = self.delProject(prjId=int(projectId)) if not deleted: self.error("unable to delete project") return (self.context.CODE_ERROR, "unable to delete project") self.loadCache() return (self.context.CODE_OK, "") def getProjectsFromDB(self): sql = """SELECT * FROM `%s`""" % (self.tb_projects) success, dbRows = DbManager.instance().querySQL(query=sql, columnName=True) if not success: self.error("unable to read project's table") return (self.context.CODE_ERROR, []) return (self.context.CODE_OK, dbRows) def getProjectFromDB(self, projectName=None, projectId=None): sql_args = () sql = """SELECT * FROM `%s`""" % (self.tb_projects) sql += """ WHERE """ if projectName is not None: sql += """name LIKE ?""" sql_args += ("%%%s%%" % projectName,) if projectId is not None: sql += """ id=?""" sql_args += (projectId,) success, dbRows = DbManager.instance().querySQL(query=sql, columnName=True, args=sql_args) if not success: self.error("unable to search project table") return (self.context.CODE_ERROR, "unable to search project table") return (self.context.CODE_OK, dbRows) PM = None
MIT License
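To show how this module-level singleton is typically wired up (the initialize helper below is hypothetical; only `PM = None` and `instance()` appear in the excerpt above):

PM = None

def initialize(context):
    # hypothetical one-time setup, run at server start
    global PM
    PM = ProjectsManager(context=context)

# afterwards any caller can do:
# from ea.serverengine import ProjectsManager
# ProjectsManager.instance().getProjects(user="admin")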
cogent3/cogent3
src/cogent3/parse/nexus.py
parse_nexus_tree
python
def parse_nexus_tree(tree_f):
    trans_table = None
    tree_info = get_tree_info(tree_f)
    check_tree_info(tree_info)
    header_s, trans_table_s, dnd_s = split_tree_info(tree_info)
    if trans_table_s:
        trans_table = parse_trans_table(trans_table_s)
    dnd = parse_dnd(dnd_s)
    return trans_table, dnd
Returns a dict mapping taxon number to name (from the translation table) and a dict mapping tree name to dnd string; takes a handle to a Nexus-formatted file as input
https://github.com/cogent3/cogent3/blob/3d98bddc0aef2bf7fea21b9a89de76b01f3d2da8/src/cogent3/parse/nexus.py#L27-L38
import re
from collections import defaultdict
from cogent3.parse.record import RecordError
from cogent3.util.io import open_

__author__ = "Catherine Lozupone"
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = ["Catherine Lozuopone", "Rob Knight", "Micah Hamady", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2021.10.12a1"
__maintainer__ = "Catherine Lozupone"
__email__ = "lozupone@colorado.edu"
__status__ = "Production"

strip = str.strip
BSD 3-Clause New or Revised License
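A minimal, hypothetical call site (the file name and its contents are assumed; the helper parsers it relies on live elsewhere in cogent3.parse.nexus):

from cogent3.parse.nexus import parse_nexus_tree

with open("example.nex") as tree_f:          # hypothetical Nexus file
    trans_table, dnd = parse_nexus_tree(tree_f)
# trans_table maps taxon numbers to names (or is None when there is no
# translation block); dnd maps each tree name to its dnd/newick string
for name, newick in dnd.items():
    print(name, newick[:40])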
dynamicyieldprojects/funnel-rocket
frocket/common/validation/query_validator.py
QueryValidator._validate_timeframe
python
def _validate_timeframe(self, dataset_mintime: float = None, dataset_maxtime: float = None):
    def validate_scale(ts_in_query, ts_in_dataset, dataset_ts_name: str):
        if len(str(int(ts_in_query))) != len(str(int(ts_in_dataset))):
            message = f"Given timestamp {ts_in_query} doesn't appear to be in same scale as " \
                      f"{dataset_ts_name} ({ts_in_dataset}). Query timeframe is: {timeframe}"
            raise QueryValidationError(message, kind=ValidationErrorKind.TYPE_MISMATCH)
    timeframe = self._expanded_query.get('timeframe', None)
    if not timeframe:
        return
    fromtime = timeframe.get('from', None)
    totime = timeframe.get('to', None)
    if fromtime is not None and dataset_mintime is not None:
        validate_scale(fromtime, dataset_mintime, 'minimum timestamp in dataset')
    if totime is not None and dataset_maxtime is not None:
        validate_scale(totime, dataset_maxtime, 'maximum timestamp in dataset')
    if fromtime is not None and totime is not None:
        if totime <= fromtime:
            raise QueryValidationError(f"Value of 'to' (exclusive) should be larger than 'from' (inclusive) "
                                       f"in query timeframe: {timeframe}", kind=ValidationErrorKind.SCHEMA)
If a timeframe object is included in the query (either 'from', 'to', or both), validate that to > from, and also that the timestamps appear consistent with the dataset's timestamps.
Currently, Funnel Rocket itself is not opinionated regarding timestamp resolution (seconds? milliseconds? second fractions as decimal places after the dot?), and only validates that the number of digits looks right.
TODO backlog: (optional?) strict timestamp specification
TODO backlog: when warnings are supported, warn the client when the query timeframe lies fully outside the dataset timeframe
https://github.com/dynamicyieldprojects/funnel-rocket/blob/70963fddc0881cebdc6da1af2654d412f95d660c/frocket/common/validation/query_validator.py#L268-L299
import argparse import json import logging import sys from typing import List, Union import difflib import jsonschema from frocket.common.helpers.utils import terminal_green, terminal_red from frocket.common.validation.result import QueryValidationResult from frocket.common.validation.error import ValidationErrorKind, QueryValidationError from frocket.datastore.registered_datastores import get_datastore from frocket.common.dataset import DatasetColumnType, DatasetInfo, DatasetShortSchema from frocket.common.validation.consts import QUERY_SCHEMA, AGGREGATIONS_PATHS, SINGLE_FILTER_PATHS, FILTER_ARRAY_PATHS, VALID_IDENTIFIER_PATTERN, UNIQUE_IDENTIFIER_SCOPES, OPERATORS_BY_COLTYPE, VALUE_TYPES_BY_COLTYPE, NUMERIC_COLTYPES, RELATION_OPS, DEFAULT_RELATION_OP, CONDITION_COLUMN_PREFIX, map_condition_names from frocket.common.validation.relation_parser import RelationParser from frocket.common.validation.visitor_functions import _to_verbose_filter, _to_verbose_target, _add_default_target, _validate_aggregation, _expand_aggregations, _validate_or_set_include_zero from frocket.common.validation.path_visitor import PathVisitor, PathVisitorCallback from frocket.engine.relation_to_pandas import relation_to_pandas_query logger = logging.getLogger(__name__) class QueryValidator: def __init__(self, source_query: dict, dataset: DatasetInfo = None, short_schema: DatasetShortSchema = None): self._source_query = source_query self._expanded_query = None self._dataset = dataset self._short_schema = short_schema self._used_columns = None self._condition_mapping = None self._used_conditions = None self._relation_elements = None def expand_and_validate(self, schema_only: bool = False) -> QueryValidationResult: try: if not schema_only and not (self._dataset and self._short_schema): raise QueryValidationError( message="Provide a dataset and its short schema, or use schema_only=True", kind=ValidationErrorKind.INVALID_ARGUMENTS) try: jsonschema.validate(instance=self._source_query, schema=QUERY_SCHEMA) except jsonschema.exceptions.ValidationError as ve: raise QueryValidationError(message=ve.message, kind=ValidationErrorKind.SCHEMA) self._expanded_query = json.loads(json.dumps(self._source_query)) self._validate_and_expand_schema() if not schema_only: self._validate_columns() self._validate_timeframe(dataset_mintime=self._short_schema.min_timestamp, dataset_maxtime=self._short_schema.max_timestamp) return QueryValidationResult(success=True, source_query=self._source_query, expanded_query=self._expanded_query, used_columns=self._used_columns, used_conditions=self._used_conditions, named_conditions=self._condition_mapping.names, relation_elements=self._relation_elements, warnings=None) except Exception as e: if not isinstance(e, QueryValidationError): logger.exception("Unexpected error") return QueryValidationResult.from_exception(e, self._source_query) def _validate_and_expand_schema(self): self._modify(paths=SINGLE_FILTER_PATHS, list_to_items=False, func=_to_verbose_filter) self._modify(paths='query.conditions.target', list_to_items=False, func=_to_verbose_target) self._visit(paths='query.conditions', func=_add_default_target) self._modify(paths=AGGREGATIONS_PATHS, list_to_items=False, func=_expand_aggregations) self._visit(paths='query.conditions', func=_validate_or_set_include_zero) self._visit(paths=AGGREGATIONS_PATHS, func=_validate_aggregation) for scope in UNIQUE_IDENTIFIER_SCOPES: self._validate_unique_identifiers(scope) self._validate_condition_names() self._expand_and_parse_relation() self._validate_timeframe() 
jsonschema.validate(instance=self._expanded_query, schema=QUERY_SCHEMA) def _visit(self, paths: Union[str, List[str]], func: PathVisitorCallback, modifiable: bool = False, list_to_items: bool = True): if type(paths) is str: paths = [paths] for path in paths: p = PathVisitor(root=self._expanded_query, path=path, modifiable=modifiable, list_to_items=list_to_items) p.visit(func) def _modify(self, paths: Union[str, List[str]], list_to_items: bool, func: PathVisitorCallback) -> None: self._visit(paths, modifiable=True, list_to_items=list_to_items, func=func) def _collect(self, paths: Union[str, List[str]], list_to_items: bool = True) -> list: results = [] self._visit(paths, modifiable=False, list_to_items=list_to_items, func=lambda v: results.append(v)) return results def _validate_unique_identifiers(self, scope: str) -> None: def add_identifier(v): if v: if v in seen_values: raise QueryValidationError(message=f"Identifier name '{v}' is not unique in scope '{scope}'", kind=ValidationErrorKind.SCHEMA) elif not VALID_IDENTIFIER_PATTERN.match(v): raise QueryValidationError(message=f"Invalid identifier name '{v}'", kind=ValidationErrorKind.SCHEMA) seen_values.add(v) seen_values = set() self._visit(paths=scope, modifiable=False, list_to_items=True, func=add_identifier) def _validate_columns(self): used_columns = [self._dataset.group_id_column, self._dataset.timestamp_column] all_filters = self._collect(paths=SINGLE_FILTER_PATHS, list_to_items=False) + self._collect(paths=FILTER_ARRAY_PATHS, list_to_items=True) for f in all_filters: self._validate_column(name=f['column'], op=f['op'], value=f['value']) used_columns.append(f['column']) for name in self._collect(paths=[f"{path}.column" for path in AGGREGATIONS_PATHS]): self._validate_column(name) used_columns.append(name) sum_by_column_paths = ["query.conditions.target.column"] + [f"{path}.otherColumn" for path in AGGREGATIONS_PATHS] sum_by_columns = self._collect(paths=sum_by_column_paths) for name in sum_by_columns: self._validate_column(name=name, expected_types=NUMERIC_COLTYPES) used_columns.append(name) self._used_columns = list(set(used_columns)) def _validate_column(self, name: str, expected_types: List[DatasetColumnType] = None, op: str = None, value: Union[int, float, str, bool] = None): coltype = self._short_schema.columns.get(name, None) if not coltype: raise QueryValidationError(f"Column '{name}' not in dataset", kind=ValidationErrorKind.DATASET_MISMATCH) if expected_types and coltype not in expected_types: raise QueryValidationError(f"Column '{name}' is of type {coltype.value}, but should be one of " f"{[t.value for t in expected_types]}", kind=ValidationErrorKind.TYPE_MISMATCH) if op: if op not in OPERATORS_BY_COLTYPE[coltype]: raise QueryValidationError(f"Operator '{op}' is not applicable for column '{name}' " f"of type {coltype.value}", kind=ValidationErrorKind.TYPE_MISMATCH) if value is not None: assert op if type(value) not in VALUE_TYPES_BY_COLTYPE[coltype]: value_as_str = (f'"{value}"' if type(value) is str else str(value)) + f" (type {type(value).__name__.upper()})" raise QueryValidationError(f"Value {value_as_str} " f"is not applicable for column '{name}' of type {coltype.value}", kind=ValidationErrorKind.TYPE_MISMATCH) def _validate_condition_names(self): self._condition_mapping = map_condition_names(self._expanded_query) for name in self._condition_mapping.names.keys(): if name in RELATION_OPS: raise QueryValidationError(f"'{name}' is not a valid condition name", kind=ValidationErrorKind.SCHEMA) def 
_expand_and_parse_relation(self): relation = None simple_op = None found_relations = self._collect('query.relation') assert len(found_relations) in [0, 1] if not found_relations: if not self._collect('query.conditions'): return else: simple_op = DEFAULT_RELATION_OP self._expanded_query['query']['relation'] = simple_op else: relation = found_relations[0].strip().lower() if relation in RELATION_OPS: simple_op = relation if simple_op: all_condition_ids = [f"${i}" for i in range(self._condition_mapping.count)] relation = f" {simple_op} ".join(all_condition_ids) self._expanded_query['query']['relation'] = relation if len(relation) == 0: raise QueryValidationError("Relation cannot be an empty string", kind=ValidationErrorKind.RELATION) parser = RelationParser(self._expanded_query) self._relation_elements = parser.parse() self._used_conditions = parser.used_conditions
Apache License 2.0
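To make the digit-count scale heuristic above concrete (the values are hypothetical): a query timestamp given in seconds, checked against a dataset whose timestamps are in milliseconds, fails the comparison and triggers TYPE_MISMATCH:

ts_in_query = 1611000000        # seconds since the epoch -> 10 digits
ts_in_dataset = 1611000000000   # dataset stores milliseconds -> 13 digits
len(str(int(ts_in_query))) != len(str(int(ts_in_dataset)))   # True, so the error is raised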
michael-lazar/rtv
rtv/submission_page.py
SubmissionPage.comment_urlview
python
def comment_urlview(self):
    data = self.get_selected_item()
    comment = data.get('body') or data.get('text') or data.get('url_full')
    if comment:
        self.term.open_urlview(comment)
    else:
        self.term.flash()
Open the selected comment with the URL viewer
https://github.com/michael-lazar/rtv/blob/b3d5bf16a70dba685e05db35308cc8a6d2b7f7aa/rtv/submission_page.py#L181-L190
from __future__ import unicode_literals from . import docs from .content import SubmissionContent from .page import Page, PageController, logged_in from .objects import Navigator, Command class SubmissionController(PageController): character_map = {} class SubmissionPage(Page): BANNER = docs.BANNER_SUBMISSION FOOTER = docs.FOOTER_SUBMISSION name = 'submission' def __init__(self, reddit, term, config, oauth, url=None, submission=None): super(SubmissionPage, self).__init__(reddit, term, config, oauth) self.controller = SubmissionController(self, keymap=config.keymap) if url: self.content = SubmissionContent.from_url( reddit, url, term.loader, max_comment_cols=config['max_comment_cols']) else: self.content = SubmissionContent( submission, term.loader, max_comment_cols=config['max_comment_cols']) self.nav = Navigator(self.content.get, page_index=-1) def handle_selected_page(self): if not self.selected_page: pass elif self.selected_page.name == 'subscription': self.selected_page = self.selected_page.loop() elif self.selected_page.name in ('subreddit', 'submission', 'inbox'): self.active = False else: raise RuntimeError(self.selected_page.name) def refresh_content(self, order=None, name=None): order = order or self.content.order url = name or self.content.name if order == 'ignore': order = None with self.term.loader('Refreshing page'): self.content = SubmissionContent.from_url( self.reddit, url, self.term.loader, order=order, max_comment_cols=self.config['max_comment_cols']) if not self.term.loader.exception: self.nav = Navigator(self.content.get, page_index=-1) @SubmissionController.register(Command('SORT_1')) def sort_content_hot(self): self.refresh_content(order='hot') @SubmissionController.register(Command('SORT_2')) def sort_content_top(self): self.refresh_content(order='top') @SubmissionController.register(Command('SORT_3')) def sort_content_rising(self): self.refresh_content(order='rising') @SubmissionController.register(Command('SORT_4')) def sort_content_new(self): self.refresh_content(order='new') @SubmissionController.register(Command('SORT_5')) def sort_content_controversial(self): self.refresh_content(order='controversial') @SubmissionController.register(Command('SUBMISSION_TOGGLE_COMMENT')) def toggle_comment(self): current_index = self.nav.absolute_index self.content.toggle(current_index) if self.nav.inverted: data = self.content.get(current_index) if data['hidden'] or self.nav.cursor_index != 0: window = self._subwindows[-1][0] n_rows, _ = window.getmaxyx() self.nav.flip(len(self._subwindows) - 1) self.nav.top_item_height = n_rows @SubmissionController.register(Command('SUBMISSION_EXIT')) def exit_submission(self): self.active = False @SubmissionController.register(Command('SUBMISSION_OPEN_IN_BROWSER')) def open_link(self): data = self.get_selected_item() if data['type'] == 'Submission': link = self.prompt_and_select_link() if link: self.config.history.add(link) self.term.open_link(link) elif data['type'] == 'Comment': link = self.prompt_and_select_link() if link: self.term.open_link(link) else: self.term.flash() @SubmissionController.register(Command('SUBMISSION_OPEN_IN_PAGER')) def open_pager(self): n_rows, n_cols = self.term.stdscr.getmaxyx() if self.config['max_pager_cols'] is not None: n_cols = min(n_cols, self.config['max_pager_cols']) data = self.get_selected_item() if data['type'] == 'Submission': text = '\n\n'.join((data['permalink'], data['text'])) self.term.open_pager(text, wrap=n_cols) elif data['type'] == 'Comment': text = '\n\n'.join((data['permalink'], 
data['body'])) self.term.open_pager(text, wrap=n_cols) else: self.term.flash() @SubmissionController.register(Command('SUBMISSION_POST')) @logged_in def add_comment(self): self.reply() @SubmissionController.register(Command('DELETE')) @logged_in def delete_comment(self): if self.get_selected_item()['type'] == 'Comment': self.delete_item() else: self.term.flash() @SubmissionController.register(Command('SUBMISSION_OPEN_IN_URLVIEWER'))
MIT License
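A quick note on the fallback chain above: whichever of body, text, or url_full is first non-empty wins (the selected-item dict below is hypothetical):

data = {'body': None, 'text': '', 'url_full': 'https://example.com/article'}
comment = data.get('body') or data.get('text') or data.get('url_full')
# comment == 'https://example.com/article'; if all three fields were empty,
# comment would be falsy and term.flash() would be called instead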
whbrewer/spc
src/spc/user_data.py
more
python
def more():
    user = root.authorized()
    app = request.query.app
    cid = request.query.cid
    filepath = request.query.filepath
    path_list = filepath.split("/")
    owner = path_list[1]
    if re.search("/", cid):
        (_, c) = cid.split("/")
    else:
        c = cid
    shared = jobs(cid=c).shared
    if owner != user and shared != "True" and user != "admin":
        return template('error', err="access forbidden")
    contents = slurp_file(filepath)
    contents = cgi.escape(contents)
    params = {'cid': c, 'contents': contents, 'app': app,
              'user': user, 'fn': filepath}
    return template('more', params)
given a request with the query attributes app, cid, and filepath, output the file's contents to the browser
https://github.com/whbrewer/spc/blob/859f15e0fcb3f5f7d84d420f4757ae0a42c5837a/src/spc/user_data.py#L44-L71
from bottle import Bottle, request, template, redirect, static_file import os, re, sys, shutil, urllib, traceback, cgi, time, json import argparse as ap try: import requests except: print "INFO: not importing requests... only needed for remote workers" from common import slurp_file from model import db, users, jobs import config user_dir = 'user_data' upload_dir = '_uploads' routes = Bottle() def bind(app): global root root = ap.Namespace(**app) @routes.get('/' + user_dir + '/<filepath:path>') def get_user_data(filepath): user = root.authorized() path_list = filepath.split("/") owner = path_list[0] cid = path_list[2] shared = jobs(cid=cid).shared if owner != user and shared != "True" and user != "admin": return template('error', err="access forbidden") return static_file(filepath, root=user_dir) @routes.get('/more')
MIT License
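For clarity on the access check above: the filepath is expected to start with the user_data directory followed by the owning user, and cid may be namespaced with a slash (the path and ids below are hypothetical):

filepath = "user_data/alice/myapp/4f2a/output.txt"   # hypothetical
owner = filepath.split("/")[1]                        # "alice"
cid = "alice/4f2a"
(_, c) = cid.split("/") if "/" in cid else (None, cid)
# c == "4f2a"; access is granted if owner == user, the job is shared,
# or the requesting user is "admin"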
msu-mlsys-lab/arch2vec
search_methods/reinforce_darts.py
Env.get_init_state
python
def get_init_state(self):
    rand_indices = random.randint(0, self.features.shape[0])
    self.visited[rand_indices] = True
    return self.features[rand_indices], self.genotype[rand_indices]
:return: 1 x dim
https://github.com/msu-mlsys-lab/arch2vec/blob/ea01b0cf1295305596ee3c05fa1b6eb14e303512/search_methods/reinforce_darts.py#L70-L76
import os import sys sys.path.insert(0, os.getcwd()) import argparse import json import random import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from models.pretraining_nasbench101 import configs from utils.utils import load_json, preprocessing, one_hot_darts from preprocessing.gen_isomorphism_graphs import process from models.model import Model from torch.distributions import MultivariateNormal from darts.cnn.train_search import Train class Env(object): def __init__(self, name, seed, cfg, data_path=None, save=False): self.name = name self.seed = seed self.model = Model(input_dim=args.input_dim, hidden_dim=args.hidden_dim, latent_dim=args.dim, num_hops=args.hops, num_mlp_layers=args.mlps, dropout=args.dropout, **cfg['GAE']).cuda() self.dir_name = 'pretrained/dim-{}'.format(args.dim) if not os.path.exists(os.path.join(self.dir_name, 'model-darts.pt')): exit() self.model.load_state_dict(torch.load(os.path.join(self.dir_name, 'model-darts.pt').format(args.dim))['model_state']) self.visited = {} self.features = [] self.genotype = [] self.embedding = {} self._reset(data_path, save) def _reset(self, data_path, save): if not save: print("extract arch2vec on DARTS search space ...") dataset = load_json(data_path) print("length of the dataset: {}".format(len(dataset))) self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt') if os.path.exists(self.f_path): print('{} is already saved'.format(self.f_path)) exit() print('save to {}'.format(self.f_path)) counter = 0 self.model.eval() for k, v in dataset.items(): adj = torch.Tensor(v[0]).unsqueeze(0).cuda() ops = torch.Tensor(one_hot_darts(v[1])).unsqueeze(0).cuda() adj, ops, prep_reverse = preprocessing(adj, ops, **cfg['prep']) with torch.no_grad(): x, _ = self.model._encoder(ops, adj) self.embedding[counter] = {'feature': x.squeeze(0).mean(dim=0).cpu(), 'genotype': process(v[2])} print("{}/{}".format(counter, len(dataset))) counter += 1 torch.save(self.embedding, self.f_path) print("finished arch2vec extraction") exit() else: self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt') print("load arch2vec from: {}".format(self.f_path)) self.embedding = torch.load(self.f_path) for ind in range(len(self.embedding)): self.features.append(self.embedding[ind]['feature']) self.genotype.append(self.embedding[ind]['genotype']) self.features = torch.stack(self.features, dim=0) print('loading finished. pretrained embeddings shape: {}'.format(self.features.shape))
Apache License 2.0
lvtk/lvtk
waflib/Node.py
Node.srcpath
python
def srcpath(self):
    return self.path_from(self.ctx.srcnode)
Returns the relative path seen from the source directory ``../src/foo.cpp`` :rtype: string
https://github.com/lvtk/lvtk/blob/c9e351c480c7f335ced85cbe1ce599e43ae72d4c/waflib/Node.py#L894-L900
import os, re, sys, shutil from waflib import Utils, Errors exclude_regs = ''' **/*~ **/#*# **/.#* **/%*% **/._* **/*.swp **/CVS **/CVS/** **/.cvsignore **/SCCS **/SCCS/** **/vssver.scc **/.svn **/.svn/** **/BitKeeper **/.git **/.git/** **/.gitignore **/.bzr **/.bzrignore **/.bzr/** **/.hg **/.hg/** **/_MTN **/_MTN/** **/.arch-ids **/{arch} **/_darcs **/_darcs/** **/.intlcache **/.DS_Store''' def ant_matcher(s, ignorecase): reflags = re.I if ignorecase else 0 ret = [] for x in Utils.to_list(s): x = x.replace('\\', '/').replace('//', '/') if x.endswith('/'): x += '**' accu = [] for k in x.split('/'): if k == '**': accu.append(k) else: k = k.replace('.', '[.]').replace('*', '.*').replace('?', '.').replace('+', '\\+') k = '^%s$' % k try: exp = re.compile(k, flags=reflags) except Exception as e: raise Errors.WafError('Invalid pattern: %s' % k, e) else: accu.append(exp) ret.append(accu) return ret def ant_sub_filter(name, nn): ret = [] for lst in nn: if not lst: pass elif lst[0] == '**': ret.append(lst) if len(lst) > 1: if lst[1].match(name): ret.append(lst[2:]) else: ret.append([]) elif lst[0].match(name): ret.append(lst[1:]) return ret def ant_sub_matcher(name, pats): nacc = ant_sub_filter(name, pats[0]) nrej = ant_sub_filter(name, pats[1]) if [] in nrej: nacc = [] return [nacc, nrej] class Node(object): dict_class = dict __slots__ = ('name', 'parent', 'children', 'cache_abspath', 'cache_isdir') def __init__(self, name, parent): self.name = name self.parent = parent if parent: if name in parent.children: raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent)) parent.children[name] = self def __setstate__(self, data): self.name = data[0] self.parent = data[1] if data[2] is not None: self.children = self.dict_class(data[2]) def __getstate__(self): return (self.name, self.parent, getattr(self, 'children', None)) def __str__(self): return self.abspath() def __repr__(self): return self.abspath() def __copy__(self): raise Errors.WafError('nodes are not supposed to be copied') def read(self, flags='r', encoding='latin-1'): return Utils.readf(self.abspath(), flags, encoding) def write(self, data, flags='w', encoding='latin-1'): Utils.writef(self.abspath(), data, flags, encoding) def read_json(self, convert=True, encoding='utf-8'): import json object_pairs_hook = None if convert and sys.hexversion < 0x3000000: try: _type = unicode except NameError: _type = str def convert(value): if isinstance(value, list): return [convert(element) for element in value] elif isinstance(value, _type): return str(value) else: return value def object_pairs(pairs): return dict((str(pair[0]), convert(pair[1])) for pair in pairs) object_pairs_hook = object_pairs return json.loads(self.read(encoding=encoding), object_pairs_hook=object_pairs_hook) def write_json(self, data, pretty=True): import json indent = 2 separators = (',', ': ') sort_keys = pretty newline = os.linesep if not pretty: indent = None separators = (',', ':') newline = '' output = json.dumps(data, indent=indent, separators=separators, sort_keys=sort_keys) + newline self.write(output, encoding='utf-8') def exists(self): return os.path.exists(self.abspath()) def isdir(self): return os.path.isdir(self.abspath()) def chmod(self, val): os.chmod(self.abspath(), val) def delete(self, evict=True): try: try: if os.path.isdir(self.abspath()): shutil.rmtree(self.abspath()) else: os.remove(self.abspath()) except OSError: if os.path.exists(self.abspath()): raise finally: if evict: self.evict() def evict(self): del 
self.parent.children[self.name] def suffix(self): k = max(0, self.name.rfind('.')) return self.name[k:] def height(self): d = self val = -1 while d: d = d.parent val += 1 return val def listdir(self): lst = Utils.listdir(self.abspath()) lst.sort() return lst def mkdir(self): if self.isdir(): return try: self.parent.mkdir() except OSError: pass if self.name: try: os.makedirs(self.abspath()) except OSError: pass if not self.isdir(): raise Errors.WafError('Could not create the directory %r' % self) try: self.children except AttributeError: self.children = self.dict_class() def find_node(self, lst): if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] if lst and lst[0].startswith('\\\\') and not self.parent: node = self.ctx.root.make_node(lst[0]) node.cache_isdir = True return node.find_node(lst[1:]) cur = self for x in lst: if x == '..': cur = cur.parent or cur continue try: ch = cur.children except AttributeError: cur.children = self.dict_class() else: try: cur = ch[x] continue except KeyError: pass cur = self.__class__(x, cur) if not cur.exists(): cur.evict() return None if not cur.exists(): cur.evict() return None return cur def make_node(self, lst): if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] cur = self for x in lst: if x == '..': cur = cur.parent or cur continue try: cur = cur.children[x] except AttributeError: cur.children = self.dict_class() except KeyError: pass else: continue cur = self.__class__(x, cur) return cur def search_node(self, lst): if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] cur = self for x in lst: if x == '..': cur = cur.parent or cur else: try: cur = cur.children[x] except (AttributeError, KeyError): return None return cur def path_from(self, node): c1 = self c2 = node c1h = c1.height() c2h = c2.height() lst = [] up = 0 while c1h > c2h: lst.append(c1.name) c1 = c1.parent c1h -= 1 while c2h > c1h: up += 1 c2 = c2.parent c2h -= 1 while not c1 is c2: lst.append(c1.name) up += 1 c1 = c1.parent c2 = c2.parent if c1.parent: lst.extend(['..'] * up) lst.reverse() return os.sep.join(lst) or '.' 
else: return self.abspath() def abspath(self): try: return self.cache_abspath except AttributeError: pass if not self.parent: val = os.sep elif not self.parent.name: val = os.sep + self.name else: val = self.parent.abspath() + os.sep + self.name self.cache_abspath = val return val if Utils.is_win32: def abspath(self): try: return self.cache_abspath except AttributeError: pass if not self.parent: val = '' elif not self.parent.name: val = self.name + os.sep else: val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name self.cache_abspath = val return val def is_child_of(self, node): p = self diff = self.height() - node.height() while diff > 0: diff -= 1 p = p.parent return p is node def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False): dircont = self.listdir() try: lst = set(self.children.keys()) except AttributeError: self.children = self.dict_class() else: if remove: for x in lst - set(dircont): self.children[x].evict() for name in dircont: npats = accept(name, pats) if npats and npats[0]: accepted = [] in npats[0] node = self.make_node([name]) isdir = node.isdir() if accepted: if isdir: if dir: yield node elif src: yield node if isdir: node.cache_isdir = True if maxdepth: for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1, pats=npats, dir=dir, src=src, remove=remove, quiet=quiet): yield k def ant_glob(self, *k, **kw): src = kw.get('src', True) dir = kw.get('dir') excl = kw.get('excl', exclude_regs) incl = k and k[0] or kw.get('incl', '**') remove = kw.get('remove', True) maxdepth = kw.get('maxdepth', 25) ignorecase = kw.get('ignorecase', False) quiet = kw.get('quiet', False) pats = (ant_matcher(incl, ignorecase), ant_matcher(excl, ignorecase)) if kw.get('generator'): return Utils.lazy_generator(self.ant_iter, (ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet)) it = self.ant_iter(ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet) if kw.get('flat'): return ' '.join(x.path_from(self) for x in it) return list(it) def is_src(self): cur = self x = self.ctx.srcnode y = self.ctx.bldnode while cur.parent: if cur is y: return False if cur is x: return True cur = cur.parent return False def is_bld(self): cur = self y = self.ctx.bldnode while cur.parent: if cur is y: return True cur = cur.parent return False def get_src(self): cur = self x = self.ctx.srcnode y = self.ctx.bldnode lst = [] while cur.parent: if cur is y: lst.reverse() return x.make_node(lst) if cur is x: return self lst.append(cur.name) cur = cur.parent return self def get_bld(self): cur = self x = self.ctx.srcnode y = self.ctx.bldnode lst = [] while cur.parent: if cur is y: return self if cur is x: lst.reverse() return self.ctx.bldnode.make_node(lst) lst.append(cur.name) cur = cur.parent lst.reverse() if lst and Utils.is_win32 and len(lst[0]) == 2 and lst[0].endswith(':'): lst[0] = lst[0][0] return self.ctx.bldnode.make_node(['__root__'] + lst) def find_resource(self, lst): if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.get_bld().search_node(lst) if not node: node = self.get_src().find_node(lst) if node and node.isdir(): return None return node def find_or_declare(self, lst): if isinstance(lst, str) and os.path.isabs(lst): node = self.ctx.root.make_node(lst) else: node = self.get_bld().make_node(lst) node.parent.mkdir() return node def find_dir(self, lst): if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.find_node(lst) if node and not node.isdir(): return None 
return node def change_ext(self, ext, ext_in=None): name = self.name if ext_in is None: k = name.rfind('.') if k >= 0: name = name[:k] + ext else: name = name + ext else: name = name[:- len(ext_in)] + ext return self.parent.find_or_declare([name]) def bldpath(self): return self.path_from(self.ctx.bldnode)
ISC License
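A sketch of how this reads inside a waf build script (the file name is hypothetical, and `bld.path` is assumed to be the usual waf build-context node):

def build(bld):
    node = bld.path.find_node('src/foo.cpp')   # hypothetical source file
    rel_src = node.srcpath()   # path expressed relative to the source directory
    rel_bld = node.bldpath()   # same node, relative to the build directory
# the exact strings depend on where the node sits relative to srcnode/bldnode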
humancompatibleai/evaluating-rewards
src/evaluating_rewards/rewards/preferences.py
PreferenceComparisonTrainer._get_returns
python
def _get_returns(self):
    pred_rewards = tf.reshape(self.model.reward, [2, self._batch_size, -1])
    returns = tf.reduce_sum(pred_rewards, axis=2)
    return returns
Computes the undiscounted returns of each trajectory. Returns: A Tensor of shape (2, batch_size) consisting of the sum of the rewards of each trajectory.
https://github.com/humancompatibleai/evaluating-rewards/blob/7b99ec9b415d805bd77041f2f7807d112dec9802/src/evaluating_rewards/rewards/preferences.py#L186-L200
import logging import math from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Type from imitation.data import rollout, types import numpy as np import pandas as pd from stable_baselines.common import policies, vec_env import tensorflow as tf from evaluating_rewards.rewards import base class TrajectoryPreference(NamedTuple): traja: types.Trajectory trajb: types.Trajectory label: int def _extend_placeholders(ph, name): return tf.placeholder(shape=(None, None) + ph.shape, dtype=ph.dtype, name=name) def _concatenate(preferences: List[TrajectoryPreference], attr: str, idx: slice) -> np.ndarray: traja = np.stack([getattr(p.traja, attr)[idx] for p in preferences]) trajb = np.stack([getattr(p.trajb, attr)[idx] for p in preferences]) stacked = np.stack([traja, trajb]) return stacked.reshape((-1,) + stacked.shape[3:]) def _slice_trajectory(trajectory: types.Trajectory, start: int, end: int) -> types.Trajectory: infos = trajectory.infos[start:end] if trajectory.infos is not None else None return types.Trajectory( obs=trajectory.obs[start : end + 1], acts=trajectory.acts[start:end], infos=infos, ) def generate_trajectories( venv: vec_env.VecEnv, policy: policies.BasePolicy, trajectory_length: int, num_trajectories: int ) -> Sequence[types.Trajectory]: def sample_until(episodes: Sequence[types.Trajectory]): episode_lengths = np.array([len(t.acts) for t in episodes]) num_trajs = episode_lengths // trajectory_length return np.sum(num_trajs) >= num_trajectories episodes = rollout.generate_trajectories(policy, venv, sample_until) trajectories = [] for episode in episodes: ep_len = len(episode.acts) remainder = ep_len % trajectory_length offset = 0 if remainder == 0 else np.random.randint(remainder) n_trajs = ep_len // trajectory_length for i in range(n_trajs): start = offset + i * trajectory_length end = start + trajectory_length trajectory = _slice_trajectory(episode, start, end) trajectories.append(trajectory) trajectories = trajectories[:num_trajectories] assert len(trajectories) == num_trajectories return trajectories class PreferenceComparisonTrainer: def __init__( self, model: base.RewardModel, model_params: Iterable[tf.Tensor], batch_size: int, optimizer: Type[tf.train.Optimizer] = tf.train.AdamOptimizer, optimizer_kwargs: Dict[str, Any] = None, weight_l2_reg: float = 0.0, reward_l2_reg: float = 1e-4, accuracy_threshold: float = 0.5, ): self._model = model self._model_params = model_params self._batch_size = batch_size self._reward_l2_reg = reward_l2_reg self._regularization_weight = weight_l2_reg self._accuracy_threshold = accuracy_threshold self._preference_labels = tf.placeholder(shape=(None,), dtype=tf.int32, name="preferred") train_losses = self._get_loss_ops() self._train_pure_loss = train_losses["pure_loss"] self._train_loss = train_losses["train_loss"] self._train_acc = train_losses["accuracy"] optimizer_kwargs = optimizer_kwargs or {} self._optimizer = optimizer(**optimizer_kwargs) self._train_op = self._optimizer.minimize(self._train_loss) def _get_regularizer(self): num_params = 0 for t in self._model_params: assert t.shape.is_fully_defined() num_params += np.prod(t.shape.as_list()) return sum(tf.nn.l2_loss(t) for t in self._model_params) / num_params
Apache License 2.0
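The `_get_returns` method above only reshapes the model's flat per-step rewards into (pair, batch, time) and sums over the time axis. A minimal NumPy sketch of that shape manipulation, with assumed batch and trajectory sizes:

import numpy as np

batch_size, traj_len = 4, 5
flat_rewards = np.random.rand(2 * batch_size * traj_len)  # stand-in for self.model.reward

pred_rewards = flat_rewards.reshape(2, batch_size, -1)    # (2, batch, time)
returns = pred_rewards.sum(axis=2)                        # undiscounted returns, shape (2, batch)
assert returns.shape == (2, batch_size)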
jmchilton/galaxy-central
galaxy/interfaces/root.py
Universe.last_dataset_id
python
def last_dataset_id( self, trans ): trans.response.set_content_type("text/plain") history = trans.get_history() if len( history.datasets ) > 0: return history.datasets[-1].id else: return -1
Returns the largest (last) dataset id
https://github.com/jmchilton/galaxy-central/blob/31e2fd3a32b06ddfba06ae5b044efdce1d93f08c/galaxy/interfaces/root.py#L53-L64
import logging, os, sets, string, shutil import re, socket from galaxy import util, datatypes, jobs, web, util import common from cgi import escape log = logging.getLogger( __name__ ) class Universe(common.Root): pref_cookie_name = 'universe_prefs' @web.expose def generate_error( self, trans ): raise Exception( "Fake error!" ) @web.expose def default(self, trans, target1=None, target2=None, **kwd): return 'This link may not be followed from within Galaxy.' @web.expose def index(self, trans, id=None, tool_id=None, mode=None, **kwd): if mode: trans.set_cookie(name=self.pref_cookie_name, value=mode) else: mode = trans.get_cookie(name=self.pref_cookie_name) result = trans.fill_template('index_frames.tmpl', mode=mode) return [ result ] @web.expose def tool_menu( self, trans ): return trans.fill_template('tool_menu.tmpl', toolbox=self.get_toolbox() ) @web.expose def last_hid( self, trans ): trans.response.set_content_type("text/plain") history = trans.get_history() if len( history.datasets ) > 0: return len( history.datasets ) + 1 else: return -1 return str(maxhid) @web.expose
MIT License
getmetamapper/metamapper
utils/postgres/compilers.py
PostgresInsertCompiler._build_conflict_target
python
def _build_conflict_target(self): conflict_target = [] if not isinstance(self.query.conflict_target, list): raise SuspiciousOperation( ( "%s is not a valid conflict target, specify " "a list of column names, or tuples with column " "names and hstore key." ) % str(self.query.conflict_target) ) def _assert_valid_field(field_name): field_name = self._normalize_field_name(field_name) if self._get_model_field(field_name): return raise SuspiciousOperation( ( "%s is not a valid conflict target, specify " "a list of column names, or tuples with column " "names and hstore key." ) % str(field_name) ) for field_name in self.query.conflict_target: _assert_valid_field(field_name) if isinstance(field_name, tuple): conflict_target.append( "(%s->'%s')" % (self._format_field_name(field_name), field_name[1]) ) else: conflict_target.append(self._format_field_name(field_name)) return "(%s)" % ",".join(conflict_target)
Builds the `conflict_target` for the ON CONFLICT clause.
https://github.com/getmetamapper/metamapper/blob/0b2f67eec03fbf7ece35ff9f58ea9bb2dde4d85f/utils/postgres/compilers.py#L135-L176
from django.core.exceptions import SuspiciousOperation from django.db.models import Model from django.db.models.fields.related import RelatedField from django.db.models.sql.compiler import SQLInsertCompiler, SQLUpdateCompiler from django.db.utils import ProgrammingError from utils.postgres.expressions import HStoreValue class PostgresUpdateCompiler(SQLUpdateCompiler): def as_sql(self): self._prepare_query_values() return super().as_sql() def _prepare_query_values(self): new_query_values = [] for field, model, val in self.query.values: if isinstance(val, dict): val = HStoreValue(val) new_query_values.append((field, model, val)) self.query.values = new_query_values class PostgresInsertCompiler(SQLInsertCompiler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.qn = self.connection.ops.quote_name def as_sql(self, return_id=False): queries = [ self._rewrite_insert(sql, params, return_id) for sql, params in super().as_sql() ] return queries def execute_sql(self, return_id=False): with self.connection.cursor() as cursor: rows = [] for sql, params in self.as_sql(return_id): cursor.execute(sql, params) try: rows.extend(cursor.fetchall()) except ProgrammingError: pass return [ { column.name: row[column_index] for column_index, column in enumerate(cursor.description) if row } for row in rows ] def _rewrite_insert(self, sql, params, return_id=False): returning = ( self.qn(self.query.model._meta.pk.attname) if return_id else "*" ) return self._rewrite_insert_on_conflict( sql, params, self.query.conflict_action.value, returning ) def _rewrite_insert_on_conflict(self, sql, params, conflict_action, returning): update_columns = ", ".join( [ "{0} = EXCLUDED.{0}".format(self.qn(field.column)) for field in self.query.update_fields ] ) conflict_target = self._build_conflict_target() index_predicate = self.query.index_predicate sql_template = ( "{insert} ON CONFLICT {conflict_target} DO {conflict_action}" ) if index_predicate: sql_template = "{insert} ON CONFLICT {conflict_target} WHERE {index_predicate} DO {conflict_action}" if conflict_action == "UPDATE": sql_template += " SET {update_columns}" sql_template += " RETURNING {returning}" return ( sql_template.format( insert=sql, conflict_target=conflict_target, conflict_action=conflict_action, update_columns=update_columns, returning=returning, index_predicate=index_predicate, ), params, )
BSD 2-Clause Simplified License
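A standalone sketch (not the class method itself) of the two conflict-target forms `_build_conflict_target` emits: plain column names and (column, hstore key) tuples. The quoting helper here is a simplified stand-in for Django's `quote_name`.

def build_conflict_target(conflict_target):
    def quote(name):
        # simplified stand-in for connection.ops.quote_name
        return '"%s"' % name

    parts = []
    for field in conflict_target:
        if isinstance(field, tuple):
            # hstore key: (column, key) -> ("column"->'key')
            parts.append("(%s->'%s')" % (quote(field[0]), field[1]))
        else:
            parts.append(quote(field))
    return "(%s)" % ",".join(parts)

print(build_conflict_target(["id"]))                    # ("id")
print(build_conflict_target([("data", "slug"), "id"]))  # (("data"->'slug'),"id")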
watson-developer-cloud/python-sdk
ibm_watson/text_to_speech_v1.py
TextToSpeechV1.__init__
python
def __init__( self, authenticator: Authenticator = None, service_name: str = DEFAULT_SERVICE_NAME, ) -> None: if not authenticator: authenticator = get_authenticator_from_environment(service_name) BaseService.__init__(self, service_url=self.DEFAULT_SERVICE_URL, authenticator=authenticator) self.configure_service(service_name)
Construct a new client for the Text to Speech service. :param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md about initializing the authenticator of your choice.
https://github.com/watson-developer-cloud/python-sdk/blob/f9a32d46d5ae31d1e43c9530e829248a3b9e0219/ibm_watson/text_to_speech_v1.py#L67-L84
from enum import Enum from typing import BinaryIO, Dict, List import json from ibm_cloud_sdk_core import BaseService, DetailedResponse from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment from ibm_cloud_sdk_core.utils import convert_model from .common import get_sdk_headers class TextToSpeechV1(BaseService): DEFAULT_SERVICE_URL = 'https://api.us-south.text-to-speech.watson.cloud.ibm.com' DEFAULT_SERVICE_NAME = 'text_to_speech'
Apache License 2.0
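A typical construction sketch for the client above; the API key and service URL are placeholders, and `IAMAuthenticator` is one common choice (any `Authenticator` from ibm-cloud-sdk-core works). Omitting `authenticator` makes the constructor fall back to `get_authenticator_from_environment`, as the code above shows.

from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('<your-apikey>')             # placeholder credential
text_to_speech = TextToSpeechV1(authenticator=authenticator)
text_to_speech.set_service_url(
    'https://api.us-south.text-to-speech.watson.cloud.ibm.com'  # default region shown above
)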
bungnoid/gltools
tools/spaces.py
Spaces.add
python
def add(self,ctrl,targetList=[],abrTargetList=[],nameTag=''): if not len(abrTargetList): abrTargetList = targetList spacesNode = '' try: spacesNode = self.getSpacesNode(ctrl) except: result = self.create(ctrl,targetList,abrTargetList,nameTag) return result spacesNodeConstraint = self.getSpacesConstraint(ctrl) for target in targetList: if not mc.objExists(target): raise UserInputError('Target object '+target+' does not exists!') targetListSize = len(mc.parentConstraint(spacesNodeConstraint,q=True,tl=True)) for i in range(len(targetList)): mc.parentConstraint(targetList[i],spacesNodeConstraint,mo=True,w=0.0) for ch in self.channels: mc.setAttr(spacesNodeConstraint+'.target['+str(targetListSize)+'].to'+ch,l=False,k=True) translateOffset = mc.getAttr(spacesNodeConstraint+'.target['+str(targetListSize)+'].targetOffsetTranslate')[0] rotateOffset = mc.getAttr(spacesNodeConstraint+'.target['+str(targetListSize)+'].targetOffsetRotate')[0] mc.setAttr(spacesNode+'.defaultOffset',l=False) mc.setAttr(spacesNode+'.defaultOffset['+str(targetListSize)+'].dot',translateOffset[0],translateOffset[1],translateOffset[2]) mc.setAttr(spacesNode+'.defaultOffset['+str(targetListSize)+'].dor',rotateOffset[0],rotateOffset[1],rotateOffset[2]) mc.setAttr(spacesNode+'.defaultOffset',l=True) mc.connectAttr(spacesNode+'.tot', spacesNodeConstraint+'.target['+str(targetListSize)+'].tot',f=True) mc.connectAttr(spacesNode+'.tor', spacesNodeConstraint+'.target['+str(targetListSize)+'].tor',f=True) targetListSize += 1 weightAliasList = mc.parentConstraint(spacesNodeConstraint,q=True,weightAliasList=True) for i in range(len(weightAliasList)): if not mc.objExists(spacesNode+'.'+weightAliasList[i]): mc.addAttr(spacesNode,ln=weightAliasList[i],k=True,min=0.0,max=1.0,dv=0.0) mc.connectAttr(spacesNode+'.'+weightAliasList[i],spacesNodeConstraint+'.'+weightAliasList[i],f=True) enumString = mc.addAttr(spacesNode +'.spaces',q=True,en=True) + ':' for abr in abrTargetList: enumString += abr+':' mc.addAttr(spacesNode+'.spaces',e=True,en=enumString) return [spacesNode,spacesNodeConstraint]
add to an existing spaces node @param targetList: list of target transforms for the space node constraint @type targetList: list @param abrTargetList: list of abbreviated target names. Used in UI. @type abrTargetList: list @param ctrl: Control to be parented to spaces node @type ctrl: str @param nameTag: Shortened, descriptive name for control. Used in UI. @type nameTag: str
https://github.com/bungnoid/gltools/blob/8ff0899de43784a18bd4543285655e68e28fb5e5/tools/spaces.py#L171-L242
import maya.cmds as mc class UserInputError(Exception): pass class Spaces(object): def __init__(self): self.allChannels = ['t','tx','ty','tz','r','rx','ry','rz','s','sx','sy','sz'] self.channels = self.allChannels[0:8] self.transform = ['transform','joint'] self.worldNode = 'spaces_wld01_loc' self.managerUI = 'spacesUI' self.uiRCL = 'spacesRCL' self.uiKeyCBG = 'spacesKeyCBG' self.uiKeyPreviousCBG = 'spacesKeyPrevCBG' self.uiMaintainPosCBG = 'spacesMaintainPosCBG' self.uiAllOMG = 'spacesAllOMG' def create(self,ctrl,targetList=[],abrTargetList=[],nameTag='',worldParent=''): if not len(abrTargetList): abrTargetList = targetList for target in targetList: if not mc.objExists(target): raise UserInputError('Target object '+target+' does not exists!') par = '' try: par = self.getSpacesNode(ctrl) except: par = mc.listRelatives(ctrl,p=True) if par == None: par = mc.group(ctrl,n=ctrl+'_buf') else: par = par[0] else: result = self.add(ctrl,targetList,abrTargetList,nameTag) return result if not mc.objExists(self.worldNode): self.worldNode = mc.createNode('transform',n=self.worldNode) if len(worldParent): if mc.objExists(worldParent): mc.parent(self.worldNode,worldParent) else: if len(worldParent): currentWorldParent = mc.listRelatives(self.worldNode,p=1)[0] print('Spaces WORLD node already exists and is parented to '+currentWorldParent+'!!') targetList.insert(0,self.worldNode) targetList.insert(0,par) abrTargetList.insert(0,'SuperMover') abrTargetList.insert(0,'Default') spacesNode = mc.duplicate(par,rr=1,rc=1,n=ctrl+'_spn')[0] mc.delete(mc.listRelatives(spacesNode,ad=1)) for ch in self.allChannels: mc.setAttr(spacesNode+'.'+ch,l=False,k=False) mc.parent(spacesNode,par) mc.parent(ctrl,spacesNode) mc.addAttr(spacesNode,ln='targetOffsetTranslate',sn='tot',at='double3') mc.addAttr(spacesNode,ln='targetOffsetTranslateX',sn='totx',at='double',p='targetOffsetTranslate') mc.addAttr(spacesNode,ln='targetOffsetTranslateY',sn='toty',at='double',p='targetOffsetTranslate') mc.addAttr(spacesNode,ln='targetOffsetTranslateZ',sn='totz',at='double',p='targetOffsetTranslate') mc.addAttr(spacesNode,ln='targetOffsetRotate',sn='tor',at='double3') mc.addAttr(spacesNode,ln='targetOffsetRotateX',sn='torx',at='double',p='targetOffsetRotate') mc.addAttr(spacesNode,ln='targetOffsetRotateY',sn='tory',at='double',p='targetOffsetRotate') mc.addAttr(spacesNode,ln='targetOffsetRotateZ',sn='torz',at='double',p='targetOffsetRotate') for ch in self.channels: mc.setAttr(spacesNode+'.to'+ch,k=True) mc.addAttr(spacesNode,ln='defaultOffset',at='compound',numberOfChildren=2,m=True) mc.addAttr(spacesNode,ln='defaultOffsetTranslate',sn='dot',at='double3',p='defaultOffset') mc.addAttr(spacesNode,ln='defaultOffsetTranslateX',sn='dotx',at='double',p='defaultOffsetTranslate') mc.addAttr(spacesNode,ln='defaultOffsetTranslateY',sn='doty',at='double',p='defaultOffsetTranslate') mc.addAttr(spacesNode,ln='defaultOffsetTranslateZ',sn='dotz',at='double',p='defaultOffsetTranslate') mc.addAttr(spacesNode,ln='defaultOffsetRotate',sn='dor',at='double3',p='defaultOffset') mc.addAttr(spacesNode,ln='defaultOffsetRotateX',sn='dorx',at='doubleAngle',p='defaultOffsetRotate') mc.addAttr(spacesNode,ln='defaultOffsetRotateY',sn='dory',at='doubleAngle',p='defaultOffsetRotate') mc.addAttr(spacesNode,ln='defaultOffsetRotateZ',sn='dorz',at='doubleAngle',p='defaultOffsetRotate') enumString = '' for abr in abrTargetList: enumString += abr+':' if not mc.objExists(spacesNode+'.spaces'): mc.addAttr(spacesNode,ln='spaces',at='enum',en=enumString) 
mc.setAttr(spacesNode+'.spaces',k=1) else: mc.addAttr(spacesNode+'.spaces',e=1,en=enumString) if not len(nameTag): nameTag = ctrl if not mc.objExists(spacesNode+'.nameTag'): mc.addAttr(spacesNode,ln='nameTag',dt='string') mc.setAttr(spacesNode+'.nameTag',nameTag,type='string') spacesNodeConstraint = '' for i in range(len(targetList)): if not i: spacesNodeConstraint = mc.parentConstraint(targetList[i],spacesNode,n=ctrl+'_pcn',w=0.0)[0] else: mc.parentConstraint(targetList[i],spacesNode,mo=True,w=0.0) for ch in self.channels: mc.setAttr(spacesNodeConstraint+'.target['+str(i)+'].to'+ch,l=False,k=True) translateOffset = mc.getAttr(spacesNodeConstraint+'.target['+str(i)+'].targetOffsetTranslate')[0] rotateOffset = mc.getAttr(spacesNodeConstraint+'.target['+str(i)+'].targetOffsetRotate')[0] mc.setAttr(spacesNode+'.defaultOffset',l=False) mc.setAttr(spacesNode+'.defaultOffset['+str(i)+'].dot',translateOffset[0],translateOffset[1],translateOffset[2]) mc.setAttr(spacesNode+'.defaultOffset['+str(i)+'].dor',rotateOffset[0],rotateOffset[1],rotateOffset[2]) mc.setAttr(spacesNode+'.defaultOffset',l=True) weightAliasList = mc.parentConstraint(spacesNodeConstraint,q=True,weightAliasList=True) for i in range(len(targetList)): mc.addAttr(spacesNode,ln=weightAliasList[i],min=0.0,max=1.0,dv=0.0) mc.setAttr(spacesNode+'.'+weightAliasList[i],l=False,k=True) mc.connectAttr(spacesNode+'.'+weightAliasList[i], spacesNodeConstraint+'.'+weightAliasList[i], f=True) translateOffset = mc.getAttr(spacesNode+'.defaultOffset['+str(i)+'].dot')[0] rotateOffset = mc.getAttr(spacesNode+'.defaultOffset['+str(i)+'].dor')[0] mc.setAttr(spacesNode+'.tot',translateOffset[0],translateOffset[1],translateOffset[2],l=False) mc.setAttr(spacesNode+'.tor',rotateOffset[0],rotateOffset[1],rotateOffset[2],l=False) mc.connectAttr(spacesNode+'.tot', spacesNodeConstraint+'.target['+str(i)+'].tot',f=True) mc.connectAttr(spacesNode+'.tor', spacesNodeConstraint+'.target['+str(i)+'].tor',f=True) self.switch(ctrl,'Default',0) return [spacesNode,spacesNodeConstraint]
MIT License
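A hypothetical Maya usage sketch for `Spaces.add` above; the control and target names are placeholders, and it assumes tools/spaces.py is on the Maya script path (imported here as `spaces`) and that the control already has a spaces node (otherwise `add` falls back to `create`).

import spaces

sp = spaces.Spaces()
spacesNode, spacesConstraint = sp.add(
    'lf_hand_ctrl',                                # control with an existing spaces node
    targetList=['cn_chest_jnt', 'cn_head_jnt'],    # new space targets
    abrTargetList=['Chest', 'Head'],               # short names added to the UI enum
    nameTag='LeftHand',
)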
bluesky/bluesky
bluesky/tests/test_plans.py
_retrieve_motor_positions
python
def _retrieve_motor_positions(doc_collector, motors): motor_names = [_.name for _ in motors] desc = next(iter(doc_collector.event.keys())) event_list = doc_collector.event[desc] positions = {k: [] for k in motor_names} for event in event_list: for name in positions.keys(): positions[name].append(event["data"][name]) return positions
Retrieves the motor positions for the completed run. Parameters ---------- `doc_collector`: DocCollector DocCollector object that contains data from a single run `motors`: list the list of motors for which positions should be collected. Returns ------- the dictionary: {'motor_name_1': list of positions, 'motor_name_2': list of positions, ...}
https://github.com/bluesky/bluesky/blob/759f9c55dce97dc47513cca749a69dd861bdf58d/bluesky/tests/test_plans.py#L393-L422
from distutils.version import LooseVersion import pytest import inspect from bluesky.tests.utils import DocCollector import bluesky.plans as bp import bluesky.plan_stubs as bps import numpy as np import numpy.testing as npt import pandas as pd import random import re import collections from bluesky.tests.utils import MsgCollector from bluesky.plan_patterns import chunk_outer_product_args, outer_product def _validate_start(start, expected_values): plan_md_key = [ 'plan_pattern_module', 'plan_pattern_args', 'plan_type', 'plan_pattern', 'plan_name', 'num_points', 'plan_args', 'detectors'] for k in plan_md_key: assert k in start for k, v in expected_values.items(): assert start[k] == v def test_plan_header(RE, hw): args = [] args.append((bp.grid_scan([hw.det], hw.motor, 1, 2, 3, hw.motor1, 4, 5, 6, hw.motor2, 7, 8, 9, snake_axes=True), {'motors': ('motor', 'motor1', 'motor2'), 'extents': ([1, 2], [4, 5], [7, 8]), 'shape': (3, 6, 9), 'snaking': (False, True, True), 'plan_pattern_module': 'bluesky.plan_patterns', 'plan_pattern': 'outer_product', 'plan_name': 'grid_scan'})) args.append((bp.inner_product_scan([hw.det], 9, hw.motor, 1, 2, hw.motor1, 4, 5, hw.motor2, 7, 8), {'motors': ('motor', 'motor1', 'motor2')})) for plan, target in args: c = DocCollector() RE(plan, c.insert) for s in c.start: _validate_start(s, target) def test_ops_dimension_hints(RE, hw): det = hw.det motor = hw.motor motor1 = hw.motor1 c = DocCollector() RE.subscribe(c.insert) RE(bp.grid_scan([det], motor, -1, 1, 7, motor1, 0, 2, 3)) st = c.start[0] assert 'dimensions' in st['hints'] assert st['hints']['dimensions'] == [ (m.hints['fields'], 'primary') for m in (motor, motor1)] def test_mesh_pseudo(hw, RE): p3x3 = hw.pseudo3x3 sig = hw.sig d = DocCollector() RE.subscribe(d.insert) rs = RE(bp.grid_scan([sig], p3x3.pseudo1, 0, 3, 5, p3x3.pseudo2, 7, 10, 7)) if RE.call_returns_result: uid = rs.run_start_uids[0] else: uid = rs[0] df = pd.DataFrame([_['data'] for _ in d.event[d.descriptor[uid][0]['uid']]]) for k in p3x3.describe(): assert k in df for k in sig.describe(): assert k in df assert all(df[sig.name] == 0) assert all(df[p3x3.pseudo3.name] == 0) def test_rmesh_pseudo(hw, RE): p3x3 = hw.pseudo3x3 p3x3.set(1, -2, 100) init_pos = p3x3.position sig = hw.sig d = DocCollector() RE.subscribe(d.insert) rs = RE(bp.rel_grid_scan([sig], p3x3.pseudo1, 0, 3, 5, p3x3.pseudo2, 7, 10, 7)) if RE.call_returns_result: uid = rs.run_start_uids[0] else: uid = rs[0] df = pd.DataFrame([_['data'] for _ in d.event[d.descriptor[uid][0]['uid']]]) for k in p3x3.describe(): assert k in df for k in sig.describe(): assert k in df assert all(df[sig.name] == 0) assert all(df[p3x3.pseudo3.name] == 100) assert len(df) == 35 assert min(df[p3x3.pseudo1.name]) == 1 assert init_pos == p3x3.position def test_relative_pseudo(hw, RE, db): RE.subscribe(db.insert) p = hw.pseudo3x3 p.set(1, 1, 1) base_pos = p.position rs = RE(bp.relative_inner_product_scan([p], 5, p.pseudo1, -1, 1, p.pseudo2, -2, -1)) if RE.call_returns_result: uid = rs.run_start_uids[0] else: uid = rs[0] tb1 = db[uid].table().drop('time', 1) assert p.position == base_pos rs = RE(bp.relative_inner_product_scan([p], 5, p.real1, 1, -1, p.real2, 2, 1)) if RE.call_returns_result: uid = rs.run_start_uids[0] else: uid = rs[0] tb2 = db[uid].table().drop('time', 1) assert p.position == base_pos assert set(tb1) == set(tb2) assert len(tb1) == len(tb2) def get_hint(c): h = c.hints['fields'] return h[0] if h else c.name for c in list(p.pseudo_positioners) + list(p.real_positioners): col = get_hint(c) print(col) 
assert (tb1[col] == tb2[col]).all() assert (tb1[get_hint(p.pseudo1)] == np.linspace(0, 2, 5)).all() def test_reset_wrapper(hw, RE): p = hw.pseudo3x3 m_col = MsgCollector() RE.msg_hook = m_col RE(bp.relative_inner_product_scan([], 1, p.pseudo1, 0, 1, p.pseudo2, 0, 1)) expecte_objs = [p, None, None, p, None, p, None, None, p, None, None, p, p, None] assert len(m_col.msgs) == 14 assert [m.obj for m in m_col.msgs] == expecte_objs @pytest.mark.parametrize('pln', [bps.mv, bps.mvr]) def test_pseudo_mv(hw, RE, pln): p = hw.pseudo3x3 m_col = MsgCollector() RE.msg_hook = m_col RE(pln(p.pseudo1, 1, p.pseudo2, 1)) expecte_objs = [p, None] assert len(m_col.msgs) == 2 assert [m.obj for m in m_col.msgs] == expecte_objs def _good_per_step_factory(): def per_step_old(detectors, motor, step): yield from bps.null() def per_step_extra(detectors, motor, step, some_kwarg=None): yield from bps.null() def per_step_exact(detectors, motor, step, take_reading=None): yield from bps.null() def per_step_kwargs(detectors, motor, step, **kwargs): yield from bps.null() def per_nd_step(detectors, post_cache, *args, **kwargs): yield from bps.null() return pytest.mark.parametrize( "per_step", [per_step_old, per_step_extra, per_step_exact, per_step_kwargs], ids=["no kwargs", "extra kwargs", "exact signature", "with kwargs"], ) @_good_per_step_factory() def test_good_per_step_signature(hw, per_step): list(bp.scan([hw.det], hw.motor, -1, 1, 5, per_step=per_step)) def _bad_per_step_factory(): def too_few(detectors, motor): def too_many(detectors, motor, step, bob): def extra_required_kwarg(detectors, motor, step, *, some_kwarg): def wrong_names(a, b, c, take_reading=None): def per_step_only_args(*args): def per_nd_step_extra(detectors, step, pos_cache, extra_no_dflt): def per_nd_step_bad_pos(detectors, step, pos_cache, *, extra_no_dflt): def all_wrong(a, b, c=None, *args, d=None, g, **kwargs): return pytest.mark.parametrize( "per_step", [too_few, too_many, extra_required_kwarg, wrong_names, per_step_only_args], ids=["too few", "too many", "required kwarg", "bad name", "args only"], ) @_bad_per_step_factory() def test_bad_per_step_signature(hw, per_step): sig = inspect.signature(per_step) print(f'*** test bad_per_step {sig} ***\n') with pytest.raises( TypeError, match=re.escape( "per_step must be a callable with the signature \n " "<Signature (detectors, step, pos_cache)> or " "<Signature (detectors, motor, step)>. 
\n" "per_step signature received: {}".format(sig) ), ): list(bp.scan([hw.det], hw.motor, -1, 1, 5, per_step=per_step)) def require_ophyd_1_4_0(): ophyd = pytest.importorskip("ophyd") if LooseVersion(ophyd.__version__) < LooseVersion('1.4.0'): pytest.skip("Needs ophyd 1.4.0 for realistic ophyd.sim Devices.") @pytest.mark.parametrize("val", [0, None, "aardvark"]) def test_rd_dflt(val): ophyd = pytest.importorskip("ophyd") require_ophyd_1_4_0() sig = ophyd.Signal(value="0", name="sig") def tester(obj, dflt): ret = yield from bps.rd(obj, default_value=dflt) assert ret is dflt list(tester(sig, val)) @pytest.mark.parametrize("val", [0, None, "aardvark"]) def test_rd(RE, val): ophyd = pytest.importorskip("ophyd") require_ophyd_1_4_0() sig = ophyd.Signal(value="0", name="sig") def tester(obj, val): yield from bps.mv(sig, val) ret = yield from bps.rd(obj, default_value=object()) assert ret == val RE(tester(sig, val)) def test_rd_fails(hw): require_ophyd_1_4_0() obj = hw.det obj.noise.kind = "hinted" hints = obj.hints.get("fields", []) msg = re.escape( f"Your object {obj} ({obj.name}.{obj.dotted_name}) " + f"has {len(hints)} items hinted ({hints}). We " ) with pytest.raises(ValueError, match=msg): list(bps.rd(obj)) obj.noise.kind = "normal" obj.val.kind = "normal" msg = re.escape( f"Your object {obj} ({obj.name}.{obj.dotted_name}) " + f"and has {len(obj.read_attrs)} read attrs. We " ) with pytest.raises(ValueError, match=msg): list(bps.rd(obj)) obj.read_attrs = [] msg = re.escape( f"Your object {obj} ({obj.name}.{obj.dotted_name}) " + f"and has {len(obj.read_attrs)} read attrs. We " ) with pytest.raises(ValueError, match=msg): list(bps.rd(obj)) @pytest.mark.parametrize("kind", ["hinted", "normal"]) def test_rd_device(hw, RE, kind): require_ophyd_1_4_0() called = False hw.det.val.kind = kind def tester(obj): nonlocal called direct_read = yield from bps.read(obj) rd_read = yield from bps.rd(obj) sig_read = yield from bps.rd(obj.val) assert rd_read == direct_read["det"]["value"] assert sig_read == rd_read called = True RE(tester(hw.det)) assert called
BSD 3-Clause New or Revised License
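A usage sketch for the test helper above, relying on the `RE` and `hw` pytest fixtures from bluesky's test suite (assumed available, as in the surrounding tests): run a simple scan, collect its documents, then pull the per-motor position lists.

c = DocCollector()
RE(bp.scan([hw.det], hw.motor, -1, 1, 5), c.insert)    # 5 points from -1 to 1
positions = _retrieve_motor_positions(c, [hw.motor])
# positions == {'motor': [-1.0, -0.5, 0.0, 0.5, 1.0]}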
poliastro/poliastro
src/poliastro/earth/atmosphere/coesa76.py
COESA76.pressure
python
def pressure(self, alt, geometric=True): z, h = self._check_altitude(alt, r0, geometric=geometric) i = self._get_index(z, self.zb_levels) Tb = self.Tb_levels[i] Lb = self.Lb_levels[i] hb = self.hb_levels[i] pb = self.pb_levels[i] if z < 86 * u.km: if Lb == 0.0 * u.K / u.km: p = pb * np.exp(-alpha * (h - hb) / Tb) else: T = self.temperature(z) p = pb * (Tb / T) ** (alpha / Lb) else: A, B, C, D, E = self._get_coefficients_avobe_86(z, p_coeff) z = z.to(u.km).value p = np.exp(A * z ** 4 + B * z ** 3 + C * z ** 2 + D * z + E) * u.Pa return p.to(u.Pa)
Solves pressure at given altitude. Parameters ---------- alt: ~astropy.units.Quantity Geometric/Geopotential altitude. geometric: bool If `True`, assumes geometric altitude kind. Returns ------- p: ~astropy.units.Quantity Pressure at given altitude.
https://github.com/poliastro/poliastro/blob/d0a13af27e5971e3435c9a762942041201ee13a6/src/poliastro/earth/atmosphere/coesa76.py#L185-L229
import numpy as np from astropy import units as u from astropy.io import ascii from astropy.utils.data import get_pkg_data_filename from poliastro.earth.atmosphere.base import COESA R = 8314.32 * u.J / u.kmol / u.K R_air = 287.053 * u.J / u.kg / u.K k = 1.380622e-23 * u.J / u.K Na = 6.022169e-26 / u.kmol g0 = 9.80665 * u.m / u.s ** 2 r0 = 6356.766 * u.km M0 = 28.9644 * u.kg / u.kmol P0 = 101325 * u.Pa T0 = 288.15 * u.K Tinf = 1000 * u.K gamma = 1.4 alpha = 34.1632 * u.K / u.km beta = 1.458e-6 * (u.kg / u.s / u.m / (u.K) ** 0.5) S = 110.4 * u.K coesa76_data = ascii.read(get_pkg_data_filename("data/coesa76.dat")) b_levels = coesa76_data["b"].data zb_levels = coesa76_data["Zb [km]"].data * u.km hb_levels = coesa76_data["Hb [km]"].data * u.km Tb_levels = coesa76_data["Tb [K]"].data * u.K Lb_levels = coesa76_data["Lb [K/km]"].data * u.K / u.km pb_levels = coesa76_data["pb [mbar]"].data * u.mbar p_data = ascii.read(get_pkg_data_filename("data/coesa76_p.dat")) rho_data = ascii.read(get_pkg_data_filename("data/coesa76_rho.dat")) z_coeff = p_data["z [km]"].data * u.km p_coeff = [ p_data["A"].data, p_data["B"].data, p_data["C"].data, p_data["D"].data, p_data["E"].data, ] rho_coeff = [ rho_data["A"].data, rho_data["B"].data, rho_data["C"].data, rho_data["D"].data, rho_data["E"].data, ] class COESA76(COESA): def __init__(self): super().__init__( b_levels, zb_levels, hb_levels, Tb_levels, Lb_levels, pb_levels ) def _get_coefficients_avobe_86(self, z, table_coeff): i = self._get_index(z, z_coeff) coeff_list = [] for X_set in table_coeff: coeff_list.append(X_set[i]) return coeff_list def temperature(self, alt, geometric=True): z, h = self._check_altitude(alt, r0, geometric=geometric) i = self._get_index(z, self.zb_levels) Tb = self.Tb_levels[i] Lb = self.Lb_levels[i] hb = self.hb_levels[i] if z < self.zb_levels[7]: Tm = Tb + Lb * (h - hb) T = Tm elif self.zb_levels[7] <= z and z < self.zb_levels[8]: T = 186.87 * u.K elif self.zb_levels[8] <= z and z < self.zb_levels[9]: Tc = 263.1905 * u.K A = -76.3232 * u.K a = -19.9429 * u.km T = Tc + A * (1 - ((z - self.zb_levels[8]) / a) ** 2) ** 0.5 elif self.zb_levels[9] <= z and z < self.zb_levels[10]: T = 240 * u.K + Lb * (z - self.zb_levels[9]) else: T10 = 360.0 * u.K _gamma = self.Lb_levels[9] / (Tinf - T10) epsilon = (z - self.zb_levels[10]) * (r0 + self.zb_levels[10]) / (r0 + z) T = Tinf - (Tinf - T10) * np.exp(-_gamma * epsilon) return T.to(u.K)
MIT License
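A short usage sketch for `COESA76.pressure` above; the import path follows the module location in this entry, and the printed value is approximate.

from astropy import units as u
from poliastro.earth.atmosphere.coesa76 import COESA76

coesa76 = COESA76()
p = coesa76.pressure(11 * u.km)   # geometric altitude by default (geometric=True)
print(p.to(u.Pa))                 # roughly 2.3e4 Pa, near the US Standard Atmosphere tropopause value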
tektoncd/experimental
sdk/python/tekton_pipeline/models/v1beta1_sidecar.py
V1beta1Sidecar.stdin
python
def stdin(self, stdin): self._stdin = stdin
Sets the stdin of this V1beta1Sidecar. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501 :param stdin: The stdin of this V1beta1Sidecar. # noqa: E501 :type: bool
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/v1beta1_sidecar.py#L530-L539
import pprint import re import six from tekton_pipeline.configuration import Configuration class V1beta1Sidecar(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'args': 'list[str]', 'command': 'list[str]', 'env': 'list[V1EnvVar]', 'env_from': 'list[V1EnvFromSource]', 'image': 'str', 'image_pull_policy': 'str', 'lifecycle': 'V1Lifecycle', 'liveness_probe': 'V1Probe', 'name': 'str', 'ports': 'list[V1ContainerPort]', 'readiness_probe': 'V1Probe', 'resources': 'V1ResourceRequirements', 'script': 'str', 'security_context': 'V1SecurityContext', 'startup_probe': 'V1Probe', 'stdin': 'bool', 'stdin_once': 'bool', 'termination_message_path': 'str', 'termination_message_policy': 'str', 'tty': 'bool', 'volume_devices': 'list[V1VolumeDevice]', 'volume_mounts': 'list[V1VolumeMount]', 'working_dir': 'str', 'workspaces': 'list[V1beta1WorkspaceUsage]' } attribute_map = { 'args': 'args', 'command': 'command', 'env': 'env', 'env_from': 'envFrom', 'image': 'image', 'image_pull_policy': 'imagePullPolicy', 'lifecycle': 'lifecycle', 'liveness_probe': 'livenessProbe', 'name': 'name', 'ports': 'ports', 'readiness_probe': 'readinessProbe', 'resources': 'resources', 'script': 'script', 'security_context': 'securityContext', 'startup_probe': 'startupProbe', 'stdin': 'stdin', 'stdin_once': 'stdinOnce', 'termination_message_path': 'terminationMessagePath', 'termination_message_policy': 'terminationMessagePolicy', 'tty': 'tty', 'volume_devices': 'volumeDevices', 'volume_mounts': 'volumeMounts', 'working_dir': 'workingDir', 'workspaces': 'workspaces' } def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name='', ports=None, readiness_probe=None, resources=None, script=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, workspaces=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._args = None self._command = None self._env = None self._env_from = None self._image = None self._image_pull_policy = None self._lifecycle = None self._liveness_probe = None self._name = None self._ports = None self._readiness_probe = None self._resources = None self._script = None self._security_context = None self._startup_probe = None self._stdin = None self._stdin_once = None self._termination_message_path = None self._termination_message_policy = None self._tty = None self._volume_devices = None self._volume_mounts = None self._working_dir = None self._workspaces = None self.discriminator = None if args is not None: self.args = args if command is not None: self.command = command if env is not None: self.env = env if env_from is not None: self.env_from = env_from if image is not None: self.image = image if image_pull_policy is not None: self.image_pull_policy = image_pull_policy if lifecycle is not None: self.lifecycle = lifecycle if liveness_probe is not None: self.liveness_probe = liveness_probe self.name = name if ports is not None: self.ports = ports if readiness_probe is not None: self.readiness_probe = readiness_probe if resources is not None: self.resources = resources if script is not None: 
self.script = script if security_context is not None: self.security_context = security_context if startup_probe is not None: self.startup_probe = startup_probe if stdin is not None: self.stdin = stdin if stdin_once is not None: self.stdin_once = stdin_once if termination_message_path is not None: self.termination_message_path = termination_message_path if termination_message_policy is not None: self.termination_message_policy = termination_message_policy if tty is not None: self.tty = tty if volume_devices is not None: self.volume_devices = volume_devices if volume_mounts is not None: self.volume_mounts = volume_mounts if working_dir is not None: self.working_dir = working_dir if workspaces is not None: self.workspaces = workspaces @property def args(self): return self._args @args.setter def args(self, args): self._args = args @property def command(self): return self._command @command.setter def command(self, command): self._command = command @property def env(self): return self._env @env.setter def env(self, env): self._env = env @property def env_from(self): return self._env_from @env_from.setter def env_from(self, env_from): self._env_from = env_from @property def image(self): return self._image @image.setter def image(self, image): self._image = image @property def image_pull_policy(self): return self._image_pull_policy @image_pull_policy.setter def image_pull_policy(self, image_pull_policy): self._image_pull_policy = image_pull_policy @property def lifecycle(self): return self._lifecycle @lifecycle.setter def lifecycle(self, lifecycle): self._lifecycle = lifecycle @property def liveness_probe(self): return self._liveness_probe @liveness_probe.setter def liveness_probe(self, liveness_probe): self._liveness_probe = liveness_probe @property def name(self): return self._name @name.setter def name(self, name): if self.local_vars_configuration.client_side_validation and name is None: raise ValueError("Invalid value for `name`, must not be `None`") self._name = name @property def ports(self): return self._ports @ports.setter def ports(self, ports): self._ports = ports @property def readiness_probe(self): return self._readiness_probe @readiness_probe.setter def readiness_probe(self, readiness_probe): self._readiness_probe = readiness_probe @property def resources(self): return self._resources @resources.setter def resources(self, resources): self._resources = resources @property def script(self): return self._script @script.setter def script(self, script): self._script = script @property def security_context(self): return self._security_context @security_context.setter def security_context(self, security_context): self._security_context = security_context @property def startup_probe(self): return self._startup_probe @startup_probe.setter def startup_probe(self, startup_probe): self._startup_probe = startup_probe @property def stdin(self): return self._stdin @stdin.setter
Apache License 2.0
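A minimal construction sketch for the generated model above; the field values are illustrative only.

from tekton_pipeline.models.v1beta1_sidecar import V1beta1Sidecar

sidecar = V1beta1Sidecar(name='debug-shell', image='busybox', stdin=True, tty=True)
print(sidecar.stdin)   # True -- assigned through the stdin property setter shown above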
orange-opensource/pydcop
pydcop/dcop/objects.py
create_agents
python
def create_agents( name_prefix: str, indexes: Union[Iterable, Tuple[Iterable]], default_route: float = 1, routes: Dict[str, float] = None, default_hosting_costs: float = 0, hosting_costs: Dict[str, float] = None, separator: str = "_", **kwargs: Union[str, int, float], ) -> Dict[Union[str, Tuple[str, ...]], AgentDef]: agents = {} if isinstance(indexes, tuple): for combi in itertools.product(*indexes): name = name_prefix + separator.join(combi) agents[tuple(combi)] = AgentDef( name, default_route=default_route, routes=routes, default_hosting_costs=default_hosting_costs, hosting_costs=hosting_costs, **kwargs, ) elif isinstance(indexes, range): digit_count = len(str(indexes.stop - 1)) for i in indexes: name = f"{name_prefix}{i:0{digit_count}d}" agents[name] = AgentDef( name, default_route=default_route, routes=routes, default_hosting_costs=default_hosting_costs, hosting_costs=hosting_costs, **kwargs, ) elif hasattr(indexes, "__iter__"): for i in indexes: name = name_prefix + str(i) agents[name] = AgentDef( name, default_route=default_route, routes=routes, default_hosting_costs=default_hosting_costs, hosting_costs=hosting_costs, **kwargs, ) else: raise TypeError("indexes must be an iterable or a tuple of iterables") return agents
Mass creation of agents definitions. Parameters ---------- name_prefix: str Used as prefix when naming the agents. indexes: non-tuple iterable of indexes or tuple of iterable of indexes If it not a tuple, an AgentDef is be created for each of the index. If it is a tuple of iterable, an AgentDef is created for every possible combinations of values from `indexes`. default_route: float The default cost of a route when not specified in routes. routes: dictionary of agents name, as string, to float Attribute a specific route cost between this agent and the agents whose names are used as key in the dictionary default_hosting_costs The default hosting for a computation when not specified in hosting_costs. hosting_costs: dictionary of computation name, as string, to float Attribute a specific cost for hosting the computations whose names are used as key in the dictionary. separator: str kwargs: dictionary Returns ------- dict A dictionary ( index -> AgentDef) where index is a string or a tuple of string. See Also -------- create_variables Examples -------- When passing an iterable of indexes: >>> agts = create_agents('a', ['1', '2', '3'], ... default_route=2, default_hosting_costs=7) >>> assert isinstance(agts['a2'], AgentDef) When passing a range: >>> agts = create_agents('a', range(20), ... default_route=2, default_hosting_costs=7) >>> assert isinstance(agts['a08'], AgentDef)
https://github.com/orange-opensource/pydcop/blob/a51cc3f7d8ef9ee1f863beeca4ad60490862d2ed/pydcop/dcop/objects.py#L879-L975
import random from numbers import Real from typing import Callable, Sized from typing import Iterable, Any, Dict, Union, Tuple import itertools from typing import List from pydcop.utils.expressionfunction import ExpressionFunction from pydcop.utils.simple_repr import SimpleRepr, SimpleReprException VariableName = str class Domain(Sized, SimpleRepr, Iterable[Any]): def __init__(self, name: str, domain_type: str, values: Iterable) -> None: self._name = name self._domain_type = domain_type self._values = tuple(values) @property def type(self) -> str: return self._domain_type @property def name(self) -> str: return self._name @property def values(self) -> Iterable: return self._values def __iter__(self): return self._values.__iter__() def __getitem__(self, index): return self._values[index] def __len__(self): return len(self._values) def __contains__(self, v): return v in self._values def __eq__(self, o: object) -> bool: if not isinstance(o, Domain): return False if self.name == o.name and self.values == o.values and self.type == o.type: return True return False def __str__(self): return "VariableDomain({})".format(self.name) def __repr__(self): return "VariableDomain({}, {}, {})".format(self.name, self.type, self.values) def __hash__(self): return hash((self._name, self._domain_type, self._values)) def index(self, val): for i, v in enumerate(self._values): if val == v: return i raise ValueError(str(val) + " is not in the domain " + self._name) def to_domain_value(self, val: str): for i, v in enumerate(self._values): if str(v) == val: return i, v raise ValueError(str(val) + " is not in the domain " + self._name) VariableDomain = Domain binary_domain = Domain("binary", "binary", [0, 1]) class Variable(SimpleRepr): has_cost = False def __init__( self, name: str, domain: Union[Domain, Iterable[Any]], initial_value=None ) -> None: self._name = name if not hasattr(domain, "__iter__") and not isinstance(domain, VariableDomain): raise ValueError( "Invalid domain, must be an iterable or " "VariableDomain " ) if not isinstance(domain, Domain): domain = Domain("d_" + name, "unkown", domain) self._domain = domain if initial_value is not None and initial_value not in self.domain.values: raise ValueError( "Invalid initial value {}, not in domain values" " {}".format(initial_value, self.domain.values) ) self._initial_value = initial_value @property def name(self) -> str: return self._name @property def domain(self) -> Domain: return self._domain @property def initial_value(self): return self._initial_value def cost_for_val(self, val) -> float: return 0 def __str__(self): return "Variable({})".format(self.name) def __repr__(self): return "Variable({}, {}, {})".format(self.name, self.initial_value, self.domain) def __eq__(self, other): if type(self) != type(other): return False if ( self.name == other.name and self.initial_value == other.initial_value and self.domain == other.domain ): return True return False def __hash__(self): return hash((self._name, self._domain, self._initial_value)) def clone(self): return Variable(self.name, self.domain, initial_value=self.initial_value) def create_variables( name_prefix: str, indexes: Union[str, Tuple, Iterable], domain: Domain, separator: str = "_", ) -> Dict[Union[str, Tuple[str, ...]], Variable]: variables = {} if isinstance(indexes, tuple): for combi in itertools.product(*indexes): name = name_prefix + separator.join(combi) variables[tuple(combi)] = Variable(name, domain) elif isinstance(indexes, range): digit_count = len(str(indexes.stop - 1)) for i in indexes: 
name = f"{name_prefix}{i:0{digit_count}d}" variables[name] = Variable(name, domain) elif hasattr(indexes, "__iter__"): for i in indexes: name = name_prefix + str(i) variables[name] = Variable(name, domain) else: raise TypeError("indexes must be an iterable or a tuple of iterables") return variables class BinaryVariable(Variable): def __init__(self, name: str, initial_value=0) -> None: super().__init__(name, binary_domain, initial_value) def __str__(self): return "BinaryVariable({})".format(self.name) def __repr__(self): return "BinaryVariable({}, {})".format(self.name, self.initial_value) def clone(self): return BinaryVariable(self.name, initial_value=self.initial_value) def create_binary_variables( name_prefix: str, indexes, separator: str = "_" ) -> Dict[Union[str, Tuple], BinaryVariable]: variables = {} if isinstance(indexes, tuple): for combi in itertools.product(*indexes): name = name_prefix + separator.join(combi) variables[tuple(combi)] = BinaryVariable(name) elif hasattr(indexes, "__iter__"): for i in indexes: name = name_prefix + str(i) variables[name] = BinaryVariable(name) else: raise TypeError("indexes must be an iterable or a tuple of iterables") return variables class VariableWithCostDict(Variable): has_cost = True def __init__( self, name: str, domain: Union[VariableDomain, Iterable[Any]], costs: Dict[Any, float], initial_value=None, ) -> None: super().__init__(name, domain, initial_value) self._costs = costs def cost_for_val(self, val) -> float: try: return self._costs[val] except KeyError: return 0.0 def __str__(self): return "VariableWithCostDict({})".format(self.name) def __repr__(self): return "VariableWithCostDict" "({}, {}, {}, {})".format( self.name, self.initial_value, self.domain, self._costs ) def __eq__(self, other): if type(self) != type(other): return False if ( self.name == other.name and self.initial_value == other.initial_value and self.domain == other.domain and self._costs == other._costs ): return True return False def __hash__(self): return super().__hash__() ^ hash(tuple(self._costs.values())) def clone(self): return VariableWithCostDict( self.name, self.domain, self._costs, initial_value=self.initial_value ) class VariableWithCostFunc(Variable): has_cost = True def __init__( self, name: str, domain: Union[VariableDomain, Iterable[Any]], cost_func: Union[Callable[..., float], ExpressionFunction], initial_value: Any = None, ) -> None: super().__init__(name, domain, initial_value) if hasattr(cost_func, "variable_names"): if ( len(cost_func.variable_names) != 1 or name not in cost_func.variable_names ): raise ValueError( "Cost function for var {} must have a single " "variable, which must be the same as " 'the variable : "{} != {}'.format( name, name, cost_func.variable_names ) ) self._cost_func = cost_func def cost_for_val(self, val) -> float: if hasattr(self._cost_func, "variable_names"): return self._cost_func(**{self.name: val}) else: return self._cost_func(val) def __str__(self): return "VariableWithCostFunc({})".format(self.name) def __repr__(self): return "VariableWithCostFunc" "({}, {}, {}, {})".format( self.name, self.initial_value, self.domain, self._cost_func ) def __eq__(self, other): if type(self) != type(other): return False if ( self.name == other.name and self.initial_value == other.initial_value and self.domain == other.domain ): if [self.cost_for_val(v) for v in self.domain] == [ other.cost_for_val(v) for v in other.domain ]: return True return False def __hash__(self): costs = [self.cost_for_val(v) for v in self.domain] return 
super().__hash__() ^ hash(tuple(costs)) def clone(self): return VariableWithCostFunc( self.name, self.domain, self._cost_func, initial_value=self._initial_value ) def _simple_repr(self): if not hasattr(self._cost_func, "_simple_repr"): raise SimpleReprException( "Cannot take a simple repr from a " "variable with arbitrary cost function, " "use an ExpressionFunction instead" ) else: return super()._simple_repr() class VariableNoisyCostFunc(VariableWithCostFunc): has_cost = True def __init__( self, name: str, domain: Union[VariableDomain, Iterable[Any]], cost_func, initial_value=None, noise_level: float = 0.02, ) -> None: super().__init__(name, domain, cost_func, initial_value) self._noise_level = noise_level self._costs = {} for d in domain: self._costs[d] = super().cost_for_val(d) + random.uniform(0, noise_level) @property def noise_level(self) -> float: return self._noise_level def cost_for_val(self, val) -> float: return self._costs[val] def __str__(self): return "VariableNoisyCostFunc({})".format(self.name) def __repr__(self): return "VariableNoisyCostFunc" "({}, {}, {}, {}, {})".format( self.name, self.initial_value, self.domain, self._cost_func, self._noise_level, ) def __eq__(self, other): if type(self) != type(other): return False if ( self.name == other.name and self.noise_level == other.noise_level and self.domain == other.domain and self._cost_func == other._cost_func and self.initial_value == other.initial_value ): return True return False def __hash__(self): costs = [ super(VariableNoisyCostFunc, self).cost_for_val(d) for d in self.domain ] return Variable.__hash__(self) ^ hash(tuple(costs)) def clone(self): return VariableNoisyCostFunc( self.name, self.domain, self._cost_func, initial_value=self.initial_value, noise_level=self._noise_level, ) class ExternalVariable(Variable): def __init__( self, name: str, domain: Union[VariableDomain, Iterable[Any]], value=None ) -> None: super().__init__(name, domain) self._cb = [] self._value = list(domain.values)[0] self.value = value @property def value(self): return self._value @value.setter def value(self, val): if val == self._value: return if val not in self._domain: raise ValueError( "Invalid value {} for sensor variable {}".format(val, self._name) ) self._value = val self._fire(val) def subscribe(self, callback): self._cb.append(callback) def unsubscribe(self, callback): self._cb.remove(callback) def _fire(self, value): for cb in self._cb: cb(value) def clone(self): return ExternalVariable(self.name, self.domain, self.value) class AgentDef(SimpleRepr): def __init__( self, name: str, default_route: float = 1, routes: Dict[str, float] = None, default_hosting_cost: float = 0, hosting_costs: Dict[str, float] = None, **kwargs: Union[str, int, float], ) -> None: super().__init__() self._name = name self._attr = kwargs self._default_hosting_cost = default_hosting_cost self._hosting_costs = hosting_costs if hosting_costs is not None else {} self._default_route = default_route self._routes = routes if routes is not None else {} @property def name(self) -> str: return self._name def hosting_cost(self, computation: str) -> float: try: return self._hosting_costs[computation] except KeyError: return self.default_hosting_cost @property def default_hosting_cost(self) -> float: return self._default_hosting_cost @property def hosting_costs(self) -> Dict[str, float]: return self._hosting_costs @property def default_route(self) -> float: return self._default_route @property def routes(self) -> Dict[str, float]: return self._routes def route(self, 
other_agt: str) -> float: if self.name == other_agt: return 0 try: return self._routes[other_agt] except KeyError: return self.default_route def extra_attr(self) -> Dict[str, Any]: if self._attr is None: return dict() return self._attr def __getattr__(self, item): try: return self._attr[item] except KeyError: raise AttributeError("No attribute " + str(item) + " on " + str(self)) def __getstate__(self): return (self._name, self._hosting_costs, self.default_hosting_cost, self._attr) def __setstate__(self, state): ( self._name, self._hosting_costs, self._default_hosting_cost, self._attr, ) = state def __str__(self): return "AgentDef({})".format(self.name) def __repr__(self): return "AgentDef({}, {})".format(self.name, self._attr) def __eq__(self, other): if type(other) != AgentDef: return False if ( self.name == other.name and self.hosting_costs == other.hosting_costs and self._attr == other._attr and self.default_hosting_cost == other.default_hosting_cost ): return True return False
BSD 3-Clause New or Revised License
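The docstring examples above cover the iterable and range forms of `create_agents`; a short sketch of the tuple-of-iterables form, which produces one AgentDef per combination of the index sets:

from pydcop.dcop.objects import create_agents

agts = create_agents('a', (['x', 'y'], ['1', '2']), default_route=2)
assert set(agts) == {('x', '1'), ('x', '2'), ('y', '1'), ('y', '2')}
assert agts[('x', '2')].name == 'ax_2'   # name_prefix + separator.join(combi)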
haowen-xu/tfsnippet
tfsnippet/ops/control_flows.py
smart_cond
python
def smart_cond(cond, true_fn, false_fn, name=None): if is_tensor_object(cond): return tf.cond(cond, true_fn, false_fn, name=name) else: if cond: return true_fn() else: return false_fn()
Execute `true_fn` or `false_fn` according to `cond`. Args: cond (bool or tf.Tensor): A bool constant or a tensor. true_fn (() -> tf.Tensor): The function of the true branch. false_fn (() -> tf.Tensor): The function of the false branch. Returns: tf.Tensor: The output tensor.
https://github.com/haowen-xu/tfsnippet/blob/63adaf04d2ffff8dec299623627d55d4bacac598/tfsnippet/ops/control_flows.py#L9-L27
import tensorflow as tf from tfsnippet.utils import add_name_arg_doc, is_tensor_object __all__ = ['smart_cond'] @add_name_arg_doc
MIT License
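Usage sketch for `smart_cond` above: with a Python bool the chosen branch runs immediately and the other is never built; with a tensor condition it falls through to `tf.cond`. Written against the TF1-style API this library targets, with the import path taken from this entry.

import tensorflow as tf
from tfsnippet.ops.control_flows import smart_cond

# Static condition: the false branch is never constructed.
x = smart_cond(True, lambda: tf.constant(1), lambda: tf.constant(2))

# Dynamic condition: compiles to a tf.cond node evaluated at run time.
flag = tf.placeholder(tf.bool, shape=(), name='flag')
y = smart_cond(flag, lambda: tf.constant(1), lambda: tf.constant(2))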
asahiliu/pointdetectron
models/backbone_module_SA2_denseaspp3_6_12.py
Pointnet2Backbone.__init__
python
def __init__(self, input_feature_dim=0): super().__init__() self.sa1 = PointnetSAModuleVotes( npoint=2048, radius=0.2, nsample=64, mlp=[input_feature_dim, 64, 64, 128], use_xyz=True, normalize_xyz=True ) self.sa2 = PointnetSAModuleVotes( npoint=1024, radius=0.4, nsample=32, mlp=[128, 128, 128, 256], use_xyz=True, normalize_xyz=True ) self.sa2_d3 = PointnetSAModuleVotes( npoint=1024, radius=0.8, nsample=32*3, mlp=[256, 128, 128, 256], use_xyz=True, normalize_xyz=True, dilation = 3 ) self.sa2_d6 = PointnetSAModuleVotes( npoint=1024, radius=1.2, nsample=32*6, mlp=[256+256, 128, 128, 256], use_xyz=True, normalize_xyz=True, dilation = 6 ) self.sa2_d12 = PointnetSAModuleVotes( npoint=1024, radius=1.8, nsample=32*12, mlp=[256+256+256, 128, 128, 256], use_xyz=True, normalize_xyz=True, dilation = 12 ) ''' self.sa3 = PointnetSAModuleVotes( npoint=512, radius=0.8, nsample=16, mlp=[256, 128, 128, 256], use_xyz=True, normalize_xyz=True ) self.sa4 = PointnetSAModuleVotes( npoint=256, radius=1.2, nsample=16, mlp=[256, 128, 128, 256], use_xyz=True, normalize_xyz=True ) self.fp1 = PointnetFPModule(mlp=[256+256,256,256]) self.fp2 = PointnetFPModule(mlp=[256+256,256,256]) '''
self.sa3 = PointnetSAModuleVotes( npoint=512, radius=0.8, nsample=16, mlp=[256, 128, 128, 256], use_xyz=True, normalize_xyz=True ) self.sa4 = PointnetSAModuleVotes( npoint=256, radius=1.2, nsample=16, mlp=[256, 128, 128, 256], use_xyz=True, normalize_xyz=True ) self.fp1 = PointnetFPModule(mlp=[256+256,256,256]) self.fp2 = PointnetFPModule(mlp=[256+256,256,256])
https://github.com/asahiliu/pointdetectron/blob/634ffadfbc50d2f0dea4434e11d5977640f34004/models/backbone_module_SA2_denseaspp3_6_12.py#L32-L111
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import sys import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.append(ROOT_DIR) sys.path.append(os.path.join(ROOT_DIR, 'utils')) sys.path.append(os.path.join(ROOT_DIR, 'pointnet2')) from pointnet2_modules import PointnetSAModuleVotes, PointnetFPModule class Pointnet2Backbone(nn.Module):
MIT License
openstack/cinder
cinder/api/contrib/admin_actions.py
BackupAdminController._reset_status
python
def _reset_status(self, req, id, body): context = req.environ['cinder.context'] status = body['os-reset_status']['status'] update = {'status': status.lower()} msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) self._notify_reset_status(context, id, 'reset_status.start') self.backup_api.reset_status(context=context, backup_id=id, status=update['status'])
Reset status on the resource.
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/api/contrib/admin_actions.py#L295-L308
from http import HTTPStatus from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import strutils import webob from cinder.api import common from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import admin_actions from cinder.api import validation from cinder import backup from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import volume from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class AdminController(wsgi.Controller): collection = None def __init__(self, *args, **kwargs): super(AdminController, self).__init__(*args, **kwargs) self.resource_name = self.collection.rstrip('s') self.volume_api = volume.API() self.backup_api = backup.API() def _update(self, *args, **kwargs): raise NotImplementedError() def _get(self, *args, **kwargs): raise NotImplementedError() def _delete(self, *args, **kwargs): raise NotImplementedError() def validate_update(self, req, body): raise NotImplementedError() def _notify_reset_status(self, context, id, message): raise NotImplementedError() def authorize(self, context, action_name, target_obj=None): context.authorize( 'volume_extension:%(resource)s_admin_actions:%(action)s' % {'resource': self.resource_name, 'action': action_name}, target_obj=target_obj) def _remove_worker(self, context, id): res = db.worker_destroy(context, resource_type=self.collection.title(), resource_id=id) if res: LOG.debug('Worker entry for %s with id %s has been deleted.', self.collection, id) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-reset_status') def _reset_status(self, req, id, body): def _clean_volume_attachment(context, id): attachments = ( db.volume_attachment_get_all_by_volume_id(context, id)) for attachment in attachments: db.volume_detached(context.elevated(), id, attachment.id) db.volume_admin_metadata_delete(context.elevated(), id, 'attached_mode') context = req.environ['cinder.context'] update = self.validate_update(req, body=body) msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) self._notify_reset_status(context, id, 'reset_status.start') self._update(context, id, update) self._remove_worker(context, id) if update.get('attach_status') == 'detached': _clean_volume_attachment(context, id) self._notify_reset_status(context, id, 'reset_status.end') @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-force_delete') def _force_delete(self, req, id, body): context = req.environ['cinder.context'] resource = self._get(context, id) self.authorize(context, 'force_delete', target_obj=resource) self._delete(context, resource, force=True) class VolumeAdminController(AdminController): collection = 'volumes' def _notify_reset_status(self, context, id, message): volume = objects.Volume.get_by_id(context, id) volume_utils.notify_about_volume_usage(context, volume, message) def _update(self, *args, **kwargs): context = args[0] volume_id = args[1] volume = objects.Volume.get_by_id(context, volume_id) self.authorize(context, 'reset_status', target_obj=volume) db.volume_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.volume_api.get(*args, **kwargs) def _delete(self, *args, **kwargs): return self.volume_api.delete(*args, **kwargs) @validation.schema(admin_actions.reset) def validate_update(self, req, body): update = {} body = body['os-reset_status'] status = 
body.get('status', None) attach_status = body.get('attach_status', None) migration_status = body.get('migration_status', None) if status: update['status'] = status.lower() if attach_status: update['attach_status'] = attach_status.lower() if migration_status: update['migration_status'] = migration_status.lower() if update['migration_status'] == 'none': update['migration_status'] = None return update @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-force_detach') @validation.schema(admin_actions.force_detach) def _force_detach(self, req, id, body): context = req.environ['cinder.context'] volume = self._get(context, id) self.authorize(context, 'force_detach', target_obj=volume) connector = body['os-force_detach'].get('connector', None) try: self.volume_api.terminate_connection(context, volume, connector) except exception.VolumeBackendAPIException: msg = _("Unable to terminate volume connection from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) attachment_id = body['os-force_detach'].get('attachment_id', None) try: self.volume_api.detach(context, volume, attachment_id) except messaging.RemoteError as error: if error.exc_type in ['VolumeAttachmentNotFound', 'InvalidVolume']: msg = _("Error force detaching volume - %(err_type)s: " "%(err_msg)s") % {'err_type': error.exc_type, 'err_msg': error.value} raise webob.exc.HTTPBadRequest(explanation=msg) else: raise @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-migrate_volume') @validation.schema(admin_actions.migrate_volume, mv.BASE_VERSION, mv.get_prior_version(mv.VOLUME_MIGRATE_CLUSTER)) @validation.schema(admin_actions.migrate_volume_v316, mv.VOLUME_MIGRATE_CLUSTER) def _migrate_volume(self, req, id, body): context = req.environ['cinder.context'] volume = self._get(context, id) self.authorize(context, 'migrate_volume', target_obj=volume) params = body['os-migrate_volume'] cluster_name, host = common.get_cluster_host(req, params, mv.VOLUME_MIGRATE_CLUSTER) force_host_copy = strutils.bool_from_string(params.get( 'force_host_copy', False), strict=True) lock_volume = strutils.bool_from_string(params.get( 'lock_volume', False), strict=True) self.volume_api.migrate_volume(context, volume, host, cluster_name, force_host_copy, lock_volume) @wsgi.action('os-migrate_volume_completion') @validation.schema(admin_actions.migrate_volume_completion) def _migrate_volume_completion(self, req, id, body): context = req.environ['cinder.context'] volume = self._get(context, id) self.authorize(context, 'migrate_volume_completion', target_obj=volume) params = body['os-migrate_volume_completion'] new_volume_id = params['new_volume'] new_volume = self._get(context, new_volume_id) error = params.get('error', False) ret = self.volume_api.migrate_volume_completion(context, volume, new_volume, error) return {'save_volume_id': ret} class SnapshotAdminController(AdminController): collection = 'snapshots' def _notify_reset_status(self, context, id, message): snapshot = objects.Snapshot.get_by_id(context, id) volume_utils.notify_about_snapshot_usage(context, snapshot, message) @validation.schema(admin_actions.reset_status_snapshot) def validate_update(self, req, body): status = body['os-reset_status']['status'] update = {'status': status.lower()} return update def _update(self, *args, **kwargs): context = args[0] snapshot_id = args[1] fields = args[2] snapshot = objects.Snapshot.get_by_id(context, snapshot_id) self.authorize(context, 'reset_status', target_obj=snapshot) snapshot.update(fields) snapshot.save() def _get(self, *args, **kwargs): return 
self.volume_api.get_snapshot(*args, **kwargs) def _delete(self, *args, **kwargs): return self.volume_api.delete_snapshot(*args, **kwargs) class BackupAdminController(AdminController): collection = 'backups' def _notify_reset_status(self, context, id, message): backup = objects.Backup.get_by_id(context, id) volume_utils.notify_about_backup_usage(context, backup, message) def _get(self, *args, **kwargs): return self.backup_api.get(*args, **kwargs) def _delete(self, *args, **kwargs): return self.backup_api.delete(*args, **kwargs) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-reset_status') @validation.schema(admin_actions.reset_status_backup)
Apache License 2.0
rosuav/shed
BL2_find_items.py
decode_tree
python
def decode_tree(bits): if bits.get(1) == "1": return int(bits.get(8), 2) return (decode_tree(bits), decode_tree(bits))
Decode a (sub)tree from the given sequence of bits. Returns either a length-one bytes, or a tuple of two trees (left and right). Consumes either a 1 bit and then eight data bits, or a 0 bit and then two subtrees. (A usage sketch follows this record.)
https://github.com/rosuav/shed/blob/e5d8264c604c59b8ca9fb47f1f0d6fb14584bf3e/BL2_find_items.py#L800-L810
import argparse import base64 import binascii import collections import hashlib import itertools import json import math import os.path import struct import sys import random from fnmatch import fnmatch from dataclasses import dataclass from pprint import pprint import lzo from BL1_find_items import FunctionArg, Consumable import warnings; warnings.filterwarnings("ignore") loot_filter = FunctionArg("filter", 2) @loot_filter def level(usage, item, minlvl, maxlvl=None): minlvl = int(minlvl) if maxlvl is None: maxlvl = minlvl + 5 return minlvl <= item.grade <= int(maxlvl) @loot_filter def type(usage, item, type): return type in item.type del type @loot_filter def title(usage, item, tit): return item.title is not None and tit in item.title @loot_filter def loose(usage, item): return not usage.is_equipped() and usage.is_carried() synthesizer = FunctionArg("synth", 1) def strip_prefix(str): return str.split(".", 1)[1] def armor_serial(serial): return base64.b64encode(serial).decode("ascii").strip("=") def unarmor_serial(id): return base64.b64decode(id.strip("{}").encode("ascii") + b"====") def partnames(is_weapon): if is_weapon: return "body grip barrel sight stock elemental accessory1 accessory2".split() return "alpha beta gamma delta epsilon zeta eta theta".split() @synthesizer def money(savefile): savefile.money[0] += 5000000 @synthesizer def eridium(savefile): savefile.money[1] += 500 @synthesizer def seraph(savefile): savefile.money[2] += 500 @synthesizer def torgue(savefile): savefile.money[4] += 500 @synthesizer def xp(savefile, xp=None): min = math.ceil(60 * savefile.level ** 2.8 - 60) max = math.ceil(60 * (savefile.level + 1) ** 2.8 - 60) - 1 if xp is None: savefile.exp = max elif min <= int(xp) <= max: savefile.exp = int(xp) else: print("WARNING: Leaving XP unchanged - level %d needs %d-%d XP" % (savefile.level, min, max)) @synthesizer def boost(savefile): for i, weapon in enumerate(savefile.packed_weapon_data): weap = Asset.decode_asset_library(weapon.serial) if weap.grade < savefile.level and weapon.quickslot: weap.grade = weap.stage = savefile.level savefile.packed_weapon_data[i].serial = weap.encode_asset_library() for i, item in enumerate(savefile.packed_item_data): it = Asset.decode_asset_library(item.serial) if it and it.grade < savefile.level and item.equipped: it.grade = it.stage = savefile.level savefile.packed_item_data[i].serial = it.encode_asset_library() @synthesizer def invdup(savefile, level): levels = [int(l) for l in level.split(",") if l] if not levels: raise ValueError("C'mon, get on my level, man") for weapon in savefile.packed_weapon_data: weap = Asset.decode_asset_library(weapon.serial) if weap.grade not in levels and not weapon.quickslot: for level in levels: weap.grade = weap.stage = level weap.seed = random.randrange(1<<31) savefile.add_inventory(weap) for item in savefile.packed_item_data: it = Asset.decode_asset_library(item.serial) if it and it.grade not in levels and not item.equipped: for level in levels: it.grade = it.stage = level it.seed = random.randrange(1<<31) savefile.add_inventory(it) @synthesizer def item(savefile, bal): try: balance = get_balance_info(0, bal) is_weapon = 0 except KeyError: balance = get_balance_info(1, bal) is_weapon = 1 if "type" in balance: type = balance["type"] else: type = balance["types"][0] typeinfo = get_asset("Weapon Types" if is_weapon else "Item Types")[type] def p(part): b = balance.get(part) if b: return b[0] t = typeinfo.get(part + "_parts") if t: return t[0] return None def sp(name): return name and 
strip_prefix(name) lvl = savefile if isinstance(savefile, int) else savefile.level obj = Asset(seed=random.randrange(1<<31), is_weapon=is_weapon, type=sp(type), balance=bal, brand=sp(balance["manufacturers"][0]), grade=lvl, stage=lvl, pieces=[sp(p(n)) for n in partnames(is_weapon)], material=sp(p("material")), pfx=sp(typeinfo.get("prefixes", [None])[0]), title=sp(typeinfo.get("titles", [None])[0])) if isinstance(savefile, int): return obj savefile.add_inventory(obj) print("\nGiving", obj) @synthesizer def give(savefile, definitions): print() for definition in definitions.split(","): [id, *changes] = definition.split("-") serial = unarmor_serial(id) obj = Asset.decode_asset_library(serial) if obj.seed == obj.grade == 50: changes.insert(0, "l") obj.seed = random.randrange(1<<31) for change in changes: if not change: continue c = change[0].lower() if c == "l": obj.grade = obj.stage = int(change[1:] or savefile.level) savefile.add_inventory(obj) print("Giving", obj) def get_piece_options(obj): cls = "Weapon" if obj.is_weapon else "Item" config = get_asset_library_manager() setid, sublib, asset, cat = config["_find_asset"]["BalanceDefs"][obj.balance] allbal = get_asset(cls + " Balance") checkme = cat + "." + obj.balance pieces = [None] * len(obj.partnames) while checkme: print(checkme) if "parts" not in allbal[checkme] and "type" in allbal[checkme]: typeinfo = get_asset(cls + " Types")[allbal[checkme]["type"]] pieces = [p or typeinfo.get(part + "_parts") for p, part in zip(pieces, obj.partnames)] break parts = allbal[checkme]["parts"] pieces = [p or parts.get(part) for p, part in zip(pieces, obj.partnames)] checkme = allbal[checkme].get("base") return [p1 or [p2] for p1, p2 in zip(pieces, obj.pieces)] @synthesizer def crossproduct(savefile, baseid): baseid, *lockdown = baseid.split(",") if "_" in baseid: obj = item(50, baseid) else: obj = Asset.decode_asset_library(unarmor_serial(baseid)) print() print("Basis:", obj) pieces = get_piece_options(obj) interactive = False while "interactive or at least once": for fixed in lockdown: if fixed == "input": interactive = True continue if fixed.startswith("-") and fixed[1:] in obj.partnames: pieces[partnames.index(fixed[1:])] = [None] continue for n, p in enumerate(pieces): if fixed in p: pieces[n] = [fixed] break else: print("Couldn't find %r to lock down" % fixed) total = 1 for i, (n, opts) in enumerate(zip(obj.partnames, pieces)): for p in opts: if p and obj.pieces[i] and p.endswith(obj.pieces[i]): p = "\x1b[1m%s\x1b[0m" % p elif not obj.pieces[i] and p is None: p = "\x1b[1mNone\x1b[0m" print(n, p) n = " " * len(n) if not obj.pieces[i] and None not in opts: print(n, "\x1b[1mNone\x1b[0m") total *= len(opts) print("Will create", total, "objects.") lockdown = [] fixme = interactive and input() if fixme == "give" or fixme == "gr" or not fixme: for pp in itertools.product(*pieces): obj.seed = random.randrange(1<<31) obj.grade = obj.stage = savefile.level obj.pieces = [piece and strip_prefix(piece) for piece in pp] if total < 10: print(">", obj) savefile.add_inventory(obj) if not fixme: break if fixme == "gr": pieces = get_piece_options(obj) elif fixme == "q": break elif fixme == "reset": pieces = get_piece_options(obj) else: lockdown = [fixme] @synthesizer def tweak(savefile, baseid): if "_" in baseid: obj = item(50, baseid) else: obj = Asset.decode_asset_library(unarmor_serial(baseid)) obj.grade = obj.stage = savefile.level info = get_balance_info(obj.is_weapon, obj.balance) weap_item = "Weapon" if obj.is_weapon else "Item" config = 
get_asset_library_manager() setid, sublib, asset, cat = config["_find_asset"][weap_item + "Types"][obj.type] typeinfo = get_asset(weap_item + " Types")[cat + "." + obj.type] get_balance_options = { } def list_parts(part): b = info.get(part) if b: return b p = info.get("parts", {}).get(part) if p: return p t = typeinfo.get(part + "_parts") if t: return t return [None] def opt(f): get_balance_options[f.__name__] = f @opt def type(info): if "type" in info: if "types" not in info: info["types"] = [info["type"]] elif info["type"] not in info["types"]: info["types"].append(info["type"]) return info["types"] @opt def brand(info): return info["manufacturers"] @opt def material(info): return list_parts("material") @opt def pfx(info): return typeinfo.get("prefixes", [None]) @opt def title(info): return typeinfo.get("titles", [None]) import curses @curses.wrapper def _tweak(stdscr): curses.set_escdelay(10) filter = "" scroll = sel = 0 while "interactive": line = need = maxsel = selectme = 0 def printf(str="", *args, attr=curses.A_NORMAL, keep=3): if args: str = str % tuple(args) nonlocal line if line - scroll > stdscr.getmaxyx()[0] - keep: nonlocal need need += 1 return if line >= scroll: stdscr.addstr(line - scroll, 0, str, attr) stdscr.clrtoeol() line += 1 printf("Balance: %s", obj.balance, attr=curses.A_BOLD) def show_piece(key, active, options): printf("%s: %s", key, active, attr=curses.A_BOLD) if len(options) == 1 and str(options[0]).endswith(str(active)): return for opt in options: opt = strip_prefix(opt) if opt else "None" if filter in opt.lower(): nonlocal maxsel maxsel += 1 printf("%s\t%s", "->" if maxsel == sel else "", opt) if maxsel == sel: nonlocal selectme selectme = (key, opt) for attr, func in get_balance_options.items(): show_piece(attr, getattr(obj, attr), func(info)) printf() for n, piece in zip(obj.partnames, obj.pieces): show_piece(n, piece, list_parts(n)) printf(keep=2) for l in range(line, stdscr.getmaxyx()[0]): stdscr.move(l, 0) stdscr.clrtoeol() if need: printf("(+%d)> %s", need, filter, attr=curses.A_BOLD, keep=1) else: printf("> %s", filter, attr=curses.A_BOLD, keep=1) stdscr.refresh() key = stdscr.getkey() if key == "\x1b": if filter: filter = "" else: break elif key in ("KEY_SF", "kDN5") and need: scroll += 1 elif key in ("KEY_SR", "kUP5") and scroll: scroll -= 1 elif key == "KEY_DOWN": if sel < maxsel: sel += 1 else: sel = 1 elif key == "KEY_UP": if sel > 1: sel -= 1 else: sel = maxsel elif len(key) == 1 and ('A' <= key <= 'Z' or 'a' <= key <= 'z' or '0' <= key <= '9'): filter += key.lower() elif key == "KEY_BACKSPACE" and filter: filter = filter[:-1] elif key == "\n" and selectme: if selectme[0] in obj.partnames: obj.pieces[obj.partnames.index(selectme[0])] = selectme[1] if selectme[1] != "None" else None else: setattr(obj, selectme[0], selectme[1] if selectme[1] != "None" else None) elif key == "KEY_ENTER": savefile.add_inventory(obj) elif key == "KEY_IC": filter = repr(stdscr.getkey()) parser = argparse.ArgumentParser(description="Borderlands 2/Pre-Sequel save file reader") parser.add_argument("-2", "--bl2", help="Read Borderlands 2 savefiles", action="store_const", const="borderlands 2", dest="game") parser.add_argument("-p", "--tps", help="Read Borderlands The Pre-Sequel savefiles", action="store_const", const="borderlands the pre-sequel", dest="game") parser.set_defaults(game="borderlands 2") parser.add_argument("--proton", help="Read savefiles from Proton installation", action="store_const", const="proton", dest="platform") parser.add_argument("--native", 
help="Read savefiles from native Linux installation", action="store_const", const="native", dest="platform") parser.set_defaults(platform="native") parser.add_argument("--player", help="Choose which player (by Steam ID) to view savefiles of") parser.add_argument("--verify", help="Verify code internals by attempting to back-encode", action="store_true") parser.add_argument("--pieces", help="Show the individual pieces inside weapons/items", action="store_true") parser.add_argument("--raw", help="Show the raw details of weapons/items (spammy - use loot filters)", action="store_true") parser.add_argument("--itemids", help="Show the IDs of weapons/items", action="store_true") parser.add_argument("--synth", help="Synthesize a modified save file", type=synthesizer, nargs="*") parser.add_argument("-l", "--loot-filter", help="Show loot, optionally filtered to only what's interesting", type=loot_filter, nargs="*") parser.add_argument("-f", "--file", help="Process only one save file") parser.add_argument("--dir", help="Specify the savefile directory explicitly (ignores --proton/--native and --player)") parser.add_argument("--library", help="Add an item ID to the library") parser.add_argument("--compare", nargs=2, help="Compare two library items (or potential library items)") args = parser.parse_args() print(args) GAME = args.game library = { "borderlands 2": { "hwAAADLKv4T3Nj+nwWj5D93eEsI037K1X4yK8cYDK8sWhOzS7cRJ": "Lucid Florentine", "hwAAADIKvoT3NjynwWgFbZDeYkkhn4u8XMwIu9UDK6MWhOxyZrFJ": "Bulets Go Fasterifed Slagga", "hwAAADKKvoT5NjunwWhxDgXdssM0KLK9XOwK+tQDK6MWrOwC7KRJ": "Bladed Tattler", "hwAAADIHS+32AtYAwGjhy0mUAdMnD5q8mOMOut0DK4+33ajR/fdK": "Rapid Infinity", "hwAAADLClhT5FjHnwWg5bene4E4lbRm8nSIJxdMDKw8WpengZrVJ": "Onslaught Veruc", "hwAAADLKvwT4Nj2nwWiJr3XdckI2/9u4XoyK89oDK8MWjOyybQZI": "Rightsizing Yellow Jacket", "hwAAADIHS231AtYAwGgVywGYwdYnYey8nQMMutMDK4uw7aiB+fdK": "Corrosive Teapot", "hwAAADLKvwT5NtYAwGgtbVDfok4hoqu8XywJu8cDK8sWhOzSZrFJ": "Fervid Hellfire", "hwAAADIHS20A5TXPwWjVy1Ge8dMnX1a8mQMPut8DK7+3zagx/fdK": "Crammed Unkempt Harold", "hwAAADIKvgT4Nj2nwWj5L33dMkI07Nm1X2wK9sYDK8sWhOzybYRJ": "Consummate Crit", "BwAAADLCuhHxmSU8wOjSDKEfogGraRu+EzziescQoXr3uq5NbvU": "Longbow Bonus Package", "BwAAADLCudH52S+8IhTTDKHfpQHpUxu2E0jiWscQ4X33uq5N7vY": "Longbow Breath of Terramorphous", "BwAAADLCuhH52daAIhTTDKEfpQHy0xu5E0biWscQ4Xr3uq5N7v8": "Longbow Pandemic", "BwAAADJCvDLxmSU8wOjSDKFfgyJbuBi6EQnKascQIW/Uuq5QbvU": "Longbow Meteor Shower", "BwAAADLCshH52daAN1TfzH4jgiDqZBK8CQ+q6sYQYZkLt49NLv4": "Fire Storm", "BwAAADLCstH52daAN9TfzH6jgCBJjxK8CQeq6sYQYZkLtI9Nrv8": "Lightning Bolt", "BwAAADLCshH52daAN9TfzH4jgCCRihK8CQWq6sYQYZkLtY9NLv4": "Fireball", "BwAAADLCstH5WS68N9TfzH7jgCDRYRK8CQGq6sYQYZlLtI9Nbv8": "Magic Missile", "BwAAADLCstH5WS68NxTfzH7jgCB58xK8CQOq6sYQYZnLt49Nbv8": "Magic Missile (rare)", "BwAAADLCstH52daAN1TfzH7jgSBNXhK8CQ2q6sYQYZnLtI9Nrv8": "Chain Lightning", "BwAAADLCuhH52daAIhTTDKFfpQEo1Ru5EzriWscQ4Xr3uq5NLv4": "Longbow Fire Bee", "BwAAADLCshH5WS68N9TfTIsfpgHW0Bu9E1Ti+sYQYXo3pa5Nrv8": "Shock Kiss of Death", "BwAAADLCuhHxWS48wOjSzH5jrwGGwRu1E0LiescQYZkLuq5QbvU": "Rolling Thunder", "BwAAADLCutH52daAItTZDJ7fpAHhRBu/Ez7iuscQIX23uq5NLv4": "Sticky Longbow Bouncing Bonny", "BwAAADICvTLxWSW8IhTfDJ5fhCJ0iRi7EQvK6sYQoX23uu5QbvU": "O-Negative", "BwAAADJCs/L52daAItTZDKGfjSJ5qBi5EQ/KqscQ4W7Uuq5Q7v8": "Corrosive Crossfire", "BwAAADIiS20A5dYAwOjK7H6jgCEaBxK8Cgm6CsYQ4WQwsClY6fQ": "Blockade", "BwAAADIFS20A5dYAwGicy37j2gbb3xuqDW6qOccQYWcwsClY6aM": "The Bee", 
"BwAAADIFS20A5dYAwOiey36j2QbGjRuuDVaqWccQ4WcwsakEKaA": "The Transformer", "BwAAADIFS20A5dYAwCjOb4E8gCJh9Ri8EQXKSscQ4WcwsSlXaew": "Sponge", "BwAAADIFS20A5dYAwOjFy36jlwaOWhu9DQCq+cYQIWYwsKlW6fs": "Flame of the Firehawk", "BwAAADIFS20A5dYAwOjWS7qYugbmPhu7DWqq+cUQYWcwsSlYaeg": "Chitinous Turtle Shield", "BwAAADIFS20A5dYAwOjWy76YugZn0hu7DWqq+cUQ4WcwtqlWaeg": "Selected Turtle Shield", "BwAAADIFS20A5dYAwGidy36j2ga/nRukDXaq+cYQ4WcwsalW6aw": "Black Hole", "BwAAADIFS20A5dYAwGidy4yY2gbR0RukDXaq+cYQIWfwsWlX6aw": "Grounded Black Hole", "BwAAADIiS20A5dYAwGjK7H4jgSGEjRK9Cgu66sYQ4WcwsalWqfU": "Antagonist", "hwAAADINsYrkKkqfwWh1jdAYI8ki6Ti8n8yJu8cDK+/25C2z5zJN": "Banbury Volcano", "hwAAADJNsQrkKkufwWiBjagY08siXTC8n8yLu8cDKzv21C1D5FJN": "Monstrous Chère-amie", "hwAAADKNsQoA5dYAwGidjDhd4s8iQdS8mCyKu9EDK2P17C3D7zJN": "Kull Trespasser", "hwAAADINsArjKkufwWgdL60Yg0A0ere9n6yL+cYDKzv21C2jbARN": "Monstrous Pimpernel", "hwAAADLNsYrjKkufwWhFjagY08kiKwG8n0yJu8cDK+/21C3z5DJN": "Monstrous Railer", "hwAAADINsArkKkufwWhljZgYA8kiXSS8nAyOu9sDKwv21C3z5xJN": "Resource Invader", "hwAAADJEr5j3Dj/XwWihrbFeUUcm76O8XCOIxdkDKwPWvW4Ba9ZI": "Bonus Launcher", "hwAAADJErxj2Dj/XwWjRr6FeoUkmDs28XyOOxdUDKzvWvW6xZrZI": "Roket Pawket PRAZMA CANON", "hwAAADJEr5j3DjXXwWi5rcldgUomp+u8XYOIxd8DKwvWlW7Ba7ZI": "derp Duuurp!", "hwAAADIErpj3DjzXwWjRrylcUkUmgjS8XwOKxdUDK2fUlW7Ba9ZI": "hurty Zooka!", "hwAAADKEr5j3DjzXwWgRrSlcsk4mr128X4OJxdUDK2fUlW7BZnZI": "hurty Roaster", "hwAAADIErpj3DjLXwWhBreldMUgm5Qq8XOOIxdkDKwPWlW5BZ5ZI": "Bustling Bunny", "hwAAADJEr5j3DjPXwWjRrwFdUUcmYiK8XwOKxdUDK2fUtW4Ba9ZI": "Speeedee Launcher", "hwAAADLEr5j3DjHXwWi5rWld0UUmSgG8XYOIxd8DKwvWpW4xa5ZI": "dippity boom", "hwAAADJErpj3DjPXwWixrQFdEUgmCTC8X6OIxdUDKzvWtW6BZNZI": "Speeedee Badaboom", "BwAAADIBS20A5S1fPXd/xYkbgM3MtQqOBQSK/JcqOGg": "Heart of the Ancients", "BwAAADI+S20AZS+/OldkWoEUi/wcxQqOBQTKBSjdR5k": "Proficiency Relic", "BwAAADI+S20A5SO/OlcyAoEci+wcxQqOBQTKBSjdR5k": "Vitality Relic", "BwAAADI+S20AJSK/O1f9WYEbi/4cpQqOxfu1/BfdR5k": "Tenacity Relic", "BwAAADI+S20AZSO/Old8zIEdi/IcxQqOhQ3KBSjdR5k": "Stockpile Relic", "BwAAADI+S20AZS1fPXdS+IkdgMHMtQqOBQUK/BfdR5k": "Skin of the Ancients", "BwAAADIBS20A5SK/O1cVT4ECi6gcxQqOBQSK/Bfdx24": "The Afterburner", "hwAAADLClhT3Fj/nwWgBbWHeQE4l+Eu8nIIOxdUDK6MWpekQYLVJ": "Wyld Asss BlASSter", "hwAAADJKvgT5Nj+nwWh9bdjewksh0OW8X4wIu8cDK8sWjOxybfFJ": "Lucid SubMalevolent Grace", "hwAAADLKv4T4Nj6nwWh9bQDeAkQh1k28X4wIu8cDK8sWhOwSbbFJ": "Apt Venom", "hwAAADIKvoT3NjinwWjpbLDYEk8h0pa8X4wJu8cDKx8WrOwSYPFJ": "Feculent Chulainn", "BwAAADIFS20A5dYAwOjQy7OYrwaMDxu5DWaqGcYQoWfwtulXae4": "Patent Adaptive Shield", "hwAAADIKvgT4NjinwWj5L13YMkI0xaK1X2wK9sYDKx8WhOzybeRJ": "Miss Moxxi's Crit", "hwAAADIClRT4FjvnwWjNLx3foEI031G/mAKK99wDK/cWnelQbWRJ": "Corrosive Kitten", "BwAAADIFS20A5dYAwOjdi5GY0wbHwxuzDYqq+cYQoWcwsKlWabU": "Majestic Corrosive Spike Shield", "hwAAADLLqAb1MievwWh9TTiZ4skhwIu8HeyIutUDK+82jK6S5/FJ": "Sledge's Shotgun", "hwAAADLLqAbzMiavwWgFTVidIskhXYm8HwyJutkDK8c2/K5y5tFJ": "Original Deliverance", "hwAAADKLr4bzMjuvwWj1L5WfIkI0CkG/HqyK99oDK/c27K6ybSRI": "Practicable Slow Hand", "hwAAADKLr4bzMiSvwWj1L52eIkI0fK6/HqyK99oDK/c27K6ybSRI": "Scalable Slow Hand", "hwAAADJClhQA5T7nwWjJbDHeAE4lcVO8n2IJxd8DK7MWlemgZtVJ": "Slippery KerBlaster", "hwAAADJEr5j3DjPXwWhtLx1dAUA0ayS9XQOI+d4DK0vUlW4hb4RI": "fidle dee 12 Pounder", "hwAAADIHS231AjTPwWg1zHGesdMnRfi8ncMOutMDK/+3xaix4jdJ": "Neutralizing Hornet", "hwAAADLLqAYA5dYAwGh9L3VcaEA0OkK9HIyI+dADK8s05K6Sb8RJ": "Captain Blade's Orphan Maker", "BwAAADI+S20A5dYAwCjOToo8gAPythm2HQECKscQoWO1tGxQLAs": "Legendary 
Mechromancer", "BwAAADI+S20A5dYAwOjJzogdgAPgyRm9HQcC6sYQoWO1tGxQLAs": "Slayer of Terramorphous", "BwAAADI+S20ApS1fPXfwpIkegMfM1QqOBQSK/9coeJk": "Bone of the Ancients", "BwAAADI+S20A5dYAwOjOjoI8gAM37hGpHAkK6sYQoWO1tGxQLAs": "Legendary Catalyst", "BwAAADI+S20A5dYAwOjJzogdgAM/+xudEXKr+cYQoWO1tGxQLAs": "Slayer Of Terramorphous Class Mod", "BwAAADI+S20A5dYAwKjODoE8gAN1wBG8HAcK6sYQoWO1tGxQLAs": "Legendary Binder", "BwAAADI+S20A5dYAwCjJjpkdgAMXxxubEXyr+cYQoWO1tGxQLAs": "Legendary Siren", "BwAAADI+S20A5dYAwKjOzpsdgAPwuBuUEX6r+cYQIWO1tGxQLAs": "Chrono Binder", "BwAAADI+S20A5dYAwKjODpodgAMyehuUEX6r+cYQoWG1tGxQLAs": "Hell Binder", "BwAAADI+S20A5dYAwOjJzogdgAMXIBufEZarmccQoWO1tGxQLAs": "Slayer Of Terramorphous Class Mod", "BwAAADI+S20A5dYAwKjOzogdgAM/jBueEYCr2ccQoWN1t6xQLAs": "Slayer Of Terramorphous", "BwAAADI+S20A5dYAwKjOTqMdgANlvhugEYyrWccQoWG1tGxQLAs": "Lucky Hoarder", "BwAAADI+S20A5dYAwKjOzoA8gAPF5RG0HA0KSscQoWO1tGxQLAs": "Legendary Hoarder", "BwAAADI+S20A5dYAwCjJDoI8gAP3KRG7HA0KascQoWO1tGxQLAs": "Legendary Gunzerker", "BwAAADI+S20AJSlfPXdifokcgMPMlQqOBQSK/RcqeJk": "Blood of the Ancients", "BwAAADI+S20A5dYAwOjJzqAdgAMAXhuVEYKr2ccQoWO1t6xQLAs": "Legendary Berserker Class Mod", "BwAAADI+S20A5dYAwOjOzqAdgANdoxuVEYKr2ccQoWO1t6xRLAs": "Legendary Berserker Class Mod", "BwAAADI+S20A5dYAwOjJzogdgAN14hu8EWyruccQoWO1tOxQLAs": "Slayer Of Terramorphous", "BwAAADI+S20A5dYAwKjOTosdgAOj/Ru9EXSruccQIWB1tmxQLAs": "Diehard Veteran Class Mod", "BwAAADI+S20A5dYAwOjJDog8gAP0NR22HAUK6sYQoWO1tGxQLAs": "Slayer of Terramorphous Class Mod", }, "borderlands the pre-sequel": { "igAAADJ+ogDSPtYAPme6Lrha4k4gJV28ni4JxtkBKztVdG/CaBC3": "E-GUN", "igAAADJIsoD8PtYAPmfiDRUd2ME0uay5nk6I/9gBK6dUPG1S7sRI": "Tannis' Laser of Enlightenment", "igAAADJIsoD7PtYAPmcCL3BZUkkgN2G8ni4LxtkBK6dULG1CbrBI": "Miss Moxxi's Vibra-Pulse", "igAAADLIr4D8PtYAwGh9LuhaQk4gIsK8nG4JxtcBK8dV/G7ya5BF": "The ZX-1", "igAAADIIsgAA5dYAPmfmDU0dKME0sGS5no6I/9gBK6dUJG3S7oRI": "MINAC's Atonement", "igAAADLIsgAA5dYAPmfmDU0dKME0nDO5no6I/9gBK6dUPG3S7sRI": "MINAC's Atonement", "igAAADJIsgAA5dYAPmfmDU0dKME07Ve5no6I/9gBK6dUPG3S7sRI": "MINAC's Atonement", "igAAADLIsgD7Pgq3P2eWLDBaQk8ggYK8nY4OxsUBK0NVNG3yYFBI": "Excalibastard", "igAAADIIsoD7PtYAPmfCDcUd+MA0q1K5nq6L/9gBK1dVNG0C72RI": "Thunderfire", "igAAADJIsoD8Pgu3P2e2LsBYEk4gHLa8nc4KxtUBKxNVPG2yaLBI": "Lensed Mining Laser", "igAAADKIsoD8Pgu3wZeYD+1YQksgXUy5ne6K/9QBK0NVJG0ybLBI": "Heated Subdivided Splitter", "igAAADKIsoD8Pgu3zZeYDxVYAksgRBy5ne6K/9QBK0NVLG0SbFBI": "Catalyzing Lancer Railgun", "igAAADIIsgD7Pgu35ZeYDz1fokggSRW5ne6K/9QBK0NVJG0ibHBI": "Niveous Zero Beam", "igAAADKWt7z+RgtH6Zf0DQUYaMo0lK25XAiN/9wBK8eUj+0FbB9K": "%Cu+ie^_^ki||er", "igAAADLWsDzyRgpHP2eeDf3YpcY0Et25WOiI/8QBK1OVl+1F6eRL": "Incorporated Cheat Code", "igAAADKWtzzyRg1HP2eeDS3YpcY0kga5WOiI/8QBK1OVj+1F6QRK": "Accessible Cheat Code", "igAAADKWsDzyRghHP2eeDY3YpcY0vJe5WOiI/8QBK1OVn+1F6aRL": "All-In Cheat Code", "igAAADKWsDz/RglH0ZfkDjXfBUsvXJu5XmiJ/9YBK7OUj+0VbP9L": "Deft Fox", "igAAADJTvrb0UiFvxZeQDh2flMstKU25nq+M/9ABK7s0t6wk7z1J": "Large Spread", "igAAADJTvrb0UitvwWitDz2e9MM0zHi+XxSL+9IBK2s1v6zU7ARJ": "Berrigan", "igAAADLTv7b0Ui9vyZeQDs2fRMEtgma5nq+M/9ABK7s0p6wU7x1J": "Rocket Speed Launcher", "igAAADLTvzbzUihvP2fOT8CYRMIhAVi8ng+KxtEBKxc0p6xU7fFK": "Sparkling Volt Thrower", "igAAADIPtCIA5Qs/P2fSz7AftsMjt7i8nWiKxMcBK6t0vi0W7VNI": "Tl'kope Razorback", "igAAADIZsaLxeg0/P2fmzyicw8Mja3u8mKiKxNMBK0N1/q3z7VNK": "The Machine", "igAAADIZtCLweg8/15dYDm0flscoKae5n6iO/9QBK2N11i2G7XhK": "Auditing Sniper Rifle", "CgAAADKHslTznCI5wCjTCaHagAGPLBu9DQRSO+cQoWZ3tKtRa/0": 
"Bonus Package", "CgAAADKHslTz3NaAONHRCZ4agQFt1Bu+DQZS2+cQ4Wb3tatRK/A": "Quasar", "CgAAADLHoFT53Dx5OFHXCZFagQExsRu8CABSG+cQIWY3vKtRa/0": "Explosive Kiss of Death", "CgAAADIJS20A5dYAwGjeh5HU1grlMBugC5qCGecQ4WF8sKVTJcQ": "Majestic Incendiary Spike Shield", "CgAAADIjS20A5dYAwKjI7X6jgSDCqB6+HQESyucQIWF8sKVTJfU": "Shield of Ages", "CgAAADIjS20A5dYAwKjJ7X7jgiA5wx6/HQMSiucQIW58s6VTZfY": "Naught", "CgAAADIJS20A5dYAwOjBx36j0ArCAxupC1qCOecQoWH8sCVSZdo": "Asteroid Belt", "CgAAADIJS20A5dYAwOjBx37j0ArAkhuqC2SCmeYQYWB8seVRJdo": "Miss Moxxi's Slammer", "CgAAADIJS20A5dYAwCjtx37jsQqVaBuvC16C2ecQoWE8sKVRpdY": "Prismatic Bulwark", "CgAAADIjS20A5dYAwGjJ7cNUgyDtQx64HQ0SiuYQIWF8kyVSZfc": "Hippocratic M0RQ", "CgAAADIJS20A5dYAwGjix34jvQpY5RuyC1SCGecQIWB8sSVR5dE": "Avalanche", "CgAAADIJS20A5dYAwOjtx36jsQrTGhutC1KCuecQIWF8sKV25dY": "Kala", "CgAAADIJS20A5dYAwKjtx35jsQokxBuuC1yCuecQoWH8saVRJdU": "The Sham", "CgAAADIJS20A5dYAwKjqx34jxAr3+Bu3C06CGecQIWF8sCVSZdA": "Supernova", "CgAAADIjS20A5dYAwOjI7X4jgSDAXB69HQcSyucQoW58sCVSJfQ": "Rerouter", "CgAAADI+S20ApSB4OlA1MJNbkwVxmhunClJy2+cQ4WezStKvEgs": "Voltaic Support Relay", "CgAAADI+S20A5dYAwCjYD5VdgALTFhuoEziKuecQYXW0pG1ALQs": "Celestial Gladiator Title.Title_ClassMod", "CgAAADI+S20A5dYAwOjYj5BdgALHthutEz6KmeYQoWu0pG1ALQs": "Eternal Protector Title.Title_ClassMod", "CgAAADI+S20A5dYAwOjYT5VdgAJr0xupEzqK2ecQYXU0pC1fLQs": "Eridian Vanquisher Title.Title_ClassMod", "CgAAADI+S20A5dYAwOjZD4F8gALFKhm9HT8q6ucQYXV0pG1ALQs": "Chronicler Of Elpis Title.Title_ClassMod", "CgAAADI+S20A5dYAwCjYj4NegAJuiBu8DCqKueYQYW20pG1fLQs": "Stampeding Brotrap Title.Title_ClassMod", "CgAAADI+S20A5dYAwKjYz4NegALL8Ru8DDSKueYQ4Wt0pG1fLQs": "Inspirational Brotrap Title.Title_ClassMod", "CgAAADI+S20A5dYAwKjZT4degAIo+hu5DCqKWecQoW10pC1ALQs": "Loot Piñata Title.Title_ClassMod", "CgAAADI+S20A5dYAwOjZD4F8gALIaBm5HTsq6ucQYXX0pa1fLQs": "Chronicler Of Elpis Title.Title_ClassMod", "CgAAADI+S20A5dYAwOjZT4p8gAIwQh2yHAMCCucQIWp0pK1fLQs": "High-and-Mighty Gentry Title.Title_ClassMod", "CgAAADI+S20A5dYAwKjYD4Z8gALIJB2wHA0C6ucQYWq0pG1ALQs": "Posh Blue Blood Title.Title_ClassMod", "CgAAADI+S20A5dYAwKjYD4B8gAK3FB2+HA8CyucQYXW0pG1ALQs": "Celestial Baroness Class Mod", "CgAAADI+S20A5dYAwKjYT4tegALZ1R29HAkCyucQYXW0pG1ALQs": "Eridian Vanquisher Class Mod", "igAAADLIsoD8PtYAwGgdLRBYQksgRv28ne4JxtUBK5dULG0ybNBI": "Catalyzing Subdivided Splitter", "igAAADKIsoD8Pgu3wZeYD+1YQksgXUy5ne6K/9QBK0NVJG0ybLBI": "Heated Subdivided Splitter", "igAAADLTvzbzUihvP2fOT8CYRMIhAVi8ng+KxtEBKxc0p6xU7fFK": "Sparkling Volt Thrower", "igAAADISS+3rVgZnP2fuDa0e6MY0XmG53Q+J/9wBKy8U5+8U6YRI": "Party Popper", }, } ASSET_PATH = "../GibbedBL2/Gibbed.Borderlands%s/projects/Gibbed.Borderlands%s.GameInfo/Resources/%s.json" def get_asset(fn, cache={}): if fn not in cache: if GAME == "borderlands 2": path = ASSET_PATH % ("2", "2", fn) else: path = ASSET_PATH % ("Oz", "Oz", fn) with open(path, "rb") as f: cache[fn] = json.load(f) return cache[fn] def get_asset_library_manager(): config = get_asset("Asset Library Manager") if "_sets_by_id" not in config: config["_sets_by_id"] = {set["id"]: set for set in config["sets"]} if "_find_asset" not in config: cfg = config["_find_asset"] = collections.defaultdict(dict) for set in config["sets"]: for field, libinfo in set["libraries"].items(): for sublib, info in enumerate(libinfo["sublibraries"]): for asset, name in enumerate(info["assets"]): if args.verify and name in cfg[field]: print("Got duplicate", field, name, cfg[field][name], (set["id"], sublib, asset)) cfg[field][name] = set["id"], sublib, asset, info["package"] 
return config def get_balance_info(is_weapon, balance): cls = "Weapon" if is_weapon else "Item" allbal = get_asset(cls + " Balance") if balance not in allbal: config = get_asset_library_manager() setid, sublib, asset, cat = config["_find_asset"]["BalanceDefs"][balance] balance = cat + "." + balance info = allbal[balance] base = get_balance_info(is_weapon, info["base"]) if "base" in info else {"parts": { }} ret = {k:v for k,v in base.items() if k != "parts"} for k,v in info.items(): if k == "parts": mode = v.get("mode") if mode == "Complete": ret["parts"] = { } else: ret["parts"] = {k:v for k,v in base["parts"].items() if k != "mode"} for part, opts in v.items(): if part == "mode": continue if mode == "Additive" and isinstance(ret["parts"].get(part), list) and isinstance(opts, list): ret["parts"][part] = opts + ret["parts"][part] else: ret["parts"][part] = opts elif k != "base": ret[k] = v return ret class ConsumableLE(Consumable): def get(self, num): return super().get(num)[::-1] @classmethod def from_bits(cls, data): return cls(''.join(format(x, "08b")[::-1] for x in data)) def bogocrypt(seed, data, direction="decrypt"): if not seed: return data split = (seed % 32) % len(data) if direction == "encrypt": data = data[split:] + data[:split] if seed > 1<<31: seed |= 31<<32 xor = seed >> 5 data = list(data) for i, x in enumerate(data): xor = (xor * 0x10A860C1) % 0xFFFFFFFB data[i] = x ^ (xor & 255) data = bytes(data) if direction == "encrypt": return data return data[-split:] + data[:-split] @dataclass class Asset: seed: None is_weapon: None type: "*Types" balance: "BalanceDefs" brand: "Manufacturers" grade: int stage: int pieces: ["*Parts"] * 8 material: "*Parts" pfx: "*Parts" title: "*Parts" @property def partnames(self): return partnames(self.is_weapon) @classmethod def decode_asset_library(cls, data): orig = data seed = int.from_bytes(data[1:5], "big") dec = data[:5] + bogocrypt(seed, data[5:], "decrypt") if args.verify: reconstructed = dec[:5] + bogocrypt(seed, dec[5:], "encrypt") if data != reconstructed: print("Imperfect reconstruction of weapon/item:") print(data) print(reconstructed) raise AssertionError data = dec + b"\xFF" * (40 - len(dec)) crc16 = int.from_bytes(data[5:7], "big") data = data[:5] + b"\xFF\xFF" + data[7:] crc = binascii.crc32(data) crc = (crc >> 16) ^ (crc & 65535) if crc != crc16: raise ValueError("Checksum mismatch") config = get_asset_library_manager() is_weapon = data[0] >= 128 weap_item = "Weapon" if is_weapon else "Item" if (data[0] & 127) != config["version"]: raise ValueError("Version number mismatch") uid = int.from_bytes(data[1:5], "little") if not uid: return None setid = data[7] bits = ConsumableLE.from_bits(data[8:]) def _decode(field): cfg = config["configs"][field] asset = bits.get(cfg["asset_bits"]) sublib = bits.get(cfg["sublibrary_bits"] - 1) useset = bits.get(1) if "0" not in (useset+sublib+asset): return None cfg = config["_sets_by_id"][setid if useset == "1" else 0]["libraries"][field] return cfg["sublibraries"][int(sublib,2)]["assets"][int(asset,2)] ret = {"seed": seed, "is_weapon": is_weapon} for field, typ in cls.__dataclass_fields__.items(): typ = typ.type if typ is None: continue if typ is int: ret[field] = int(bits.get(7), 2) elif isinstance(typ, str): ret[field] = _decode(typ.replace("*", weap_item)) elif isinstance(typ, list): ret[field] = [_decode(t.replace("*", weap_item)) for t in typ] else: raise AssertionError("Bad annotation %r" % typ) ret = cls(**ret) if args.verify: if ret.encode_asset_library() != orig: raise 
AssertionError("Weapon reconstruction does not match original: %r" % ret) return ret def encode_asset_library(self): bits = [] config = get_asset_library_manager() fields = [] needsets = {0} def _encode(field, item): cfg = config["configs"][field] fields.append("%s-%d-%d" % (field, cfg["asset_bits"], cfg["sublibrary_bits"])) if item is None: bits.append("1" * (cfg["asset_bits"] + cfg["sublibrary_bits"])) return setid, sublib, asset, cat = config["_find_asset"][field][item] needsets.add(setid) bits.append(format(asset, "0%db" % cfg["asset_bits"])[::-1]) bits.append(format(sublib, "0%db" % (cfg["sublibrary_bits"]-1))[::-1]) bits.append("1" if setid else "0") weap_item = "Weapon" if self.is_weapon else "Item" for field, typ in self.__dataclass_fields__.items(): typ = typ.type if typ is None: continue if typ is int: bits.append(format(getattr(self, field), "07b")[::-1]) elif isinstance(typ, str): _encode(typ.replace("*", weap_item), getattr(self, field)) elif isinstance(typ, list): for t, piece in zip(typ, getattr(self, field)): _encode(t.replace("*", weap_item), piece) if len(needsets) > 2: print("Need multiple set IDs! Cannot encode.", needsets) bits = "".join(bits) bits += "1" * (8 - (len(bits) % 8)) data = int(bits[::-1], 2).to_bytes(len(bits)//8, "little") data = ( bytes([config["version"] | (128 if self.is_weapon else 0)]) + self.seed.to_bytes(4, "big") + b"\xFF\xFF" + bytes([max(needsets)]) + data ) data = data + b"\xFF" * (40 - len(data)) crc = binascii.crc32(data) crc = (crc >> 16) ^ (crc & 65535) return data[:5] + bogocrypt(self.seed, (crc.to_bytes(2, "big") + data[7:]).rstrip(b"\xFF"), "encrypt") def get_title(self): if self.type == "ItemDefs.ID_Ep4_FireHawkMessage": return "FireHawkMessage" weap_item = "Weapon" if self.is_weapon else "Item" config = get_asset_library_manager() setid, sublib, asset, cat = config["_find_asset"][weap_item + "Types"][self.type] typeinfo = get_asset(weap_item + " Types")[cat + "." + self.type] if typeinfo.get("has_full_name"): return typeinfo["name"] names = get_asset(weap_item + " Name Parts") pfxinfo = None if self.pfx: setid, sublib, asset, cat = config["_find_asset"][weap_item + "Parts"][self.pfx] pfxinfo = names.get(cat + "." + self.pfx) if self.title: setid, sublib, asset, cat = config["_find_asset"][weap_item + "Parts"][self.title] titinfo = names.get(cat + "." + self.title) title = titinfo["name"] if titinfo else self.title else: title = "<no title>" if pfxinfo and "name" in pfxinfo: title = pfxinfo["name"] + " " + title return title def __repr__(self): if self.grade == self.stage: lvl = "Lvl %d" % self.grade else: lvl = "Level %d/%d" % (self.grade, self.stage) type = self.type.split(".", 1)[1].replace("WT_", "").replace("WeaponType_", "").replace("_", " ") ret = "%s %s (%s)" % (lvl, self.get_title(), type) if args.itemids: ret += " {%s}" % armor_serial(self.encode_asset_library()) if args.pieces: ret += "\n" + " + ".join(filter(None, self.pieces)) if args.raw: ret += "\n" + ", ".join("%s=%r" % (f, getattr(self, f)) for f in self.__dataclass_fields__) if args.library and "{}" in args.library: args.library += ",{%s}" % armor_serial(self.encode_asset_library()) return ret
MIT License
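The decode_tree function in the record above reads bits through an object exposing a get(n) method (the ConsumableLE class in the same file, which applies its own per-byte bit ordering). Below is a minimal, self-contained sketch of driving that logic; the BitReader class, its plain left-to-right bit order, and the sample bit string are illustrative assumptions and not part of the repository:

class BitReader:
    """Hypothetical stand-in for the repository's ConsumableLE bit consumer."""
    def __init__(self, bits):
        self.bits = bits  # a string of "0"/"1" characters
        self.pos = 0

    def get(self, n):
        # Return the next n bits as a string and advance the cursor.
        chunk = self.bits[self.pos:self.pos + n]
        self.pos += n
        return chunk

def decode_tree(bits):
    # Leaf: a 1 bit followed by eight data bits; node: a 0 bit followed by two subtrees.
    if bits.get(1) == "1":
        return int(bits.get(8), 2)
    return (decode_tree(bits), decode_tree(bits))

# One internal node with two leaves: 65 ("A") on the left, 66 ("B") on the right.
reader = BitReader("0" + "1" + "01000001" + "1" + "01000010")
print(decode_tree(reader))  # -> (65, 66)

Each leaf comes back as the integer value of its eight data bits.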
radlab/sparrow
deploy/third_party/boto-2.1.1/boto/rds/__init__.py
RDSConnection.get_all_events
python
def get_all_events(self, source_identifier=None, source_type=None, start_time=None, end_time=None, max_records=None, marker=None): params = {} if source_identifier and source_type: params['SourceIdentifier'] = source_identifier params['SourceType'] = source_type if start_time: params['StartTime'] = start_time.isoformat() if end_time: params['EndTime'] = end_time.isoformat() if max_records: params['MaxRecords'] = max_records if marker: params['Marker'] = marker return self.get_list('DescribeEvents', params, [('Event', Event)])
Get information about events related to your DBInstances, DBSecurityGroups and DBParameterGroups (a usage sketch follows this record). :type source_identifier: str :param source_identifier: If supplied, the events returned will be limited to those that apply to the identified source. The value of this parameter depends on the value of source_type. If neither parameter is specified, all events in the time span will be returned. :type source_type: str :param source_type: Specifies how the source_identifier should be interpreted. Valid values are: db-instance | db-security-group | db-parameter-group | db-snapshot :type start_time: datetime :param start_time: The beginning of the time interval for events. If not supplied, all available events will be returned. :type end_time: datetime :param end_time: The ending of the time interval for events. If not supplied, all available events will be returned. :type max_records: int :param max_records: The maximum number of records to be returned. If more results are available, a MoreToken will be returned in the response that can be used to retrieve additional records. Default is 100. :type marker: str :param marker: The marker provided by a previous request. :rtype: list :return: A list of :class:`boto.rds.event.Event`
https://github.com/radlab/sparrow/blob/afb8efadeb88524f1394d1abe4ea66c6fd2ac744/deploy/third_party/boto-2.1.1/boto/rds/__init__.py#L941-L996
import boto.utils import urllib from boto.connection import AWSQueryConnection from boto.rds.dbinstance import DBInstance from boto.rds.dbsecuritygroup import DBSecurityGroup from boto.rds.parametergroup import ParameterGroup from boto.rds.dbsnapshot import DBSnapshot from boto.rds.event import Event from boto.rds.regioninfo import RDSRegionInfo def regions(): return [RDSRegionInfo(name='us-east-1', endpoint='rds.us-east-1.amazonaws.com'), RDSRegionInfo(name='eu-west-1', endpoint='rds.eu-west-1.amazonaws.com'), RDSRegionInfo(name='us-west-1', endpoint='rds.us-west-1.amazonaws.com'), RDSRegionInfo(name='ap-northeast-1', endpoint='rds.ap-northeast-1.amazonaws.com'), RDSRegionInfo(name='ap-southeast-1', endpoint='rds.ap-southeast-1.amazonaws.com') ] def connect_to_region(region_name, **kw_params): for region in regions(): if region.name == region_name: return region.connect(**kw_params) return None class RDSConnection(AWSQueryConnection): DefaultRegionName = 'us-east-1' DefaultRegionEndpoint = 'rds.amazonaws.com' APIVersion = '2011-04-01' def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/'): if not region: region = RDSRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path) def _required_auth_capability(self): return ['rds'] def get_all_dbinstances(self, instance_id=None, max_records=None, marker=None): params = {} if instance_id: params['DBInstanceIdentifier'] = instance_id if max_records: params['MaxRecords'] = max_records if marker: params['Marker'] = marker return self.get_list('DescribeDBInstances', params, [('DBInstance', DBInstance)]) def create_dbinstance(self, id, allocated_storage, instance_class, master_username, master_password, port=3306, engine='MySQL5.1', db_name=None, param_group=None, security_groups=None, availability_zone=None, preferred_maintenance_window=None, backup_retention_period=None, preferred_backup_window=None, multi_az=False, engine_version=None, auto_minor_version_upgrade=True): params = {'DBInstanceIdentifier' : id, 'AllocatedStorage' : allocated_storage, 'DBInstanceClass' : instance_class, 'Engine' : engine, 'MasterUsername' : master_username, 'MasterUserPassword' : master_password} if port: params['Port'] = port if db_name: params['DBName'] = db_name if param_group: params['DBParameterGroupName'] = param_group if security_groups: l = [] for group in security_groups: if isinstance(group, DBSecurityGroup): l.append(group.name) else: l.append(group) self.build_list_params(params, l, 'DBSecurityGroups.member') if availability_zone: params['AvailabilityZone'] = availability_zone if preferred_maintenance_window: params['PreferredMaintenanceWindow'] = preferred_maintenance_window if backup_retention_period is not None: params['BackupRetentionPeriod'] = backup_retention_period if preferred_backup_window: params['PreferredBackupWindow'] = preferred_backup_window if multi_az: params['MultiAZ'] = 'true' if engine_version: params['EngineVersion'] = engine_version if auto_minor_version_upgrade is False: params['AutoMinorVersionUpgrade'] = 'false' return self.get_object('CreateDBInstance', params, DBInstance) def create_dbinstance_read_replica(self, id, source_id, 
instance_class=None, port=3306, availability_zone=None, auto_minor_version_upgrade=None): params = {'DBInstanceIdentifier' : id, 'SourceDBInstanceIdentifier' : source_id} if instance_class: params['DBInstanceClass'] = instance_class if port: params['Port'] = port if availability_zone: params['AvailabilityZone'] = availability_zone if auto_minor_version_upgrade is not None: if auto_minor_version_upgrade is True: params['AutoMinorVersionUpgrade'] = 'true' else: params['AutoMinorVersionUpgrade'] = 'false' return self.get_object('CreateDBInstanceReadReplica', params, DBInstance) def modify_dbinstance(self, id, param_group=None, security_groups=None, preferred_maintenance_window=None, master_password=None, allocated_storage=None, instance_class=None, backup_retention_period=None, preferred_backup_window=None, multi_az=False, apply_immediately=False): params = {'DBInstanceIdentifier' : id} if param_group: params['DBParameterGroupName'] = param_group if security_groups: l = [] for group in security_groups: if isinstance(group, DBSecurityGroup): l.append(group.name) else: l.append(group) self.build_list_params(params, l, 'DBSecurityGroups.member') if preferred_maintenance_window: params['PreferredMaintenanceWindow'] = preferred_maintenance_window if master_password: params['MasterUserPassword'] = master_password if allocated_storage: params['AllocatedStorage'] = allocated_storage if instance_class: params['DBInstanceClass'] = instance_class if backup_retention_period is not None: params['BackupRetentionPeriod'] = backup_retention_period if preferred_backup_window: params['PreferredBackupWindow'] = preferred_backup_window if multi_az: params['MultiAZ'] = 'true' if apply_immediately: params['ApplyImmediately'] = 'true' return self.get_object('ModifyDBInstance', params, DBInstance) def delete_dbinstance(self, id, skip_final_snapshot=False, final_snapshot_id=''): params = {'DBInstanceIdentifier' : id} if skip_final_snapshot: params['SkipFinalSnapshot'] = 'true' else: params['SkipFinalSnapshot'] = 'false' params['FinalDBSnapshotIdentifier'] = final_snapshot_id return self.get_object('DeleteDBInstance', params, DBInstance) def reboot_dbinstance(self, id): params = {'DBInstanceIdentifier' : id} return self.get_object('RebootDBInstance', params, DBInstance) def get_all_dbparameter_groups(self, groupname=None, max_records=None, marker=None): params = {} if groupname: params['DBParameterGroupName'] = groupname if max_records: params['MaxRecords'] = max_records if marker: params['Marker'] = marker return self.get_list('DescribeDBParameterGroups', params, [('DBParameterGroup', ParameterGroup)]) def get_all_dbparameters(self, groupname, source=None, max_records=None, marker=None): params = {'DBParameterGroupName' : groupname} if source: params['Source'] = source if max_records: params['MaxRecords'] = max_records if marker: params['Marker'] = marker pg = self.get_object('DescribeDBParameters', params, ParameterGroup) pg.name = groupname return pg def create_parameter_group(self, name, engine='MySQL5.1', description=''): params = {'DBParameterGroupName': name, 'Engine': engine, 'Description' : description} return self.get_object('CreateDBParameterGroup', params, ParameterGroup) def modify_parameter_group(self, name, parameters=None): params = {'DBParameterGroupName': name} for i in range(0, len(parameters)): parameter = parameters[i] parameter.merge(params, i+1) return self.get_list('ModifyDBParameterGroup', params, ParameterGroup, verb='POST') def reset_parameter_group(self, name, reset_all_params=False, 
parameters=None): params = {'DBParameterGroupName':name} if reset_all_params: params['ResetAllParameters'] = 'true' else: params['ResetAllParameters'] = 'false' for i in range(0, len(parameters)): parameter = parameters[i] parameter.merge(params, i+1) return self.get_status('ResetDBParameterGroup', params) def delete_parameter_group(self, name): params = {'DBParameterGroupName':name} return self.get_status('DeleteDBParameterGroup', params) def get_all_dbsecurity_groups(self, groupname=None, max_records=None, marker=None): params = {} if groupname: params['DBSecurityGroupName'] = groupname if max_records: params['MaxRecords'] = max_records if marker: params['Marker'] = marker return self.get_list('DescribeDBSecurityGroups', params, [('DBSecurityGroup', DBSecurityGroup)]) def create_dbsecurity_group(self, name, description=None): params = {'DBSecurityGroupName':name} if description: params['DBSecurityGroupDescription'] = description group = self.get_object('CreateDBSecurityGroup', params, DBSecurityGroup) group.name = name group.description = description return group def delete_dbsecurity_group(self, name): params = {'DBSecurityGroupName':name} return self.get_status('DeleteDBSecurityGroup', params) def authorize_dbsecurity_group(self, group_name, cidr_ip=None, ec2_security_group_name=None, ec2_security_group_owner_id=None): params = {'DBSecurityGroupName':group_name} if ec2_security_group_name: params['EC2SecurityGroupName'] = ec2_security_group_name if ec2_security_group_owner_id: params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id if cidr_ip: params['CIDRIP'] = urllib.quote(cidr_ip) return self.get_object('AuthorizeDBSecurityGroupIngress', params, DBSecurityGroup) def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=None, ec2_security_group_owner_id=None, cidr_ip=None): params = {'DBSecurityGroupName':group_name} if ec2_security_group_name: params['EC2SecurityGroupName'] = ec2_security_group_name if ec2_security_group_owner_id: params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id if cidr_ip: params['CIDRIP'] = cidr_ip return self.get_object('RevokeDBSecurityGroupIngress', params, DBSecurityGroup) revoke_security_group = revoke_dbsecurity_group def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None, max_records=None, marker=None): params = {} if snapshot_id: params['DBSnapshotIdentifier'] = snapshot_id if instance_id: params['DBInstanceIdentifier'] = instance_id if max_records: params['MaxRecords'] = max_records if marker: params['Marker'] = marker return self.get_list('DescribeDBSnapshots', params, [('DBSnapshot', DBSnapshot)]) def create_dbsnapshot(self, snapshot_id, dbinstance_id): params = {'DBSnapshotIdentifier' : snapshot_id, 'DBInstanceIdentifier' : dbinstance_id} return self.get_object('CreateDBSnapshot', params, DBSnapshot) def delete_dbsnapshot(self, identifier): params = {'DBSnapshotIdentifier' : identifier} return self.get_object('DeleteDBSnapshot', params, DBSnapshot) def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id, instance_class, port=None, availability_zone=None): params = {'DBSnapshotIdentifier' : identifier, 'DBInstanceIdentifier' : instance_id, 'DBInstanceClass' : instance_class} if port: params['Port'] = port if availability_zone: params['AvailabilityZone'] = availability_zone return self.get_object('RestoreDBInstanceFromDBSnapshot', params, DBInstance) def restore_dbinstance_from_point_in_time(self, source_instance_id, target_instance_id, use_latest=False, restore_time=None, dbinstance_class=None, 
port=None, availability_zone=None): params = {'SourceDBInstanceIdentifier' : source_instance_id, 'TargetDBInstanceIdentifier' : target_instance_id} if use_latest: params['UseLatestRestorableTime'] = 'true' elif restore_time: params['RestoreTime'] = restore_time.isoformat() if dbinstance_class: params['DBInstanceClass'] = dbinstance_class if port: params['Port'] = port if availability_zone: params['AvailabilityZone'] = availability_zone return self.get_object('RestoreDBInstanceToPointInTime', params, DBInstance)
Apache License 2.0
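The get_all_events method above maps its keyword arguments straight onto the DescribeEvents request parameters. Below is a short sketch of calling it through this module's connect_to_region helper; the region, the instance identifier, and the assumption that boto can already find AWS credentials are all illustrative:

from datetime import datetime, timedelta

import boto.rds

# Assumes AWS credentials are available to boto (environment variables or boto config).
conn = boto.rds.connect_to_region("us-east-1")

# Events for one DB instance over the last 24 hours.
events = conn.get_all_events(
    source_identifier="mydbinstance",   # illustrative identifier
    source_type="db-instance",
    start_time=datetime.utcnow() - timedelta(days=1),
    end_time=datetime.utcnow(),
    max_records=50,
)
for event in events:
    print(event)  # each element is a boto.rds.event.Event

Note that start_time and end_time are passed as datetime objects; the method calls isoformat() on them before sending the request.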
rfyiamcool/elasticsearch_parse
elasticsearch_parse/search.py
Search.params
python
def params(self, **kwargs): s = self._clone() s._params.update(kwargs) return s
Specify query params to be used when executing the search (a further usage sketch follows this record). All the keyword arguments will override the current values. See http://elasticsearch-py.readthedocs.org/en/master/api.html#elasticsearch.Elasticsearch.search for all available parameters. Example:: s = Search() s = s.params(routing='user-1', preference='local')
https://github.com/rfyiamcool/elasticsearch_parse/blob/831421fc1552f03962ca03236b9f53a4041d166c/elasticsearch_parse/search.py#L277-L291
from six import iteritems, string_types from elasticsearch.helpers import scan from .query import Q, EMPTY_QUERY, Filtered from .filter import F, EMPTY_FILTER from .aggs import A, AggBase from .utils import DslBase class BaseProxy(object): def __init__(self, search, attr_name): self._search = search self._proxied = self._empty self._attr_name = attr_name def __nonzero__(self): return self._proxied != self._empty __bool__ = __nonzero__ def __call__(self, *args, **kwargs): s = self._search._clone() getattr(s, self._attr_name)._proxied += self._shortcut(*args, **kwargs) return s def __getattr__(self, attr_name): return getattr(self._proxied, attr_name) def __setattr__(self, attr_name, value): if not attr_name.startswith('_'): self._proxied = self._shortcut(self._proxied.to_dict()) setattr(self._proxied, attr_name, value) super(BaseProxy, self).__setattr__(attr_name, value) class ProxyDescriptor(object): def __init__(self, name): self._attr_name = '_%s_proxy' % name def __get__(self, instance, owner): return getattr(instance, self._attr_name) def __set__(self, instance, value): proxy = getattr(instance, self._attr_name) proxy._proxied = proxy._shortcut(value) class ProxyQuery(BaseProxy): _empty = EMPTY_QUERY _shortcut = staticmethod(Q) class ProxyFilter(BaseProxy): _empty = EMPTY_FILTER _shortcut = staticmethod(F) class AggsProxy(AggBase, DslBase): name = 'aggs' def __init__(self, search): self._base = self._search = search self._params = {'aggs': {}} def to_dict(self): return super(AggsProxy, self).to_dict().get('aggs', {}) class Search(object): query = ProxyDescriptor('query') filter = ProxyDescriptor('filter') post_filter = ProxyDescriptor('post_filter') def __init__(self, using='default', index=None, doc_type=None, extra=None): self._using = using self._index = None if isinstance(index, (tuple, list)): self._index = list(index) elif index: self._index = [index] self._doc_type = [] self._doc_type_map = {} if isinstance(doc_type, (tuple, list)): for dt in doc_type: self._add_doc_type(dt) elif isinstance(doc_type, dict): self._doc_type.extend(doc_type.keys()) self._doc_type_map.update(doc_type) elif doc_type: self._add_doc_type(doc_type) self.aggs = AggsProxy(self) self._sort = [] self._extra = extra or {} self._params = {} self._fields = None self._partial_fields = {} self._highlight = {} self._highlight_opts = {} self._suggest = {} self._script_fields = {} self._query_proxy = ProxyQuery(self, 'query') self._filter_proxy = ProxyFilter(self, 'filter') self._post_filter_proxy = ProxyFilter(self, 'post_filter') def __getitem__(self, n): s = self._clone() if isinstance(n, slice): if n.start and n.start < 0 or n.stop and n.stop < 0: raise ValueError("Search does not support negative slicing.") s._extra['from'] = n.start or 0 s._extra['size'] = n.stop - (n.start or 0) if n.stop is not None else 10 return s else: if n < 0: raise ValueError("Search does not support negative indexing.") s._extra['from'] = n s._extra['size'] = 1 return s @classmethod def from_dict(cls, d): s = cls() s.update_from_dict(d) return s def _clone(self): s = self.__class__(using=self._using, index=self._index, doc_type=self._doc_type) s._doc_type_map = self._doc_type_map.copy() s._sort = self._sort[:] s._fields = self._fields[:] if self._fields is not None else None s._partial_fields = self._partial_fields.copy() s._extra = self._extra.copy() s._highlight = self._highlight.copy() s._highlight_opts = self._highlight_opts.copy() s._suggest = self._suggest.copy() s._script_fields = self._script_fields.copy() for x in ('query', 
'filter', 'post_filter'): getattr(s, x)._proxied = getattr(self, x)._proxied if self.aggs._params.get('aggs'): s.aggs._params = {'aggs': self.aggs._params['aggs'].copy()} s._params = self._params.copy() return s def update_from_dict(self, d): d = d.copy() if 'query' in d: self.query._proxied = Q(d.pop('query')) if 'post_filter' in d: self.post_filter._proxied = F(d.pop('post_filter')) if isinstance(self.query._proxied, Filtered): self.filter._proxied = self.query._proxied.filter self.query._proxied = self.query._proxied.query aggs = d.pop('aggs', d.pop('aggregations', {})) if aggs: self.aggs._params = { 'aggs': dict( (name, A(value)) for (name, value) in iteritems(aggs)) } if 'sort' in d: self._sort = d.pop('sort') if 'fields' in d: self._fields = d.pop('fields') if 'partial_fields' in d: self._partial_fields = d.pop('partial_fields') if 'highlight' in d: high = d.pop('highlight').copy() self._highlight = high.pop('fields') self._highlight_opts = high if 'suggest' in d: self._suggest = d.pop('suggest') if 'text' in self._suggest: text = self._suggest.pop('text') for s in self._suggest.values(): s.setdefault('text', text) if 'script_fields' in d: self._script_fields = d.pop('script_fields') self._extra = d def script_fields(self, **kwargs): s = self._clone() for name in kwargs: if isinstance(kwargs[name], string_types): kwargs[name] = {'script': kwargs[name]} s._script_fields.update(kwargs) return s
MIT License
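Extending the docstring's own example, the sketch below shows that params() leaves the original Search untouched and returns a modified clone. The top-level import and the query() call mirror elasticsearch-dsl usage and are assumptions about how this fork is packaged; the index name is a placeholder:

from elasticsearch_parse import Search  # assumed top-level export, as in elasticsearch-dsl

s = Search(index="logs")               # placeholder index name
s = s.query("match", message="error")  # proxied through the Q(...) shortcut

# params() clones the search and stores the kwargs for the eventual
# client.search(...) call; the original object is not modified.
s2 = s.params(routing="user-1", preference="_local")
assert s2 is not s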
azure/autorest.az
test/scenarios/msgraphuser/output/users_v1_0/azext_users_v1_0/vendored_sdks/users/aio/operations/_users_user_operations.py
UsersUserOperations.delete_user
python
async def delete_user( self, user_id: str, if_match: Optional[str] = None, **kwargs ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) accept = "application/json" url = self.delete_user.metadata['url'] path_format_arguments = { 'user-id': self._serialize.url("user_id", user_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} header_parameters = {} if if_match is not None: header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.OdataError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
Delete entity from users. Delete entity from users. :param user_id: key: id of user. :type user_id: str :param if_match: ETag. :type if_match: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
https://github.com/azure/autorest.az/blob/b000db70f608c64918d04a0e0f5b50bb5468baa0/test/scenarios/msgraphuser/output/users_v1_0/azext_users_v1_0/vendored_sdks/users/aio/operations/_users_user_operations.py#L310-L362
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class UsersUserOperations: models = models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list_user( self, orderby: Optional[List[Union[str, "models.Get5ItemsItem"]]] = None, select: Optional[List[Union[str, "models.Get6ItemsItem"]]] = None, expand: Optional[List[Union[str, "models.Get7ItemsItem"]]] = None, **kwargs ) -> AsyncIterable["models.CollectionOfUser"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) accept = "application/json" def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: url = self.list_user.metadata['url'] query_parameters = {} if self._config.top is not None: query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0) if self._config.skip is not None: query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0) if self._config.search is not None: query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str') if self._config.filter is not None: query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str') if self._config.count is not None: query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool') if orderby is not None: query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',') if select is not None: query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',') if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('CollectionOfUser', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.odata_next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize(models.OdataError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, 
extract_data ) list_user.metadata = {'url': '/users'} async def create_user( self, body: "models.MicrosoftGraphUser", **kwargs ) -> "models.MicrosoftGraphUser": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") accept = "application/json" url = self.create_user.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} body_content = self._serialize.body(body, 'MicrosoftGraphUser') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.OdataError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('MicrosoftGraphUser', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_user.metadata = {'url': '/users'} async def get_user( self, user_id: str, select: Optional[List[Union[str, "models.Get1ItemsItem"]]] = None, expand: Optional[List[Union[str, "models.Get2ItemsItem"]]] = None, **kwargs ) -> "models.MicrosoftGraphUser": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) accept = "application/json" url = self.get_user.metadata['url'] path_format_arguments = { 'user-id': self._serialize.url("user_id", user_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} if select is not None: query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',') if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',') header_parameters = {} header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.OdataError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('MicrosoftGraphUser', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_user.metadata = {'url': '/users/{user-id}'} async def update_user( self, user_id: str, body: "models.MicrosoftGraphUser", **kwargs ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") accept = "application/json" url = self.update_user.metadata['url'] path_format_arguments = { 'user-id': 
self._serialize.url("user_id", user_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} header_parameters = {} header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} body_content = self._serialize.body(body, 'MicrosoftGraphUser') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.OdataError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) update_user.metadata = {'url': '/users/{user-id}'}
MIT License
jingw/pyhdfs
pyhdfs/__init__.py
HdfsClient.set_replication
python
def set_replication(self, path: str, **kwargs: _PossibleArgumentTypes) -> bool:
    response = _json(self._put(path, "SETREPLICATION", **kwargs))["boolean"]
    assert isinstance(response, bool), type(response)
    return response
Set replication for an existing file.

:param replication: new replication
:type replication: short
:returns: true if successful; false if file does not exist or is a directory
:rtype: bool
https://github.com/jingw/pyhdfs/blob/77be5f10dd52ad08bbb1829b65dc05ddf87beefa/pyhdfs/__init__.py#L723-L733
import base64 import binascii import getpass import logging import os import posixpath import random import re import shutil import time import warnings from http import HTTPStatus from typing import Any from typing import Callable from typing import Dict from typing import IO from typing import Iterable from typing import Iterator from typing import List from typing import Optional from typing import Tuple from typing import Type from typing import Union from typing import cast from urllib.parse import quote as url_quote import requests.api import requests.exceptions import simplejson import simplejson.scanner DEFAULT_PORT = 50070 WEBHDFS_PATH = "/webhdfs/v1" __version__ = "0.3.1" _logger = logging.getLogger(__name__) _PossibleArgumentTypes = Union[str, int, None, List[str]] class HdfsException(Exception): class HdfsNoServerException(HdfsException): class HdfsHttpException(HdfsException): _expected_status_code: Optional[int] = None def __init__( self, message: str, exception: str, status_code: int, **kwargs: object ) -> None: assert ( self._expected_status_code is None or self._expected_status_code == status_code ), "Expected status {} for {}, got {}".format( self._expected_status_code, exception, status_code ) super().__init__(message) self.message = message self.exception = exception self.status_code = status_code self.__dict__.update(kwargs) class HdfsIllegalArgumentException(HdfsHttpException): _expected_status_code = 400 class HdfsHadoopIllegalArgumentException(HdfsIllegalArgumentException): pass class HdfsInvalidPathException(HdfsHadoopIllegalArgumentException): pass class HdfsUnsupportedOperationException(HdfsHttpException): _expected_status_code = 400 class HdfsSecurityException(HdfsHttpException): _expected_status_code = 401 class HdfsIOException(HdfsHttpException): _expected_status_code = 403 class HdfsQuotaExceededException(HdfsIOException): pass class HdfsNSQuotaExceededException(HdfsQuotaExceededException): pass class HdfsDSQuotaExceededException(HdfsQuotaExceededException): pass class HdfsAccessControlException(HdfsIOException): pass class HdfsFileAlreadyExistsException(HdfsIOException): pass class HdfsPathIsNotEmptyDirectoryException(HdfsIOException): pass class HdfsRemoteException(HdfsIOException): pass class HdfsRetriableException(HdfsIOException): pass class HdfsStandbyException(HdfsIOException): pass class HdfsSnapshotException(HdfsIOException): pass class HdfsFileNotFoundException(HdfsIOException): _expected_status_code = 404 class HdfsRuntimeException(HdfsHttpException): _expected_status_code = 500 _EXCEPTION_CLASSES: Dict[str, Type[HdfsHttpException]] = { name: member for name, member in globals().items() if isinstance(member, type) and issubclass(member, HdfsHttpException) } class _BoilerplateClass(Dict[str, object]): def __init__(self, **kwargs: object) -> None: super().__init__(**kwargs) self.__dict__ = self def __repr__(self) -> str: kvs = ["{}={!r}".format(k, v) for k, v in self.items()] return "{}({})".format(self.__class__.__name__, ", ".join(kvs)) def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) and dict.__eq__(self, other) def __ne__(self, other: object) -> bool: return not self.__eq__(other) class TypeQuota(_BoilerplateClass): consumed: int quota: int class ContentSummary(_BoilerplateClass): directoryCount: int fileCount: int length: int quota: int spaceConsumed: int spaceQuota: int typeQuota: Dict[str, TypeQuota] class FileChecksum(_BoilerplateClass): algorithm: str bytes: str length: int class 
FileStatus(_BoilerplateClass): accessTime: int blockSize: int group: str length: int modificationTime: int owner: str pathSuffix: str permission: str replication: int symlink: Optional[str] type: str childrenNum: int class HdfsClient(object): def __init__( self, hosts: Union[str, Iterable[str]] = "localhost", randomize_hosts: bool = True, user_name: Optional[str] = None, timeout: float = 20, max_tries: int = 2, retry_delay: float = 5, requests_session: Optional[requests.Session] = None, requests_kwargs: Optional[Dict[str, Any]] = None, ) -> None: if max_tries < 1: raise ValueError("Invalid max_tries: {}".format(max_tries)) if retry_delay < 0: raise ValueError("Invalid retry_delay: {}".format(retry_delay)) self.randomize_hosts = randomize_hosts self.hosts = self._parse_hosts(hosts) if not self.hosts: raise ValueError("No hosts given") if randomize_hosts: self.hosts = list(self.hosts) random.shuffle(self.hosts) self.timeout = timeout self.max_tries = max_tries self.retry_delay = retry_delay self.user_name = user_name or os.environ.get( "HADOOP_USER_NAME", getpass.getuser() ) self._last_time_recorded_active: Optional[float] = None self._requests_session = requests_session or cast( requests.Session, requests.api ) self._requests_kwargs = requests_kwargs or {} for k in ("method", "url", "data", "timeout", "stream", "params"): if k in self._requests_kwargs: raise ValueError("Cannot override requests argument {}".format(k)) def _parse_hosts(self, hosts: Union[str, Iterable[str]]) -> List[str]: host_list = re.split(r",|;", hosts) if isinstance(hosts, str) else list(hosts) for i, host in enumerate(host_list): if ":" not in host: host_list[i] = "{:s}:{:d}".format(host, DEFAULT_PORT) if self.randomize_hosts: random.shuffle(host_list) return host_list def _record_last_active(self, host: str) -> None: assert host in self.hosts self.hosts = [host] + [h for h in self.hosts if h != host] self._last_time_recorded_active = time.time() def _request( self, method: str, path: str, op: str, expected_status: HTTPStatus, **kwargs: _PossibleArgumentTypes, ) -> requests.Response: hosts = self.hosts if not posixpath.isabs(path): raise ValueError("Path must be absolute, was given {}".format(path)) _transform_user_name_key(kwargs) kwargs.setdefault("user.name", self.user_name) formatted_args = " ".join("{}={}".format(*t) for t in kwargs.items()) _logger.info("%s %s %s %s", op, path, formatted_args, ",".join(hosts)) kwargs["op"] = op for i in range(self.max_tries): log_level = logging.DEBUG if i < self.max_tries - 1 else logging.WARNING for host in hosts: try: response = self._requests_session.request( method, "http://{}{}{}".format( host, WEBHDFS_PATH, url_quote(path.encode("utf-8")) ), params=kwargs, timeout=self.timeout, allow_redirects=False, **self._requests_kwargs, ) except ( requests.exceptions.ConnectionError, requests.exceptions.Timeout, ): _logger.log( log_level, "Failed to reach to %s (attempt %d/%d)", host, i + 1, self.max_tries, exc_info=True, ) continue try: _check_response(response, expected_status) except (HdfsRetriableException, HdfsStandbyException): _logger.log( log_level, "%s is in startup or standby mode (attempt %d/%d)", host, i + 1, self.max_tries, exc_info=True, ) continue self._record_last_active(host) return response if i != self.max_tries - 1: time.sleep(self.retry_delay) raise HdfsNoServerException("Could not use any of the given hosts") def _get( self, path: str, op: str, expected_status: Any = HTTPStatus.OK, **kwargs: _PossibleArgumentTypes, ) -> requests.Response: return 
self._request("get", path, op, expected_status, **kwargs) def _put( self, path: str, op: str, expected_status: Any = HTTPStatus.OK, **kwargs: _PossibleArgumentTypes, ) -> requests.Response: return self._request("put", path, op, expected_status, **kwargs) def _post( self, path: str, op: str, expected_status: Any = HTTPStatus.OK, **kwargs: _PossibleArgumentTypes, ) -> requests.Response: return self._request("post", path, op, expected_status, **kwargs) def _delete( self, path: str, op: str, expected_status: Any = HTTPStatus.OK, **kwargs: _PossibleArgumentTypes, ) -> requests.Response: return self._request("delete", path, op, expected_status, **kwargs) def create( self, path: str, data: Union[IO[bytes], bytes], **kwargs: _PossibleArgumentTypes, ) -> None: metadata_response = self._put( path, "CREATE", expected_status=HTTPStatus.TEMPORARY_REDIRECT, **kwargs ) assert not metadata_response.content data_response = self._requests_session.put( metadata_response.headers["location"], data=data, **self._requests_kwargs ) _check_response(data_response, expected_status=HTTPStatus.CREATED) assert not data_response.content def append( self, path: str, data: Union[bytes, IO[bytes]], **kwargs: _PossibleArgumentTypes, ) -> None: metadata_response = self._post( path, "APPEND", expected_status=HTTPStatus.TEMPORARY_REDIRECT, **kwargs ) data_response = self._requests_session.post( metadata_response.headers["location"], data=data, **self._requests_kwargs ) _check_response(data_response) assert not data_response.content def concat( self, target: str, sources: List[str], **kwargs: _PossibleArgumentTypes ) -> None: if not isinstance(sources, list): raise ValueError("sources should be a list") if any("," in s for s in sources): raise NotImplementedError("WebHDFS does not support commas in concat") response = self._post(target, "CONCAT", sources=",".join(sources), **kwargs) assert not response.content def open(self, path: str, **kwargs: _PossibleArgumentTypes) -> IO[bytes]: metadata_response = self._get( path, "OPEN", expected_status=HTTPStatus.TEMPORARY_REDIRECT, **kwargs ) data_response = self._requests_session.get( metadata_response.headers["location"], stream=True, **self._requests_kwargs ) _check_response(data_response) return data_response.raw def mkdirs(self, path: str, **kwargs: _PossibleArgumentTypes) -> bool: response = _json(self._put(path, "MKDIRS", **kwargs))["boolean"] assert isinstance(response, bool), type(response) return response def create_symlink( self, link: str, destination: str, **kwargs: _PossibleArgumentTypes ) -> None: response = self._put(link, "CREATESYMLINK", destination=destination, **kwargs) assert not response.content def rename( self, path: str, destination: str, **kwargs: _PossibleArgumentTypes ) -> bool: response = _json(self._put(path, "RENAME", destination=destination, **kwargs))[ "boolean" ] assert isinstance(response, bool), type(response) return response def delete(self, path: str, **kwargs: _PossibleArgumentTypes) -> bool: response = _json(self._delete(path, "DELETE", **kwargs))["boolean"] assert isinstance(response, bool), type(response) return response def get_file_status( self, path: str, **kwargs: _PossibleArgumentTypes ) -> FileStatus: return FileStatus( **_json(self._get(path, "GETFILESTATUS", **kwargs))["FileStatus"] ) def list_status( self, path: str, **kwargs: _PossibleArgumentTypes ) -> List[FileStatus]: return [ FileStatus(**item) for item in _json(self._get(path, "LISTSTATUS", **kwargs))["FileStatuses"][ "FileStatus" ] ] def get_content_summary( self, path: str, 
**kwargs: _PossibleArgumentTypes ) -> ContentSummary: data = _json(self._get(path, "GETCONTENTSUMMARY", **kwargs))["ContentSummary"] if "typeQuota" in data: data["typeQuota"] = { k: TypeQuota(**v) for k, v in data["typeQuota"].items() } return ContentSummary(**data) def get_file_checksum( self, path: str, **kwargs: _PossibleArgumentTypes ) -> FileChecksum: metadata_response = self._get( path, "GETFILECHECKSUM", expected_status=HTTPStatus.TEMPORARY_REDIRECT, **kwargs, ) assert not metadata_response.content data_response = self._requests_session.get( metadata_response.headers["location"], **self._requests_kwargs ) _check_response(data_response) return FileChecksum(**_json(data_response)["FileChecksum"]) def get_home_directory(self, **kwargs: _PossibleArgumentTypes) -> str: response = _json(self._get("/", "GETHOMEDIRECTORY", **kwargs))["Path"] assert isinstance(response, str), type(response) return response def set_permission(self, path: str, **kwargs: _PossibleArgumentTypes) -> None: response = self._put(path, "SETPERMISSION", **kwargs) assert not response.content def set_owner(self, path: str, **kwargs: _PossibleArgumentTypes) -> None: response = self._put(path, "SETOWNER", **kwargs) assert not response.content
MIT License
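A minimal usage sketch for the set_replication method above, assuming a reachable WebHDFS NameNode; the hostname, user, path, and replication factor are placeholders:

import pyhdfs

# hypothetical NameNode address and user; adjust to your cluster
client = pyhdfs.HdfsClient(hosts="namenode.example.com:50070", user_name="hdfs")
# extra keyword arguments are forwarded as WebHDFS query parameters
ok = client.set_replication("/data/example.txt", replication=3)
print(ok)  # True on success, False if the path is missing or is a directory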
openstack/stevedore
stevedore/extension.py
ExtensionManager.entry_points_names
python
def entry_points_names(self):
    return list(map(operator.attrgetter("name"), self.list_entry_points()))
Return the list of entry points names for this namespace.
https://github.com/openstack/stevedore/blob/442f1571937a5adb92b3494de1603f6f4fcce0ea/stevedore/extension.py#L211-L213
import logging import operator from . import _cache from .exception import NoMatches LOG = logging.getLogger(__name__) class Extension(object): def __init__(self, name, entry_point, plugin, obj): self.name = name self.entry_point = entry_point self.plugin = plugin self.obj = obj @property def module_name(self): match = self.entry_point.pattern.match(self.entry_point.value) return match.group('module') @property def extras(self): return [ getattr(e, 'string', e) for e in self.entry_point.extras ] @property def attr(self): match = self.entry_point.pattern.match(self.entry_point.value) return match.group('attr') @property def entry_point_target(self): return self.entry_point.value class ExtensionManager(object): def __init__(self, namespace, invoke_on_load=False, invoke_args=(), invoke_kwds={}, propagate_map_exceptions=False, on_load_failure_callback=None, verify_requirements=False): self._init_attributes( namespace, propagate_map_exceptions=propagate_map_exceptions, on_load_failure_callback=on_load_failure_callback) extensions = self._load_plugins(invoke_on_load, invoke_args, invoke_kwds, verify_requirements) self._init_plugins(extensions) @classmethod def make_test_instance(cls, extensions, namespace='TESTING', propagate_map_exceptions=False, on_load_failure_callback=None, verify_requirements=False): o = cls.__new__(cls) o._init_attributes(namespace, propagate_map_exceptions=propagate_map_exceptions, on_load_failure_callback=on_load_failure_callback) o._init_plugins(extensions) return o def _init_attributes(self, namespace, propagate_map_exceptions=False, on_load_failure_callback=None): self.namespace = namespace self.propagate_map_exceptions = propagate_map_exceptions self._on_load_failure_callback = on_load_failure_callback def _init_plugins(self, extensions): self.extensions = extensions self._extensions_by_name_cache = None @property def _extensions_by_name(self): if self._extensions_by_name_cache is None: d = {} for e in self.extensions: d[e.name] = e self._extensions_by_name_cache = d return self._extensions_by_name_cache ENTRY_POINT_CACHE = {} def list_entry_points(self): if self.namespace not in self.ENTRY_POINT_CACHE: eps = list(_cache.get_group_all(self.namespace)) self.ENTRY_POINT_CACHE[self.namespace] = eps return self.ENTRY_POINT_CACHE[self.namespace]
Apache License 2.0
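A small sketch of entry_points_names in use; the namespace below is illustrative (it is the group used by stevedore's own example plugins), and only plugins registered under it in the current environment will be listed:

from stevedore.extension import ExtensionManager

# substitute whatever entry-point group your application defines
mgr = ExtensionManager(namespace="stevedore.example.formatter", invoke_on_load=False)
print(mgr.entry_points_names())  # e.g. ['simple', 'plain'] if the example plugins are installed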
openstack/horizon
openstack_dashboard/management/commands/migrate_settings.py
get_module_path
python
def get_module_path(module_name):
    path = sys.path
    for name in module_name.split('.'):
        file_pointer, path, desc = imp.find_module(name, path)
        path = [path, ]
        if file_pointer is not None:
            file_pointer.close()
    return path[0]
Gets the module path without importing anything. Avoids conflicts with package dependencies. (taken from http://github.com/sitkatech/pypatch)
https://github.com/openstack/horizon/blob/5e405d71926764b8aa60c75794b62f668f4e8122/openstack_dashboard/management/commands/migrate_settings.py#L29-L42
import difflib import imp import os import shlex import subprocess import sys import time import warnings from django.core.management.templates import BaseCommand warnings.simplefilter('ignore')
Apache License 2.0
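A brief usage sketch for get_module_path, assuming the helper above is importable (or copied) into the current scope; the dotted name is just an example:

# resolve the on-disk location of a module without importing it
# (and without triggering its import-time side effects)
path = get_module_path('openstack_dashboard.settings')
print(path)  # e.g. .../openstack_dashboard/settings.py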
mfouesneau/tap
tap/tap.py
TAP_Service.get_table_list
python
def get_table_list(self):
    return self.query("""select * from TAP_SCHEMA.tables where schema_name not like 'tap_schema'""")
returns the list of available tables from the service

ADQL query: SELECT * from TAP_SCHEMA.tables

Returns
-------
tab: Table
    list of the tables with description, size and types
https://github.com/mfouesneau/tap/blob/dc304f2981ecd2333d9b5784a10e556187849203/tap/tap.py#L361-L371
import requests import time import math try: from io import BytesIO from http.client import HTTPConnection from urllib.parse import urlencode except ImportError: from StringIO import StringIO as BytesIO from httplib import HTTPConnection from urllib import urlencode from xml.dom.minidom import parseString from lxml import etree import json from astropy.table import Table try: from IPython.display import Markdown, display except ImportError: Markdown = None display = None def _pretty_print_time(t): units = [u"s", u"ms", u'us', "ns"] scaling = [1, 1e3, 1e6, 1e9] if t > 0.0 and t < 1000.0: order = min(-int(math.floor(math.log10(t)) // 3), 3) elif t >= 1000.0: order = 0 else: order = 3 return "%.3g %s" % (t * scaling[order], units[order]) class TAP_AsyncQuery(object): def __init__(self, adql_query, host, path, port=80, session=None, protocol='http'): self.adql = adql_query self.host = host self.port = port self.protocol = protocol self.path = path self.location = None self.jobid = None self.response = None self.session = session def submit(self, silent=False): data = {'query': str(self.adql), 'request': 'doQuery', 'lang': 'ADQL', 'format': 'votable', 'phase': 'run'} headers = { "Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain" } try: cookies = self.session.cookies headers['Cookie'] = ';'.join("{0}={1}".format(k,v) for k,v in cookies.items()) except: pass connection = HTTPConnection(self.host, self.port) connection.request("POST", self.path, urlencode(data), headers) self.response = connection.getresponse() self.location = self.response.getheader("location") self.jobid = self.location[self.location.rfind('/') + 1:] connection.close() if not silent: print("Query Status: " + str(self.response.status), "Reason: " + str(self.response.reason)) print("Location: " + self.location) print("Job id: " + self.jobid) @classmethod def recall_query(cls, host, path, port, jobid, protocol='http'): location = '{1:s}/{2:s}/async/{0:s}'.format(jobid, host, path) q = cls("", host, path, port) q.location = location q.jobid = jobid q.protocol = protocol return q @property def status(self): headers = {} try: location = self.location location = location.split('://')[-1] self.response = self.session.get(self.protocol + '://' + location) data = self.response.text except: connection = HTTPConnection(self.host, self.port) connection.request("GET", self.path + "/" + self.jobid, headers) self.response = connection.getresponse() data = self.response.read() dom = parseString(data) phase_element = dom.getElementsByTagName('uws:phase')[0] phase_value_element = phase_element.firstChild phase = phase_value_element.toxml() return phase @property def finished(self): return self.status == 'COMPLETED' def get(self, sleep=0.2, wait=True): while (not self.finished) & wait: time.sleep(sleep) if not self.finished: return try: location = self.location location = location.split('://')[-1] self.response = self.session.get(self.protocol + '://' + location + "/results/result") self.data = self.response.text except: connection = HTTPConnection(self.host, self.port) connection.request("GET", self.path + "/" + self.jobid + "/results/result") self.response = connection.getresponse() self.data = self.response.read() connection.close() try: table = Table.read(BytesIO(self.data), format="votable") return table except TypeError: table = Table.read(BytesIO(self.data.encode('utf8')), format="votable") return table except Exception as e: content = parseString(self.response.text) text = [] for k in 
content.getElementsByTagName('INFO'): name, value = k.attributes.values() if 'QUERY_STATUS' in value.nodeValue: status = value.nodeValue print(status) text.append(k.firstChild.nodeValue.replace('.', '.\n').replace(':', ':\n')) print(e) raise RuntimeError('Query error.\n{0}'.format('\n'.join(text))) def _repr_markdown_(self): try: from IPython.display import Markdown return Markdown("""*ADQL Query*\n```mysql\n{0}\n```\n* *Status*: `{1}`, Reason `{2}`\n* *Location*: {3}\n* *Job id*: `{4}`\n """.format(str(self.adql), str(self.response.status), str(self.response.reason), self.location, self.jobid))._repr_markdown_() except ImportError: pass class TAP_Service(object): def __init__(self, host, path, port=80, protocol='http', **kargs): self.host = host self.port = port self.path = path self.protocol = protocol self.session = requests.Session() @property def tap_endpoint(self): return "{s.protocol:s}://{s.host:s}{s.path:s}".format(s=self) def recall_query(self, jobid): location = '{1:s}{2:s}/async/{0:s}'.format(jobid, self.host, self.path) q = TAP_AsyncQuery("", self.host, self.path, self.port, self.session, protocol=self.protocol) q.location = location q.jobid = jobid return q def login(self, username, password=None): if password is None: import getpass password = getpass.getpass() r = self.session.post("https://{s.host:s}/tap-server/login".format(s=self), data={'username': username, 'password':password}) if not r.ok: raise RuntimeError('Authentication failed\n' + str(r)) def logout(self): return self.session.post("https://{s.host:s}/tap-server/logout".format(s=self)) def query(self, adql_query, sync=True): if sync: r = self.session.post(self.tap_endpoint + '/sync', data={'query': str(adql_query), 'request': 'doQuery', 'lang': 'ADQL', 'format': 'votable', 'phase': 'run'} ) try: table = Table.read(BytesIO(r.text.encode('utf8')), format="votable") return table except: self.response = r content = parseString(self.response.text) text = [] for k in content.getElementsByTagName('INFO'): name, value = k.attributes.values() if 'QUERY_STATUS' in value.nodeValue: status = value.nodeValue print(status) text.append(k.firstChild.nodeValue.replace('.', '.\n').replace(':', ':\n')) raise RuntimeError('Query error.\n{0}'.format('\n'.join(text))) else: return self.query_async(adql_query) def query_async(self, adql_query, submit=True, **kwargs): q = TAP_AsyncQuery(adql_query, self.host, self.path + '/async', port=self.port, protocol=self.protocol, session=self.session) if submit: q.submit(**kwargs) return q
MIT License
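A sketch of get_table_list against a public TAP service; the ESA Gaia archive endpoint is used here as an assumed example and may need adjusting:

from tap.tap import TAP_Service  # import path assumed from the file layout above

gaia = TAP_Service(host='gea.esac.esa.int', path='/tap-server/tap', protocol='https')
tables = gaia.get_table_list()    # synchronous ADQL query, returns an astropy Table
print(len(tables), 'tables')
print(tables['table_name'][:5])   # column name assumed from the TAP_SCHEMA standard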
googlecloudplatform/ml-on-gcp
gcf/gcf-ai-platform-example/main.py
get_demo_inference_endpoint
python
def get_demo_inference_endpoint(request):
    request_json = request.get_json(silent=True)
    sentence = request_json['sentence']
    service = _connect_service()
    project = 'yourGCPProjectName'
    model = 'demo_model'
    response = _get_model_prediction(service, project, model=model,
                                     body=_generate_payload(sentence))
    return json.dumps(response)
Endpoint to demonstrate requesting an inference from a model hosted via Google's AI Platform.

Args:
    request object with an argument `sentence`
    Expected content type is 'application/json'

Returns:
    JSON formatted response with prediction results
https://github.com/googlecloudplatform/ml-on-gcp/blob/ffd88931674e08ef6b0b20de27700ed1da61772c/gcf/gcf-ai-platform-example/main.py#L100-L120
import json from flask import current_app as app import googleapiclient.discovery def _generate_payload(sentence): return {"instances": [{"sentence": sentence}]} def _get_model_meta(service, project, model='demo_model', version=None): url = f'projects/{project}/models/{model}' if version: url += f'/versions/{version}' response = service.projects().models().versions().get(name=url).execute() meta = response else: response = service.projects().models().get(name=url).execute() meta = response['defaultVersion'] model_id = meta['name'] return meta, model_id def _get_model_prediction(service, project, model='demo_model', version=None, body=None): if body is None: raise NotImplementedError( f"_get_model_prediction didn't get any payload for model {model}") url = f'projects/{project}/models/{model}' if version: url += f'/versions/{version}' response = service.projects().predict(name=url, body=body).execute() return response def _connect_service(): kwargs = {'serviceName': 'ml', 'version': 'v1'} return googleapiclient.discovery.build(**kwargs)
Apache License 2.0
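A sketch of how a client might call the deployed Cloud Function above; the HTTPS trigger URL is a hypothetical deployment address:

import requests

url = "https://us-central1-your-project.cloudfunctions.net/get_demo_inference_endpoint"  # placeholder
resp = requests.post(url, json={"sentence": "What a wonderful movie"})
print(resp.json())  # prediction payload returned by the AI Platform model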
junlulocky/pybgmm
pybgmm/prior/crp.py
CRP.table_simulation
python
def table_simulation(self):
    for n in range(2, self._n):
        rand = random.random()
        p_total = 0
        existing_table = False
        for k, n_k in enumerate(self._tables):
            prob = n_k / (n + self._alpha - 1)
            p_total += prob
            if rand < p_total:
                self._tables[k] += 1
                existing_table = True
                break
        if not existing_table:
            self._tables.append(1)
    return self._tables
draw table & cluster numbers :return:
https://github.com/junlulocky/pybgmm/blob/9e3fb310a48733e66770059c8157d55e9d341529/pybgmm/prior/crp.py#L43-L76
import numpy as np import scipy as sp import random from matplotlib import pyplot as plt class CRP: def __init__(self, n, alpha=3, G_0='normal', name=None): self._n = n self._alpha = alpha self._results = [] self._results.append(np.random.normal(1)) self._tables = [1] def __call__(self): self.process_simulation() self.table_simulation() def process_simulation(self): for i in range(self._n): probability = self._alpha / float(self._alpha + i - 1) tmp = np.random.uniform(size=(1,)) if tmp < probability: self._results.append(np.random.normal(1)) else: self._results.append(np.random.choice(self._results[:i-1], 1)[0])
MIT License
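A minimal sketch of running the CRP simulation above; the import path follows the file location shown and the parameters are arbitrary:

from pybgmm.prior.crp import CRP

crp = CRP(n=500, alpha=3)   # 500 customers, concentration parameter 3
crp()                       # runs process_simulation() and table_simulation()
print(len(crp._tables))     # number of occupied tables
print(crp._tables[:10])     # customers seated at the first few tables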
alexferl/flask-simpleldap
flask_simpleldap/__init__.py
LDAP.get_user_groups
python
def get_user_groups(self, user):
    conn = self.bind
    try:
        if current_app.config['LDAP_OPENLDAP']:
            fields = [str(current_app.config['LDAP_GROUP_MEMBER_FILTER_FIELD'])]
            records = conn.search_s(
                current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE,
                ldap_filter.filter_format(
                    current_app.config['LDAP_GROUP_MEMBER_FILTER'],
                    (self.get_object_details(user, dn_only=True),)),
                fields)
        else:
            records = conn.search_s(
                current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE,
                ldap_filter.filter_format(
                    current_app.config['LDAP_USER_OBJECT_FILTER'], (user,)),
                [current_app.config['LDAP_USER_GROUPS_FIELD']])
        conn.unbind_s()
        if records:
            if current_app.config['LDAP_OPENLDAP']:
                group_member_filter = current_app.config['LDAP_GROUP_MEMBER_FILTER_FIELD']
                record_list = [record[1] for record in records]
                record_dicts = [
                    record for record in record_list if isinstance(record, dict)]
                groups = [item.get([group_member_filter][0])[0]
                          for item in record_dicts]
                return groups
            else:
                if current_app.config['LDAP_USER_GROUPS_FIELD'] in records[0][1]:
                    groups = records[0][1][
                        current_app.config['LDAP_USER_GROUPS_FIELD']]
                    result = [re.findall(b'(?:cn=|CN=)(.*?),', group)[0]
                              for group in groups]
                    result = [r.decode('utf-8') for r in result]
                    return result
    except ldap.LDAPError as e:
        raise LDAPException(self.error(e.args))
Returns a ``list`` with the user's groups or ``None`` if unsuccessful.

:param str user: User we want groups for.
https://github.com/alexferl/flask-simpleldap/blob/78f66d8e195f696f94fa41d352605b5425e2835e/flask_simpleldap/__init__.py#L242-L289
import re from functools import wraps import ldap from ldap import filter as ldap_filter from flask import abort, current_app, g, make_response, redirect, url_for, request __all__ = ['LDAP'] class LDAPException(RuntimeError): message = None def __init__(self, message): self.message = message def __str__(self): return self.message class LDAP(object): def __init__(self, app=None): self.app = app if app is not None: self.init_app(app) @staticmethod def init_app(app): app.config.setdefault('LDAP_HOST', 'localhost') app.config.setdefault('LDAP_PORT', 389) app.config.setdefault('LDAP_SCHEMA', 'ldap') app.config.setdefault('LDAP_USERNAME', None) app.config.setdefault('LDAP_PASSWORD', None) app.config.setdefault('LDAP_TIMEOUT', 10) app.config.setdefault('LDAP_USE_SSL', False) app.config.setdefault('LDAP_USE_TLS', False) app.config.setdefault('LDAP_REQUIRE_CERT', False) app.config.setdefault('LDAP_CERT_PATH', '/path/to/cert') app.config.setdefault('LDAP_BASE_DN', None) app.config.setdefault('LDAP_OBJECTS_DN', 'distinguishedName') app.config.setdefault('LDAP_USER_FIELDS', []) app.config.setdefault('LDAP_USER_OBJECT_FILTER', '(&(objectclass=Person)(userPrincipalName=%s))') app.config.setdefault('LDAP_USER_GROUPS_FIELD', 'memberOf') app.config.setdefault('LDAP_GROUP_FIELDS', []) app.config.setdefault('LDAP_GROUPS_OBJECT_FILTER', 'objectclass=Group') app.config.setdefault('LDAP_GROUP_OBJECT_FILTER', '(&(objectclass=Group)(userPrincipalName=%s))') app.config.setdefault('LDAP_GROUP_MEMBERS_FIELD', 'member') app.config.setdefault('LDAP_LOGIN_VIEW', 'login') app.config.setdefault('LDAP_REALM_NAME', 'LDAP authentication') app.config.setdefault('LDAP_OPENLDAP', False) app.config.setdefault('LDAP_GROUP_MEMBER_FILTER', '*') app.config.setdefault('LDAP_GROUP_MEMBER_FILTER_FIELD', '*') app.config.setdefault('LDAP_CUSTOM_OPTIONS', None) if app.config['LDAP_USE_SSL'] or app.config['LDAP_USE_TLS']: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) if app.config['LDAP_REQUIRE_CERT']: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND) ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, app.config['LDAP_CERT_PATH']) for option in ['USERNAME', 'PASSWORD', 'BASE_DN']: if app.config['LDAP_{0}'.format(option)] is None: raise LDAPException('LDAP_{0} cannot be None!'.format(option)) @staticmethod def _set_custom_options(conn): options = current_app.config['LDAP_CUSTOM_OPTIONS'] if options: for k, v in options.items(): conn.set_option(k, v) return conn @property def initialize(self): try: conn = ldap.initialize('{0}://{1}:{2}'.format( current_app.config['LDAP_SCHEMA'], current_app.config['LDAP_HOST'], current_app.config['LDAP_PORT'])) conn.set_option(ldap.OPT_NETWORK_TIMEOUT, current_app.config['LDAP_TIMEOUT']) conn = self._set_custom_options(conn) conn.protocol_version = ldap.VERSION3 if current_app.config['LDAP_USE_TLS']: conn.start_tls_s() return conn except ldap.LDAPError as e: raise LDAPException(self.error(e.args)) @property def bind(self): conn = self.initialize try: conn.simple_bind_s( current_app.config['LDAP_USERNAME'], current_app.config['LDAP_PASSWORD']) return conn except ldap.LDAPError as e: raise LDAPException(self.error(e.args)) def bind_user(self, username, password): user_dn = self.get_object_details(user=username, dn_only=True) if user_dn is None: return try: conn = self.initialize _user_dn = user_dn.decode('utf-8') if isinstance(user_dn, bytes) else user_dn conn.simple_bind_s(_user_dn, password) return True except ldap.LDAPError: return def get_object_details(self, user=None, 
group=None, query_filter=None, dn_only=False): query = None fields = None if user is not None: if not dn_only: fields = current_app.config['LDAP_USER_FIELDS'] query_filter = query_filter or current_app.config['LDAP_USER_OBJECT_FILTER'] query = ldap_filter.filter_format(query_filter, (user,)) elif group is not None: if not dn_only: fields = current_app.config['LDAP_GROUP_FIELDS'] query_filter = query_filter or current_app.config['LDAP_GROUP_OBJECT_FILTER'] query = ldap_filter.filter_format(query_filter, (group,)) conn = self.bind try: records = conn.search_s(current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE, query, fields) conn.unbind_s() result = {} if records and records[0][0] is not None and isinstance(records[0][1], dict): if dn_only: if current_app.config['LDAP_OPENLDAP']: if records: return records[0][0] else: if current_app.config['LDAP_OBJECTS_DN'] in records[0][1]: dn = records[0][1][ current_app.config['LDAP_OBJECTS_DN']] return dn[0] for k, v in list(records[0][1].items()): result[k] = v return result except ldap.LDAPError as e: raise LDAPException(self.error(e.args)) def get_groups(self, fields=None, dn_only=False): conn = self.bind try: fields = fields or current_app.config['LDAP_GROUP_FIELDS'] if current_app.config['LDAP_OPENLDAP']: records = conn.search_s( current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE, current_app.config['LDAP_GROUPS_OBJECT_FILTER'], fields) else: records = conn.search_s( current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE, current_app.config['LDAP_GROUPS_OBJECT_FILTER'], fields) conn.unbind_s() if records: if dn_only: return [r[0] for r in records] else: return [r[1] for r in records] else: return [] except ldap.LDAPError as e: raise LDAPException(self.error(e.args))
MIT License
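A hedged usage sketch for get_user_groups; the server address, bind credentials, and user name are placeholders, and an application context is needed because the extension reads current_app.config:

from flask import Flask
from flask_simpleldap import LDAP

app = Flask(__name__)
app.config.update(
    LDAP_HOST="ldap.example.com",                  # placeholder directory server
    LDAP_BASE_DN="dc=example,dc=com",
    LDAP_USERNAME="cn=admin,dc=example,dc=com",    # bind DN used for searches
    LDAP_PASSWORD="secret",
)
ldap = LDAP(app)

with app.app_context():
    groups = ldap.get_user_groups("jdoe")          # list of group names, or None
    print(groups)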
kriaga/health-checker
HealthChecker/venv/Lib/site-packages/nltk/sem/boxer.py
Boxer.interpret
python
def interpret(self, input, discourse_id=None, question=False, verbose=False):
    discourse_ids = ([discourse_id] if discourse_id is not None else None)
    d, = self.interpret_multi_sents([[input]], discourse_ids, question, verbose)
    if not d:
        raise Exception('Unable to interpret: "{0}"'.format(input))
    return d
Use Boxer to give a first order representation.

:param input: str Input sentence to parse
:param occur_index: bool Should predicates be occurrence indexed?
:param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
:return: ``drt.DrtExpression``
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/nltk/sem/boxer.py#L80-L93
from __future__ import print_function, unicode_literals import os import re import operator import subprocess from optparse import OptionParser import tempfile from functools import reduce from nltk.internals import find_binary from nltk.sem.logic import (ExpectedMoreTokensException, LogicalExpressionException, UnexpectedTokenException, Variable) from nltk.sem.drt import (DRS, DrtApplicationExpression, DrtEqualityExpression, DrtNegatedExpression, DrtOrExpression, DrtParser, DrtProposition, DrtTokens, DrtVariableExpression) from nltk.compat import python_2_unicode_compatible class Boxer(object): def __init__(self, boxer_drs_interpreter=None, elimeq=False, bin_dir=None, verbose=False, resolve=True): if boxer_drs_interpreter is None: boxer_drs_interpreter = NltkDrtBoxerDrsInterpreter() self._boxer_drs_interpreter = boxer_drs_interpreter self._resolve = resolve self._elimeq = elimeq self.set_bin_dir(bin_dir, verbose) def set_bin_dir(self, bin_dir, verbose=False): self._candc_bin = self._find_binary('candc', bin_dir, verbose) self._candc_models_path = os.path.normpath(os.path.join(self._candc_bin[:-5], '../models')) self._boxer_bin = self._find_binary('boxer', bin_dir, verbose)
MIT License
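A sketch of interpret in use; this assumes the external C&C parser and Boxer binaries are installed and discoverable, since the Boxer constructor locates them via find_binary:

from nltk.sem.boxer import Boxer

boxer = Boxer()  # requires the candc and boxer executables on the search path
drs = boxer.interpret("John sees a dog", discourse_id="d0")
print(drs)       # a DRT expression (drt.DrtExpression)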
knewton/edm2016
rnn_prof/irt/cpd/ogive.py
OgiveCPD._irf_arg
python
def _irf_arg(**params):
    raise NotImplementedError
Compute the argument of the item response function, e.g., for the OnePO model: (thetas + offset_coeffs).
https://github.com/knewton/edm2016/blob/65e7767b79970941eae0fa0fe48f44e3c31a2322/rnn_prof/irt/cpd/ogive.py#L104-L107
from __future__ import division from abc import ABCMeta import logging import numpy as np from scipy import stats as st from .cpd import CPDTerms, CPD, FunctionInfo from ..constants import THETAS_KEY, OFFSET_COEFFS_KEY, NONOFFSET_COEFFS_KEY from ..linear_operators import IndexOperator from ..updaters import UpdateTerms LOGGER = logging.getLogger(__name__) class OgiveCPD(CPD): __metaclass__ = ABCMeta _irf_arg_grad_cache = None _irf_arg_hessian_cache = None @staticmethod def bernoulli_logli(trues, probs, average=False): if trues.shape != probs.shape: raise ValueError("trues and probs have shapes {} and {}, must be numpy arrays of same " "shape".format(trues.shape, probs.shape)) falses = np.logical_not(trues) log_li = np.sum(np.log(probs[trues])) + np.sum(np.log(1.0 - probs[falses])) if average: return log_li / trues.size else: return log_li lin_operators = {} def _to_data_space(self, par_key, x): return self.lin_operators[par_key] * x def _all_to_data_space(self, **params): return {par_key: self._to_data_space(par_key, par) for par_key, par in params.iteritems()} def _to_par_space(self, par_key, x): return self.lin_operators[par_key].rmatvec(x) def _validate_args(self, correct, terms_to_compute, **input_params): if terms_to_compute: self._validate_param_keys(input_params.keys(), terms_to_compute.keys()) if not isinstance(correct, np.ndarray) or correct.ndim != 1: raise ValueError("correct must be 1D numpy array") for par_key, param in input_params.iteritems(): if not isinstance(param, np.ndarray) or param.ndim != 2: raise ValueError("{} must be a 2D numpy array".format(par_key)) def compute_prob_correct(self, **params): params_dataspace = self._all_to_data_space(**params) irf_arg = self._irf_arg(**params_dataspace) return self._prob_correct_from_irf_arg(irf_arg, **params_dataspace) def compute_prob_true(self, **params): return self.compute_prob_correct(**params) def index_map(self, par_key): linear_operator = self.lin_operators[par_key] return linear_operator * np.arange(linear_operator.shape[1]) def _irf_arg_grad(self, key, **params): raise NotImplementedError @staticmethod
Apache License 2.0
wangguojun2018/centernet3d
mmdet3d/core/evaluation/waymo_utils/prediction_kitti_to_waymo.py
KITTI2Waymo.__len__
python
def __len__(self):
    return len(self.waymo_tfrecord_pathnames)
Length of the filename list.
https://github.com/wangguojun2018/centernet3d/blob/5ddee6be104a9a22ee80fdeddc6f2627e36966d2/mmdet3d/core/evaluation/waymo_utils/prediction_kitti_to_waymo.py#L223-L225
try: from waymo_open_dataset import dataset_pb2 as open_dataset except ImportError: raise ImportError( 'Please run "pip install waymo-open-dataset-tf-2-1-0==1.2.0" ' 'to install the official devkit first.') import mmcv import numpy as np import tensorflow as tf from glob import glob from os.path import join from waymo_open_dataset import label_pb2 from waymo_open_dataset.protos import metrics_pb2 class KITTI2Waymo(object): def __init__(self, kitti_result_files, waymo_tfrecords_dir, waymo_results_save_dir, waymo_results_final_path, prefix, workers=64): self.kitti_result_files = kitti_result_files self.waymo_tfrecords_dir = waymo_tfrecords_dir self.waymo_results_save_dir = waymo_results_save_dir self.waymo_results_final_path = waymo_results_final_path self.prefix = prefix self.workers = int(workers) self.name2idx = {} for idx, result in enumerate(kitti_result_files): if len(result['sample_idx']) > 0: self.name2idx[str(result['sample_idx'][0])] = idx if int(tf.__version__.split('.')[0]) < 2: tf.enable_eager_execution() self.k2w_cls_map = { 'Car': label_pb2.Label.TYPE_VEHICLE, 'Pedestrian': label_pb2.Label.TYPE_PEDESTRIAN, 'Sign': label_pb2.Label.TYPE_SIGN, 'Cyclist': label_pb2.Label.TYPE_CYCLIST, } self.T_ref_to_front_cam = np.array([[0.0, 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) self.get_file_names() self.create_folder() def get_file_names(self): self.waymo_tfrecord_pathnames = sorted( glob(join(self.waymo_tfrecords_dir, '*.tfrecord'))) print(len(self.waymo_tfrecord_pathnames), 'tfrecords found.') def create_folder(self): mmcv.mkdir_or_exist(self.waymo_results_save_dir) def parse_objects(self, kitti_result, T_k2w, context_name, frame_timestamp_micros): def parse_one_object(instance_idx): cls = kitti_result['name'][instance_idx] length = round(kitti_result['dimensions'][instance_idx, 0], 4) height = round(kitti_result['dimensions'][instance_idx, 1], 4) width = round(kitti_result['dimensions'][instance_idx, 2], 4) x = round(kitti_result['location'][instance_idx, 0], 4) y = round(kitti_result['location'][instance_idx, 1], 4) z = round(kitti_result['location'][instance_idx, 2], 4) rotation_y = round(kitti_result['rotation_y'][instance_idx], 4) score = round(kitti_result['score'][instance_idx], 4) y -= height / 2 x, y, z = self.transform(T_k2w, x, y, z) heading = -(rotation_y + np.pi / 2) while heading < -np.pi: heading += 2 * np.pi while heading > np.pi: heading -= 2 * np.pi box = label_pb2.Label.Box() box.center_x = x box.center_y = y box.center_z = z box.length = length box.width = width box.height = height box.heading = heading o = metrics_pb2.Object() o.object.box.CopyFrom(box) o.object.type = self.k2w_cls_map[cls] o.score = score o.context_name = context_name o.frame_timestamp_micros = frame_timestamp_micros return o objects = metrics_pb2.Objects() for instance_idx in range(len(kitti_result['name'])): o = parse_one_object(instance_idx) objects.objects.append(o) return objects def convert_one(self, file_idx): file_pathname = self.waymo_tfrecord_pathnames[file_idx] file_data = tf.data.TFRecordDataset(file_pathname, compression_type='') for frame_num, frame_data in enumerate(file_data): frame = open_dataset.Frame() frame.ParseFromString(bytearray(frame_data.numpy())) filename = f'{self.prefix}{file_idx:03d}{frame_num:03d}' for camera in frame.context.camera_calibrations: if camera.name == 1: T_front_cam_to_vehicle = np.array( camera.extrinsic.transform).reshape(4, 4) T_k2w = T_front_cam_to_vehicle @ self.T_ref_to_front_cam context_name = 
frame.context.name frame_timestamp_micros = frame.timestamp_micros if filename in self.name2idx: kitti_result = self.kitti_result_files[self.name2idx[filename]] objects = self.parse_objects(kitti_result, T_k2w, context_name, frame_timestamp_micros) else: print(filename, 'not found.') objects = metrics_pb2.Objects() with open( join(self.waymo_results_save_dir, f'{filename}.bin'), 'wb') as f: f.write(objects.SerializeToString()) def convert(self): print('Start converting ...') mmcv.track_parallel_progress(self.convert_one, range(len(self)), self.workers) print('\nFinished ...') pathnames = sorted(glob(join(self.waymo_results_save_dir, '*.bin'))) combined = self.combine(pathnames) with open(self.waymo_results_final_path, 'wb') as f: f.write(combined.SerializeToString())
Apache License 2.0
alebastr/sway-systemd
src/assign-cgroups.py
XlibHelper.get_net_wm_pid
python
def get_net_wm_pid(self, wid: int) -> int:
    window = self.display.create_resource_object("window", wid)
    net_wm_pid = self.display.get_atom("_NET_WM_PID")
    pid = window.get_full_property(net_wm_pid, X.AnyPropertyType)
    if pid is None:
        raise Exception("Failed to get PID from _NET_WM_PID")
    return int(pid.value.tolist()[0])
Get PID from _NET_WM_PID property of X11 window
https://github.com/alebastr/sway-systemd/blob/cd7d0a8d480c09d8389bec7ace35387d1935b17b/src/assign-cgroups.py#L116-L124
import argparse import asyncio import logging import socket import struct from typing import Optional from dbus_next import Variant from dbus_next.aio import MessageBus from dbus_next.errors import DBusError from i3ipc import Event from i3ipc.aio import Con, Connection from psutil import Process from tenacity import retry, retry_if_exception_type, stop_after_attempt from Xlib import X from Xlib.display import Display try: from Xlib.ext import res as XRes except ImportError: XRes = None LOG = logging.getLogger("assign-cgroups") SD_BUS_NAME = "org.freedesktop.systemd1" SD_OBJECT_PATH = "/org/freedesktop/systemd1" SD_SLICE_FORMAT = "app-{app_id}.slice" SD_UNIT_FORMAT = "app-{app_id}-{unique}.scope" LAUNCHER_APPS = ["nwgbar", "nwgdmenu", "nwggrid"] def escape_app_id(app_id: str) -> str: return app_id.replace("-", "\\x2d") LAUNCHER_APP_CGROUPS = [ SD_SLICE_FORMAT.format(app_id=escape_app_id(app)) for app in LAUNCHER_APPS ] def get_cgroup(pid: int) -> Optional[str]: try: with open(f"/proc/{pid}/cgroup", "r") as file: cgroup = file.read() return cgroup.strip().split(":")[-1] except OSError: LOG.exception("Error geting cgroup info") return None def get_pid_by_socket(sockpath: str) -> int: with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: sock.connect(sockpath) ucred = sock.getsockopt( socket.SOL_SOCKET, socket.SO_PEERCRED, struct.calcsize("iII") ) pid, _, _ = struct.unpack("iII", ucred) return pid class XlibHelper: def __init__(self): self.display = Display() self.use_xres = self._try_init_xres() def _try_init_xres(self) -> bool: if XRes is None or self.display.query_extension(XRes.extname) is None: LOG.warning( "X-Resource extension is not supported. " "Process identification for X11 applications will be less reliable." ) return False ver = self.display.res_query_version() LOG.info( "X-Resource version %d.%d", ver.server_major, ver.server_minor, ) return (ver.server_major, ver.server_minor) >= (1, 2)
MIT License
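A hedged sketch of get_net_wm_pid; it needs a running X/XWayland display, and the window id here is a placeholder that would normally come from the compositor's IPC (e.g. con.window in i3ipc):

helper = XlibHelper()  # opens the default X display, as defined in the module above

wid = 0x1400002        # placeholder X11 window id; use a real one from xwininfo or i3ipc
pid = helper.get_net_wm_pid(wid)
print(f"window {wid:#x} belongs to pid {pid}")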
cstr-edinburgh/snickery
script/file_naming.py
make_synthesis_condition_name
python
def make_synthesis_condition_name(config):
    if config.get('synth_smooth', False):
        smooth='smooth_'
    else:
        smooth=''
    if config.get('greedy_search', False):
        greedy = 'greedy-yes_'
    else:
        greedy = 'greedy-no_'
    target_weights = '-'.join([str(val) for val in config['target_stream_weights']])
    if config['target_representation'] == 'sample':
        name = 'sample_target-%s'%(target_weights)
    else:
        join_weights = '-'.join([str(val) for val in config['join_stream_weights']])
        jcw = config['join_cost_weight']
        jct = config.get('join_cost_type', 'natural2')
        nc = config.get('n_candidates', 30)
        tl = config.get('taper_length', 50)
        name = '%s%starget-%s_join-%s_scale-%s_presel-%s_jmetric-%s_cand-%s_taper-%s'%(
            greedy, smooth, target_weights, join_weights, jcw,
            config['preselection_method'], jct, nc, tl
        )
    name += 'multiepoch-%s'%(config.get('multiepoch', 1))
    return name
Return string encoding all variables which can be ...
https://github.com/cstr-edinburgh/snickery/blob/6d7e0b48cbb21760089bbbe85f6d7bd206f89821/script/file_naming.py#L33-L67
import os from util import safe_makedir def get_data_dump_name(config, joindata=False, joinsql=False, searchtree=False): safe_makedir(os.path.join(config['workdir'], 'data_dumps')) condition = make_train_condition_name(config) assert not (joindata and joinsql) if joindata: last_part = '.joindata.hdf5' elif joinsql: last_part = '.joindata.sql' elif searchtree: last_part = '.searchtree.hdf5' else: last_part = '.hdf5' database_fname = os.path.join(config['workdir'], "data_dumps", condition + last_part) return database_fname def make_train_condition_name(config): if not config['target_representation'] == 'sample': jstreams = '-'.join(config['stream_list_join']) tstreams = '-'.join(config['stream_list_target']) return '%s_utts_jstreams-%s_tstreams-%s_rep-%s'%(config['n_train_utts'], jstreams, tstreams, config.get('target_representation', 'twopoint')) else: streams = '-'.join(config['stream_list_target']) return '%s_utts_streams-%s_rep-%s'%(config['n_train_utts'], streams, config.get('target_representation', 'twopoint'))
Apache License 2.0
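A small worked example of make_synthesis_condition_name (assumed in scope from the module above) with a hypothetical config dict containing only the keys the function reads; defaults fill in the rest:

# hypothetical configuration; snickery normally loads this from a config file
config = {
    'target_representation': 'twopoint',
    'target_stream_weights': [1.0, 0.5],
    'join_stream_weights': [1.0, 1.0],
    'join_cost_weight': 0.5,
    'preselection_method': 'acoustic',
}
print(make_synthesis_condition_name(config))
# -> greedy-no_target-1.0-0.5_join-1.0-1.0_scale-0.5_presel-acoustic_jmetric-natural2_cand-30_taper-50multiepoch-1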
aspose-words-cloud/aspose-words-cloud-python
asposewordscloud/models/page_setup.py
PageSetup.border_always_in_front
python
def border_always_in_front(self):
    return self._border_always_in_front
Gets the border_always_in_front of this PageSetup.  # noqa: E501

Gets or sets a value indicating whether the page border is positioned relative to intersecting texts and objects.  # noqa: E501

:return: The border_always_in_front of this PageSetup.  # noqa: E501
:rtype: bool
https://github.com/aspose-words-cloud/aspose-words-cloud-python/blob/abf8fccfed40aa2b09c6cdcaf3f2723e1f412d85/asposewordscloud/models/page_setup.py#L252-L260
import pprint import re import datetime import six import json class PageSetup(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'link': 'WordsApiLink', 'bidi': 'bool', 'border_always_in_front': 'bool', 'border_applies_to': 'str', 'border_distance_from': 'str', 'bottom_margin': 'float', 'different_first_page_header_footer': 'bool', 'first_page_tray': 'int', 'footer_distance': 'float', 'gutter': 'float', 'header_distance': 'float', 'left_margin': 'float', 'line_number_count_by': 'int', 'line_number_distance_from_text': 'float', 'line_number_restart_mode': 'str', 'line_starting_number': 'int', 'orientation': 'str', 'other_pages_tray': 'int', 'page_height': 'float', 'page_number_style': 'str', 'page_starting_number': 'int', 'page_width': 'float', 'paper_size': 'str', 'restart_page_numbering': 'bool', 'right_margin': 'float', 'rtl_gutter': 'bool', 'section_start': 'str', 'suppress_endnotes': 'bool', 'top_margin': 'float', 'vertical_alignment': 'str' } attribute_map = { 'link': 'Link', 'bidi': 'Bidi', 'border_always_in_front': 'BorderAlwaysInFront', 'border_applies_to': 'BorderAppliesTo', 'border_distance_from': 'BorderDistanceFrom', 'bottom_margin': 'BottomMargin', 'different_first_page_header_footer': 'DifferentFirstPageHeaderFooter', 'first_page_tray': 'FirstPageTray', 'footer_distance': 'FooterDistance', 'gutter': 'Gutter', 'header_distance': 'HeaderDistance', 'left_margin': 'LeftMargin', 'line_number_count_by': 'LineNumberCountBy', 'line_number_distance_from_text': 'LineNumberDistanceFromText', 'line_number_restart_mode': 'LineNumberRestartMode', 'line_starting_number': 'LineStartingNumber', 'orientation': 'Orientation', 'other_pages_tray': 'OtherPagesTray', 'page_height': 'PageHeight', 'page_number_style': 'PageNumberStyle', 'page_starting_number': 'PageStartingNumber', 'page_width': 'PageWidth', 'paper_size': 'PaperSize', 'restart_page_numbering': 'RestartPageNumbering', 'right_margin': 'RightMargin', 'rtl_gutter': 'RtlGutter', 'section_start': 'SectionStart', 'suppress_endnotes': 'SuppressEndnotes', 'top_margin': 'TopMargin', 'vertical_alignment': 'VerticalAlignment' } def __init__(self, link=None, bidi=None, border_always_in_front=None, border_applies_to=None, border_distance_from=None, bottom_margin=None, different_first_page_header_footer=None, first_page_tray=None, footer_distance=None, gutter=None, header_distance=None, left_margin=None, line_number_count_by=None, line_number_distance_from_text=None, line_number_restart_mode=None, line_starting_number=None, orientation=None, other_pages_tray=None, page_height=None, page_number_style=None, page_starting_number=None, page_width=None, paper_size=None, restart_page_numbering=None, right_margin=None, rtl_gutter=None, section_start=None, suppress_endnotes=None, top_margin=None, vertical_alignment=None): self._link = None self._bidi = None self._border_always_in_front = None self._border_applies_to = None self._border_distance_from = None self._bottom_margin = None self._different_first_page_header_footer = None self._first_page_tray = None self._footer_distance = None self._gutter = None self._header_distance = None self._left_margin = None self._line_number_count_by = None self._line_number_distance_from_text = None self._line_number_restart_mode = None self._line_starting_number = None self._orientation = None self._other_pages_tray = None self._page_height = None 
self._page_number_style = None self._page_starting_number = None self._page_width = None self._paper_size = None self._restart_page_numbering = None self._right_margin = None self._rtl_gutter = None self._section_start = None self._suppress_endnotes = None self._top_margin = None self._vertical_alignment = None self.discriminator = None if link is not None: self.link = link if bidi is not None: self.bidi = bidi if border_always_in_front is not None: self.border_always_in_front = border_always_in_front if border_applies_to is not None: self.border_applies_to = border_applies_to if border_distance_from is not None: self.border_distance_from = border_distance_from if bottom_margin is not None: self.bottom_margin = bottom_margin if different_first_page_header_footer is not None: self.different_first_page_header_footer = different_first_page_header_footer if first_page_tray is not None: self.first_page_tray = first_page_tray if footer_distance is not None: self.footer_distance = footer_distance if gutter is not None: self.gutter = gutter if header_distance is not None: self.header_distance = header_distance if left_margin is not None: self.left_margin = left_margin if line_number_count_by is not None: self.line_number_count_by = line_number_count_by if line_number_distance_from_text is not None: self.line_number_distance_from_text = line_number_distance_from_text if line_number_restart_mode is not None: self.line_number_restart_mode = line_number_restart_mode if line_starting_number is not None: self.line_starting_number = line_starting_number if orientation is not None: self.orientation = orientation if other_pages_tray is not None: self.other_pages_tray = other_pages_tray if page_height is not None: self.page_height = page_height if page_number_style is not None: self.page_number_style = page_number_style if page_starting_number is not None: self.page_starting_number = page_starting_number if page_width is not None: self.page_width = page_width if paper_size is not None: self.paper_size = paper_size if restart_page_numbering is not None: self.restart_page_numbering = restart_page_numbering if right_margin is not None: self.right_margin = right_margin if rtl_gutter is not None: self.rtl_gutter = rtl_gutter if section_start is not None: self.section_start = section_start if suppress_endnotes is not None: self.suppress_endnotes = suppress_endnotes if top_margin is not None: self.top_margin = top_margin if vertical_alignment is not None: self.vertical_alignment = vertical_alignment @property def link(self): return self._link @link.setter def link(self, link): self._link = link @property def bidi(self): return self._bidi @bidi.setter def bidi(self, bidi): self._bidi = bidi @property
MIT License
demille/emailhooks
django_nonrel/django/contrib/admindocs/views.py
extract_views_from_urlpatterns
python
def extract_views_from_urlpatterns(urlpatterns, base=''):
    views = []
    for p in urlpatterns:
        if hasattr(p, 'url_patterns'):
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
        elif hasattr(p, 'callback'):
            try:
                views.append((p.callback, base + p.regex.pattern))
            except ViewDoesNotExist:
                continue
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % p)
    return views
Return a list of views from a list of urlpatterns. Each object in the returned list is a two-tuple: (view_func, regex)
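A minimal usage sketch for the function above; the urlconf module name myproject.urls is an assumption, and the output format is illustrative:

# Hypothetical usage: walk a project's root urlconf and print every resolvable view.
from django.utils.importlib import import_module

urlconf = import_module('myproject.urls')  # assumed module name
for view_func, regex in extract_views_from_urlpatterns(urlconf.urlpatterns):
    name = getattr(view_func, '__name__', view_func.__class__.__name__)
    print('%s.%s -> %s' % (view_func.__module__, name, regex))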
https://github.com/demille/emailhooks/blob/16dc3b295ac9d35a20e8d0db52760db2b7e8a822/django_nonrel/django/contrib/admindocs/views.py#L342-L363
import inspect
import os
import re

from django import template
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils._os import upath
from django.utils import six
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe

MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')


class GenericSite(object):
    domain = 'example.com'
    name = 'my site'


@staff_member_required
def doc_index(request):
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    return render_to_response('admin_doc/index.html', {
        'root_path': urlresolvers.reverse('admin:index'),
    }, context_instance=RequestContext(request))


@staff_member_required
def bookmarklets(request):
    admin_root = urlresolvers.reverse('admin:index')
    return render_to_response('admin_doc/bookmarklets.html', {
        'root_path': admin_root,
        'admin_url': "%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root),
    }, context_instance=RequestContext(request))


@staff_member_required
def template_tag_index(request):
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    tags = []
    app_libs = list(six.iteritems(template.libraries))
    builtin_libs = [(None, lib) for lib in template.builtins]
    for module_name, library in builtin_libs + app_libs:
        for tag_name, tag_func in library.tags.items():
            title, body, metadata = utils.parse_docstring(tag_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
            if body:
                body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
            if library in template.builtins:
                tag_library = ''
            else:
                tag_library = module_name.split('.')[-1]
            tags.append({
                'name': tag_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_tag_index.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'tags': tags
    }, context_instance=RequestContext(request))


@staff_member_required
def template_filter_index(request):
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    filters = []
    app_libs = list(six.iteritems(template.libraries))
    builtin_libs = [(None, lib) for lib in template.builtins]
    for module_name, library in builtin_libs + app_libs:
        for filter_name, filter_func in library.filters.items():
            title, body, metadata = utils.parse_docstring(filter_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
            if body:
                body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
            if library in template.builtins:
                tag_library = ''
            else:
                tag_library = module_name.split('.')[-1]
            filters.append({
                'name': filter_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_filter_index.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'filters': filters
    }, context_instance=RequestContext(request))


@staff_member_required
def view_index(request):
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    if settings.ADMIN_FOR:
        settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
    else:
        settings_modules = [settings]

    views = []
    for settings_mod in settings_modules:
        urlconf = import_module(settings_mod.ROOT_URLCONF)
        view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for (func, regex) in view_functions:
            views.append({
                'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'url': simplify_regex(regex),
            })
    return render_to_response('admin_doc/view_index.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'views': views
    }, context_instance=RequestContext(request))


@staff_member_required
def view_detail(request, view):
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    mod, func = urlresolvers.get_mod_func(view)
    try:
        view_func = getattr(import_module(mod), func)
    except (ImportError, AttributeError):
        raise Http404
    title, body, metadata = utils.parse_docstring(view_func.__doc__)
    if title:
        title = utils.parse_rst(title, 'view', _('view:') + view)
    if body:
        body = utils.parse_rst(body, 'view', _('view:') + view)
    for key in metadata:
        metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
    return render_to_response('admin_doc/view_detail.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'name': view,
        'summary': title,
        'body': body,
        'meta': metadata,
    }, context_instance=RequestContext(request))


@staff_member_required
def model_index(request):
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    m_list = [m._meta for m in models.get_models()]
    return render_to_response('admin_doc/model_index.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'models': m_list
    }, context_instance=RequestContext(request))


@staff_member_required
def model_detail(request, app_label, model_name):
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    try:
        app_mod = models.get_app(app_label)
    except ImproperlyConfigured:
        raise Http404(_("App %r not found") % app_label)
    model = None
    for m in models.get_models(app_mod):
        if m._meta.object_name.lower() == model_name:
            model = m
            break
    if model is None:
        raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})

    opts = model._meta

    fields = []
    for field in opts.fields:
        if isinstance(field, models.ForeignKey):
            data_type = field.rel.to.__name__
            app_label = field.rel.to._meta.app_label
            verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
        else:
            data_type = get_readable_field_data_type(field)
            verbose = field.verbose_name
        fields.append({
            'name': field.name,
            'data_type': data_type,
            'verbose': verbose,
            'help_text': field.help_text,
        })

    for field in opts.many_to_many:
        data_type = field.rel.to.__name__
        app_label = field.rel.to._meta.app_label
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
        fields.append({
            'name': "%s.all" % field.name,
            'data_type': 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name': "%s.count" % field.name,
            'data_type': 'Integer',
            'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.module_name),
        })

    for func_name, func in model.__dict__.items():
        if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
            try:
                for exclude in MODEL_METHODS_EXCLUDE:
                    if func_name.startswith(exclude):
                        raise StopIteration
            except StopIteration:
                continue
            verbose = func.__doc__
            if verbose:
                verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
            fields.append({
                'name': func_name,
                'data_type': get_return_data_type(func_name),
                'verbose': verbose,
            })

    for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
        accessor = rel.get_accessor_name()
        fields.append({
            'name': "%s.all" % accessor,
            'data_type': 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name': "%s.count" % accessor,
            'data_type': 'Integer',
            'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.module_name),
        })
    return render_to_response('admin_doc/model_detail.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'name': '%s.%s' % (opts.app_label, opts.object_name),
        'summary': _("Fields on %s objects") % opts.object_name,
        'description': model.__doc__,
        'fields': fields,
    }, context_instance=RequestContext(request))


@staff_member_required
def template_detail(request, template):
    templates = []
    for site_settings_module in settings.ADMIN_FOR:
        settings_mod = import_module(site_settings_module)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for dir in settings_mod.TEMPLATE_DIRS:
            template_file = os.path.join(dir, template)
            templates.append({
                'file': template_file,
                'exists': os.path.exists(template_file),
                'contents': lambda: os.path.exists(template_file) and open(template_file).read() or '',
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
            })
    return render_to_response('admin_doc/template_detail.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'name': template,
        'templates': templates,
    }, context_instance=RequestContext(request))


def missing_docutils_page(request):
    return render_to_response('admin_doc/missing_docutils.html')


def load_all_installed_template_libraries():
    for module_name in template.get_templatetags_modules():
        mod = import_module(module_name)
        try:
            libraries = [
                os.path.splitext(p)[0]
                for p in os.listdir(os.path.dirname(upath(mod.__file__)))
                if p.endswith('.py') and p[0].isalpha()
            ]
        except OSError:
            libraries = []
        for library_name in libraries:
            try:
                lib = template.get_library(library_name)
            except template.InvalidTemplateLibrary:
                pass


def get_return_data_type(func_name):
    if func_name.startswith('get_'):
        if func_name.endswith('_list'):
            return 'List'
        elif func_name.endswith('_count'):
            return 'Integer'
    return ''


def get_readable_field_data_type(field):
    return field.description % field.__dict__
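The two helpers at the end of this module infer display types purely from naming conventions, which is worth seeing concretely. A tiny sketch (the accessor names below are made up):

# Made-up accessor names; outputs follow from get_return_data_type as defined above.
print(get_return_data_type('get_latest_list'))    # 'List'
print(get_return_data_type('get_comment_count'))  # 'Integer'
print(get_return_data_type('delete'))             # ''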
MIT License
briandilley/ebs-deploy
ebs_deploy/__init__.py
EbsHelper.environment_exists
python
def environment_exists(self, env_name):
    response = self.ebs.describe_environments(application_name=self.app_name,
                                              environment_names=[env_name],
                                              include_deleted=False)
    return len(response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']) > 0 \
        and response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments'][0]['Status'] != 'Terminated'
Returns whether or not the given environment exists
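A short usage sketch for this method; every credential, region, bucket, application and environment name below is a placeholder, and constructing EbsHelper (defined in the module context that follows) opens real boto connections:

# Placeholder credentials and names; AwsCredentials and EbsHelper come from the same module.
aws = AwsCredentials('ACCESS_KEY', 'SECRET_KEY', None, 'us-east-1', 'my-deploy-bucket', 'archives')
helper = EbsHelper(aws, wait_time_secs=300, app_name='my-app')

if helper.environment_exists('my-app-prod'):
    out('Environment already exists; skipping creation.')
else:
    # Solution stack name is illustrative; pick one from list_available_solution_stacks().
    helper.create_environment('my-app-prod', solution_stack_name='64bit Amazon Linux running Python')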
https://github.com/briandilley/ebs-deploy/blob/91e4b6741ccd7d8d00380db3237157d108d871ec/ebs_deploy/__init__.py#L328-L336
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection
from boto.beanstalk import connect_to_region
from boto.s3.key import Key
from datetime import datetime
from time import time, sleep
import zipfile
import os
import subprocess
import sys
import yaml
import re
import functools

MAX_RED_SAMPLES = 20


def out(message):
    sys.stdout.write(message + "\n")
    sys.stdout.flush()


def merge_dict(dict1, dict2):
    ret = dict(dict2)
    for key, val in list(dict1.items()):
        val2 = dict2.get(key)
        if val2 is None:
            ret[key] = val
        elif isinstance(val, dict) and isinstance(val2, dict):
            ret[key] = merge_dict(val, val2)
        elif isinstance(val, (list)) and isinstance(val2, (list)):
            ret[key] = val + val2
        else:
            ret[key] = val2
    return ret


def get(vals, key, default_val=None):
    val = vals
    for part in key.split('.'):
        if isinstance(val, dict):
            val = val.get(part, None)
            if val is None:
                return default_val
        else:
            return default_val
    return val


def parse_option_settings(option_settings):
    ret = []
    for namespace, params in list(option_settings.items()):
        for key, value in list(params.items()):
            ret.append((namespace, key, value))
    return ret


def parse_env_config(config, env_name):
    all_env = get(config, 'app.all_environments', {})
    env = get(config, 'app.environments.' + str(env_name), {})
    return merge_dict(all_env, env)


def upload_application_archive(helper, env_config, archive=None, directory=None, version_label=None):
    if version_label is None:
        version_label = datetime.now().strftime('%Y%m%d_%H%M%S')
    archive_file_name = None
    if archive:
        archive_file_name = os.path.basename(archive)
    archive_files = get(env_config, 'archive.files', [])

    if get(env_config, 'archive.generate'):
        cmd = get(env_config, 'archive.generate.cmd')
        output_file = get(env_config, 'archive.generate.output_file')
        use_shell = get(env_config, 'archive.generate.use_shell', True)
        exit_code = get(env_config, 'archive.generate.exit_code', 0)
        if not cmd or not output_file:
            raise Exception('Archive generation requires cmd and output_file at a minimum')
        output_regex = None
        try:
            output_regex = re.compile(output_file)
        except:
            pass
        result = subprocess.call(cmd, shell=use_shell)
        if result != exit_code:
            raise Exception('Generate command exited with code %s (expected %s)' % (result, exit_code))
        if output_file and os.path.exists(output_file):
            archive_file_name = os.path.basename(output_file)
            directory = os.path.dirname(output_file)
            archive = output_file
        else:
            for root, dirs, files in os.walk(".", followlinks=True):
                for f in files:
                    fullpath = os.path.join(root, f)
                    if fullpath.endswith(output_file):
                        archive = fullpath
                        archive_file_name = os.path.basename(fullpath)
                        break
                    elif output_regex and output_regex.match(fullpath):
                        archive = fullpath
                        archive_file_name = os.path.basename(fullpath)
                        break
                if archive:
                    break
        if not archive or not archive_file_name:
            raise Exception('Unable to find expected output file matching: %s' % (output_file))

    elif not archive:
        if not directory:
            directory = "."
        includes = get(env_config, 'archive.includes', [])
        excludes = get(env_config, 'archive.excludes', [])

        def _predicate(f):
            for exclude in excludes:
                if re.match(exclude, f):
                    return False
            if len(includes) > 0:
                for include in includes:
                    if re.match(include, f):
                        return True
                return False
            return True

        archive = create_archive(directory, str(version_label) + ".zip", config=archive_files, ignore_predicate=_predicate)
        archive_file_name = str(version_label) + ".zip"

    add_config_files_to_archive(directory, archive, config=archive_files)
    helper.upload_archive(archive, archive_file_name)
    helper.create_application_version(version_label, archive_file_name)
    return version_label


def create_archive(directory, filename, config={}, ignore_predicate=None, ignored_files=['.git', '.svn']):
    with zipfile.ZipFile(filename, 'w', compression=zipfile.ZIP_DEFLATED) as zip_file:
        root_len = len(os.path.abspath(directory))
        out("Creating archive: " + str(filename))
        for root, dirs, files in os.walk(directory, followlinks=True):
            archive_root = os.path.abspath(root)[root_len + 1:]
            for f in files:
                fullpath = os.path.join(root, f)
                archive_name = os.path.join(archive_root, f)
                if filename in fullpath:
                    continue
                if ignored_files is not None:
                    for name in ignored_files:
                        if fullpath.endswith(name):
                            out("Skipping: " + str(name))
                            continue
                if ignore_predicate is not None:
                    if not ignore_predicate(archive_name):
                        out("Skipping: " + str(archive_name))
                        continue
                out("Adding: " + str(archive_name))
                zip_file.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
    return filename


def add_config_files_to_archive(directory, filename, config={}):
    with zipfile.ZipFile(filename, 'a') as zip_file:
        for conf in config:
            for conf, tree in list(conf.items()):
                if 'yaml' in tree:
                    content = yaml.dump(tree['yaml'], default_flow_style=False)
                else:
                    content = tree.get('content', '')
                out("Adding file " + str(conf) + " to archive " + str(filename))
                file_entry = zipfile.ZipInfo(conf)
                file_entry.external_attr = tree.get('permissions', 0o644) << 16
                zip_file.writestr(file_entry, content)
    return filename


class AwsCredentials:
    def __init__(self, access_key, secret_key, security_token, region, bucket, bucket_path):
        self.access_key = access_key
        self.secret_key = secret_key
        self.security_token = security_token
        self.bucket = bucket
        self.region = region
        self.bucket_path = bucket_path
        if not self.bucket_path.endswith('/'):
            self.bucket_path += '/'


class EbsHelper(object):
    def __init__(self, aws, wait_time_secs, app_name=None,):
        self.aws = aws
        self.ebs = connect_to_region(aws.region,
                                     aws_access_key_id=aws.access_key,
                                     aws_secret_access_key=aws.secret_key,
                                     security_token=aws.security_token)
        self.s3 = S3Connection(
            aws_access_key_id=aws.access_key,
            aws_secret_access_key=aws.secret_key,
            security_token=aws.security_token,
            host=(lambda r: 's3.amazonaws.com' if r == 'us-east-1' else 's3-' + r + '.amazonaws.com')(aws.region))
        self.app_name = app_name
        self.wait_time_secs = wait_time_secs

    def swap_environment_cnames(self, from_env_name, to_env_name):
        self.ebs.swap_environment_cnames(source_environment_name=from_env_name,
                                         destination_environment_name=to_env_name)

    def upload_archive(self, filename, key, auto_create_bucket=True):
        try:
            bucket = self.s3.get_bucket(self.aws.bucket)
            if ((self.aws.region != 'us-east-1' and self.aws.region != 'eu-west-1') and bucket.get_location() != self.aws.region) or (
                    self.aws.region == 'us-east-1' and bucket.get_location() != '') or (
                    self.aws.region == 'eu-west-1' and bucket.get_location() != 'eu-west-1'):
                raise Exception("Existing bucket doesn't match region")
        except S3ResponseError:
            bucket = self.s3.create_bucket(self.aws.bucket, location=self.aws.region)

        def __report_upload_progress(sent, total):
            if not sent:
                sent = 0
            if not total:
                total = 0
            out("Uploaded " + str(sent) + " bytes of " + str(total) + " (" + str(int(float(max(1, sent)) / float(total) * 100)) + "%)")

        k = Key(bucket)
        k.key = self.aws.bucket_path + key
        k.set_metadata('time', str(time()))
        k.set_contents_from_filename(filename, cb=__report_upload_progress, num_cb=10)

    def list_available_solution_stacks(self):
        stacks = self.ebs.list_available_solution_stacks()
        return stacks['ListAvailableSolutionStacksResponse']['ListAvailableSolutionStacksResult']['SolutionStacks']

    def create_application(self, description=None):
        out("Creating application " + str(self.app_name))
        self.ebs.create_application(self.app_name, description=description)

    def delete_application(self):
        out("Deleting application " + str(self.app_name))
        self.ebs.delete_application(self.app_name, terminate_env_by_force=True)

    def application_exists(self):
        response = self.ebs.describe_applications(application_names=[self.app_name])
        return len(response['DescribeApplicationsResponse']['DescribeApplicationsResult']['Applications']) > 0

    def create_environment(self, env_name, version_label=None, solution_stack_name=None, cname_prefix=None,
                           description=None, option_settings=None, tier_name='WebServer', tier_type='Standard',
                           tier_version='1.1'):
        out("Creating environment: " + str(env_name) + ", tier_name:" + str(tier_name) + ", tier_type:" + str(tier_type))
        self.ebs.create_environment(self.app_name, env_name,
                                    version_label=version_label,
                                    solution_stack_name=solution_stack_name,
                                    cname_prefix=cname_prefix,
                                    description=description,
                                    option_settings=option_settings,
                                    tier_type=tier_type,
                                    tier_name=tier_name,
                                    tier_version=tier_version)
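The small config helpers near the top of this file (get, merge_dict, parse_option_settings) do the YAML-config plumbing for the calls above. A quick sketch with made-up values shows the shapes involved:

# Made-up config: parse_option_settings flattens nested namespaces into (namespace, key, value) tuples.
cfg = {
    'aws:elasticbeanstalk:application:environment': {'DJANGO_SETTINGS_MODULE': 'settings.prod'},
    'aws:autoscaling:asg': {'MinSize': 2},
}
print(parse_option_settings(cfg))
# e.g. [('aws:elasticbeanstalk:application:environment', 'DJANGO_SETTINGS_MODULE', 'settings.prod'),
#       ('aws:autoscaling:asg', 'MinSize', 2)]

# get() walks dotted paths through nested dicts, returning a default when a segment is missing.
print(get({'app': {'environments': {'prod': {'cname_prefix': 'myapp-prod'}}}},
          'app.environments.prod.cname_prefix'))     # 'myapp-prod'
print(get({}, 'app.environments.prod', 'missing'))   # 'missing'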
MIT License