text (string, 6–947k chars) | repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 values) | size (int64, 6–947k) | score (float64, 0–0.34)
---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cellar Release Notes'
copyright = u'2016, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GlanceReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
u'Glance Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
[u'Glance Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
u'Glance Developers', 'GlanceReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
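# A typical local build of these release notes, given the configuration above
# (a sketch, not part of the generated file; it assumes Sphinx plus the
# oslosphinx and reno extensions listed in `extensions` are installed, and
# that this file lives at releasenotes/source/conf.py):
#
#     sphinx-build -b html releasenotes/source releasenotes/build/html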
| internap/arsenal | releasenotes/source/conf.py | Python | apache-2.0 | 8,940 | 0 |
"""List the IP forwarding rules"""
from baseCmd import *
from baseResponse import *
class listIpForwardingRulesCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "false"
"""list resources by account. Must be used with the domainId parameter."""
self.account = None
self.typeInfo['account'] = 'string'
"""list only resources belonging to the domain specified"""
self.domainid = None
self.typeInfo['domainid'] = 'uuid'
"""Lists rule with the specified ID."""
self.id = None
self.typeInfo['id'] = 'uuid'
"""list the rule belonging to this public IP address"""
self.ipaddressid = None
self.typeInfo['ipaddressid'] = 'uuid'
"""defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves."""
self.isrecursive = None
self.typeInfo['isrecursive'] = 'boolean'
"""List by keyword"""
self.keyword = None
self.typeInfo['keyword'] = 'string'
"""If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false"""
self.listall = None
self.typeInfo['listall'] = 'boolean'
""""""
self.page = None
self.typeInfo['page'] = 'integer'
""""""
self.pagesize = None
self.typeInfo['pagesize'] = 'integer'
"""list objects by project"""
self.projectid = None
self.typeInfo['projectid'] = 'uuid'
"""Lists all rules applied to the specified VM."""
self.virtualmachineid = None
self.typeInfo['virtualmachineid'] = 'uuid'
self.required = []
class listIpForwardingRulesResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the ID of the port forwarding rule"""
self.id = None
self.typeInfo['id'] = 'string'
"""the cidr list to forward traffic from"""
self.cidrlist = None
self.typeInfo['cidrlist'] = 'string'
"""is firewall for display to the regular user"""
self.fordisplay = None
self.typeInfo['fordisplay'] = 'boolean'
"""the public ip address for the port forwarding rule"""
self.ipaddress = None
self.typeInfo['ipaddress'] = 'string'
"""the public ip address id for the port forwarding rule"""
self.ipaddressid = None
self.typeInfo['ipaddressid'] = 'string'
"""the id of the guest network the port forwarding rule belongs to"""
self.networkid = None
self.typeInfo['networkid'] = 'string'
"""the ending port of port forwarding rule's private port range"""
self.privateendport = None
self.typeInfo['privateendport'] = 'string'
"""the starting port of port forwarding rule's private port range"""
self.privateport = None
self.typeInfo['privateport'] = 'string'
"""the protocol of the port forwarding rule"""
self.protocol = None
self.typeInfo['protocol'] = 'string'
"""the ending port of port forwarding rule's private port range"""
self.publicendport = None
self.typeInfo['publicendport'] = 'string'
"""the starting port of port forwarding rule's public port range"""
self.publicport = None
self.typeInfo['publicport'] = 'string'
"""the state of the rule"""
self.state = None
self.typeInfo['state'] = 'string'
"""the VM display name for the port forwarding rule"""
self.virtualmachinedisplayname = None
self.typeInfo['virtualmachinedisplayname'] = 'string'
"""the VM ID for the port forwarding rule"""
self.virtualmachineid = None
self.typeInfo['virtualmachineid'] = 'string'
"""the VM name for the port forwarding rule"""
self.virtualmachinename = None
self.typeInfo['virtualmachinename'] = 'string'
"""the vm ip address for the port forwarding rule"""
self.vmguestip = None
self.typeInfo['vmguestip'] = 'string'
"""the list of resource tags associated with the rule"""
self.tags = []
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
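# Usage sketch (not part of the generated module): a command object is filled
# in and handed to a Marvin API client, which returns a list of response
# objects. The "apiclient" argument and the placeholder UUID below are
# assumptions, not something this file defines.
from marvin.cloudstackAPI.listIpForwardingRules import listIpForwardingRulesCmd

def list_rules_for_ip(apiclient, public_ip_id):
    cmd = listIpForwardingRulesCmd()
    cmd.ipaddressid = public_ip_id   # placeholder: UUID of the public IP
    cmd.listall = True
    # Marvin API clients typically expose one method per command, named after it.
    return apiclient.listIpForwardingRules(cmd) or []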
| MissionCriticalCloud/marvin | marvin/cloudstackAPI/listIpForwardingRules.py | Python | apache-2.0 | 4,977 | 0.001005 |
import os
from datetime import date
from unittest.mock import MagicMock, call
import pytest
import imagesize
from kaleidoscope import renderer, generator
from kaleidoscope.model import Gallery, Album, Section, Photo
from kaleidoscope.generator import generate, DefaultListener
def test_generate_gallery_index(tmpdir, disable_resize):
"""Generator should generate gallery index file."""
gallery = Gallery("Testing Gallery", "The Tester", [])
generate(gallery, str(tmpdir))
assert tmpdir.join("index.html").check()
def test_gallery_index_context(tmpdir, monkeypatch, disable_resize):
"""Generator should provide the gallery object for index template."""
render_mock = MagicMock()
monkeypatch.setattr(renderer, 'render', render_mock)
gallery = Gallery("Testing Gallery", "The Tester", [])
generate(gallery, str(tmpdir))
render_mock.assert_called_with(
"gallery.html",
str(tmpdir.join("index.html")),
{'gallery': gallery, 'current_year': date.today().year}
)
def test_album_index_generated(tmpdir, gallery_with_one_photo, disable_resize):
"""Generator should create album index file."""
generate(gallery_with_one_photo, str(tmpdir))
assert tmpdir.join("album", "index.html").exists()
def test_album_index_context(tmpdir, monkeypatch, disable_resize):
"""
Generator should provide the correct context to the album template.
"""
render_mock = MagicMock()
monkeypatch.setattr(renderer, 'render', render_mock)
album = Album("album", "The Album", date(2017, 6, 24), [])
gallery = Gallery("Testin Gallery", "The Tester", [album])
generate(gallery, str(tmpdir))
render_mock.assert_called_with(
"album.html",
str(tmpdir.join("album", "index.html")),
{'album': album, 'gallery': gallery, 'current_year': date.today().year}
)
def test_resize_thumbnail(tmpdir, gallery_with_one_photo):
"""Generator should create thumbnail file."""
generate(gallery_with_one_photo, str(tmpdir))
thumb_path = tmpdir.join("album", "thumb", "photo.jpg")
assert thumb_path.exists()
assert imagesize.get(str(thumb_path)) <= (300, 200)
def test_resize_large(tmpdir, gallery_with_one_photo):
"""Generator should create large resized file."""
generate(gallery_with_one_photo, str(tmpdir))
large_path = tmpdir.join("album", "large", "photo.jpg")
assert large_path.exists()
assert imagesize.get(str(large_path)) <= (1500, 1000)
def test_resize_existing(tmpdir, gallery_with_one_photo):
"""When resized image allready exists, do not resize it again."""
thumb_path = tmpdir.join("album", "thumb", "photo.jpg")
large_path = tmpdir.join("album", "large", "photo.jpg")
thumb_path.ensure()
large_path.ensure()
original_thumb_mtime = thumb_path.mtime()
original_large_mtime = large_path.mtime()
generate(gallery_with_one_photo, str(tmpdir))
assert thumb_path.mtime() == original_thumb_mtime
assert large_path.mtime() == original_large_mtime
def test_resized_images_metadata(tmpdir, gallery_with_one_photo):
"""Generator should fill resized images metadata in the Photo."""
generate(gallery_with_one_photo, str(tmpdir))
photo = next(gallery_with_one_photo.albums[0].photos)
assert photo.thumb.url == "thumb/photo.jpg"
assert photo.thumb.size <= (300, 200)
assert photo.large.url == "large/photo.jpg"
assert photo.large.size <= (1500, 1000)
def test_copy_assets(tmpdir, disable_resize):
"""Generator should copy assets directory into output."""
gallery = Gallery("", "", [])
generate(gallery, str(tmpdir))
assert tmpdir.join("assets", "kaleidoscope.js").exists()
assert tmpdir.join("assets", "kaleidoscope.css").exists()
def test_assets_directory_cleaned(tmpdir, disable_resize):
"""Generator should clean up existing assets directory."""
extra_file = tmpdir.join("assets", "existing-file.txt")
extra_file.ensure()
generate(Gallery("", "", []), str(tmpdir))
assert not extra_file.exists()
def test_generator_reporting_events(gallery_with_three_photos, tmpdir,
disable_resize):
"""Generator should report important events using provided reporter."""
listener = MagicMock(spec=DefaultListener)
generate(gallery_with_three_photos, tmpdir, listener)
album = gallery_with_three_photos.albums[0]
assert listener.starting_album.call_args == call(album, 3)
assert listener.finishing_album.called
assert listener.resizing_photo.call_count == 3
def test_counting_photos_to_resize(
gallery_with_three_photos, tmpdir, disable_resize):
"""Listener should receive count of photos that would be really resized."""
# Let's make f1.jpg already resized => 2 photos would remain
tmpdir.join("album", "large", "f1.jpg").ensure()
tmpdir.join("album", "thumb", "f1.jpg").ensure()
listener = MagicMock(spec=DefaultListener)
generate(gallery_with_three_photos, tmpdir, listener)
album = gallery_with_three_photos.albums[0]
assert listener.starting_album.call_args == call(album, 2)
assert listener.resizing_photo.call_count == 2
@pytest.fixture
def gallery_with_one_photo():
photo_path = os.path.join(os.path.dirname(__file__), 'data', 'photo.jpg')
photo = Photo("photo.jpg", "", "", photo_path)
album = Album("album", "The Album", date(2017, 6, 24), [Section("photos", [photo])])
return Gallery("Testin Gallery", "The Tester", [album])
@pytest.fixture
def gallery_with_three_photos():
photo_path = os.path.join(os.path.dirname(__file__), 'data', 'photo.jpg')
photos = [Photo("f%d.jpg" % (i,), "", "", photo_path) for i in range(3)]
album = Album("album", "The Album", date(2017, 6, 24), [Section("photos", photos)])
return Gallery("Testing Gallery", "The Tester", [album])
@pytest.fixture
def disable_resize(monkeypatch):
"""Replace image resize with dummy function and provide constant size."""
monkeypatch.setattr(generator, 'resize', MagicMock())
monkeypatch.setattr(imagesize, 'get', MagicMock(return_value=(42, 42)))
| sergejx/kaleidoscope | tests/test_generator.py | Python | bsd-3-clause | 6,135 | 0.000326 |
import announcements, users, corporate, api, volunteer, teams, innovation
def configure_routes(app):
app.add_url_rule('/', 'landing', view_func=users.views.landing, methods=['GET'])
# Signing Up/Registration
app.add_url_rule('/register', 'sign-up', view_func=users.views.sign_up, methods=['GET', 'POST'])
app.add_url_rule('/callback', 'callback', view_func=users.views.callback, methods=['GET'])
app.add_url_rule('/complete_mlh_registration', 'complete-mlh-registration',
view_func=users.views.complete_mlh_registration, methods=['GET', 'POST'])
app.add_url_rule('/complete_registration', 'complete-registration', view_func=users.views.complete_registration,
methods=['GET', 'POST'])
app.add_url_rule('/login', 'login', view_func=users.views.login, methods=['GET', 'POST'])
app.add_url_rule('/logout', 'logout', view_func=users.views.logout, methods=['GET'])
app.add_url_rule('/login/reset', 'forgot-password', view_func=users.views.forgot_password,
methods=['GET', 'POST'])
app.add_url_rule('/login/reset/<token>', 'reset-password', view_func=users.views.reset_password,
methods=['GET', 'POST'])
app.add_url_rule('/register/confirm/<token>', 'confirm-account', view_func=users.views.confirm_account,
methods=['GET'])
# User action pages
app.add_url_rule('/edit_profile', 'edit-profile', view_func=users.views.edit_profile,
methods=['GET', 'POST'])
app.add_url_rule('/dashboard', 'dashboard', view_func=users.views.dashboard, methods=['GET'])
app.add_url_rule('/resend_confirmation_email', 'resend-confirmation-email',
view_func=users.views.resend_confirmation, methods=['POST'])
# app.add_url_rule('/profile/resume', 'view-own-resume', view_func=users.views.view_own_resume, methods=['GET'])
# app.add_url_rule('/refresh', 'refresh-mlh-data', view_func=users.views.refresh_from_mlh, methods=['GET'])
app.add_url_rule('/accept', 'accept-invite', view_func=users.views.accept, methods=['GET', 'POST'])
app.add_url_rule('/accept/sign', 'sign', view_func=users.views.sign, methods=['GET', 'POST'])
app.add_url_rule('/additional_status', 'additional-status', view_func=users.views.additional_status,
methods=['GET'])
app.add_url_rule('/accept_travel_reimbursement', 'accept-travel-reimbursement',
view_func=users.views.accept_reimbursement, methods=['POST'])
app.add_url_rule('/view_campus_ambassadors', 'view-campus-ambassadors',
view_func=users.views.view_campus_ambassadors, methods=['GET'])
# Team actions
app.add_url_rule('/team', 'team', view_func=teams.views.team, methods=['GET', 'POST'])
# Admin Pages
app.add_url_rule('/admin', 'admin-dash', view_func=users.admin_views.admin_dashboard,
methods=['GET'])
app.add_url_rule('/admin/create-corp-user', 'create-corp', view_func=users.admin_views.create_corp_user,
methods=['GET', 'POST'])
app.add_url_rule('/admin/debug', 'debug-user', view_func=users.admin_views.debug_user,
methods=['GET', 'POST'])
app.add_url_rule('/admin/initial-create', 'initial-create',
view_func=users.admin_views.initial_create, methods=['GET', 'POST'])
app.add_url_rule('/admin/batch', 'batch-modify', view_func=users.admin_views.batch_modify,
methods=['GET', 'POST'])
app.add_url_rule('/admin/send-email', 'send-email',
view_func=users.admin_views.send_email_to_users, methods=['GET', 'POST'])
app.add_url_rule('/admin/volunteer-list', 'volunteer-list', view_func=volunteer.views.volunteer_list,
methods=['GET'])
app.add_url_rule('/admin/add-volunteer', 'add-volunteer',
view_func=volunteer.views.add_volunteer, methods=['POST'])
app.add_url_rule('/admin/reject', 'reject-users', view_func=users.admin_views.reject_users,
methods=['GET', 'POST'])
app.add_url_rule('/admin/accept-teams', 'accept-teams', view_func=users.admin_views.accept_teams,
methods=['GET', 'POST'])
app.add_url_rule('/admin/check-in', 'manual-check-in',
view_func=users.admin_views.check_in_manual, methods=['GET', 'POST'])
app.add_url_rule('/admin/sign/<user_id>', 'check-in-sign',
view_func=users.admin_views.check_in_sign, methods=['GET', 'POST'])
app.add_url_rule('/admin/check-in-post', 'manual-check-in-post',
view_func=users.admin_views.check_in_post, methods=['POST'])
app.add_url_rule('/admin/set-mlh-id', 'set-mlh-id', view_func=users.admin_views.set_mlh_id,
methods=['GET', 'POST'])
app.add_url_rule('/admin/job/<job_key>', 'worker-jobs', view_func=users.admin_views.job_view,
methods=['GET'])
# API
app.add_url_rule('/api/announcements', 'announcements', view_func=announcements.views.announcement_list,
methods=['GET'])
app.add_url_rule('/api/announcements/create', 'create-announcement',
view_func=announcements.views.create_announcement, methods=['POST'])
app.add_url_rule('/api/partners', 'partners', view_func=api.views.partner_list, methods=['GET'])
app.add_url_rule('/api/schedule', 'schedule', view_func=api.views.schedule, methods=['GET'])
app.add_url_rule('/api/schedule/<day>', 'day-schedule', view_func=api.views.schedule_day, methods=['GET'])
app.add_url_rule('/api/check-in', 'check-in-api', view_func=api.views.check_in, methods=['GET', 'POST'])
app.add_url_rule('/api/passbook', 'passbook', view_func=api.views.passbook, methods=['POST'])
# Corporate Portal
app.add_url_rule('/corp/login', 'corp-login', view_func=corporate.views.login, methods=['GET', 'POST'])
app.add_url_rule('/corp/login/reset', 'corp-forgot-password', view_func=corporate.views.forgot_password,
methods=['GET', 'POST'])
app.add_url_rule('/corp/login/reset/<token>', 'corp-reset-password', view_func=corporate.views.reset_password,
methods=['GET', 'POST'])
app.add_url_rule('/corp/setup/<token>', 'new-user-setup', view_func=corporate.views.new_user_setup,
methods=['GET', 'POST'])
app.add_url_rule('/corp', 'corp-dash', view_func=corporate.views.corporate_dash, methods=['GET', 'POST'])
app.add_url_rule('/corp/search', 'corp-search', view_func=corporate.views.corporate_search, methods=['GET'])
app.add_url_rule('/corp/search/results', 'search-results', view_func=corporate.views.search_results,
methods=['POST'])
app.add_url_rule('/corp/view/resume', 'resume-view', view_func=corporate.views.view_resume, methods=['GET'])
app.add_url_rule('/corp/download/all-resumes', 'all-resume-download',
view_func=corporate.views.download_all_resumes, methods=['GET'])
app.add_url_rule('/innovation/auth', 'innovation-auth', view_func=innovation.views.auth, methods=['GET'])
app.add_url_rule('/innovation/get-user-info', 'innovation-user-info', view_func=innovation.views.get_user_info, methods=['GET'])
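# Usage sketch (an assumption, not shown in this file): the code above uses
# Flask's add_url_rule API, so wiring the routes into an application would look
# roughly like this; the import path is inferred from the metadata row below.
from flask import Flask

from pepper import routes

app = Flask(__name__)
routes.configure_routes(app)

if __name__ == '__main__':
    app.run(debug=True)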
| rohitdatta/pepper | pepper/routes.py | Python | agpl-3.0 | 7,316 | 0.007791 |
#!/usr/bin/env python
"""This Class is an Arbiter module for having a webservice
throuhg which we can have `sync` and `live polling` functionalities
"""
import json
import os
import select
import subprocess
import sys
import tarfile
import time
from shinken.basemodule import BaseModule
from shinken.external_command import ExternalCommand
from shinken.log import logger
from shinken.webui.bottlewebui import (run, route, request,
response, abort, parse_auth)
from nocout_live import main as live_poll_main
properties = {
'daemons': ['arbiter', 'receiver'],
'type': 'ws_nocout',
'external': True,
}
# called by the plugin manager to get a broker
def get_instance(plugin):
# info("[WS_Nocout] get_instance ...")
instance = WsNocout(plugin)
return instance
# Main app var. Will be filled with our running module instance
app = None
# Check_MK home dir
CHECK_MK_CONF_PATH = '/omd/dev_slave/slave_2/etc/check_mk/conf.d/wato/'
CHECK_MK_BIN = '/omd/dev_slave/slave_2/bin/cmk'
OLD_CONFIG = 'old_config.tar.gz'
NEW_CONFIG = 'new_config.tar.gz'
def get_commands(time_stamps, hosts, services, return_codes, outputs):
"""Composing a command list based on the information received in
POST request
"""
commands = []
current_time_stamp = int(time.time())
def _compose_command(t, h, s, r, o):
"""Simple function to create a command from the inputs"""
cmd = ""
if not s or s == "":
cmd = '[%s] PROCESS_HOST_CHECK_RESULT;%s;%s;%s' % (t if t is not None else current_time_stamp, h, r, o)
else:
cmd = '[%s] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s;%s' % (t if t is not None else current_time_stamp, h, s, r, o)
logger.debug("[WS_Nocout] CMD: %s" % (cmd))
commands.append(cmd)
# Trivial case: empty command list
if (return_codes is None or len(return_codes) == 0):
return commands
# Sanity check: if we get N return codes, we must have N hosts.
# The other values could be None
if (len(return_codes) != len(hosts)):
logger.error("[WS_Nocout] number of return codes (%d) does not match number of hosts (%d)" % (len(return_codes), len(hosts)))
abort(400, "number of return codes does not match number of hosts")
map(_compose_command, time_stamps, hosts, services, return_codes, outputs)
logger.debug("[WS_Nocout] received command: %s" % (str(commands)))
return commands
def get_page():
commands_list = []
try:
# Getting the lists of information for the commands
time_stamp_list = []
host_name_list = []
service_description_list = []
return_code_list = []
output_list = []
time_stamp_list = request.forms.getall(key='time_stamp')
logger.debug("[WS_Nocout] time_stamp_list: %s" % (time_stamp_list))
host_name_list = request.forms.getall(key='host_name')
logger.debug("[WS_Nocout] host_name_list: %s" % (host_name_list))
service_description_list = request.forms.getall(key='service_description')
logger.debug("[WS_Nocout] service_description_list: %s" % (service_description_list))
return_code_list = request.forms.getall(key='return_code')
logger.debug("[WS_Nocout] return_code_list: %s" % (return_code_list))
output_list = request.forms.getall(key='output')
logger.debug("[WS_Nocout] output_list: %s" % (output_list))
commands_list = get_commands(time_stamp_list, host_name_list, service_description_list, return_code_list, output_list)
except Exception, e:
logger.error("[WS_Nocout] failed to get the lists: %s" % str(e))
commands_list = []
#check_auth()
# Adding commands to the main queue()
logger.debug("[WS_Nocout] commands: %s" % str(sorted(commands_list)))
for c in sorted(commands_list):
ext = ExternalCommand(c)
app.from_q.put(ext)
# OK here it's ok, it will return a 200 code
def do_restart():
# Getting the information for the command
time_stamp = request.forms.get('time_stamp', int(time.time()))
command = '[%s] RESTART_PROGRAM\n' % time_stamp
#check_auth()
# Adding commands to the main queue()
logger.warning("[WS_Nocout] command: %s" % str(command))
ext = ExternalCommand(command)
app.from_q.put(ext)
# OK here it's ok, it will return a 200 code
def do_reload():
# Getting the information for the command
time_stamp = request.forms.get('time_stamp', int(time.time()))
command = '[%s] RELOAD_CONFIG\n' % time_stamp
#check_auth()
# Adding commands to the main queue()
logger.warning("[WS_Nocout] command: %s" % str(command))
ext = ExternalCommand(command)
app.from_q.put(ext)
# OK here it's ok, it will return a 200 code
def do_recheck():
# Getting the information for the command
time_stamp = request.forms.get('time_stamp', int(time.time()))
host_name = request.forms.get('host_name', '')
service_description = request.forms.get('service_description', '')
logger.debug("[WS_Nocout] Timestamp '%s' - host: '%s', service: '%s'" % (time_stamp,
host_name,
service_description
)
)
if not host_name:
abort(400, 'Missing parameter host_name')
if service_description:
# SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
command = '[%s] SCHEDULE_FORCED_SVC_CHECK;%s;%s;%s\n' % (time_stamp,
host_name,
service_description,
time_stamp)
else:
# SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
command = '[%s] SCHEDULE_FORCED_HOST_CHECK;%s;%s\n' % (time_stamp,
host_name,
time_stamp)
# We check for auth if it's not anonymously allowed
#check_auth()
# Adding commands to the main queue()
logger.debug("[WS_Nocout] command = %s" % command)
ext = ExternalCommand(command)
app.from_q.put(ext)
# OK here it's ok, it will return a 200 code
def do_downtime():
# Getting the information for the command
action = request.forms.get('action', 'add')
time_stamp = request.forms.get('time_stamp', int(time.time()))
host_name = request.forms.get('host_name', '')
service_description = request.forms.get('service_description', '')
start_time = request.forms.get('start_time', int(time.time()))
end_time = request.forms.get('end_time', int(time.time()))
# Fixed is 1 for a period between start and end time
fixed = request.forms.get('fixed', '1')
# Fixed is 0 (flexible) for a period of duration seconds from start time
duration = request.forms.get('duration', int('86400'))
trigger_id = request.forms.get('trigger_id', '0')
author = request.forms.get('author', 'anonymous')
comment = request.forms.get('comment', 'No comment')
logger.debug("[WS_Nocout] Downtime %s - host: '%s', service: '%s', comment: '%s'" % (action, host_name, service_description, comment))
if not host_name:
abort(400, 'Missing parameter host_name')
if action == 'add':
if service_description:
# SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>;<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
command = '[%s] SCHEDULE_SVC_DOWNTIME;%s;%s;%s;%s;%s;%s;%s;%s;%s\n' % ( time_stamp,
host_name,
service_description,
start_time,
end_time,
fixed,
trigger_id,
duration,
author,
comment
)
else:
# SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
command = '[%s] SCHEDULE_HOST_DOWNTIME;%s;%s;%s;%s;%s;%s;%s;%s\n' % ( time_stamp,
host_name,
start_time,
end_time,
fixed,
trigger_id,
duration,
author,
comment
)
if action == 'delete':
if service_description:
# DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description>
command = '[%s] DEL_ALL_SVC_DOWNTIMES;%s;%s\n' % ( time_stamp,
host_name,
service_description)
else:
# DEL_ALL_SVC_DOWNTIMES;<host_name>
command = '[%s] DEL_ALL_HOST_DOWNTIMES;%s\n' % ( time_stamp,
host_name)
# We check for auth if it's not anonymously allowed
if app.username != 'anonymous':
basic = parse_auth(request.environ.get('HTTP_AUTHORIZATION', ''))
# Maybe the user did not even send user/pass. If so, bail out
if not basic:
abort(401, 'Authentication required')
# Maybe they did not give the right credentials?
if basic[0] != app.username or basic[1] != app.password:
abort(403, 'Authentication denied')
# Adding commands to the main queue()
logger.debug("[WS_Nocout] command = %s" % command)
ext = ExternalCommand(command)
app.from_q.put(ext)
# OK here it's ok, it will return a 200 code
def do_local_sync():
""" Get the host and service config files and restart local
check_mk instance"""
#check_auth()
# load device inventory into redis and memc
from handlers.db_ops import load_inventory
load_inventory.apply_async()
ok_msg = {
'success': 1,
'message': 'Config pushed successfully'
}
err_msg = {
'success': 0,
'message': 'Error with the config'
}
body = {}
body.update(err_msg)
response.status = 200
#response.content_type = "application/json"
# prepare a backup of current state
os.chdir(CHECK_MK_CONF_PATH)
out = tarfile.open(CHECK_MK_CONF_PATH + OLD_CONFIG, mode='w:gz')
try:
out = prepare_tar(out)
except Exception as exc:
logger.error('Error in tarfile generation: {0}'.format(exc))
finally:
out.close()
# extract files from request obj and perform check mk restart
try:
old_backup = CHECK_MK_CONF_PATH + OLD_CONFIG
fp = request.files.get('file').file
with open(NEW_CONFIG, 'w') as f:
f.write(fp.read())
abs_path = CHECK_MK_CONF_PATH + NEW_CONFIG
prepare_untar(abs_path, CHECK_MK_CONF_PATH)
#ret = os.system(CHECK_MK_BIN + ' -R')
ret = subprocess.call(CHECK_MK_BIN + ' -C', shell=True)
logger.warning('Ret: {0}'.format(ret))
if ret == 0:
body.update(ok_msg)
else:
# rollback operation
rollback(old_backup)
except Exception as exc:
logger.error('Error in installing new config: {0}'.format(exc))
#status_code = 500
try:
# rollback operation
rollback(old_backup)
except Exception as exc:
# only for debugging purposes
logger.error('Error in rollback operation: {0}'.format(exc))
body.update({
'ret': ret,
})
return json.dumps(body)
def do_local_restart():
""" Restart monitoring core"""
#check_auth()
ok_msg = {
'success': 1,
'message': 'Restart successfully'
}
err_msg = {
'success': 0,
'message': 'Problem with the restart'
}
body = {}
body.update(err_msg)
response.status = 200
try:
ret = subprocess.call(CHECK_MK_BIN + ' -R', shell=True)
if ret == 0:
body.update(ok_msg)
except Exception as exc:
logger.error('Error in restart program: {0}'.format(exc))
return json.dumps(body)
def prepare_untar(filename, extract_to):
tarfile.open(filename, mode='r:gz').extractall(extract_to)
def rollback(old_backup):
prepare_untar(old_backup, CHECK_MK_CONF_PATH)
os.system(CHECK_MK_BIN + ' -R')
def prepare_tar(out):
os.chdir(CHECK_MK_CONF_PATH)
for entry in os.listdir('.'):
if entry.endswith('.mk'):
out.add(entry)
return out
def do_live_poll():
"""Calls live poll module"""
#check_auth()
res = {
'message': 'ok',
}
status_code = 200
logger.warning('Live poll called')
# get poll params from request obj
try:
req_params = request.json
logger.warning('req: {0}'.format(type(req_params)))
except Exception as exc:
status_code = 500
logger.error('[Ws-Nocout] Exception in do_live_poll: {0}'.format(
exc))
else:
device_list = req_params.get('device_list')
service_list = req_params.get('service_list')
bs_ss_map = req_params.get(
'bs_name_ss_mac_mapping')
ss_map = req_params.get(
'ss_name_mac_mapping')
ds = req_params.get(
'ds')
res = live_poll_main(
device_list=device_list,
service_list=service_list,
bs_name_ss_mac_mapping=bs_ss_map,
ss_name_mac_mapping=ss_map,
ds=ds
)
return json.dumps(res)
class WsNocout(BaseModule):
""" Class to open an HTTP service, where a user can send
command (e.g. restart shinken etc.)"""
def __init__(self, modconf):
BaseModule.__init__(self, modconf)
try:
logger.debug("[WS_Nocout] Configuration starting ...")
self.username = getattr(modconf, 'username', 'anonymous')
self.password = getattr(modconf, 'password', '')
self.port = int(getattr(modconf, 'port', '7760'))
self.host = getattr(modconf, 'host', '0.0.0.0')
# adding inventory load celery task here [being called from do_local_sync]
ETL_BASE_DIR = getattr(modconf, 'etl_base_dir', '/omd/nocout_etl')
sys.path.insert(0, ETL_BASE_DIR)
logger.info(
"[WS_Nocout] Configuration done, host: %s(%s), username: %s)" %
(self.host, self.port, self.username)
)
except AttributeError:
logger.error(
"[WS_Nocout] The module is missing a property, "
"check module declaration in shinken-specific.cfg"
)
raise
except Exception, e:
logger.error("[WS_Nocout] Exception : %s" % str(e))
raise
# We initialize the HTTP part. It's a simple wsgi backend
# with a select hack so we can still exit if someone asks us to
def init_http(self):
logger.info("[WS_Nocout] Starting WS arbiter http socket")
try:
self.srv = run(host=self.host, port=self.port, server='wsgirefselect')
except Exception, e:
logger.error("[WS_Nocout] Exception : %s" % str(e))
raise
logger.info("[WS_Nocout] Server started")
# And we link our page
route('/push_check_result', callback=get_page, method='POST')
#route('/restart', callback=do_restart, method='POST')
route('/restart', callback=do_local_restart, method='POST')
route('/reload', callback=do_reload, method='POST')
route('/downtime', callback=do_downtime, method='POST')
route('/recheck', callback=do_recheck, method='POST')
route('/local_sync', callback=do_local_sync, method='POST')
route('/live_poll', callback=do_live_poll, method='POST')
# When you are in "external" mode, that is the main loop of your process
def main(self):
global app
# Change process name (seen in ps or top)
self.set_proctitle(self.name)
# It's an external module, so we need to be sure that we manage
# the signals
self.set_exit_handler()
# Go for Http open :)
self.init_http()
# We fill the global variable with our Queue() link
# with the arbiter, because the page should be a non-class
# one function
app = self
# We will loop forever on the http socket
input = [self.srv.socket]
# Main blocking loop
while not self.interrupted:
input = [self.srv.socket]
try:
inputready, _, _ = select.select(input, [], [], 1)
except select.error, e:
logger.warning("[WS_Nocout] Exception: %s", str(e))
continue
for s in inputready:
# If it's a web request, ask the webserver to do it
if s == self.srv.socket:
self.srv.handle_request()
| peeyush-tm/shinken | modules/ws-nocout/module.py | Python | agpl-3.0 | 15,057 | 0.030152 |
from __future__ import absolute_import
from __future__ import division
# Copyright (c) 2010-2016 openpyxl
"""Manage Excel date weirdness."""
# Python stdlib imports
import datetime
from datetime import timedelta, tzinfo
import re
from jdcal import (
gcal2jd,
jd2gcal,
MJD_0
)
from openpyxl.compat import lru_cache
# constants
MAC_EPOCH = datetime.date(1904, 1, 1)
WINDOWS_EPOCH = datetime.date(1899, 12, 30)
CALENDAR_WINDOWS_1900 = sum(gcal2jd(WINDOWS_EPOCH.year, WINDOWS_EPOCH.month, WINDOWS_EPOCH.day))
CALENDAR_MAC_1904 = sum(gcal2jd(MAC_EPOCH.year, MAC_EPOCH.month, MAC_EPOCH.day))
SECS_PER_DAY = 86400
EPOCH = datetime.datetime.utcfromtimestamp(0)
W3CDTF_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
W3CDTF_REGEX = re.compile('(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(.(\d{2}))?Z?')
def datetime_to_W3CDTF(dt):
"""Convert from a datetime to a timestamp string."""
return datetime.datetime.strftime(dt, W3CDTF_FORMAT)
def W3CDTF_to_datetime(formatted_string):
"""Convert from a timestamp string to a datetime object."""
match = W3CDTF_REGEX.match(formatted_string)
dt = [int(v) for v in match.groups()[:6]]
return datetime.datetime(*dt)
@lru_cache()
def to_excel(dt, offset=CALENDAR_WINDOWS_1900):
jul = sum(gcal2jd(dt.year, dt.month, dt.day)) - offset
if jul <= 60 and offset == CALENDAR_WINDOWS_1900:
jul -= 1
if hasattr(dt, 'time'):
jul += time_to_days(dt)
return jul
@lru_cache()
def from_excel(value, offset=CALENDAR_WINDOWS_1900):
if value is None:
return
if 1 < value < 60 and offset == CALENDAR_WINDOWS_1900:
value += 1
parts = list(jd2gcal(MJD_0, value + offset - MJD_0))
_, fraction = divmod(value, 1)
jumped = (parts[-1] == 0 and fraction > 0)
diff = datetime.timedelta(days=fraction)
if 0 < abs(value) < 1:
return days_to_time(diff)
if not jumped:
return datetime.datetime(*parts[:3]) + diff
else:
return datetime.datetime(*parts[:3] + [0])
class GMT(tzinfo):
def utcoffset(self, dt):
return timedelta(0)
def dst(self, dt):
return timedelta(0)
def tzname(self,dt):
return "GMT"
try:
from datetime import timezone
UTC = timezone(timedelta(0))
except ImportError:
# Python 2.6
UTC = GMT()
@lru_cache()
def time_to_days(value):
"""Convert a time value to fractions of day"""
if value.tzinfo is not None:
value = value.astimezone(UTC)
return (
(value.hour * 3600)
+ (value.minute * 60)
+ value.second
+ value.microsecond / 10**6
) / SECS_PER_DAY
@lru_cache()
def timedelta_to_days(value):
"""Convert a timedelta value to fractions of a day"""
if not hasattr(value, 'total_seconds'):
secs = (value.microseconds +
(value.seconds + value.days * SECS_PER_DAY) * 10**6) / 10**6
else:
secs = value.total_seconds()
return secs / SECS_PER_DAY
@lru_cache()
def days_to_time(value):
mins, seconds = divmod(value.seconds, 60)
hours, mins = divmod(mins, 60)
return datetime.time(hours, mins, seconds, value.microseconds)
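# Round-trip sketch (illustrative, not part of openpyxl itself): exercises the
# helpers above with the default Windows 1900 epoch, using this module's own
# `import datetime`.
if __name__ == "__main__":
    noon = datetime.datetime(2016, 3, 1, 12, 0)
    serial = to_excel(noon)                          # days since 1899-12-30, plus 0.5 for noon
    restored = from_excel(serial)                    # back to (approximately) the original datetime
    quarter_day = time_to_days(datetime.time(6, 0))  # 0.25 of a day
    print(serial)
    print(restored)
    print(quarter_day)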
| mfsteen/CIQTranslate-Kristian | openpyxl/utils/datetime.py | Python | gpl-3.0 | 3,155 | 0.00412 |
#!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Software License Agreement (GPLv2 License)
#
# Copyright (c) 2012 TheCorpora SL
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Authors: Miguel Angel Julian <miguel.julian@openqbo.com>;
# Daniel Cuadrado <daniel.cuadrado@openqbo.com>;
# Arturo Bajuelos <arturo@openqbo.com>;
# Sergio Merino <s.merino@openqbo.com>;
import cherrypy
import os
import gen_grammar
import subprocess
from mako.template import Template
from tabsClass import TabClass
import simplejson
from subprocess import Popen, PIPE, STDOUT
import roslib
import signal
roslib.load_manifest('qbo_webi');
import rospy
import time
from uuid import getnode as get_mac
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2
class VoiceRecognitionManager(TabClass):
def __init__(self,language):
self.ipWavServer = "audio.openqbo.org"
self.portWavServer="8588"
self.language = language
self.juliusPath=roslib.packages.get_pkg_dir("qbo_listen")
self.juliusAMPath="/usr/share/qbo-julius-model/"
self.htmlTemplate = Template(filename='voiceRecognition/templates/voiceRecognitionTemplate.html')
self.jsTemplate = Template(filename='voiceRecognition/templates/voiceRecognitionTemplate.js')
self.tmpdir="/tmp/"
self.LMPaths="/config/LM/"
self.LMFileName="/sentences.conf"
self.PhonemsFileName="/phonems"
self.TiedlistFileName="/tiedlist"
self.languages_names={'en':'English','es':'Spanish','pt':'Português','de':'Deutsch','fr':'Français','it':'Italiano'}
self.path = roslib.packages.get_pkg_dir("qbo_webi")+"/src/voiceRecognition/"
self.lan = self.language["current_language"]
self.mac = get_mac()
self.p = None
@cherrypy.expose
def voiceRecognitionJs(self, parameters=None):
self.lan = self.language["current_language"]
return self.jsTemplate.render(language=self.language)
def getLanguages(self):
try:
dirList=os.listdir(self.juliusPath+self.LMPaths)
dirList.sort()
except:
dirList=-1
return dirList
def isQboListenInstalled(self):
if self.getLanguages()==-1:
return False
else:
return True
def getLanguageModels(self,language):
try:
dirList=os.listdir(self.juliusPath+self.LMPaths+language)
dirList.sort()
except:
dirList=-1
return dirList
def getLMSentences(self,language,model):
try:
f = open(self.juliusPath+self.LMPaths+language+"/"+model+self.LMFileName,'r')
return f.read()
except:
sentences=""
return sentences
@cherrypy.expose
def getModels(self,lang):
modelList=""
try:
dirList=os.listdir(self.juliusPath+self.LMPaths+lang)
dirList.sort()
for model in dirList:
modelList=modelList+model+"::"
modelList=modelList[:-2]
except:
modelList=-1
return modelList
@cherrypy.expose
def test1(self,lang,text):
text=text.encode("utf-8")
f = open(self.tmpdir+'LModel', 'w')
f.write(text)
f.close()
words=gen_grammar.verrors(self.tmpdir+'LModel',self.juliusAMPath+lang+"/"+self.PhonemsFileName)
if words==0:
return ""
else:
wordsList=""
for word in words:
wordsList=wordsList+word+"::"
wordsList=wordsList[:-2]
return wordsList
@cherrypy.expose
def test2(self,lang,text):
errorlist=""
text=text.encode("utf-8")
print text
wordlist=text.split()
print wordlist
for word in wordlist:
if word[0]!="[" and word[0]!="<":
print word
f = open(self.tmpdir+'word', 'w')
f.write("[sentence]\n")
f.write(word)
f.close()
gen_grammar.createvoca(self.tmpdir+'word', self.juliusAMPath+lang+"/"+self.PhonemsFileName, self.tmpdir+'word')
print self.tmpdir+'word'
print self.juliusAMPath+lang+"/"+self.TiedlistFileName
if gen_grammar.perrors(self.tmpdir+'word.voca',self.juliusAMPath+lang+"/"+self.TiedlistFileName)!=0:
errorlist=errorlist+word+"::"
errorlist=errorlist[:-2]
return errorlist.upper()
@cherrypy.expose
def saveToFile(self,lang,text,model):
try:
#print self.juliusPath+self.LMPaths+language+"/"+model+self.LMFileName
text=text.encode("utf-8")
f = open(self.juliusPath+self.LMPaths+lang+"/"+model+self.LMFileName,'w')
f.write(text)
f.close()
gen_grammar.compilegrammar(model,lang)
subprocess.Popen("roslaunch qbo_listen voice_recognizer.launch".split())
except:
return "ERROR: Cant write the file"
return ""
@cherrypy.expose
def getFile(self,lang="",model=""):
if lang=="" or model=="":
return "ERROR: lang:"+lang+"; model:"+model
else:
#print self.getLMSentences(lang,model)
return self.getLMSentences(lang,model)
@cherrypy.expose
def index(self):
tmp=""
if self.isQboListenInstalled():
for lang in self.getLanguages():
for LM in self.getLanguageModels(lang):
text= self.getLMSentences(lang,LM)
break
break
return self.htmlTemplate.render(language=self.language,lannames=self.languages_names,alllanguage=self.getLanguages())
else:
return "Qbo listen not installed"
# return self.htmlTemplate.render(language=self.language)
@cherrypy.expose
def rec(self):
# n = self.getLenght("Arturo","sp")
# print "***** "+n
# Remove the previous recording, if there was one
try:
cmd="rm "+self.path+"tmp/*"
self.p = Popen(cmd.split())
except ValueError:
print "Nada que borrar"
'''
try:
cmd="rm "+self.path+"/*_en"
self.p = Popen(cmd.split())
except ValueError:
print "Nada que borrar"
try:
cmd="rm "+path+"/*sp"
print cmd
self.p = Popen(cmd.split())
except ValueError:
print "Nada que borrar"
'''
self.filename = str(self.mac)+"_"+self.lan
#filename = filename.replace("\"","")
# filename = "tmp.wav"
print "FILENAME == "+self.filename
print "grabnando!!!! "+self.path+"tmp/"+self.filename
cmd="arecord -f S16_LE -r 44100 -c 1 "+self.path+"tmp/"+self.filename
self.p = Popen(cmd.split())
name="oleole"
return name
@cherrypy.expose
def stop(self):
if(self.p==None):
print "P ES NULL!!??"
else:
print "matar grabacin"
self.p.send_signal(signal.SIGINT)
cmd="python "+self.path+"sendWav2Server.py "+self.path+"tmp/"+self.filename+" "+self.ipWavServer+" "+self.portWavServer
print cmd
out = runCmd(cmd)
print out[0]
if out[1] != "":
print "Error"
return "error"
return unicode(out[0],'utf8')
@cherrypy.expose
def play(self):
print "play sound"
os.system('aplay '+self.path+"tmp/"+self.filename)
return "ok"
@cherrypy.expose
def save(self,transcripcion):
print "SAVE! transcripcion="+transcripcion
cmd="python "+self.path+"sendTranscription2Server.py "+str(self.mac)+" \""+transcripcion+"\" "+self.lan+" "+self.ipWavServer+" "+self.portWavServer
print cmd
out = runCmd(cmd)
if out[1] != "":
print "Error "+out[1]
return "error"
return out[0]
# return "ok"
def runCmd(cmd, timeout=None):
'''
Will execute a command, read the output and return it back.
@param cmd: command to execute
@param timeout: process timeout in seconds
@return: a tuple of three: first stdout, then stderr, then exit code
@raise OSError: on missing command or if a timeout was reached
'''
ph_out = None # process output
ph_err = None # stderr
ph_ret = None # return code
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# if timeout is not set wait for process to complete
if not timeout:
ph_ret = p.wait()
else:
fin_time = time.time() + timeout
while p.poll() == None and fin_time > time.time():
time.sleep(1)
# if timeout reached, raise an exception
if fin_time < time.time():
# starting 2.6 subprocess has a kill() method which is preferable
# p.kill()
os.kill(p.pid, signal.SIGKILL)
raise OSError("Process timeout has been reached")
ph_ret = p.returncode
ph_out, ph_err = p.communicate()
return (ph_out, ph_err, ph_ret)
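if __name__ == '__main__':
    # Quick manual check of runCmd (sketch only; the command is illustrative,
    # aplay being the player already used elsewhere in this module).
    out, err, ret = runCmd("aplay --version", timeout=5)
    print out
    print err
    print ret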
| HailStorm32/Q.bo_stacks | qbo_webi/src/voiceRecognition/voiceRecognition.py | Python | lgpl-2.1 | 10,125 | 0.01541 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Hiroaki Nakamura <hnakamur@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: hostname
author:
- "Hiroaki Nakamura (@hnakamur)"
- "Hideki Saito (@saito-hideki)"
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname.
- Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI.
- Any distribution that uses systemd as its init system.
- Note, this module does *NOT* modify /etc/hosts. You need to modify it yourself using other modules like template or replace.
options:
name:
required: true
description:
- Name of the host
'''
EXAMPLES = '''
- hostname: name=web01
'''
import socket
from distutils.version import LooseVersion
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils._text import to_bytes, to_native
class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
platform = get_platform()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (platform, distribution)
else:
msg_platform = platform
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
A subclass may wish to set different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
strategy_class = UnimplementedStrategy
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Hostname, args, kwargs)
def __init__(self, module):
self.module = module
self.name = module.params['name']
if self.platform == 'Linux' and Facts(module).is_systemd_managed():
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
class GenericStrategy(object):
"""
This is a generic Hostname manipulation strategy class.
A subclass may wish to override some or all of these methods.
- get_current_hostname()
- get_permanent_hostname()
- set_current_hostname(name)
- set_permanent_hostname(name)
"""
def __init__(self, module):
self.module = module
self.hostname_cmd = self.module.get_bin_path('hostname', True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
return None
def set_permanent_hostname(self, name):
pass
# ===========================================
class DebianStrategy(GenericStrategy):
"""
This is a Debian family Hostname manipulation strategy class - it edits
the /etc/hostname file.
"""
HOSTNAME_FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SLESStrategy(GenericStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
HOSTNAME_FILE = '/etc/HOSTNAME'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class RedHatStrategy(GenericStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
def get_permanent_hostname(self):
try:
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
k, v = line.split('=')
return v.strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
lines = []
found = False
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
finally:
f.close()
if not found:
lines.append("HOSTNAME=%s\n" % name)
f = open(self.NETWORK_FILE, 'w+')
try:
f.writelines(lines)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SystemdStrategy(GenericStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
def get_current_hostname(self):
cmd = ['hostname']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
cmd = ['hostnamectl', '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--pretty', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
cmd = ['hostnamectl', '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class OpenRCStrategy(GenericStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/conf.d/hostname'
    def get_permanent_hostname(self):
        f = None
        try:
            try:
                f = open(self.HOSTNAME_FILE, 'r')
                for line in f:
                    line = line.strip()
                    if line.startswith('hostname='):
                        # everything after 'hostname=', with surrounding quotes dropped
                        return line[len('hostname='):].strip('"')
            except Exception:
                err = get_exception()
                self.module.fail_json(msg="failed to read hostname: %s" % str(err))
        finally:
            # guard against the file never having been opened
            if f is not None:
                f.close()
        return None
    def set_permanent_hostname(self, name):
        f = None
        try:
            try:
                f = open(self.HOSTNAME_FILE, 'r')
                lines = [x.strip() for x in f]
                for i, line in enumerate(lines):
                    if line.startswith('hostname='):
                        lines[i] = 'hostname="%s"' % name
                        break
                f.close()
                f = open(self.HOSTNAME_FILE, 'w')
                f.write('\n'.join(lines) + '\n')
            except Exception:
                err = get_exception()
                self.module.fail_json(msg="failed to update hostname: %s" % str(err))
        finally:
            # guard against the file never having been opened
            if f is not None:
                f.close()
# ===========================================
class OpenBSDStrategy(GenericStrategy):
"""
    This is an OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file.
"""
HOSTNAME_FILE = '/etc/myname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SolarisStrategy(GenericStrategy):
"""
    This is a Solaris 11 (or later) Hostname manipulation strategy class - it
    executes the hostname command.
"""
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class FreeBSDStrategy(GenericStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/rc.conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
        f = None
        try:
            try:
                f = open(self.HOSTNAME_FILE, 'r')
                for line in f:
                    line = line.strip()
                    if line.startswith('hostname='):
                        # everything after 'hostname=', with surrounding quotes dropped
                        return line[len('hostname='):].strip('"')
            except Exception:
                err = get_exception()
                self.module.fail_json(msg="failed to read hostname: %s" % str(err))
        finally:
            # guard against the file never having been opened
            if f is not None:
                f.close()
        return None
    def set_permanent_hostname(self, name):
        f = None
        try:
            try:
                f = open(self.HOSTNAME_FILE, 'r')
                lines = [x.strip() for x in f]
                for i, line in enumerate(lines):
                    if line.startswith('hostname='):
                        lines[i] = 'hostname="%s"' % name
                        break
                f.close()
                f = open(self.HOSTNAME_FILE, 'w')
                f.write('\n'.join(lines) + '\n')
            except Exception:
                err = get_exception()
                self.module.fail_json(msg="failed to update hostname: %s" % str(err))
        finally:
            # guard against the file never having been opened
            if f is not None:
                f.close()
# ===========================================
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = SystemdStrategy
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Suse linux enterprise server '
distribution_version = get_distribution_version()
if distribution_version and LooseVersion("10") <= LooseVersion(distribution_version) <= LooseVersion("12"):
strategy_class = SLESStrategy
else:
strategy_class = UnimplementedStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse '
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = SystemdStrategy
class RedHat5Hostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class RedHatServerHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux server'
strategy_class = RedHatStrategy
class RedHatWorkstationHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux workstation'
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
strategy_class = RedHatStrategy
class CentOSLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Centos linux'
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
class ScientificLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux'
strategy_class = RedHatStrategy
class ScientificLinuxCERNHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux cern slc'
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle linux server'
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = DebianStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = DebianStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = DebianStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = DebianStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = DebianStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo base system'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
name=dict(required=True)
)
)
hostname = Hostname(module)
changed = False
name = module.params['name']
current_name = hostname.get_current_hostname()
if current_name != name:
hostname.set_current_hostname(name)
changed = True
permanent_name = hostname.get_permanent_hostname()
if permanent_name != name:
hostname.set_permanent_hostname(name)
changed = True
module.exit_json(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if __name__ == '__main__':
main()
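# Illustrative playbook usage (a sketch, not part of the module itself; it
# assumes standard Ansible task syntax for this module's single 'name' option):
#
#   - name: set the machine hostname
#     hostname:
#       name: web01.example.com
#
# The strategy actually used (hostnamectl, /etc/hostname, /etc/sysconfig/network,
# ...) is selected by the platform/distribution subclasses defined above.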
|
romain-dartigues/ansible-modules-core
|
system/hostname.py
|
Python
|
gpl-3.0
| 20,763 | 0.003516 |
#coding: utf-8
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# Allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-dbmessages',
version='0.2.0a',
packages=['dbmessages'],
include_package_data=True,
license='BSD License',
description='Request-independent messaging for Django on top of contrib.messages',
long_description=README,
author='Upwork, Anton Strogonoff',
author_email='python@upwork.com',
maintainer='Anton Strogonoff',
maintainer_email='anton@strogonoff.name',
download_url='http://github.com/strogonoff/django-dbmessages',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
strogonoff/django-dbmessages
|
setup.py
|
Python
|
apache-2.0
| 1,218 | 0.001642 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: Will
"""
from django import forms
from app01 import models
class ImportFrom(forms.Form):
    HOST_TYPE = ((1, "001"), (2, "002"))  # placeholder; replace with a file
host_type = forms.IntegerField(
widget=forms.Select(choices=HOST_TYPE)
)
hostname = forms.CharField()
def __init__(self,*args,**kwargs):
super(ImportFrom,self).__init__(*args,**kwargs)
        HOST_TYPE = ((1, "001"), (2, "002"))  # placeholder; replace with a file (unused here - choices come from the database below)
self.fields['host_type'].widget.choices = models.userInfo.objects.all().values_list("id","name")
        # leftover illustrative lookups (results were unused); kept as comments
        # so that instantiating the form has no database side effects
        # models.userInfo.objects.get()
        # models.userInfo.objects.filter()
|
willre/homework
|
day19/web/app01/forms/home.py
|
Python
|
gpl-2.0
| 723 | 0.027027 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-09-01 22:26:01
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
import os
import threading
import requests
import lxml
from threading import Thread
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
pic_path = 'pic/'  # directory where downloaded images are saved
URL = 'http://www.nanrenwo.net/z/tupian/hashiqitupian/'
URL1 = 'http://www.nanrenwo.net/'
class Worker(threading.Thread):
def __init__(self, url, img, filename):
super(Worker, self).__init__()
self.url = url
self.img = img
self.filename = filename
def run(self):
try:
u = self.url + self.img
r = requests.get(u, stream=True)
with open(self.filename, 'wb') as fd:
for chunk in r.iter_content(4096):
fd.write(chunk)
except Exception, e:
raise
def get_imgs(url):
t = 1
r = requests.get(url, stream=True)
soup = BeautifulSoup(r.text, 'lxml')
    myimg = [img.get('src') for img in soup.find(id='brand-waterfall').find_all('img')]  # src of every img element under the node with this id
print 'myimg:', myimg
for img in myimg:
pic_name = pic_path + str(t) + '.jpg'
# img_src = img.get('src')
print 'img: ', img
# self.download_pic(URL1,img,pic_name) #request Url,img src,picture name
w = Worker(URL1, img, pic_name)
w.start()
t += 1
get_imgs(URL)
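# Note (added for clarity): each Worker thread streams one image into
# pic/<n>.jpg, so the pic/ directory is assumed to exist before running this
# script; otherwise the open() call in Worker.run() will fail.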
|
xycfree/py_spider
|
spider/down_pic_thread.py
|
Python
|
gpl-3.0
| 1,358 | 0.021021 |
"""
.. _tut_stats_cluster_source_2samp:
=========================================================================
2 samples permutation test on source data with spatio-temporal clustering
=========================================================================
Tests if the source space data are significantly different between
2 groups of subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy import stats as stats
import mne
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
subjects_dir = data_path + '/subjects'
# Load stc to in common cortical space (fsaverage)
stc = mne.read_source_estimate(stc_fname)
stc.resample(50)
stc = mne.morph_data('sample', 'fsaverage', stc, grade=5, smooth=20,
subjects_dir=subjects_dir)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep
n_subjects1, n_subjects2 = 7, 9
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]
# We want to compare the overall activity levels for each subject
X1 = np.abs(X1) # only magnitude
X2 = np.abs(X2) # only magnitude
###############################################################################
# Compute statistic
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))
# Note that X needs to be a list of multi-dimensional array of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
threshold=f_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both', colormap='mne',
subjects_dir=subjects_dir,
time_label='Duration significant (ms)')
brain.set_data_time_index(0)
brain.show_view('lateral')
brain.save_image('clusters.png')
|
trachelr/mne-python
|
tutorials/plot_cluster_stats_spatio_temporal_2samp.py
|
Python
|
bsd-3-clause
| 4,321 | 0 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest.ip_messaging import TwilioIpMessagingClient
# Your Account Sid and Auth Token from twilio.com/user/account
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = TwilioIpMessagingClient(account, token)
service = client.services.get(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
channel = service.channels.get(sid="CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
messages = channel.messages.list()
for m in messages:
print(m)
|
teoreteetik/api-snippets
|
ip-messaging/rest/messages/list-messages/list-messages.5.x.py
|
Python
|
mit
| 533 | 0 |
from typing import Optional, Tuple
import os
import sys
from distutils.version import LooseVersion
from version import PROVISION_VERSION
from scripts.lib.zulip_tools import get_dev_uuid_var_path
def get_major_version(v):
# type: (str) -> int
return int(v.split('.')[0])
def get_version_file():
# type: () -> str
uuid_var_path = get_dev_uuid_var_path()
return os.path.join(uuid_var_path, 'provision_version')
PREAMBLE = '''
Before we run tests, we make sure your provisioning version
is correct by looking at var/provision_version, which is at
version %s, and we compare it to the version in source
control (version.py), which is %s.
'''
def preamble(version):
# type: (str) -> str
text = PREAMBLE % (version, PROVISION_VERSION)
text += '\n'
return text
NEED_TO_DOWNGRADE = '''
It looks like you checked out a branch that expects an older
version of dependencies than the version you provisioned last.
This may be ok, but it's likely that you either want to rebase
your branch on top of upstream/master or re-provision your VM.
Do this: `./tools/provision`
'''
NEED_TO_UPGRADE = '''
It looks like you checked out a branch that has added
dependencies beyond what you last provisioned. Your command
is likely to fail until you add dependencies by provisioning.
Do this: `./tools/provision`
'''
def get_provisioning_status():
# type: () -> Tuple[bool, Optional[str]]
version_file = get_version_file()
if not os.path.exists(version_file):
# If the developer doesn't have a version_file written by
# a previous provision, then we don't do any safety checks
# here on the assumption that the developer is managing
# their own dependencies and not running provision.
return True, None
with open(version_file, 'r') as f:
version = f.read().strip()
# Normal path for people that provision--we're all good!
if version == PROVISION_VERSION:
return True, None
# We may be more provisioned than the branch we just moved to. As
# long as the major version hasn't changed, then we should be ok.
if LooseVersion(version) > LooseVersion(PROVISION_VERSION):
if get_major_version(version) == get_major_version(PROVISION_VERSION):
return True, None
else:
return False, preamble(version) + NEED_TO_DOWNGRADE
return False, preamble(version) + NEED_TO_UPGRADE
def assert_provisioning_status_ok(force):
# type: (bool) -> None
if not force:
ok, msg = get_provisioning_status()
if not ok:
print(msg)
print('If you really know what you are doing, use --force to run anyway.')
sys.exit(1)
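# Illustrative call site (hypothetical; shown only to make the intended flow
# concrete - real callers pass their own parsed --force flag):
#
#   from tools.lib.test_script import assert_provisioning_status_ok
#   assert_provisioning_status_ok(options.force)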
|
tommyip/zulip
|
tools/lib/test_script.py
|
Python
|
apache-2.0
| 2,710 | 0.002583 |
import json
import pytest
from indy import crypto, did, error
@pytest.mark.asyncio
async def test_auth_crypt_works_for_created_key(wallet_handle, seed_my1, verkey_my2, message):
verkey = await did.create_key(wallet_handle, json.dumps({'seed': seed_my1}))
await crypto.auth_crypt(wallet_handle, verkey, verkey_my2, message)
@pytest.mark.asyncio
async def test_auth_crypt_works_for_unknown_sender_verkey(wallet_handle, verkey_my1, verkey_my2, message):
with pytest.raises(error.WalletItemNotFound):
await crypto.auth_crypt(wallet_handle, verkey_my1, verkey_my2, message)
@pytest.mark.asyncio
async def test_auth_crypt_works_for_invalid_handle(wallet_handle, verkey_my1, verkey_my2, message):
with pytest.raises(error.WalletInvalidHandle):
invalid_wallet_handle = wallet_handle + 1
await crypto.auth_crypt(invalid_wallet_handle, verkey_my1, verkey_my2, message)
@pytest.mark.asyncio
async def test_auth_crypt_works_for_invalid_recipient_vk(wallet_handle, identity_trustee1, message):
(_, key) = identity_trustee1
with pytest.raises(error.CommonInvalidStructure):
await crypto.auth_crypt(wallet_handle, key, 'CnEDk___MnmiHXEV1WFgbV___eYnPqs___TdcZaNhFVW', message)
|
peacekeeper/indy-sdk
|
wrappers/python/tests/crypto/test_auth_crypt.py
|
Python
|
apache-2.0
| 1,227 | 0.005705 |
#!/usr/bin/python3
#############
# this is to be loaded by every module.
# I think
#import mysglobal as g # args, logger @every module
#################
import logging
from logzero import setup_logger,LogFormatter,colors
import argparse
import os,sys
import json
from blessings import Terminal
import getpass # lockfile<= getuser
#from threading import Thread # thread: i need accesible thread
import uuid
DEBUG=True
config={} # global config, but not sure
MYSEPATH=os.path.expanduser("~/.myservice")
I_AM_INFINITE=False
BOTTOMLINE_TEXT="no message"
t = Terminal()
ZMQ_REP_PORT=5678
RANDOM_STR = uuid.uuid4()
user_name = os.getenv('USER') # for /var/run/screen/S-user
####################################
# PARSER ARG
######################################
parser=argparse.ArgumentParser(description="""
------------------------------------------------------------------
The tool to run services in userspace
""",usage="""
myservice2 [-d] ... shows the executables in ~/.myservice
myservice2 [-d] infinite ... run infinite (in terminal)
myservice test ... test is without a path inside ~/.myservice
myservice2 [-d] test enable ... introduces into .config.json
myservice2 [-d] test disable
myservice2 [-d] test never ... gray servicename and mode
myservice2 [-d] test undef
myservice2 [-d] test start
myservice2 [-d] test stop ... kills and makes UNDEF
myservice2 [-d] test perm4h ... run every 4 hours (it knows m,h,d)
myservice2 [-d] reconfig ... when a MANUAL edit to .config.json is done
script /dev/null ... do this when an ssh user has no access to screen
------------------------------------------------------------------
VARIOUS TRICKS:
myservice2 ... looks for all executables;
* ... already present in .config.json
E ... atribute enable is there
+ or - ... attribute enable is true or false
p ... attribute perm is ON; also a,x
PATHS:
when ~/.myservice/test/aaa
myservice2 aaa enable : finds a path and adds into the .config.json
myservice2 infinite ... runs the table in the terminal (only 1 instance possible)
OR connects to the screen -x myservice2_infinite
""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-d','--debug', action='store_true' , help='')
#parser.add_argument('-s','--serfmsg', default='',nargs="+" , help='serf message to mmap') # list will come after
#parser.add_argument('count', action="store", type=int)
parser.add_argument('service', action="store", nargs="?") # nargs='+' :
parser.add_argument('command', action="store", nargs="?") # nargs='+'
#parser.add_argument('command', action="store")
# print("""
# USAGE CASES:
# ------------------------------------------------------------------
# ./myservice2.py -d infinite
# ./myservice.py test enable
# ------------------------------------------------------------------
# VARIOUS TRICKS:
# subdir TTT
# myservice2 TTT/aaa enable : adds into the config
# # this was the last time about PATH!; from now on:
# myservice2 aaa sock
# ./myservice2.py -s myservice aaa # send command to mmap to test serf
# # aaa must be status sock
# """)
args=parser.parse_args()
#=========== path must exist
if not os.path.isdir( os.path.expanduser("~/.myservice") ):
#print(" directory exists")
#else:
print(" DIR NOT EXISTS")
os.mkdir( os.path.expanduser("~/.myservice") )
###########################################
# LOGGING - after AGR PARSE
########################################
log_format = '%(color)s%(levelname)1.1s... %(asctime)s%(end_color)s %(message)s' # i... format
LogFormatter.DEFAULT_COLORS[10] = colors.Fore.YELLOW ## debug level=10. default Cyan...
loglevel=1 if args.debug==1 else 11 # all info, but not debug
formatter = LogFormatter(fmt=log_format,datefmt='%Y-%m-%d %H:%M:%S')
logfile=os.path.splitext( os.path.expanduser("~/.myservice/")+os.path.basename(sys.argv[0]) )[0]+'.log'
logger = setup_logger( name="main",logfile=logfile, level=loglevel,formatter=formatter )#to 1-50
lockfile="/tmp/"+"myservice2_"+getpass.getuser()+".lock"
lockfilepid=0
|
jaromrax/myservice
|
version2/mysglobal.py
|
Python
|
gpl-2.0
| 4,226 | 0.014908 |
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import yaml
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.tests.unit import base
from mistral import utils
class WorkflowSpecValidationTestCase(base.BaseTest):
def __init__(self, *args, **kwargs):
super(WorkflowSpecValidationTestCase, self).__init__(*args, **kwargs)
# The relative resource path is ./mistral/tests/resources/workbook/v2.
self._resource_path = 'workbook/v2'
self._spec_parser = spec_parser.get_workflow_list_spec_from_yaml
self._dsl_blank = {
'version': '2.0',
'test': {
'type': 'direct'
}
}
self._dsl_tasks = {
'get': {
'action': 'std.http',
'input': {
'url': 'https://www.openstack.org'
}
},
'echo': {
'action': 'std.echo',
'input': {
'output': 'This is a test.'
}
},
'email': {
'action': 'std.email',
'input': {
'from_addr': 'mistral@example.com',
'to_addrs': ['admin@example.com'],
'subject': 'Test',
'body': 'This is a test.',
'smtp_server': 'localhost',
'smtp_password': 'password'
}
}
}
def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
changes=None, expect_error=False):
if dsl_file and add_tasks:
raise Exception('The add_tasks option is not a valid '
'combination with the dsl_file option.')
if dsl_file:
dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file)
if changes:
dsl_dict = yaml.safe_load(dsl_yaml)
utils.merge_dicts(dsl_dict, changes)
dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)
else:
dsl_dict = copy.deepcopy(self._dsl_blank)
if add_tasks:
dsl_dict['test']['tasks'] = copy.deepcopy(self._dsl_tasks)
if changes:
utils.merge_dicts(dsl_dict, changes)
dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False)
if not expect_error:
return self._spec_parser(dsl_yaml)
else:
return self.assertRaises(
exc.DSLParsingException,
self._spec_parser,
dsl_yaml
)
class WorkbookSpecValidationTestCase(WorkflowSpecValidationTestCase):
def __init__(self, *args, **kwargs):
super(WorkbookSpecValidationTestCase, self).__init__(*args, **kwargs)
self._spec_parser = spec_parser.get_workbook_spec_from_yaml
self._dsl_blank = {
'version': '2.0',
'name': 'test_wb'
}
def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
changes=None, expect_error=False):
return super(WorkbookSpecValidationTestCase, self)._parse_dsl_spec(
dsl_file=dsl_file, add_tasks=False, changes=changes,
expect_error=expect_error)
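# Illustrative subclass (hypothetical, not part of the suite) showing how the
# helpers above are meant to be used: start from the blank DSL, optionally add
# the canned tasks, merge overrides, and flag expected parse failures.
#
#   class MyWorkflowValidationTest(WorkflowSpecValidationTestCase):
#
#       def test_canned_tasks_parse(self):
#           self._parse_dsl_spec(add_tasks=True)
#
#       def test_unknown_workflow_type_fails(self):
#           self._parse_dsl_spec(add_tasks=True,
#                                changes={'test': {'type': 'bogus'}},
#                                expect_error=True)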
|
StackStorm/mistral
|
mistral/tests/unit/lang/v2/base.py
|
Python
|
apache-2.0
| 3,920 | 0 |
import requests
import datetime
import calendar
class DeskTime(object):
MAIN_URL = 'https://desktime.com/api/2/json/?{params}'
def __init__(self, app_key, username, password):
self.api_key = self._login(app_key, username, password)
if self.api_key is None:
raise Exception("Authorization error")
def _login(self, app_key, username, password):
auth = 'appkey={appkey}&action={action}&email={email}&password={password}'
auth = auth.format(appkey=app_key, action='authorize',
email=username, password=password)
auth_url = self.MAIN_URL.format(params=auth)
res = requests.get(auth_url)
data = res.json()
if not data.get(u'error', None):
return data.get('api_key', None)
return None
    def getAllDataForDate(self, date=None):
        # default to "today" at call time; a default argument value would be
        # evaluated once at import and grow stale
        if date is None:
            date = datetime.datetime.now().date()
employees = 'apikey={apikey}&action=employees&date={date}'
employees = employees.format(apikey=self.api_key, action='employees',
date=date.isoformat())
url = self.MAIN_URL.format(params=employees)
res = requests.get(url)
data = res.json()
if not data.get('error', None):
return data
return None
def getMonth(self, year, month, with_weekends=False):
monthrange = calendar.monthrange(year, month)
today = datetime.datetime.now().date()
data = []
resdata = {}
for dayindex in range(monthrange[1]):
day = dayindex + 1
date = datetime.date(year, month, day)
if date > today and date.year == today.year and today.month == date.month:
continue
elif date > today:
return None
if not with_weekends and date.weekday() in (5, 6):
continue
data.append(self.getAllDataForDate(date))
for elem in data:
resdata[elem.get('date')] = elem.get('employees')
return data
def getEmployee(self, employee_id):
raise(NotImplementedError)
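# Minimal usage sketch (the credentials below are placeholders, not real
# values; getMonth() returns one entry per working day by default):
#
#   client = DeskTime('MY_APP_KEY', 'user@example.com', 'secret')
#   today = client.getAllDataForDate()      # employee data for today
#   january = client.getMonth(2016, 1)      # working days of January 2016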
|
utek/pydesktime
|
pydesktime/desktime.py
|
Python
|
mit
| 2,182 | 0.000917 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import google.api_core.exceptions
from google.cloud.bigtable.column_family import MaxVersionsGCRule
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.table import ClusterState
from parameterized import parameterized
from airflow import AirflowException
from airflow.contrib.operators.gcp_bigtable_operator import \
BigtableInstanceDeleteOperator, \
BigtableTableDeleteOperator, \
BigtableTableCreateOperator, \
BigtableTableWaitForReplicationSensor, \
BigtableClusterUpdateOperator, \
BigtableInstanceCreateOperator
try:
# noinspection PyProtectedMember
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
PROJECT_ID = 'test_project_id'
INSTANCE_ID = 'test-instance-id'
CLUSTER_ID = 'test-cluster-id'
CLUSTER_ZONE = 'us-central1-f'
NODES = 5
TABLE_ID = 'test-table-id'
INITIAL_SPLIT_KEYS = []
EMPTY_COLUMN_FAMILIES = {}
class BigtableInstanceCreateTest(unittest.TestCase):
@parameterized.expand([
('instance_id', PROJECT_ID, '', CLUSTER_ID, CLUSTER_ZONE),
('main_cluster_id', PROJECT_ID, INSTANCE_ID, '', CLUSTER_ZONE),
('main_cluster_zone', PROJECT_ID, INSTANCE_ID, CLUSTER_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id,
main_cluster_id,
main_cluster_zone, mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableInstanceCreateOperator(
project_id=project_id,
instance_id=instance_id,
main_cluster_id=main_cluster_id,
main_cluster_zone=main_cluster_zone,
task_id="id"
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_create_instance_that_exists(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
op = BigtableInstanceCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.create_instance.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_create_instance_that_exists_empty_project_id(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
op = BigtableInstanceCreateOperator(
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.create_instance.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
op = BigtableInstanceCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id"
)
mock_hook.return_value.create_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.create_instance.assert_called_once_with(
cluster_nodes=None,
cluster_storage_type=None,
instance_display_name=None,
instance_id=INSTANCE_ID,
instance_labels=None,
instance_type=None,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
project_id=PROJECT_ID,
replica_cluster_id=None,
replica_cluster_zone=None,
timeout=None
)
class BigtableClusterUpdateTest(unittest.TestCase):
@parameterized.expand([
('instance_id', PROJECT_ID, '', CLUSTER_ID, NODES),
('cluster_id', PROJECT_ID, INSTANCE_ID, '', NODES),
('nodes', PROJECT_ID, INSTANCE_ID, CLUSTER_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id,
cluster_id, nodes, mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableClusterUpdateOperator(
project_id=project_id,
instance_id=instance_id,
cluster_id=cluster_id,
nodes=nodes,
task_id="id"
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_but_instance_does_not_exists(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id"
)
op.execute(None)
err = e.exception
self.assertEqual(str(err), "Dependency: instance '{}' does not exist.".format(
INSTANCE_ID))
mock_hook.assert_called_once_with()
mock_hook.return_value.update_cluster.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_but_instance_does_not_exists_empty_project_id(self,
mock_hook):
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id"
)
op.execute(None)
err = e.exception
self.assertEqual(str(err), "Dependency: instance '{}' does not exist.".format(
INSTANCE_ID))
mock_hook.assert_called_once_with()
mock_hook.return_value.update_cluster.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_that_does_not_exists(self, mock_hook):
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.update_cluster.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Cluster not found."))
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id"
)
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Dependency: cluster '{}' does not exist for instance '{}'.".format(
CLUSTER_ID, INSTANCE_ID)
)
mock_hook.assert_called_once_with()
mock_hook.return_value.update_cluster.assert_called_once_with(
instance=instance, cluster_id=CLUSTER_ID, nodes=NODES)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_that_does_not_exists_empty_project_id(self, mock_hook):
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.update_cluster.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Cluster not found."))
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id"
)
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Dependency: cluster '{}' does not exist for instance '{}'.".format(
CLUSTER_ID, INSTANCE_ID)
)
mock_hook.assert_called_once_with()
mock_hook.return_value.update_cluster.assert_called_once_with(
instance=instance, cluster_id=CLUSTER_ID, nodes=NODES)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
op = BigtableClusterUpdateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id"
)
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.update_cluster.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.update_cluster.assert_called_once_with(
instance=instance, cluster_id=CLUSTER_ID, nodes=NODES)
class BigtableInstanceDeleteTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_delete_execute(self, mock_hook):
op = BigtableInstanceDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_delete_execute_empty_project_id(self, mock_hook):
op = BigtableInstanceDeleteOperator(
instance_id=INSTANCE_ID,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID)
@parameterized.expand([
('instance_id', PROJECT_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id, mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableInstanceDeleteOperator(
project_id=project_id,
instance_id=instance_id,
task_id="id"
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_instance_that_doesnt_exists(self, mock_hook):
op = BigtableInstanceDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
task_id="id"
)
mock_hook.return_value.delete_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Instance not found."))
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_instance_that_doesnt_exists_empty_project_id(self, mock_hook):
op = BigtableInstanceDeleteOperator(
instance_id=INSTANCE_ID,
task_id="id"
)
mock_hook.return_value.delete_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Instance not found."))
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
op = BigtableInstanceDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
task_id="id"
)
mock_hook.return_value.delete_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_instance.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID)
class BigtableTableDeleteTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_delete_execute(self, mock_hook):
op = BigtableTableDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID)
@parameterized.expand([
('instance_id', PROJECT_ID, '', TABLE_ID),
('table_id', PROJECT_ID, INSTANCE_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id, table_id,
mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableTableDeleteOperator(
project_id=project_id,
instance_id=instance_id,
table_id=table_id,
task_id="id"
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_table_that_doesnt_exists(self, mock_hook):
op = BigtableTableDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Table not found."))
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_table_that_doesnt_exists_empty_project_id(self, mock_hook):
op = BigtableTableDeleteOperator(
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Table not found."))
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=None,
instance_id=INSTANCE_ID,
table_id=TABLE_ID)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_deleting_table_when_instance_doesnt_exists(self, mock_hook):
op = BigtableTableDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op.execute(None)
err = e.exception
self.assertEqual(str(err), "Dependency: instance '{}' does not exist.".format(
INSTANCE_ID))
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_table.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
op = BigtableTableDeleteOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID)
class BigtableTableCreateTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_create_execute(self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id"
)
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.create_table.assert_called_once_with(
instance=instance,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES)
@parameterized.expand([
('instance_id', PROJECT_ID, '', TABLE_ID),
('table_id', PROJECT_ID, INSTANCE_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id, table_id,
mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableTableCreateOperator(
project_id=project_id,
instance_id=instance_id,
table_id=table_id,
task_id="id"
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_instance_not_exists(self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id"
)
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Dependency: instance '{}' does not exist in project '{}'.".format(
INSTANCE_ID, PROJECT_ID)
)
mock_hook.assert_called_once_with()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_creating_table_that_exists(self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id"
)
mock_hook.return_value.get_column_families_for_table.return_value = \
EMPTY_COLUMN_FAMILIES
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.create_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.AlreadyExists("Table already exists."))
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.create_table.assert_called_once_with(
instance=instance,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_creating_table_that_exists_empty_project_id(self, mock_hook):
op = BigtableTableCreateOperator(
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id"
)
mock_hook.return_value.get_column_families_for_table.return_value = \
EMPTY_COLUMN_FAMILIES
instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.create_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.AlreadyExists("Table already exists."))
op.execute(None)
mock_hook.assert_called_once_with()
mock_hook.return_value.create_table.assert_called_once_with(
instance=instance,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES)
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_creating_table_that_exists_with_different_column_families_ids_in_the_table(
self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families=EMPTY_COLUMN_FAMILIES,
task_id="id"
)
mock_hook.return_value.get_column_families_for_table.return_value = {
"existing_family": None}
mock_hook.return_value.create_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.AlreadyExists("Table already exists."))
with self.assertRaises(AirflowException) as e:
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Table '{}' already exists with different Column Families.".format(TABLE_ID)
)
mock_hook.assert_called_once_with()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_creating_table_that_exists_with_different_column_families_gc_rule_in__table(
self, mock_hook):
op = BigtableTableCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
initial_split_keys=INITIAL_SPLIT_KEYS,
column_families={"cf-id": MaxVersionsGCRule(1)},
task_id="id"
)
cf_mock = mock.Mock()
cf_mock.gc_rule = mock.Mock(return_value=MaxVersionsGCRule(2))
mock_hook.return_value.get_column_families_for_table.return_value = {
"cf-id": cf_mock
}
mock_hook.return_value.create_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.AlreadyExists("Table already exists."))
with self.assertRaises(AirflowException) as e:
op.execute(None)
err = e.exception
self.assertEqual(
str(err),
"Table '{}' already exists with different Column Families.".format(TABLE_ID)
)
mock_hook.assert_called_once_with()
class BigtableWaitForTableReplicationTest(unittest.TestCase):
@parameterized.expand([
('instance_id', PROJECT_ID, '', TABLE_ID),
('table_id', PROJECT_ID, INSTANCE_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id, table_id,
mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableTableWaitForReplicationSensor(
project_id=project_id,
instance_id=instance_id,
table_id=table_id,
task_id="id"
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_wait_no_instance(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
op = BigtableTableWaitForReplicationSensor(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
self.assertFalse(op.poke(None))
mock_hook.assert_called_once_with()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_wait_no_table(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.get_cluster_states_for_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Table not found."))
op = BigtableTableWaitForReplicationSensor(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
self.assertFalse(op.poke(None))
mock_hook.assert_called_once_with()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_wait_not_ready(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.get_cluster_states_for_table.return_value = {
"cl-id": ClusterState(0)
}
op = BigtableTableWaitForReplicationSensor(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
self.assertFalse(op.poke(None))
mock_hook.assert_called_once_with()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_wait_ready(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
mock_hook.return_value.get_cluster_states_for_table.return_value = {
"cl-id": ClusterState(4)
}
op = BigtableTableWaitForReplicationSensor(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id"
)
self.assertTrue(op.poke(None))
mock_hook.assert_called_once_with()
|
adamhaney/airflow
|
tests/contrib/operators/test_gcp_bigtable_operator.py
|
Python
|
apache-2.0
| 29,272 | 0.00164 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" A set of utilities, mostly for post-processing and visualization
We put arrays on disk as raw bytes, extending along the first dimension.
Alongside each array x we ensure the value x.dtype which stores the string
description of the array's dtype.
See Also:
------------
@url
.. image::
@author epnev
"""
# \package caiman/source_extraction/cnmf
# \version 1.0
# \copyright GNU General Public License v2.0
# \date Created on Sat Sep 12 15:52:53 2015
from builtins import str
from builtins import range
from past.utils import old_div
import cv2
import h5py
import logging
import numpy as np
import os
import pylab as pl
import scipy
from scipy.sparse import spdiags, issparse, csc_matrix, csr_matrix
import scipy.ndimage.morphology as morph
from skimage.feature.peak import _get_high_intensity_peaks
import tifffile
from typing import List
from .initialization import greedyROI
from ...base.rois import com
from ...mmapping import parallel_dot_product, load_memmap
from ...cluster import extract_patch_coordinates
from ...utils.stats import df_percentile
def decimation_matrix(dims, sub):
    """Build a sparse matrix that block-averages (downsamples by `sub`) an
    image of shape `dims` given as a Fortran-flattened vector."""
    D = np.prod(dims)
if sub == 2 and D <= 10000: # faster for small matrices
ind = np.arange(D) // 2 - \
np.arange(dims[0], dims[0] + D) // (dims[0] * 2) * (dims[0] // 2) - \
(dims[0] % 2) * (np.arange(D) % (2 * dims[0]) > dims[0]) * (np.arange(1, 1 + D) % 2)
else:
def create_decimation_matrix_bruteforce(dims, sub):
dims_ds = tuple(1 + (np.array(dims) - 1) // sub)
d_ds = np.prod(dims_ds)
ds_matrix = np.eye(d_ds)
ds_matrix = np.repeat(np.repeat(
ds_matrix.reshape((d_ds,) + dims_ds, order='F'), sub, 1),
sub, 2)[:, :dims[0], :dims[1]].reshape((d_ds, -1), order='F')
ds_matrix /= ds_matrix.sum(1)[:, None]
ds_matrix = csc_matrix(ds_matrix, dtype=np.float32)
return ds_matrix
tmp = create_decimation_matrix_bruteforce((dims[0], sub), sub).indices
ind = np.concatenate([tmp] * (dims[1] // sub + 1))[:D] + \
np.arange(D) // (dims[0] * sub) * ((dims[0] - 1) // sub + 1)
data = 1. / np.unique(ind, return_counts=True)[1][ind]
return csc_matrix((data, ind, np.arange(1 + D)), dtype=np.float32)
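# Quick sketch of the intended use of decimation_matrix (assumed semantics:
# the returned sparse matrix block-averages a Fortran-flattened image):
#
#   dims, sub = (4, 4), 2
#   D = decimation_matrix(dims, sub)            # shape (4, 16); each row sums to 1
#   coarse = D.dot(img.flatten(order='F'))      # downsampled image, flattened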
def peak_local_max(image, min_distance=1, threshold_abs=None,
threshold_rel=None, exclude_border=True, indices=True,
num_peaks=np.inf, footprint=None):
"""Find peaks in an image as coordinate list or boolean mask.
Adapted from skimage to use opencv for speed.
Replaced scipy.ndimage.maximum_filter by cv2.dilate.
Peaks are the local maxima in a region of `2 * min_distance + 1`
(i.e. peaks are separated by at least `min_distance`).
If peaks are flat (i.e. multiple adjacent pixels have identical
intensities), the coordinates of all such pixels are returned.
If both `threshold_abs` and `threshold_rel` are provided, the maximum
of the two is chosen as the minimum intensity threshold of peaks.
Parameters
----------
image : ndarray
Input image.
min_distance : int, optional
Minimum number of pixels separating peaks in a region of `2 *
min_distance + 1` (i.e. peaks are separated by at least
`min_distance`).
To find the maximum number of peaks, use `min_distance=1`.
threshold_abs : float, optional
Minimum intensity of peaks. By default, the absolute threshold is
the minimum intensity of the image.
threshold_rel : float, optional
Minimum intensity of peaks, calculated as `max(image) * threshold_rel`.
exclude_border : int, optional
If nonzero, `exclude_border` excludes peaks from
within `exclude_border`-pixels of the border of the image.
indices : bool, optional
If True, the output will be an array representing peak
coordinates. If False, the output will be a boolean array shaped as
`image.shape` with peaks present at True elements.
num_peaks : int, optional
Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
return `num_peaks` peaks based on highest peak intensity.
footprint : ndarray of bools, optional
If provided, `footprint == 1` represents the local region within which
to search for peaks at every point in `image`. Overrides
`min_distance` (also for `exclude_border`).
Returns
-------
output : ndarray or ndarray of bools
* If `indices = True` : (row, column, ...) coordinates of peaks.
* If `indices = False` : Boolean array shaped like `image`, with peaks
represented by True values.
Notes
-----
The peak local maximum function returns the coordinates of local peaks
(maxima) in an image. A maximum filter is used for finding local maxima.
This operation dilates the original image. After comparison of the dilated
and original image, this function returns the coordinates or a mask of the
peaks where the dilated image equals the original image.
Examples
--------
>>> img1 = np.zeros((7, 7))
>>> img1[3, 4] = 1
>>> img1[3, 2] = 1.5
>>> img1
array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
>>> peak_local_max(img1, min_distance=1)
array([[3, 4],
[3, 2]])
>>> peak_local_max(img1, min_distance=2)
array([[3, 2]])
>>> img2 = np.zeros((20, 20, 20))
>>> img2[10, 10, 10] = 1
>>> peak_local_max(img2, exclude_border=0)
array([[10, 10, 10]])
"""
if type(exclude_border) == bool:
exclude_border = min_distance if exclude_border else 0
out = np.zeros_like(image, dtype=np.bool)
if np.all(image == image.flat[0]):
if indices is True:
return np.empty((0, 2), np.int)
else:
return out
# Non maximum filter
if footprint is not None:
# image_max = ndi.maximum_filter(image, footprint=footprint,
# mode='constant')
        # cv2.dilate takes the structuring element as a positional argument
        # (there is no `footprint` keyword) and expects a uint8 kernel
        image_max = cv2.dilate(image, footprint.astype(np.uint8), iterations=1)
else:
size = 2 * min_distance + 1
# image_max = ndi.maximum_filter(image, size=size, mode='constant')
image_max = cv2.dilate(image, cv2.getStructuringElement(
cv2.MORPH_RECT, (size, size)), iterations=1)
mask = image == image_max
if exclude_border:
# zero out the image borders
for i in range(mask.ndim):
mask = mask.swapaxes(0, i)
remove = (footprint.shape[i] if footprint is not None
else 2 * exclude_border)
mask[:remove // 2] = mask[-remove // 2:] = False
mask = mask.swapaxes(0, i)
# find top peak candidates above a threshold
thresholds = []
if threshold_abs is None:
threshold_abs = image.min()
thresholds.append(threshold_abs)
if threshold_rel is not None:
thresholds.append(threshold_rel * image.max())
if thresholds:
mask &= image > max(thresholds)
# Select highest intensities (num_peaks)
coordinates = _get_high_intensity_peaks(image, mask, num_peaks)
if indices is True:
return coordinates
else:
nd_indices = tuple(coordinates.T)
out[nd_indices] = True
return out
def dict_compare(d1, d2):
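    """Compare two dictionaries. Returns the keys only in d1 (`added`), the keys
    only in d2 (`removed`), a dict of shared keys whose values differ (`modified`,
    mapping key -> (d1 value, d2 value)), and the set of shared keys with equal
    values (`same`); value comparisons are numpy-aware."""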
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o : (d1[o], d2[o]) for o in intersect_keys if np.any(d1[o] != d2[o])}
same = set(o for o in intersect_keys if np.all(d1[o] == d2[o]))
return added, removed, modified, same
def computeDFF_traces(Yr, A, C, bl, quantileMin=8, frames_window=200):
    return extract_DF_F(Yr, A, C, bl, quantileMin, frames_window)
def extract_DF_F(Yr, A, C, bl, quantileMin=8, frames_window=200, block_size=400, dview=None):
""" Compute DFF function from cnmf output.
Disclaimer: it might be memory inefficient
Args:
Yr: ndarray (2D)
movie pixels X time
A: scipy.sparse.coo_matrix
spatial components (from cnmf cnm.A)
C: ndarray
temporal components (from cnmf cnm.C)
bl: ndarray
baseline for each component (from cnmf cnm.bl)
        quantileMin: float
            quantile used to estimate the baseline (values in [0, 100])
frames_window: int
number of frames for running quantile
Returns:
Cdf:
            the computed calcium activity, expressed as DF/F (fluorescence change normalized by the baseline)
See Also:
..image::docs/img/onlycnmf.png
"""
nA = np.array(np.sqrt(A.power(2).sum(0)).T)
A = scipy.sparse.coo_matrix(A / nA.T)
C = C * nA
bl = (bl * nA.T).squeeze()
nA = np.array(np.sqrt(A.power(2).sum(0)).T)
T = C.shape[-1]
if 'memmap' in str(type(Yr)):
if block_size >= 500:
print('Forcing single thread for memory issues')
dview_res = None
else:
print('Using thread. If memory issues set block_size larger than 500')
dview_res = dview
AY = parallel_dot_product(Yr, A, dview=dview_res, block_size=block_size,
transpose=True).T
else:
AY = A.T.dot(Yr)
bas_val = bl[None, :]
Bas = np.repeat(bas_val, T, 0).T
AA = A.T.dot(A)
AA.setdiag(0)
Cf = (C - Bas) * (nA**2)
C2 = AY - AA.dot(C)
if frames_window is None or frames_window > T:
Df = np.percentile(C2, quantileMin, axis=1)
C_df = Cf / Df[:, None]
else:
Df = scipy.ndimage.percentile_filter(
C2, quantileMin, (frames_window, 1))
C_df = Cf / Df
return C_df
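# Hedged usage sketch for extract_DF_F (names are hypothetical): assuming a fitted
# CNMF estimate with spatial components `cnm.A`, temporal components `cnm.C`,
# baselines `cnm.bl`, and a memory-mapped movie `Yr` of shape (pixels, time),
# a typical call would look like
#     C_df = extract_DF_F(Yr, cnm.A, cnm.C, cnm.bl, quantileMin=8, frames_window=200)
# where each row of C_df is the baseline-normalized trace of one component.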
def detrend_df_f(A, b, C, f, YrA=None, quantileMin=8, frames_window=500,
flag_auto=True, use_fast=False, detrend_only=False):
""" Compute DF/F signal without using the original data.
In general much faster than extract_DF_F
Args:
A: scipy.sparse.csc_matrix
spatial components (from cnmf cnm.A)
b: ndarray
spatial background components
C: ndarray
temporal components (from cnmf cnm.C)
f: ndarray
temporal background components
YrA: ndarray
residual signals
quantile_min: float
quantile used to estimate the baseline (values in [0,100])
frames_window: int
number of frames for computing running quantile
flag_auto: bool
flag for determining quantile automatically
use_fast: bool
            flag for using approximate fast percentile filtering
detrend_only: bool (False)
flag for only subtracting baseline and not normalizing by it.
Used in 1p data processing where baseline fluorescence cannot be
determined.
Returns:
F_df:
            the computed calcium activity, expressed as DF/F (fluorescence change normalized by the baseline)
"""
if C is None:
logging.warning("There are no components for DF/F extraction!")
return None
if b is None or f is None:
b = np.zeros((A.shape[0], 1))
f = np.zeros((1, C.shape[1]))
logging.warning("Background components not present. Results should" +
" not be interpreted as DF/F normalized but only" +
" as detrended.")
detrend_only = True
if 'csc_matrix' not in str(type(A)):
A = scipy.sparse.csc_matrix(A)
if 'array' not in str(type(b)):
b = b.toarray()
if 'array' not in str(type(C)):
C = C.toarray()
if 'array' not in str(type(f)):
f = f.toarray()
nA = np.sqrt(np.ravel(A.power(2).sum(axis=0)))
nA_mat = scipy.sparse.spdiags(nA, 0, nA.shape[0], nA.shape[0])
nA_inv_mat = scipy.sparse.spdiags(1. / nA, 0, nA.shape[0], nA.shape[0])
A = A * nA_inv_mat
C = nA_mat * C
if YrA is not None:
YrA = nA_mat * YrA
F = C + YrA if YrA is not None else C
B = A.T.dot(b).dot(f)
T = C.shape[-1]
if flag_auto:
data_prct, val = df_percentile(F[:, :frames_window], axis=1)
if frames_window is None or frames_window > T:
Fd = np.stack([np.percentile(f, prctileMin) for f, prctileMin in
zip(F, data_prct)])
Df = np.stack([np.percentile(f, prctileMin) for f, prctileMin in
zip(B, data_prct)])
if not detrend_only:
F_df = (F - Fd[:, None]) / (Df[:, None] + Fd[:, None])
else:
F_df = F - Fd[:, None]
else:
if use_fast:
Fd = np.stack([fast_prct_filt(f, level=prctileMin,
frames_window=frames_window) for
f, prctileMin in zip(F, data_prct)])
Df = np.stack([fast_prct_filt(f, level=prctileMin,
frames_window=frames_window) for
f, prctileMin in zip(B, data_prct)])
else:
Fd = np.stack([scipy.ndimage.percentile_filter(
f, prctileMin, (frames_window)) for f, prctileMin in
zip(F, data_prct)])
Df = np.stack([scipy.ndimage.percentile_filter(
f, prctileMin, (frames_window)) for f, prctileMin in
zip(B, data_prct)])
if not detrend_only:
F_df = (F - Fd) / (Df + Fd)
else:
F_df = F - Fd
else:
if frames_window is None or frames_window > T:
Fd = np.percentile(F, quantileMin, axis=1)
Df = np.percentile(B, quantileMin, axis=1)
if not detrend_only:
F_df = (F - Fd[:, None]) / (Df[:, None] + Fd[:, None])
else:
F_df = F - Fd[:, None]
else:
Fd = scipy.ndimage.percentile_filter(
F, quantileMin, (frames_window, 1))
Df = scipy.ndimage.percentile_filter(
B, quantileMin, (frames_window, 1))
if not detrend_only:
F_df = (F - Fd) / (Df + Fd)
else:
F_df = F - Fd
return F_df
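# Hedged usage sketch for detrend_df_f (variable names are hypothetical): given CNMF
# estimates A, b, C, f and residuals YrA,
#     F_dff = detrend_df_f(A, b, C, f, YrA=YrA, frames_window=500, flag_auto=True)
# With flag_auto=True the baseline percentile is chosen per trace via df_percentile;
# otherwise quantileMin is applied to every trace. The returned F_dff contains one
# detrended (and, unless detrend_only=True, baseline-normalized) trace per component.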
def fast_prct_filt(input_data, level=8, frames_window=1000):
"""
Fast approximate percentage filtering
"""
data = np.atleast_2d(input_data).copy()
T = np.shape(data)[-1]
downsampfact = frames_window
elm_missing = int(np.ceil(T * 1.0 / downsampfact)
* downsampfact - T)
padbefore = int(np.floor(elm_missing / 2.))
padafter = int(np.ceil(elm_missing / 2.))
tr_tmp = np.pad(data.T, ((padbefore, padafter), (0, 0)), mode='reflect')
numFramesNew, num_traces = np.shape(tr_tmp)
#% compute baseline quickly
tr_BL = np.reshape(tr_tmp, (downsampfact, int(numFramesNew / downsampfact),
num_traces), order='F')
tr_BL = np.percentile(tr_BL, level, axis=0)
tr_BL = scipy.ndimage.zoom(np.array(tr_BL, dtype=np.float32),
[downsampfact, 1], order=3, mode='nearest',
cval=0.0, prefilter=True)
if padafter == 0:
data -= tr_BL.T
else:
data -= tr_BL[padbefore:-padafter].T
return data.squeeze()
#%%
def detrend_df_f_auto(A, b, C, f, dims=None, YrA=None, use_annulus = True,
dist1 = 7, dist2 = 5, frames_window=1000,
use_fast = False):
"""
Compute DF/F using an automated level of percentile filtering based on
kernel density estimation.
Args:
A: scipy.sparse.csc_matrix
spatial components (from cnmf cnm.A)
b: ndarray
spatial backgrounds
C: ndarray
temporal components (from cnmf cnm.C)
f: ndarray
temporal background components
YrA: ndarray
residual signals
frames_window: int
number of frames for running quantile
use_fast: bool
flag for using fast approximate percentile filtering
Returns:
F_df:
            the computed calcium activity, expressed as DF/F (fluorescence change normalized by the baseline)
"""
if 'csc_matrix' not in str(type(A)):
A = scipy.sparse.csc_matrix(A)
if 'array' not in str(type(b)):
b = b.toarray()
if 'array' not in str(type(C)):
C = C.toarray()
if 'array' not in str(type(f)):
f = f.toarray()
nA = np.sqrt(np.ravel(A.power(2).sum(axis=0)))
nA_mat = scipy.sparse.spdiags(nA, 0, nA.shape[0], nA.shape[0])
nA_inv_mat = scipy.sparse.spdiags(1. / nA, 0, nA.shape[0], nA.shape[0])
A = A * nA_inv_mat
C = nA_mat * C
if YrA is not None:
YrA = nA_mat * YrA
F = C + YrA if YrA is not None else C
K = A.shape[-1]
A_ann = A.copy()
if use_annulus:
dist1 = 7
dist2 = 5
X, Y = np.meshgrid(np.arange(-dist1, dist1), np.arange(-dist1, dist1))
R = np.sqrt(X**2+Y**2)
R[R > dist1] = 0
R[R < dist2] = 0
R = R.astype('bool')
for k in range(K):
a = A[:, k].toarray().reshape(dims, order='F') > 0
a2 = np.bitwise_xor(morph.binary_dilation(a, R), a)
a2 = a2.astype(float).flatten(order='F')
a2 /= np.sqrt(a2.sum())
a2 = scipy.sparse.csc_matrix(a2)
A_ann[:, k] = a2.T
B = A_ann.T.dot(b).dot(f)
T = C.shape[-1]
data_prct, val = df_percentile(F[:, :frames_window], axis=1)
if frames_window is None or frames_window > T:
Fd = np.stack([np.percentile(f, prctileMin) for f, prctileMin in
zip(F, data_prct)])
Df = np.stack([np.percentile(f, prctileMin) for f, prctileMin in
zip(B, data_prct)])
F_df = (F - Fd[:, None]) / (Df[:, None] + Fd[:, None])
else:
if use_fast:
Fd = np.stack([fast_prct_filt(f, level=prctileMin,
frames_window=frames_window) for
f, prctileMin in zip(F, data_prct)])
Df = np.stack([fast_prct_filt(f, level=prctileMin,
frames_window=frames_window) for
f, prctileMin in zip(B, data_prct)])
else:
Fd = np.stack([scipy.ndimage.percentile_filter(
f, prctileMin, (frames_window)) for f, prctileMin in
zip(F, data_prct)])
Df = np.stack([scipy.ndimage.percentile_filter(
f, prctileMin, (frames_window)) for f, prctileMin in
zip(B, data_prct)])
F_df = (F - Fd) / (Df + Fd)
return F_df
#%%
def manually_refine_components(Y, xxx_todo_changeme, A, C, Cn, thr=0.9, display_numbers=True,
max_number=None, cmap=None, **kwargs):
"""Plots contour of spatial components
against a background image and allows to interactively add novel components by clicking with mouse
Args:
Y: ndarray
movie in 2D
(dx,dy): tuple
            dimensions of the square used to identify neurons (should be set to the value of gSiz)
A: np.ndarray or sparse matrix
Matrix of Spatial components (d x K)
Cn: np.ndarray (2D)
Background image (e.g. mean, correlation)
thr: scalar between 0 and 1
            Energy threshold for computing contours (default 0.9)
        display_numbers: Boolean
Display number of ROIs if checked (default True)
max_number: int
Display the number for only the first max_number components (default None, display all numbers)
cmap: string
User specifies the colormap (default None, default colormap)
Returns:
A: np.ndarray
            matrix A of estimated spatial component contributions
C: np.ndarray
array of estimated calcium traces
"""
(dx, dy) = xxx_todo_changeme
if issparse(A):
A = np.array(A.todense())
else:
A = np.array(A)
d1, d2 = np.shape(Cn)
d, nr = np.shape(A)
if max_number is None:
max_number = nr
x, y = np.mgrid[0:d1:1, 0:d2:1]
pl.imshow(Cn, interpolation=None, cmap=cmap)
cm = com(A, d1, d2)
Bmat = np.zeros((np.minimum(nr, max_number), d1, d2))
for i in range(np.minimum(nr, max_number)):
indx = np.argsort(A[:, i], axis=None)[::-1]
cumEn = np.cumsum(A[:, i].flatten()[indx]**2)
cumEn /= cumEn[-1]
Bvec = np.zeros(d)
Bvec[indx] = cumEn
Bmat[i] = np.reshape(Bvec, np.shape(Cn), order='F')
T = np.shape(Y)[-1]
pl.close()
fig = pl.figure()
ax = pl.gca()
ax.imshow(Cn, interpolation=None, cmap=cmap,
vmin=np.percentile(Cn[~np.isnan(Cn)], 1), vmax=np.percentile(Cn[~np.isnan(Cn)], 99))
for i in range(np.minimum(nr, max_number)):
pl.contour(y, x, Bmat[i], [thr])
if display_numbers:
for i in range(np.minimum(nr, max_number)):
ax.text(cm[i, 1], cm[i, 0], str(i + 1))
A3 = np.reshape(A, (d1, d2, nr), order='F')
while True:
pts = fig.ginput(1, timeout=0)
if pts != []:
print(pts)
xx, yy = np.round(pts[0]).astype(np.int)
coords_y = np.array(list(range(yy - dy, yy + dy + 1)))
coords_x = np.array(list(range(xx - dx, xx + dx + 1)))
coords_y = coords_y[(coords_y >= 0) & (coords_y < d1)]
coords_x = coords_x[(coords_x >= 0) & (coords_x < d2)]
a3_tiny = A3[coords_y[0]:coords_y[-1] +
1, coords_x[0]:coords_x[-1] + 1, :]
y3_tiny = Y[coords_y[0]:coords_y[-1] +
1, coords_x[0]:coords_x[-1] + 1, :]
dy_sz, dx_sz = np.shape(a3_tiny)[:-1]
y2_tiny = np.reshape(y3_tiny, (dx_sz * dy_sz, T), order='F')
a2_tiny = np.reshape(a3_tiny, (dx_sz * dy_sz, nr), order='F')
y2_res = y2_tiny - a2_tiny.dot(C)
y3_res = np.reshape(y2_res, (dy_sz, dx_sz, T), order='F')
a__, c__, center__, b_in__, f_in__ = greedyROI(
y3_res, nr=1, gSig=[np.floor(old_div(dx_sz, 2)), np.floor(old_div(dy_sz, 2))], gSiz=[dx_sz, dy_sz])
a_f = np.zeros((d, 1))
idxs = np.meshgrid(coords_y, coords_x)
a_f[np.ravel_multi_index(
idxs, (d1, d2), order='F').flatten()] = a__
A = np.concatenate([A, a_f], axis=1)
C = np.concatenate([C, c__], axis=0)
indx = np.argsort(a_f, axis=None)[::-1]
cumEn = np.cumsum(a_f.flatten()[indx]**2)
cumEn /= cumEn[-1]
Bvec = np.zeros(d)
Bvec[indx] = cumEn
bmat = np.reshape(Bvec, np.shape(Cn), order='F')
pl.contour(y, x, bmat, [thr])
pl.pause(.01)
elif pts == []:
break
nr += 1
A3 = np.reshape(A, (d1, d2, nr), order='F')
return A, C
def app_vertex_cover(A):
""" Finds an approximate vertex cover for a symmetric graph with adjacency matrix A.
Args:
A: boolean 2d array (K x K)
Adjacency matrix. A is boolean with diagonal set to 0
Returns:
L: A vertex cover of A
Authors:
Eftychios A. Pnevmatikakis, Simons Foundation, 2015
"""
L = []
while A.any():
nz = np.nonzero(A)[0] # find non-zero edges
u = nz[np.random.randint(0, len(nz))]
A[u, :] = False
A[:, u] = False
L.append(u)
return np.asarray(L)
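# Illustrative example for app_vertex_cover (hypothetical input): for the path graph
# 0 - 1 - 2 with boolean adjacency matrix
#     A = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=bool)
# a call app_vertex_cover(A.copy()) may return array([1]), since vertex 1 covers both
# edges. The choice of vertex is randomized, so other valid covers (e.g. [0, 2]) can
# also be returned; note that the function modifies its argument in place.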
def update_order(A, new_a=None, prev_list=None, method='greedy'):
'''Determines the update order of the temporal components given the spatial
components by creating a nest of random approximate vertex covers
Args:
A: np.ndarray
matrix of spatial components (d x K)
new_a: sparse array
spatial component that is added, in order to efficiently update the orders in online scenarios
prev_list: list of list
orders from previous iteration, you need to pass if new_a is not None
Returns:
O: list of sets
list of subsets of components. The components of each subset can be updated in parallel
lo: list
length of each subset
Written by Eftychios A. Pnevmatikakis, Simons Foundation, 2015
'''
K = np.shape(A)[-1]
if new_a is None and prev_list is None:
        if method == 'greedy':
prev_list, count_list = update_order_greedy(A, flag_AA=False)
else:
prev_list, count_list = update_order_random(A, flag_AA=False)
return prev_list, count_list
else:
if new_a is None or prev_list is None:
raise Exception(
'In the online update order you need to provide both new_a and prev_list')
counter = 0
AA = A.T.dot(new_a)
for group in prev_list:
if AA[list(group)].sum() == 0:
group.append(K)
counter += 1
break
if counter == 0:
if prev_list is not None:
prev_list = list(prev_list)
prev_list.append([K])
count_list = [len(gr) for gr in prev_list]
return prev_list, count_list
def order_components(A, C):
"""Order components based on their maximum temporal value and size
Args:
A: sparse matrix (d x K)
spatial components
C: matrix or np.ndarray (K x T)
temporal components
Returns:
A_or: np.ndarray
ordered spatial components
C_or: np.ndarray
ordered temporal components
srt: np.ndarray
sorting mapping
"""
A = np.array(A.todense())
nA2 = np.sqrt(np.sum(A**2, axis=0))
K = len(nA2)
A = np.array(np.matrix(A) * spdiags(old_div(1, nA2), 0, K, K))
nA4 = np.sum(A**4, axis=0)**0.25
C = np.array(spdiags(nA2, 0, K, K) * np.matrix(C))
mC = np.ndarray.max(np.array(C), axis=1)
srt = np.argsort(nA4 * mC)[::-1]
A_or = A[:, srt] * spdiags(nA2[srt], 0, K, K)
C_or = spdiags(old_div(1., nA2[srt]), 0, K, K) * (C[srt, :])
return A_or, C_or, srt
def update_order_random(A, flag_AA=True):
"""Determies the update order of temporal components using
randomized partitions of non-overlapping components
"""
K = np.shape(A)[-1]
if flag_AA:
AA = A.copy()
else:
AA = A.T.dot(A)
AA.setdiag(0)
F = (AA) > 0
F = F.toarray()
rem_ind = np.arange(K)
O = []
lo = []
while len(rem_ind) > 0:
L = np.sort(app_vertex_cover(F[rem_ind, :][:, rem_ind]))
if L.size:
ord_ind = set(rem_ind) - set(rem_ind[L])
rem_ind = rem_ind[L]
else:
ord_ind = set(rem_ind)
rem_ind = []
O.append(ord_ind)
lo.append(len(ord_ind))
return O[::-1], lo[::-1]
def update_order_greedy(A, flag_AA=True):
"""Determines the update order of the temporal components
this, given the spatial components using a greedy method
Basically we can update the components that are not overlapping, in parallel
Args:
A: sparse crc matrix
matrix of spatial components (d x K)
OR:
A.T.dot(A) matrix (d x d) if flag_AA = true
flag_AA: boolean (default true)
Returns:
parllcomp: list of sets
list of subsets of components. The components of each subset can be updated in parallel
len_parrllcomp: list
length of each subset
Author:
Eftychios A. Pnevmatikakis, Simons Foundation, 2017
"""
K = np.shape(A)[-1]
parllcomp:List = []
for i in range(K):
new_list = True
for ls in parllcomp:
if flag_AA:
if A[i, ls].nnz == 0:
ls.append(i)
new_list = False
break
else:
if (A[:, i].T.dot(A[:, ls])).nnz == 0:
ls.append(i)
new_list = False
break
if new_list:
parllcomp.append([i])
len_parrllcomp = [len(ls) for ls in parllcomp]
return parllcomp, len_parrllcomp
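# Illustrative sketch for update_order_greedy (hypothetical shapes): if components
# 0 and 2 do not spatially overlap but both overlap component 1, the function returns
# parllcomp = [[0, 2], [1]] and len_parrllcomp = [2, 1], i.e. components 0 and 2 can
# be updated in parallel, followed by component 1.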
#%%
def compute_residuals(Yr_mmap_file, A_, b_, C_, f_, dview=None, block_size=1000, num_blocks_per_run=5):
'''compute residuals from memory mapped file and output of CNMF
Args:
A_,b_,C_,f_:
from CNMF
block_size: int
number of pixels processed together
num_blocks_per_run: int
            number of blocks processed in parallel
Returns:
YrA: ndarray
residuals per neuron
'''
if not ('sparse' in str(type(A_))):
A_ = scipy.sparse.coo_matrix(A_)
Ab = scipy.sparse.hstack((A_, b_)).tocsc()
Cf = np.vstack((C_, f_))
nA = np.ravel(Ab.power(2).sum(axis=0))
if 'mmap' in str(type(Yr_mmap_file)):
YA = parallel_dot_product(Yr_mmap_file, Ab, dview=dview, block_size=block_size,
transpose=True, num_blocks_per_run=num_blocks_per_run) * scipy.sparse.spdiags(old_div(1., nA), 0, Ab.shape[-1], Ab.shape[-1])
else:
YA = (Ab.T.dot(Yr_mmap_file)).T * \
spdiags(old_div(1., nA), 0, Ab.shape[-1], Ab.shape[-1])
AA = ((Ab.T.dot(Ab)) * scipy.sparse.spdiags(old_div(1., nA),
0, Ab.shape[-1], Ab.shape[-1])).tocsr()
return (YA - (AA.T.dot(Cf)).T)[:, :A_.shape[-1]].T
def normalize_AC(A, C, YrA, b, f, neurons_sn):
""" Normalize to unit norm A and b
Args:
        A, C, YrA, b, f:
outputs of CNMF
"""
if 'sparse' in str(type(A)):
nA = np.ravel(np.sqrt(A.power(2).sum(0)))
else:
nA = np.ravel(np.sqrt((A**2).sum(0)))
if A is not None:
A /= nA
if C is not None:
C = np.array(C)
C *= nA[:, None]
if YrA is not None:
YrA = np.array(YrA)
YrA *= nA[:, None]
if b is not None:
if issparse(b):
nB = np.ravel(np.sqrt(b.power(2).sum(0)))
b = csc_matrix(b)
for k, i in enumerate(b.indptr[:-1]):
b.data[i:b.indptr[k + 1]] /= nB[k]
else:
nB = np.ravel(np.sqrt((b**2).sum(0)))
b = np.atleast_2d(b)
b /= nB
if issparse(f):
f = csr_matrix(f)
for k, i in enumerate(f.indptr[:-1]):
f.data[i:f.indptr[k + 1]] *= nB[k]
else:
f = np.atleast_2d(f)
f *= nB[:, np.newaxis]
if neurons_sn is not None:
neurons_sn *= nA
return csc_matrix(A), C, YrA, b, f, neurons_sn
def get_file_size(file_name, var_name_hdf5='mov'):
""" Computes the dimensions of a file or a list of files without loading
it/them in memory. An exception is thrown if the files have FOVs with
different sizes
Args:
file_name: str or list
locations of file(s) in memory
var_name_hdf5: 'str'
if loading from hdf5 name of the variable to load
Returns:
dims: list
dimensions of FOV
T: list
number of timesteps in each file
"""
if isinstance(file_name, str):
if os.path.exists(file_name):
_, extension = os.path.splitext(file_name)[:2]
extension = extension.lower()
if extension == '.mat':
byte_stream, file_opened = scipy.io.matlab.mio._open_file(file_name, appendmat=False)
mjv, mnv = scipy.io.matlab.mio.get_matfile_version(byte_stream)
if mjv == 2:
extension = '.h5'
if extension in ['.tif', '.tiff', '.btf']:
tffl = tifffile.TiffFile(file_name)
siz = tffl.series[0].shape
T, dims = siz[0], siz[1:]
elif extension == '.avi':
cap = cv2.VideoCapture(file_name)
dims = [0, 0]
try:
T = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
dims[1] = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
dims[0] = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                except Exception:
print('Roll back to opencv 2')
T = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
dims[1] = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
dims[0] = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
elif extension == '.mmap':
filename = os.path.split(file_name)[-1]
Yr, dims, T = load_memmap(os.path.join(
os.path.split(file_name)[0], filename))
elif extension in ('.h5', '.hdf5', '.nwb'):
with h5py.File(file_name, "r") as f:
kk = list(f.keys())
if len(kk) == 1:
siz = f[kk[0]].shape
elif var_name_hdf5 in f:
if extension == '.nwb':
siz = f[var_name_hdf5]['data'].shape
else:
siz = f[var_name_hdf5].shape
else:
                        logging.error('The file does not contain a variable ' +
                                      'named {0}'.format(var_name_hdf5))
raise Exception('Variable not found. Use one of the above')
T, dims = siz[0], siz[1:]
elif extension in ('.sbx'):
from ...base.movies import loadmat_sbx
info = loadmat_sbx(file_name[:-4]+ '.mat')['info']
dims = tuple((info['sz']).astype(int))
# Defining number of channels/size factor
if info['channels'] == 1:
info['nChan'] = 2
factor = 1
elif info['channels'] == 2:
info['nChan'] = 1
factor = 2
elif info['channels'] == 3:
info['nChan'] = 1
factor = 2
# Determine number of frames in whole file
T = int(os.path.getsize(
file_name[:-4] + '.sbx') / info['recordsPerBuffer'] / info['sz'][1] * factor / 4 - 1)
else:
raise Exception('Unknown file type')
dims = tuple(dims)
else:
raise Exception('File not found!')
elif isinstance(file_name, tuple):
from ...base.movies import load
dims = load(file_name[0], var_name_hdf5=var_name_hdf5).shape
T = len(file_name)
elif isinstance(file_name, list):
if len(file_name) == 1:
dims, T = get_file_size(file_name[0], var_name_hdf5=var_name_hdf5)
else:
dims, T = zip(*[get_file_size(fn, var_name_hdf5=var_name_hdf5)
for fn in file_name])
else:
raise Exception('Unknown input type')
return dims, T
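# Hedged usage sketch for get_file_size (file names and sizes are hypothetical):
#     dims, T = get_file_size('movie.tif')            # e.g. dims == (512, 512), T == 3000
#     dims, T = get_file_size(['m1.tif', 'm2.tif'])   # tuples of per-file dims and frame counts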
def fast_graph_Laplacian(mmap_file, dims, max_radius=10, kernel='heat',
dview=None, sigma=1, thr=0.05, p=10, normalize=True,
use_NN=False, rf=None, strides=None):
""" Computes an approximate affinity maps and its graph Laplacian for all
pixels. For each pixel it restricts its attention to a given radius around
it.
Args:
mmap_file: str
Memory mapped file in pixel first order
max_radius: float
Maximum radius around each pixel
kernel: str {'heat', 'binary', 'cos'}
type of kernel
dview: dview object
multiprocessing or ipyparallel object for parallelization
sigma: float
standard deviation of Gaussian (heat) kernel
thr: float
threshold for affinity matrix
p: int
number of neighbors
normalize: bool
normalize vectors before computing affinity
use_NN: bool
use only p nearest neighbors
Returns:
W: scipy.sparse.csr_matrix
Graph affinity matrix
D: scipy.sparse.spdiags
Diagonal of affinity matrix
L: scipy.sparse.csr_matrix
Graph Laplacian matrix
"""
Np = np.prod(np.array(dims))
if rf is None:
pars = []
for i in range(Np):
pars.append([i, mmap_file, dims, max_radius, kernel, sigma, thr,
p, normalize, use_NN])
if dview is None:
res = list(map(fast_graph_Laplacian_pixel, pars))
else:
res = dview.map(fast_graph_Laplacian_pixel, pars, chunksize=128)
indptr = np.cumsum(np.array([0] + [len(r[0]) for r in res]))
indeces = [item for sublist in res for item in sublist[0]]
data = [item for sublist in res for item in sublist[1]]
W = scipy.sparse.csr_matrix((data, indeces, indptr), shape=[Np, Np])
D = scipy.sparse.spdiags(W.sum(0), 0, Np, Np)
L = D - W
else:
indices, _ = extract_patch_coordinates(dims, rf, strides)
pars = []
for i in range(len(indices)):
pars.append([mmap_file, indices[i], kernel, sigma, thr, p,
normalize, use_NN])
if dview is None:
res = list(map(fast_graph_Laplacian_patches, pars))
else:
res = dview.map(fast_graph_Laplacian_patches, pars)
W = res
D = [scipy.sparse.spdiags(w.sum(0), 0, w.shape[0], w.shape[0]) for w in W]
L = [d - w for (d, w) in zip(W, D)]
return W, D, L
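# Hedged usage sketch for fast_graph_Laplacian (file name and FOV size are hypothetical):
#     W, D, L = fast_graph_Laplacian('Yr_d1_64_d2_64_order_F_frames_1000_.mmap',
#                                    dims=(64, 64), max_radius=10, kernel='heat', sigma=1)
# With the 'heat' kernel the affinity between pixels i and j (within max_radius of each
# other) is exp(-||y_i - y_j||^2 / sigma) on the (optionally normalized) traces, and
# L = D - W is the unnormalized graph Laplacian.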
def fast_graph_Laplacian_patches(pars):
""" Computes the full graph affinity matrix on a patch. See
fast_graph_Laplacian above for definition of arguments.
"""
mmap_file, indices, kernel, sigma, thr, p, normalize, use_NN = pars
    if not isinstance(mmap_file, (str, list)):
Yind = mmap_file
else:
Y = load_memmap(mmap_file)[0]
Yind = np.array(Y[indices])
if normalize:
Yind -= Yind.mean(1)[:, np.newaxis]
Yind /= np.sqrt((Yind**2).sum(1)[:, np.newaxis])
yf = np.ones((Yind.shape[0], 1))
else:
yf = (Yind**2).sum(1)[:, np.newaxis]
yyt = Yind.dot(Yind.T)
W = np.exp(-(yf + yf.T - 2*yyt)/sigma) if kernel.lower() == 'heat' else yyt
W[W<thr] = 0
if kernel.lower() == 'binary':
W[W>0] = 1
if use_NN:
ind = np.argpartition(W, -p, axis=1)[:, :-p]
for i in range(W.shape[0]):
W[i, ind[i]] = 0
W = scipy.sparse.csr_matrix(W)
W = (W + W.T)/2
return W
def fast_graph_Laplacian_pixel(pars):
""" Computes the i-th row of the Graph affinity matrix. See
fast_graph_Laplacian above for definition of arguments.
"""
i, mmap_file, dims, max_radius, kernel, sigma, thr, p, normalize, use_NN = pars
iy, ix = np.unravel_index(i, dims, order='F')
xx = np.arange(0, dims[1]) - ix
yy = np.arange(0, dims[0]) - iy
[XX, YY] = np.meshgrid(xx, yy)
R = np.sqrt(XX**2 + YY**2)
R = R.flatten('F')
indeces = np.where(R < max_radius)[0]
Y = load_memmap(mmap_file)[0]
Yind = np.array(Y[indeces])
y = np.array(Y[i, :])
if normalize:
Yind -= Yind.mean(1)[:, np.newaxis]
Yind /= np.sqrt((Yind**2).sum(1)[:, np.newaxis])
y -= y.mean()
y /= np.sqrt((y**2).sum())
D = Yind - y
if kernel.lower() == 'heat':
w = np.exp(-np.sum(D**2, axis=1)/sigma)
else: # kernel.lower() == 'cos':
w = Yind.dot(y.T)
w[w<thr] = 0
if kernel.lower() == 'binary':
w[w>0] = 1
if use_NN:
ind = np.argpartition(w, -p)[-p:]
else:
ind = np.where(w>0)[0]
return indeces[ind].tolist(), w[ind].tolist()
|
agiovann/Constrained_NMF
|
caiman/source_extraction/cnmf/utilities.py
|
Python
|
gpl-2.0
| 40,754 | 0.001718 |
for i in range(1000000):
def f(x, y=1, *args, **kw):
pass
|
jonathanverner/brython
|
www/speed/benchmarks/create_function_complex_args.py
|
Python
|
bsd-3-clause
| 71 | 0.014085 |
# pylint: skip-file
class GCPResource(object):
'''Object to represent a gcp resource'''
def __init__(self, rname, rtype, project, zone):
'''constructor for gcp resource'''
self._name = rname
self._type = rtype
self._project = project
self._zone = zone
@property
def name(self):
'''property for name'''
return self._name
@property
def type(self):
'''property for type'''
return self._type
@property
def project(self):
'''property for project'''
return self._project
@property
def zone(self):
'''property for zone'''
return self._zone
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/roles/lib_gcloud/build/lib/gcpresource.py
|
Python
|
apache-2.0
| 683 | 0 |
__author__ = "Jacob Lydon"
__copyright__ = "Copyright 2017"
__credits__ = []
__license__ = "GPLv3"
__version__ = "0.1"
__maintainer__ = "Jacob Lydon"
__email__ = "jlydon001@regis.edu"
__status__ = "Development"
|
lydonjake/cs-grad-school-app
|
program/__init__.py
|
Python
|
gpl-3.0
| 211 | 0.004739 |
# /ciscripts/check/python/__init__.py
#
# Module loader file for /ciscripts/check/python.
#
# See /LICENCE.md for Copyright information
"""Module loader file for /ciscripts/check/python."""
|
polysquare/polysquare-ci-scripts
|
ciscripts/check/python/__init__.py
|
Python
|
mit
| 190 | 0 |
# Build Code
import os
import subprocess
import re
class GCC:
def __init__(self):
self.enter_match = re.compile(r'Entering directory')
self.leave_match = re.compile(r'Leaving directory')
def can_build(self, dirname, ext):
if ext in (".c", ".h", ".cpp", ".hpp"):
files = [f.lower() for f in os.listdir(dirname)]
if "makefile" in files:
self.makefile_dir = dirname
return True
return False
def run(self, action, output):
args = ["make"]
if action:
args.append(action)
print(args)
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
errorLines = []
while True:
line = proc.stdout.readline().decode("utf-8")
if len(line) == 0:
break
output.write(line)
if line.startswith("In file included from"):
errorLines.append(line)
else:
idx = line.find("Entering directory")
if idx >= 0:
errorLines.append(line)
else:
idx = line.find("Leaving directory")
if idx >= 0:
errorLines.append(line)
else:
idx = line.find("warning:")
if idx >= 0:
errorLines.append(line)
output.write(line)
return errorLines
def get_plugin():
return GCC()
|
peter1010/my_vim
|
vimfiles/py_scripts/build_types/gcc.py
|
Python
|
gpl-2.0
| 1,180 | 0.036441 |
import maya.cmds as cmds
from . import renamer_settings as settings
class FieldReplacer(object):
def __init__(self):
print 'Initializing jsRenamer FieldReplacer...'
#replaceMaterial = self.replaceMaterial
def checkTemplate(self,node):
#availPos = ['C','L','R','LF','RF','LB','RB','U','B']
#availSuf=['GES','GEP','PLY','NRB']
#sel = cmds.ls(sl=1)
#for node in sel:
splitNode = node.split('_')
#print splitNode
#print splitNode[0][-3:]
#check if correct amount of fields
if len(splitNode) == 5:
return True
else:
return False
##########################################
#####REPLACE FIELD########################
##########################################
def replaceMaterial(self, args=None):
ReplaceSel = cmds.ls(sl=1)
prefixReplace = cmds.textField('materialField',query=True,tx=1)
prefixReplace= prefixReplace
if prefixReplace == '':
pass
else:
for each in ReplaceSel:
if self.checkTemplate(each) == True:
if '|' in each:
replacerOldName=each.split('|')[-1]
else:
replacerOldName = each
prefixSplit = replacerOldName.split('_',1)
prefixReplaceName = prefixReplace+ '_' +str(prefixSplit[1])
#print prefixReplaceName
cmds.rename(each,prefixReplaceName)
else:
cmds.error(each+' does not match naming Template (default_C_default_0000_???)')
def replacePosition(self, args=None):
ReplaceSel = cmds.ls(sl=1)
positionReplace = cmds.optionMenu('positionField',query=True,v=1)
for each in ReplaceSel:
if self.checkTemplate(each) == True:
#print each
if '|' in each:
replacerOldName=each.split('|')[-1]
else:
replacerOldName = each
positionSplit = replacerOldName.split('_')
newPosName = positionSplit[0]+'_'+positionReplace+'_'+positionSplit[2]+'_'+positionSplit[3]+'_'+positionSplit[4]
#print newPosName
cmds.rename(each,newPosName)
else:
cmds.error(each+' does not match naming Template (default_C_default_0000_???)')
def replaceBody(self, args=None):
ReplaceSel = cmds.ls(sl=1)
bodyReplace = cmds.textField('bodyField',query=True,tx=1)
for each in ReplaceSel:
if self.checkTemplate(each) == True:
#print each
if '|' in each:
replacerOldName=each.split('|')[-1]
else:
replacerOldName = each
bodySplit = replacerOldName.split('_')
newBodyName = bodySplit[0]+'_'+bodySplit[1]+'_'+bodyReplace+'_'+bodySplit[3]+'_'+bodySplit[4]
#print newBodyName
cmds.rename(each,newBodyName)
else:
cmds.error(each+' does not match naming Template (default_C_default_0000_???)')
###Replace GEO_Suffix
def replaceGeoSuffix(self, args=None):
ReplaceSel = cmds.ls(sl=1)
suffixReplace = cmds.optionMenu('suffixField',query=True,v=1)
for each in ReplaceSel:
if self.checkTemplate(each) == True:
#print each
if '|' in each:
replacerOldName=each.split('|')[-1]
else:
replacerOldName = each
suffixSplit = replacerOldName.rsplit('_',1)
suffixReplaceName = suffixSplit[0] + '_' +suffixReplace
#print suffixReplaceName
cmds.rename(each,suffixReplaceName)
else:
cmds.error(each+' does not match naming Template (default_C_default_0000_???)')
###Replacer
def replacer(self, args=None):
replacerSel = cmds.ls(sl=1)
replacerOld = cmds.textField('replacerOldField',query = True,text=True)
replacerNew = cmds.textField('replacerNewField',query = True,text=True)
for each in replacerSel:
if '|' in each:
replacerOldName=each.split('|')[-1]
else:
replacerOldName = each
replacerNewName = replacerOldName.replace(replacerOld,replacerNew)
print replacerNewName
cmds.rename(each, replacerNewName)
###PrefixAdd
def addPrefix(self, args=None):
prefixSel = cmds.ls(sl=1)
prefixAddition = cmds.textField('addPrefixField',query = True,text=True)
for each in prefixSel:
newPrefixName = prefixAddition+each
print newPrefixName
cmds.rename(each,newPrefixName)
###Suffix Add
def addSuffix(self, args=None):
suffixSel = cmds.ls(sl=1)
suffixAddition = cmds.textField('addSuffixField',query = True,text=True)
for each in suffixSel:
newSuffixName = each+suffixAddition
print newSuffixName
cmds.rename(each,newSuffixName)
###Replace Prefix
def replacePrefix(self, args=None):
prefixReplaceSel = cmds.ls(sl=1)
prefixReplace = cmds.textField('replacePrefixField',query = True,text=True)
if prefixReplace == '':
pass
else:
for each in prefixReplaceSel:
try:
if '|' in each:
replacerOldName=each.split('|')[-1]
else:
replacerOldName = each
prefixSplit = replacerOldName.split('_',1)
prefixReplaceName = prefixReplace+ '_' +str(prefixSplit[1])
print prefixReplaceName
cmds.rename(each,prefixReplaceName)
except:
pass
###Replace Geo Suffix
def replaceSuffix(self, args=None):
suffixReplaceSel = cmds.ls(sl=1)
suffixReplace = cmds.textField('replaceSuffixField',query = True,text=True)
if suffixReplace == '':
pass
else:
for each in suffixReplaceSel:
try:
if '|' in each:
replacerOldName=each.split('|')[-1]
else:
replacerOldName = each
suffixSplit = replacerOldName.rsplit('_',1)
suffixReplaceName = suffixSplit[0] + '_' +suffixReplace
print suffixReplaceName
cmds.rename(each,suffixReplaceName)
except:
pass
|
jszokoli/jsTK
|
jsRenamer/field_replacer.py
|
Python
|
gpl-3.0
| 6,998 | 0.018148 |
"""The HTTP api to control the cloud integration."""
import asyncio
from functools import wraps
import logging
import aiohttp
import async_timeout
import attr
from hass_nabucasa import Cloud, auth, thingtalk
from hass_nabucasa.const import STATE_DISCONNECTED
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.alexa import (
entities as alexa_entities,
errors as alexa_errors,
)
from homeassistant.components.google_assistant import helpers as google_helpers
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.websocket_api import const as ws_const
from homeassistant.core import callback
from .const import (
DOMAIN,
PREF_ALEXA_REPORT_STATE,
PREF_ENABLE_ALEXA,
PREF_ENABLE_GOOGLE,
PREF_GOOGLE_REPORT_STATE,
PREF_GOOGLE_SECURE_DEVICES_PIN,
REQUEST_TIMEOUT,
InvalidTrustedNetworks,
InvalidTrustedProxies,
RequireRelink,
)
_LOGGER = logging.getLogger(__name__)
WS_TYPE_STATUS = "cloud/status"
SCHEMA_WS_STATUS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_STATUS}
)
WS_TYPE_SUBSCRIPTION = "cloud/subscription"
SCHEMA_WS_SUBSCRIPTION = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_SUBSCRIPTION}
)
WS_TYPE_HOOK_CREATE = "cloud/cloudhook/create"
SCHEMA_WS_HOOK_CREATE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_HOOK_CREATE, vol.Required("webhook_id"): str}
)
WS_TYPE_HOOK_DELETE = "cloud/cloudhook/delete"
SCHEMA_WS_HOOK_DELETE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_HOOK_DELETE, vol.Required("webhook_id"): str}
)
_CLOUD_ERRORS = {
InvalidTrustedNetworks: (
500,
"Remote UI not compatible with 127.0.0.1/::1 as a trusted network.",
),
InvalidTrustedProxies: (
500,
"Remote UI not compatible with 127.0.0.1/::1 as trusted proxies.",
),
}
async def async_setup(hass):
"""Initialize the HTTP API."""
async_register_command = hass.components.websocket_api.async_register_command
async_register_command(WS_TYPE_STATUS, websocket_cloud_status, SCHEMA_WS_STATUS)
async_register_command(
WS_TYPE_SUBSCRIPTION, websocket_subscription, SCHEMA_WS_SUBSCRIPTION
)
async_register_command(websocket_update_prefs)
async_register_command(
WS_TYPE_HOOK_CREATE, websocket_hook_create, SCHEMA_WS_HOOK_CREATE
)
async_register_command(
WS_TYPE_HOOK_DELETE, websocket_hook_delete, SCHEMA_WS_HOOK_DELETE
)
async_register_command(websocket_remote_connect)
async_register_command(websocket_remote_disconnect)
async_register_command(google_assistant_list)
async_register_command(google_assistant_update)
async_register_command(alexa_list)
async_register_command(alexa_update)
async_register_command(alexa_sync)
async_register_command(thingtalk_convert)
hass.http.register_view(GoogleActionsSyncView)
hass.http.register_view(CloudLoginView)
hass.http.register_view(CloudLogoutView)
hass.http.register_view(CloudRegisterView)
hass.http.register_view(CloudResendConfirmView)
hass.http.register_view(CloudForgotPasswordView)
_CLOUD_ERRORS.update(
{
auth.UserNotFound: (400, "User does not exist."),
auth.UserNotConfirmed: (400, "Email not confirmed."),
auth.UserExists: (400, "An account with the given email already exists."),
auth.Unauthenticated: (401, "Authentication failed."),
auth.PasswordChangeRequired: (400, "Password change required."),
asyncio.TimeoutError: (502, "Unable to reach the Home Assistant cloud."),
aiohttp.ClientError: (500, "Error making internal request"),
}
)
def _handle_cloud_errors(handler):
"""Webview decorator to handle auth errors."""
@wraps(handler)
async def error_handler(view, request, *args, **kwargs):
"""Handle exceptions that raise from the wrapped request handler."""
try:
result = await handler(view, request, *args, **kwargs)
return result
except Exception as err: # pylint: disable=broad-except
status, msg = _process_cloud_exception(err, request.path)
return view.json_message(
msg, status_code=status, message_code=err.__class__.__name__.lower()
)
return error_handler
def _ws_handle_cloud_errors(handler):
"""Websocket decorator to handle auth errors."""
@wraps(handler)
async def error_handler(hass, connection, msg):
"""Handle exceptions that raise from the wrapped handler."""
try:
return await handler(hass, connection, msg)
except Exception as err: # pylint: disable=broad-except
err_status, err_msg = _process_cloud_exception(err, msg["type"])
connection.send_error(msg["id"], err_status, err_msg)
return error_handler
def _process_cloud_exception(exc, where):
"""Process a cloud exception."""
err_info = _CLOUD_ERRORS.get(exc.__class__)
if err_info is None:
_LOGGER.exception("Unexpected error processing request for %s", where)
err_info = (502, f"Unexpected error: {exc}")
return err_info
class GoogleActionsSyncView(HomeAssistantView):
"""Trigger a Google Actions Smart Home Sync."""
url = "/api/cloud/google_actions/sync"
name = "api:cloud:google_actions/sync"
@_handle_cloud_errors
async def post(self, request):
"""Trigger a Google Actions sync."""
hass = request.app["hass"]
cloud: Cloud = hass.data[DOMAIN]
gconf = await cloud.client.get_google_config()
status = await gconf.async_sync_entities(gconf.agent_user_id)
return self.json({}, status_code=status)
class CloudLoginView(HomeAssistantView):
"""Login to Home Assistant cloud."""
url = "/api/cloud/login"
name = "api:cloud:login"
@_handle_cloud_errors
@RequestDataValidator(
vol.Schema({vol.Required("email"): str, vol.Required("password"): str})
)
async def post(self, request, data):
"""Handle login request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
await cloud.login(data["email"], data["password"])
return self.json({"success": True})
class CloudLogoutView(HomeAssistantView):
"""Log out of the Home Assistant cloud."""
url = "/api/cloud/logout"
name = "api:cloud:logout"
@_handle_cloud_errors
async def post(self, request):
"""Handle logout request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await cloud.logout()
return self.json_message("ok")
class CloudRegisterView(HomeAssistantView):
"""Register on the Home Assistant cloud."""
url = "/api/cloud/register"
name = "api:cloud:register"
@_handle_cloud_errors
@RequestDataValidator(
vol.Schema(
{
vol.Required("email"): str,
vol.Required("password"): vol.All(str, vol.Length(min=6)),
}
)
)
async def post(self, request, data):
"""Handle registration request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await hass.async_add_job(
cloud.auth.register, data["email"], data["password"]
)
return self.json_message("ok")
class CloudResendConfirmView(HomeAssistantView):
"""Resend email confirmation code."""
url = "/api/cloud/resend_confirm"
name = "api:cloud:resend_confirm"
@_handle_cloud_errors
@RequestDataValidator(vol.Schema({vol.Required("email"): str}))
async def post(self, request, data):
"""Handle resending confirm email code request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await hass.async_add_job(cloud.auth.resend_email_confirm, data["email"])
return self.json_message("ok")
class CloudForgotPasswordView(HomeAssistantView):
"""View to start Forgot Password flow.."""
url = "/api/cloud/forgot_password"
name = "api:cloud:forgot_password"
@_handle_cloud_errors
@RequestDataValidator(vol.Schema({vol.Required("email"): str}))
async def post(self, request, data):
"""Handle forgot password request."""
hass = request.app["hass"]
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await hass.async_add_job(cloud.auth.forgot_password, data["email"])
return self.json_message("ok")
@callback
def websocket_cloud_status(hass, connection, msg):
"""Handle request for account info.
Async friendly.
"""
cloud = hass.data[DOMAIN]
connection.send_message(
websocket_api.result_message(msg["id"], _account_data(cloud))
)
def _require_cloud_login(handler):
"""Websocket decorator that requires cloud to be logged in."""
@wraps(handler)
def with_cloud_auth(hass, connection, msg):
"""Require to be logged into the cloud."""
cloud = hass.data[DOMAIN]
if not cloud.is_logged_in:
connection.send_message(
websocket_api.error_message(
msg["id"], "not_logged_in", "You need to be logged in to the cloud."
)
)
return
handler(hass, connection, msg)
return with_cloud_auth
@_require_cloud_login
@websocket_api.async_response
async def websocket_subscription(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
response = await cloud.fetch_subscription_info()
if response.status != 200:
connection.send_message(
websocket_api.error_message(
msg["id"], "request_failed", "Failed to request subscription"
)
)
data = await response.json()
# Check if a user is subscribed but local info is outdated
# In that case, let's refresh and reconnect
if data.get("provider") and not cloud.is_connected:
_LOGGER.debug("Found disconnected account with valid subscriotion, connecting")
await hass.async_add_executor_job(cloud.auth.renew_access_token)
# Cancel reconnect in progress
if cloud.iot.state != STATE_DISCONNECTED:
await cloud.iot.disconnect()
hass.async_create_task(cloud.iot.connect())
connection.send_message(websocket_api.result_message(msg["id"], data))
@_require_cloud_login
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "cloud/update_prefs",
vol.Optional(PREF_ENABLE_GOOGLE): bool,
vol.Optional(PREF_ENABLE_ALEXA): bool,
vol.Optional(PREF_ALEXA_REPORT_STATE): bool,
vol.Optional(PREF_GOOGLE_REPORT_STATE): bool,
vol.Optional(PREF_GOOGLE_SECURE_DEVICES_PIN): vol.Any(None, str),
}
)
async def websocket_update_prefs(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop("id")
changes.pop("type")
# If we turn alexa linking on, validate that we can fetch access token
if changes.get(PREF_ALEXA_REPORT_STATE):
try:
with async_timeout.timeout(10):
await cloud.client.alexa_config.async_get_access_token()
except asyncio.TimeoutError:
connection.send_error(
msg["id"], "alexa_timeout", "Timeout validating Alexa access token."
)
return
except (alexa_errors.NoTokenAvailable, RequireRelink):
connection.send_error(
msg["id"],
"alexa_relink",
"Please go to the Alexa app and re-link the Home Assistant "
"skill and then try to enable state reporting.",
)
return
await cloud.client.prefs.async_update(**changes)
connection.send_message(websocket_api.result_message(msg["id"]))
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
async def websocket_hook_create(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
hook = await cloud.cloudhooks.async_create(msg["webhook_id"], False)
connection.send_message(websocket_api.result_message(msg["id"], hook))
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
async def websocket_hook_delete(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
await cloud.cloudhooks.async_delete(msg["webhook_id"])
connection.send_message(websocket_api.result_message(msg["id"]))
def _account_data(cloud):
"""Generate the auth data JSON response."""
if not cloud.is_logged_in:
return {"logged_in": False, "cloud": STATE_DISCONNECTED}
claims = cloud.claims
client = cloud.client
remote = cloud.remote
# Load remote certificate
if remote.certificate:
certificate = attr.asdict(remote.certificate)
else:
certificate = None
return {
"logged_in": True,
"email": claims["email"],
"cloud": cloud.iot.state,
"prefs": client.prefs.as_dict(),
"google_entities": client.google_user_config["filter"].config,
"alexa_entities": client.alexa_user_config["filter"].config,
"remote_domain": remote.instance_domain,
"remote_connected": remote.is_connected,
"remote_certificate": certificate,
}
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/remote/connect"})
async def websocket_remote_connect(hass, connection, msg):
"""Handle request for connect remote."""
cloud = hass.data[DOMAIN]
await cloud.client.prefs.async_update(remote_enabled=True)
await cloud.remote.connect()
connection.send_result(msg["id"], _account_data(cloud))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/remote/disconnect"})
async def websocket_remote_disconnect(hass, connection, msg):
"""Handle request for disconnect remote."""
cloud = hass.data[DOMAIN]
await cloud.client.prefs.async_update(remote_enabled=False)
await cloud.remote.disconnect()
connection.send_result(msg["id"], _account_data(cloud))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/google_assistant/entities"})
async def google_assistant_list(hass, connection, msg):
"""List all google assistant entities."""
cloud = hass.data[DOMAIN]
gconf = await cloud.client.get_google_config()
entities = google_helpers.async_get_entities(hass, gconf)
result = []
for entity in entities:
result.append(
{
"entity_id": entity.entity_id,
"traits": [trait.name for trait in entity.traits()],
"might_2fa": entity.might_2fa(),
}
)
connection.send_result(msg["id"], result)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command(
{
"type": "cloud/google_assistant/entities/update",
"entity_id": str,
vol.Optional("should_expose"): bool,
vol.Optional("override_name"): str,
vol.Optional("aliases"): [str],
vol.Optional("disable_2fa"): bool,
}
)
async def google_assistant_update(hass, connection, msg):
"""Update google assistant config."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop("type")
changes.pop("id")
await cloud.client.prefs.async_update_google_entity_config(**changes)
connection.send_result(
msg["id"], cloud.client.prefs.google_entity_configs.get(msg["entity_id"])
)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/alexa/entities"})
async def alexa_list(hass, connection, msg):
"""List all alexa entities."""
cloud = hass.data[DOMAIN]
entities = alexa_entities.async_get_entities(hass, cloud.client.alexa_config)
result = []
for entity in entities:
result.append(
{
"entity_id": entity.entity_id,
"display_categories": entity.default_display_categories(),
"interfaces": [ifc.name() for ifc in entity.interfaces()],
}
)
connection.send_result(msg["id"], result)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command(
{
"type": "cloud/alexa/entities/update",
"entity_id": str,
vol.Optional("should_expose"): bool,
}
)
async def alexa_update(hass, connection, msg):
"""Update alexa entity config."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop("type")
changes.pop("id")
await cloud.client.prefs.async_update_alexa_entity_config(**changes)
connection.send_result(
msg["id"], cloud.client.prefs.alexa_entity_configs.get(msg["entity_id"])
)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@websocket_api.websocket_command({"type": "cloud/alexa/sync"})
async def alexa_sync(hass, connection, msg):
"""Sync with Alexa."""
cloud = hass.data[DOMAIN]
with async_timeout.timeout(10):
try:
success = await cloud.client.alexa_config.async_sync_entities()
except alexa_errors.NoTokenAvailable:
connection.send_error(
msg["id"],
"alexa_relink",
"Please go to the Alexa app and re-link the Home Assistant " "skill.",
)
return
if success:
connection.send_result(msg["id"])
else:
connection.send_error(msg["id"], ws_const.ERR_UNKNOWN_ERROR, "Unknown error")
@websocket_api.async_response
@websocket_api.websocket_command({"type": "cloud/thingtalk/convert", "query": str})
async def thingtalk_convert(hass, connection, msg):
"""Convert a query."""
cloud = hass.data[DOMAIN]
with async_timeout.timeout(10):
try:
connection.send_result(
msg["id"], await thingtalk.async_convert(cloud, msg["query"])
)
except thingtalk.ThingTalkConversionError as err:
connection.send_error(msg["id"], ws_const.ERR_UNKNOWN_ERROR, str(err))
|
joopert/home-assistant
|
homeassistant/components/cloud/http_api.py
|
Python
|
apache-2.0
| 19,172 | 0.000939 |
import web
db = web.database(dbn='mysql', db='googlemodules', user='ale', passwd='3babes')
for url in db.select('function', what='screenshot'):
print 'http://www.googlemodules.com/image/screenshot'
|
gcobos/rft
|
scripts/get_all_images.py
|
Python
|
agpl-3.0
| 207 | 0 |
class PSFModel(object):
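    """Container for microscope and point-spread-function parameters; CreatePsf runs
    the PSF-generation command named by `psfCommandName` with these parameters and
    returns the resulting PSF image (assumed SciJava/ImageJ command interface)."""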
def __init__(self, scopeType, psfModel, xySpace, zSpace, emissionWavelength, numericalAperture, designImmersionOilRefractiveIndex, \
designSpecimenLayerRefractiveIndex, actualImmersionOilRefractiveIndex, \
actualSpecimenLayerRefractiveIndex, actualPointSourceDepthInSpecimenLayer, homeDirectory):
self.scopeType=scopeType
self.psfModel=psfModel
self.xySpace=xySpace
self.zSpace=zSpace
self.emissionWavelength=emissionWavelength
self.numericalAperture=numericalAperture
self.designImmersionOilRefractiveIndex=designImmersionOilRefractiveIndex
self.designSpecimenLayerRefractiveIndex=designSpecimenLayerRefractiveIndex
self.actualImmersionOilRefractiveIndex=actualImmersionOilRefractiveIndex
self.actualSpecimenLayerRefractiveIndex=actualSpecimenLayerRefractiveIndex
self.actualPointSourceDepthInSpecimenLayer=actualPointSourceDepthInSpecimenLayer
def CreatePsf(self, command, psfCommandName, xSize, ySize, zSize):
module=command.run(psfCommandName, True, \
"xSize", xSize, \
"ySize", ySize, \
"zSize", zSize, \
"fftType", "none", \
"scopeType", self.scopeType, \
"psfModel", self.psfModel, \
"xySpace", self.xySpace, \
"zSpace", self.zSpace, \
"emissionWavelength", self.emissionWavelength, \
"numericalAperture", self.numericalAperture, \
"designImmersionOilRefractiveIndex", self.designImmersionOilRefractiveIndex, \
"designSpecimenLayerRefractiveIndex", self.designSpecimenLayerRefractiveIndex, \
"actualImmersionOilRefractiveIndex", self.actualImmersionOilRefractiveIndex, \
"actualSpecimenLayerRefractiveIndex", self.actualSpecimenLayerRefractiveIndex, \
"actualPointSourceDepthInSpecimenLayer", self.actualPointSourceDepthInSpecimenLayer, \
"centerPsf", True).get()
return module.getOutputs().get("output");
|
bnorthan/projects
|
Scripts/Jython/Psfs/PSFModel.py
|
Python
|
gpl-2.0
| 1,817 | 0.053935 |
# -*- coding:utf-8 -*-
'''Created on 2014-8-7 @author: Administrator '''
from sys import path as sys_path
if not '..' in sys_path:sys_path.append("..")  # allow importing modules from the parent directory
import web
# Earlier approach: the project was split into several files, and the classes are imported here
from login.login import (index,login,loginCheck,In,reset,register,find_password)
from blog.blog import (write_blog,upload,blog_content_manage,Get,Del,blog_single_self,blog_single_other)
from admin.admin import (adminAdd,adminGet,adminDel,adminEdit)
# Later parts use web.py sub-applications
from wiki.view import wiki_app
from download.download import download_app
from meeting.meeting import meeting_app
from bbs.bbs import bbs_app
urls=(
'/','index',
'/login','login',
'/loginCheck','loginCheck',
'/(admin|user_blog)','In',
'/reset/(.*)','reset',
'/register','register',
'/find_password','find_password',
'/write_blog','write_blog',
'/upload','upload',
'/blog_content_manage','blog_content_manage',
'/Get/classification','Get',
'/Del/blog_content','Del',
'/blog_single_self','blog_single_self',
'/blog_single_other','blog_single_other',
'/admin/add','adminAdd',
'/admin/get','adminGet',
'/admin/del','adminDel',
'/admin/edit','adminEdit',
'/wiki',wiki_app,
'/download',download_app,
'/meeting',meeting_app,
'/bbs',bbs_app,
)
app = web.application(urls ,locals())
# session is only available when web.config.debug = False; the workaround below handles debug mode. In production, web.config.debug is normally set to False
web.config.debug = True
if web.config.get('_session') is None:
session = web.session.Session(app,web.session.DiskStore('sessions'))
web.config._session=session
else:
session=web.config._session
# The following hook makes the session available across multiple files
def session_hook():web.ctx.session=session
app.add_processor(web.loadhook(session_hook))
if __name__=='__main__':
app.run()
|
lqe/EconomyCompensation
|
code.py
|
Python
|
gpl-3.0
| 2,064 | 0.036126 |
#!/usr/bin/python
import participantCollection
participantCollection = participantCollection.ParticipantCollection()
numberStillIn = participantCollection.sizeOfParticipantsWhoAreStillIn()
initialNumber = participantCollection.size()
print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%**."
print "These participants have checked in at least once in the last 15 days:"
print ""
for participant in participantCollection.participantsWhoAreStillInAndHaveCheckedIn():
print "/u/" + participant.name
print ""
print "These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:"
print ""
for participant in participantCollection.participantsWhoAreStillInAndHaveNotCheckedIn():
print "/u/" + participant.name + " ~"
print ""
|
foobarbazblarg/stayclean
|
stayclean-2015-january/display-on-last-day-before-participants-must-check-in.py
|
Python
|
mit
| 1,018 | 0.006876 |
from __future__ import absolute_import, division, print_function, unicode_literals
# Statsd client. Loosely based on the version by Steve Ivy <steveivy@gmail.com>
import logging
import random
import socket
import time
from contextlib import contextmanager
log = logging.getLogger(__name__)
class StatsD(object):
def __init__(self, host='localhost', port=8125, enabled=True, prefix=''):
self.addr = None
self.enabled = enabled
if enabled:
self.set_address(host, port)
self.prefix = prefix
self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def set_address(self, host, port=8125):
try:
self.addr = (socket.gethostbyname(host), port)
except socket.gaierror:
self.addr = None
self.enabled = False
@contextmanager
def timed(self, stat, sample_rate=1):
log.debug('Entering timed context for %r' % (stat,))
start = time.time()
yield
duration = int((time.time() - start) * 1000)
log.debug('Exiting timed context for %r' % (stat,))
self.timing(stat, duration, sample_rate)
def timing(self, stats, time, sample_rate=1):
"""
Log timing information
"""
unit = 'ms'
log.debug('%r took %s %s' % (stats, time, unit))
self.update_stats(stats, "%s|%s" % (time, unit), sample_rate)
def increment(self, stats, sample_rate=1):
"""
Increments one or more stats counters
"""
self.update_stats(stats, 1, sample_rate)
def decrement(self, stats, sample_rate=1):
"""
Decrements one or more stats counters
"""
self.update_stats(stats, -1, sample_rate)
def update_stats(self, stats, delta=1, sampleRate=1):
"""
Updates one or more stats counters by arbitrary amounts
"""
if not self.enabled or self.addr is None:
return
if type(stats) is not list:
stats = [stats]
data = {}
for stat in stats:
data["%s%s" % (self.prefix, stat)] = "%s|c" % delta
self.send(data, sampleRate)
def send(self, data, sample_rate):
sampled_data = {}
if sample_rate < 1:
if random.random() <= sample_rate:
for stat, value in data.items():
sampled_data[stat] = "%s|@%s" % (value, sample_rate)
else:
sampled_data = data
try:
for stat, value in sampled_data.items():
self.udp_sock.sendto("%s:%s" % (stat, value), self.addr)
except Exception as e:
log.exception('Failed to send data to the server: %r', e)
if __name__ == '__main__':
sd = StatsD()
for i in range(1, 100):
sd.increment('test')
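    # Illustrative sketch (not part of the original example): the timed() context
    # manager can wrap a block to emit a timing metric, e.g.
    #     with sd.timed('example.block'):
    #         time.sleep(0.01)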
|
smarkets/smk_python_sdk
|
smarkets/statsd.py
|
Python
|
mit
| 2,824 | 0.000354 |
__version__ = '0.0.1'
|
jgrillo/zoonomia
|
zoonomia/_version.py
|
Python
|
mit
| 22 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import locale
locale.setlocale(locale.LC_ALL, 'pt_PT.UTF8')
import logging
logging.basicConfig(level=logging.DEBUG)
import os
import string
import datetime
from pprint import pprint
from html2text import add_item
### Constants ###
MP_STATEMENT = 'deputado_intervencao'
PM_STATEMENT = 'pm_intervencao'
MINISTER_STATEMENT = 'ministro_intervencao'
STATE_SECRETARY_STATEMENT = 'secestado_intervencao'
PRESIDENT_STATEMENT = 'presidente'
SECRETARY_STATEMENT = 'secretario'
STATEMENT = 'intervencao'
MP_INTERRUPTION = 'deputado_interrupcao'
INTERRUPTION = 'vozes_aparte'
APPLAUSE = 'aplauso'
PROTEST = 'protesto'
LAUGHTER = 'riso'
NOTE = 'nota'
PAUSE = 'pausa'
VOTE = 'voto'
TIME = 'hora'
OTHER = 'outro'
INTRO = 'intro'
SUMMARY = 'sumario'
ROLLCALL = 'chamada'
ROLLCALL_PRESENT = 'chamada_presentes'
ROLLCALL_ABSENT = 'chamada_ausentes'
ROLLCALL_LATE = 'chamada_atrasados'
ROLLCALL_MISSION = 'chamada_missao'
SECTION = 'seccao'
END = 'fim'
MP_CONT = 'continuacao'
MP_ASIDE = 'deputado_aparte'
OTHER_START = 'outro_inicio'
OTHER_CONT = 'outro_cont'
PRESIDENT_ASIDE = 'presidente_aparte'
PRESIDENT_NEWSPEAKER = 'presidente_temapalavra'
PRESIDENT_ROLLCALL = 'presidente_chamada'
PRESIDENT_OPEN = 'presidente_aberta'
PRESIDENT_CLOSE = 'presidente_encerrada'
PRESIDENT_SUSPEND = 'presidente_suspensa'
PRESIDENT_REOPEN = 'presidente_reaberta'
PRESIDENT_SWITCH = 'presidente_troca'
ORPHAN = 'orfao'
### Regexes ###
re_hora = (re.compile(ur'^Eram (?P<hours>[0-9]{1,2}) horas e (?P<minutes>[0-9]{1,2}) minutos.$', re.UNICODE), '')
# Separator between speaker and statement (typos and inconsistencies force us
# to be fairly permissive when defining the expression)
# Note that this regex is unicode, because of the hyphens (Python will not
# match them otherwise)
re_separador = (re.compile(ur'\:?[ \.]?[\–\–\—\-] ', re.LOCALE|re.UNICODE), ': -')
re_separador_estrito = (re.compile(ur'\: [\–\–\—\-] ', re.LOCALE|re.UNICODE), ': - ')
re_mauseparador = (re.compile(ur'(?P<prevchar>[\)a-z])\:[ \.][\–\–\—\-](?P<firstword>[\w\»])', re.LOCALE|re.UNICODE), '\g<prevchar>: - \g<firstword>')
re_titulo = (re.compile(ur'((O Sr[\.:])|(A Sr\.?(ª)?))(?!( Deputad))'), '')
re_ministro = (re.compile(ur'^Ministr'), '')
re_secestado = (re.compile(ur'^Secretári[oa] de Estado.*:'), '')
re_palavra = (re.compile(ur'(concedo(-lhe)?|dou|tem|vou dar)(,?[\w ^,]+,?)? a palavra|(faça favor(?! de terminar))', re.UNICODE|re.IGNORECASE), '')
re_concluir = (re.compile(ur'(tempo esgotou-se)|(esgotou-se o( seu)? tempo)|((tem (mesmo )?de|queira) (terminar|concluir))|((ultrapassou|esgotou|terminou)[\w ,]* o( seu)? tempo)|((peço|solicito)(-lhe)? que (termine|conclua))|(atenção ao tempo)|(remate o seu pensamento)|(atenção para o tempo de que dispõe)|(peço desculpa mas quero inform)|(deixem ouvir o orador)|(faça favor de prosseguir a sua)|(favor de (concluir|terminar))|(poder prosseguir a sua intervenção)|(faça( o)? favor de continuar|(queira[\w ,]* concluir))', re.UNICODE|re.IGNORECASE), '')
re_president = (re.compile(ur'O Sr\.?|A Sr\.?ª? Presidente\ ?(?P<nome>\([\w ]+\))?(?P<sep>\:[ \.]?[\–\–\—\-])'), '')
re_cont = (re.compile(ur'O Orador|A Oradora(?P<sep>\:[ \.]?[\–\–\—\-\-])', re.UNICODE), '')
re_voto = (re.compile(ur'^Submetid[oa]s? à votação', re.UNICODE), '')
re_interv = (re.compile(ur'^(?P<titulo>O Sr[\.:]?|A Sr[\.:]?(ª)?)\ (?P<nome>[\w ,’-]+)\ ?(?P<partido>\([\w -]+\))?(?P<sep>\:?[ \.]?[\–\–\—\-]? ?)', re.UNICODE), '')
#re_interv_semquebra = (re.compile(ur'(?P<titulo>O Sr\.?|A Sr(\.)?(ª)?)\ (?P<nome>[\w ,’-]{1,30})\ ?(?P<partido>\([\w -]+\))?(?P<sep>\:[ \.]?[\–\–\—\-])', re.UNICODE), '')
re_interv_semquebra = (re.compile(ur'(?P<titulo>O Sr\.?|A Sr(\.)?(ª)?)\ (?P<nome>[\w ,’-]{1,50})\ ?(?P<partido>\([\w -]+\))?(?P<sep>\:[ \.]?[\–\–\—\-] )', re.UNICODE), '')
re_interv_simples = (re.compile(ur'^(?P<nome>[\w ,’-]+)\ ?(?P<partido>\([\w -]+\))?\ ?(?P<sep>\:?[ \.]?[\–\–\—\-]? )', re.UNICODE), '')
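# Illustrative example (not in the original file): for a paragraph such as
#     O Sr. João Silva (PSD): - Muito obrigado.
# re_interv would roughly capture titulo='O Sr.', nome='João Silva',
# partido='(PSD)' and sep=': - ', which parse_statement then normalises
# back into a 'speaker: - text' line.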
def change_type(p, newtype):
stype, text = p.split(']', 1)
text = text.strip()
return '[%s] %s' % (newtype, text)
def get_type(p):
stype, text = p.split(']', 1)
stype = stype.strip('[] ')
return stype
def get_speaker(p):
stype, text = p.split(']', 1)
text = text.strip()
try:
speaker, text = re.split(re_separador[0], text, 1)
except ValueError:
print 'Não consegui determinar o speaker. Vai vazio.'
print ' ' + p
print
raise
return ''
return speaker
def get_text(p):
stype, text = p.split(']', 1)
text = text.strip()
if ': -' in text:
speaker, text = text.split(':', 1)
else:
pass
return text
def strip_type(p):
stype, text = p.split(']', 1)
text = text.strip()
return text
def check_and_split_para(p):
    # check whether the statement regex matches
    # if not, return None
    # if so, split and regroup
pass
class RaspadarTagger:
def __init__(self):
self.contents = []
        # cache mapping government posts to names
self.gov_posts = {}
def parse_txt_file(self, txtfile):
buffer = open(txtfile, 'r').read()
paragraphs = buffer.split('\n\n')
for para in paragraphs:
self.parse_paragraph(para)
self.process_orphans()
def parse_paragraph(self, p):
p = p.decode('utf-8')
p = p.strip(' \n')
if not p:
return
        # FIXME: monkeypatch here: the separators need to be fixed up. This should
        # happen in html2txt, but there is no time to re-process the HTMLs right
        # now. Sorry.
if re.search(re_mauseparador[0], p):
p = re.sub(re_mauseparador[0], re_mauseparador[1], p, count=1)
        # does this match the statement regex?
        if re.search(re_interv[0], p):
            # it is a statement
            self.parse_statement(p)
        elif re.search(re_cont[0], p):
            # it is the continuation of a statement ("O Orador")
            self.parse_statement(p, cont=True)
        else:
            # it is something else
            self.parse_other(p)
def parse_statement(self, p, cont=False):
if cont:
p = re.sub(re_cont[0], re_cont[1], p, 1)
p = re.sub(re_separador[0], '', p, 1).strip()
stype = MP_CONT
else:
if not (re.match(re_titulo[0], p) and re.search(re_separador[0], p)):
stype = ORPHAN
else:
speaker, text = re.split(re_separador[0], p, 1)
speaker = re.sub(re_titulo[0], re_titulo[1], speaker, count=1).strip(u'ª \n')
p = speaker + ': - ' + text.strip()
if p.startswith('Presidente'):
return self.parse_president(p)
elif re.match(re_ministro[0], p) or re.match(re_secestado[0], p):
return self.parse_government(p)
elif p.startswith(u'Secretári') and not 'Estado' in re.split(re_separador[0], p)[0]:
return self.parse_secretary(p)
elif re.match(re_interv_simples[0], p):
stype = MP_STATEMENT
else:
stype = STATEMENT
output = '[%s] %s' % (stype, p)
        # find statements where there is no line break
        # TODO: this check should be done in parse_paragraph
if re.search(re_interv_semquebra[0], output):
#print '### Encontrei uma condensada: ###'
result = re.split(re_interv_semquebra[0], output)
new_p = ''
for part in result[1:]:
if part and part != u'ª':
if part.endswith(('.', u'ª')):
new_p += part + ' '
else:
new_p += part
            # store the first part
# print 'Primeira: ' + result[0]
# print 'Segunda: ' + new_p
# print
self.contents.append(result[0])
            # process the second part
try:
self.parse_statement(new_p)
except RuntimeError:
                # infinite loop, show what is going on
print 'Loop infinito ao processar uma linha com mais do que uma intervenção.'
print u'1ª: ' + result[0]
print u'2ª: ' + new_p
raise
return
self.contents.append(output)
return output
def parse_president(self, p):
        # extract the president's name, if it is there
m = re.search(re_president[0], p)
if m:
name = m.group('nome')
            # strip the whole name and separator
p = re.sub(re_president[0], re_president[1], p, 1).strip()
if u'encerrada a sessão' in p or u'encerrada a reunião' in p:
stype = PRESIDENT_CLOSE
elif (u'quórum' in p or 'quorum' in p) and 'aberta' in p:
stype = PRESIDENT_OPEN
p = p.replace('Presidente: - ', '', 1)
elif re.search(re_palavra[0], p):
stype = PRESIDENT_NEWSPEAKER
elif re.search(re_concluir[0], p):
stype = PRESIDENT_ASIDE
else:
stype = PRESIDENT_STATEMENT
output = '[%s] %s' % (stype, p)
self.contents.append(output)
return output
def parse_government(self, p):
        # The line arrives as
        # Ministra da Saúde (Alice Nenhures): - Acho muito bem!
        # and we want
        # Alice Nenhures (Ministra da Saúde): - Acho muito bem!
        # Where only the post is given, we want to re-insert the name,
        # using the self.gov_posts dictionary as a cache
result = re.split(re_separador[0], p, 1)
if len(result) == 2:
speaker, text = result
elif len(result) == 1:
if re.search(re_separador[0], result[0]):
                # wording errors, e.g. 'Ministro do Trabalho: Blá blá blá'
                speaker, text = re.split(re_separador[0], result[0], 1)
else:
print ' Result too short'
print result
else:
print ' Result too long'
print result
if '(' in speaker:
post, speaker = speaker.strip(')').split('(')
self.gov_posts[post.strip()] = speaker.strip()
# pprint(self.gov_posts)
# print
else:
            # look up the name associated with the post, recorded the first time
            # this person spoke
# print p
post = speaker.strip()
speaker = self.gov_posts[speaker.strip()].strip()
# pprint(self.gov_posts)
# print
if post.startswith('Primeiro'):
stype = PM_STATEMENT
elif post.startswith('Ministr'):
stype = MINISTER_STATEMENT
elif post.startswith('Secret'):
stype = STATE_SECRETARY_STATEMENT
else:
print post
assert False
output = '[%s] %s (%s): - %s' % (stype, speaker, post.strip(), text.strip())
self.contents.append(output)
return output
def parse_secretary(self, p):
#if 'Estado' in p[:p.find(':')]:
# return self.parse_government(p)
#else:
output = '[%s] %s' % (SECRETARY_STATEMENT, p)
self.contents.append(output)
return output
def parse_other(self, p):
        # TODO: it would be neat to have a (regex -> type) tuple and a small loop
        # here instead of all this spaghetti
if p.startswith('Aplauso'):
output = '[%s] %s' % (APPLAUSE, p)
stype = APPLAUSE
elif p.startswith('Protesto'):
stype = PROTEST
elif p.startswith('Riso'):
stype = LAUGHTER
elif p.startswith(('Vozes', 'Uma voz d')):
stype = INTERRUPTION
elif p.startswith((u'SUMÁR', u'S U M Á R')):
stype = SUMMARY
elif re.match(re_hora[0], p):
stype = TIME
elif p.endswith('ORDEM DO DIA'):
stype = SECTION
elif p.startswith(('Entretanto, assumiu', 'Entretanto, reassumiu', 'Neste momento, assumiu', 'Neste momento, reassumiu')):
stype = PRESIDENT_SWITCH
elif re.match(re_voto[0], p):
stype = VOTE
elif p == 'Pausa.':
stype = PAUSE
elif (u'Série' in p and u'Número' in p) or \
u'LEGISLATURA' in p or u'LEGISLATIVA' in p or \
u'PLENÁRIA' in p or u'COMISSÃO' in p or u'DIÁRIO' in p:
stype = INTRO
elif p.startswith((u'Deputados presentes à', u'Srs. Deputados presentes à', u'Estavam presentes os seguintes')):
stype = ROLLCALL_PRESENT
elif (u'Deputados não presentes' in p and u'missões internacionais' in p):
stype = ROLLCALL_MISSION
elif u'Deputados que faltaram' in p or u'Faltaram à' in p:
stype = ROLLCALL_ABSENT
elif u'Deputados que entraram' in p:
stype = ROLLCALL_LATE
elif u'A DIVISÃO' in p:
stype = END
else:
stype = ORPHAN
output = '[%s] %s' % (stype, p)
self.contents.append(output)
return output
def process_orphans(self):
orphan_types = tuple(['[%s]' % t for t in (SUMMARY, ROLLCALL_PRESENT, ROLLCALL_ABSENT, ROLLCALL_LATE, ROLLCALL_MISSION)])
for p in self.contents:
if p.startswith(orphan_types):
stype, remainder = p.split(' ', 1)
stype = stype.strip('[]')
                # subsequent orphans get the same type
try:
new_p = self.contents[self.contents.index(p) + 1]
except IndexError:
break
if not new_p.startswith('[%s]' % ORPHAN):
continue
self.contents[self.contents.index(p) + 1] = change_type(new_p, stype)
new_contents = []
while 1:
p = self.contents[0]
if len(self.contents) > 1:
if get_type(self.contents[1]) == MP_CONT and get_type(p) == PRESIDENT_STATEMENT:
                    # a statement by the president followed by a continuation means
                    # that the president's one is an aside
p = change_type(p, PRESIDENT_ASIDE)
if new_contents:
prev_p = new_contents[-1]
if get_type(p) == ORPHAN and get_type(prev_p) == MP_STATEMENT:
p = change_type(p, MP_CONT)
elif get_type(p) == ORPHAN and get_type(prev_p) == VOTE:
p = change_type(p, PRESIDENT_STATEMENT)
MAIN_INTERVENTION_TYPES = (MP_STATEMENT, MINISTER_STATEMENT, PM_STATEMENT, STATE_SECRETARY_STATEMENT, SECRETARY_STATEMENT)
if get_type(p) == PRESIDENT_NEWSPEAKER:
next_p = self.contents[1]
if get_type(next_p) in (LAUGHTER, APPLAUSE, PAUSE, INTERRUPTION):
for c in self.contents[1:]:
if get_type(c) in MAIN_INTERVENTION_TYPES:
next_p = self.contents[self.contents.index(c)]
# next_p = self.contents[2]
elif not get_type(next_p) in MAIN_INTERVENTION_TYPES:
print 'A seguir a tem a palavra, não tenho o que esperava. É melhor conferir.'
print 'Tem a palavra: %s' % p
print 'Seguinte: : %s' % next_p
raise TypeError
speaker = get_speaker(next_p)
lookahead = 2
while 1:
try:
next_p = self.contents[lookahead]
except IndexError:
print 'Cheguei ao fim enquanto procurava órfãos de uma intervenção. Intervenção inicial:'
print p
break
if get_type(next_p) in (PRESIDENT_STATEMENT, PRESIDENT_NEWSPEAKER, PRESIDENT_CLOSE):
                        # statement by the president = the previous one has ended
break
elif get_type(next_p) in (MP_STATEMENT, MINISTER_STATEMENT, PM_STATEMENT, STATE_SECRETARY_STATEMENT):
if not speaker == get_speaker(next_p):
                            # another MP speaks during a statement = aside
self.contents[lookahead] = change_type(next_p, MP_ASIDE)
else:
                            # same MP = continuation
self.contents[lookahead] = change_type(next_p, MP_CONT)
elif get_type(next_p) in (MP_CONT, ORPHAN):
                        # continuation: add the original speaker
                        # we also assume that orphans in the middle of statements are continuations
text = get_text(next_p)
new_text = '[%s] %s: - %s' % (MP_CONT, speaker, text)
self.contents[lookahead] = new_text
lookahead += 1
new_contents.append(self.contents.pop(0))
if len(self.contents) == 0:
break
self.contents = list(new_contents)
for p in self.contents:
if get_type(p) == ORPHAN:
if u'encerrada a sessão' in p:
self.contents[self.contents.index(p)] = change_type(p, PRESIDENT_CLOSE)
def get_txt(self):
output = ''
for s in self.contents:
if not type(s) in (str, unicode):
add_item(output, s)
elif s:
output += s.strip('\n ').encode('utf-8') + '\n\n'
return output
def parse_txt_file(infile, outfile):
f = infile
tagger = RaspadarTagger()
try:
tagger.parse_txt_file(infile)
except:
logging.error('Tagging error in file %s.' % (f))
raise
outfile = open(outfile, 'w')
outfile.write(tagger.get_txt())
outfile.close()
if __name__ == '__main__':
import sys
from ConfigParser import SafeConfigParser
    # parse the config file
parser = SafeConfigParser()
parser.read('raspadar.conf')
default_input = os.path.abspath(parser.get('txt2taggedtext', 'sourcedir'))
default_output = os.path.abspath(parser.get('txt2taggedtext', 'targetdir'))
    # parse the command-line options
import optparse
print 'ARGV :', sys.argv[1:]
parser = optparse.OptionParser()
parser.add_option('-i', '--input',
dest="input",
default="",
help='Input file or directory'
)
parser.add_option('-o', '--output',
dest="output",
default="",
help='Output file or directory'
)
parser.add_option('-v', '--verbose',
dest="verbose",
default=False,
action="store_true",
help='Print verbose information',
)
parser.add_option('-p', '--picky',
dest="picky",
default=False,
action="store_true",
help='Stop batch processing in case an error is found',
)
parser.add_option('-f', '--force',
dest="force",
default=False,
action="store_true",
help='Process file even if the output file already exists',
)
options, remainder = parser.parse_args()
input = options.input
verbose = options.verbose
output = options.output
picky = options.picky
    # check that the input exists
    if not os.path.exists(input):
        print 'Input not found: ' + str(input)
    # input and output must both be files or both be directories
if (os.path.isfile(input) and os.path.isdir(output)) or (os.path.isdir(input) and os.path.isfile(output)):
print 'Input and output must be both filenames or both directory names.'
print 'Input - File: %s. Dir: %s.' % (str(os.path.isfile(input)), str(os.path.isdir(input)))
print 'Output - File: %s. Dir: %s.' % (str(os.path.isfile(input)), str(os.path.isdir(input)))
sys.exit()
    # input given but no output? save as txt in the same dir
    if not output:
        if not input:
            # neither input nor output? use the config defaults
            input = default_input
            output = default_output
            if verbose:
                print 'Input: %s' % input
                print 'Output: %s' % output
else:
if os.path.isfile(input):
            # input is a file, output is the same name with a .tag.txt extension
            output = input.replace('.txt', '.tag.txt')
        else:
            # input is a directory, the output goes there too
            output = input
    # only output given? assume the input is the default
if output and not input:
input = default_input
if os.path.isdir(input):
successes = []
failures = []
import glob
inputs = {}
for f in glob.glob(os.path.join(input, '*.txt')):
if output:
inputs[f] = os.path.join(output, os.path.basename(f).replace('.txt', '.tag.txt'))
else:
            # no output -> save the txt in the same dir
inputs[f] = os.path.join(input, os.path.basename(f).replace('.txt', '.tag.txt'))
for i in inputs:
if os.path.exists(inputs[i]) and not options.force:
print 'File %s exists, not overwriting.' % inputs[i]
continue
if verbose: print ' %s -> %s' % (i, inputs[i])
try:
parse_txt_file(i, inputs[i])
successes.append(i)
except:
outfile = open(inputs[i], 'w')
outfile.close()
if picky:
sys.exit()
failures.append(i)
if verbose:
totalcount = len(successes) + len(failures)
print '----------------------------------'
        print 'Successfully parsed: %d files (%d%%)' % (len(successes), int(100 * len(successes) / totalcount))
        print 'Failed: %d files (%d%%)' % (len(failures), int(100 * len(failures) / totalcount))
print '----------------------------------'
else:
parse_txt_file(input, output)
|
transparenciahackday/dar-scripts
|
scripts/raspadar/txt2taggedtext.py
|
Python
|
gpl-3.0
| 23,048 | 0.008054 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .engine import RouterEngine
from .address import Address
__all__ = ["RouterEngine", "Address"]
|
irinabov/debian-qpid-dispatch
|
python/qpid_dispatch_internal/router/__init__.py
|
Python
|
apache-2.0
| 1,041 | 0 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
from transformer_layers import TransformerBlock
import tensorflow as tf
def mean_pool(x, m):
m = tf.cast(m, tf.float32)
x = tf.multiply(x, tf.expand_dims(m, 2))
x = tf.reduce_sum(x, 1) / tf.reduce_sum(m, 1, keepdims=True)
return x
class RNN(object):
def __init__(self, num_units):
self.rnn_fw = tf.keras.layers.CuDNNLSTM(units=num_units // 2,
return_sequences=True,
go_backwards=False,
name='rnn_fw')
self.rnn_bw = tf.keras.layers.CuDNNLSTM(units=num_units // 2,
return_sequences=True,
go_backwards=False,
name='rnn_bw')
def forward(self, inputs, masks):
def rnn_fn(x, m, rnn):
x = rnn(x)
# x = tf.reduce_max(x, 1) # max pooling
# x = mean_pool(x, m) # mean pooling
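      # take the output at the last non-padded timestep of each sequence (length - 1)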
indices = tf.reduce_sum(m, 1, keepdims=True) - 1
x = tf.gather_nd(x, tf.cast(indices, tf.int32), batch_dims=1)
return x
lengths = tf.reduce_sum(tf.cast(masks, tf.int32), axis=1)
masks = tf.cast(masks, tf.float32)
inputs = tf.multiply(inputs, tf.expand_dims(masks, 2))
inputs_bw = tf.reverse_sequence(inputs, lengths, 1, 0)
outputs_fw = rnn_fn(inputs, masks, self.rnn_fw)
outputs_bw = rnn_fn(inputs_bw, masks, self.rnn_bw)
outputs = tf.concat([outputs_fw, outputs_bw], axis=1)
return outputs
class Transformer(object):
def __init__(self, num_units):
self.hidden = tf.keras.layers.Dense(num_units)
self.transformer = TransformerBlock(num_units, num_units * 4,
num_layer=2)
def forward(self, inputs, masks):
masks = tf.cast(masks, tf.float32)
inputs = tf.multiply(inputs, tf.expand_dims(masks, 2))
inputs = self.hidden(inputs)
return self.transformer.forward(inputs, masks)
class DAN(object):
def __init__(self, num_units):
self.hidden = tf.keras.layers.Dense(num_units, activation=tf.nn.relu)
def forward(self, inputs, masks):
masks = tf.cast(masks, tf.float32)
inputs = tf.multiply(inputs, tf.expand_dims(masks, 2))
inputs = tf.reduce_sum(inputs, 1) / tf.reduce_sum(masks, 1, keepdims=True)
return self.hidden(inputs)
def get_text_encoder(encoder_type='rnn'):
if encoder_type == 'rnn':
return RNN
elif encoder_type == 'trans':
return Transformer
elif encoder_type == 'dan':
return DAN
else:
raise ValueError(encoder_type)
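# Illustrative sketch (not in the original file): the factory returns a class,
# which is then instantiated with the hidden size, e.g.
#     encoder = get_text_encoder('rnn')(256)
#     # feats = encoder.forward(embedded_tokens, masks)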
class ImageTextEmbedding(object):
def __init__(self, word_emb, encoder_dim, encoder_type='rnn', norm=True,
drop_p=0.25, contrastive=False, margin=0.5, num_neg_sample=10,
lambda1=1.0, lambda2=1.0, internal=True):
self.word_emb = tf.Variable(tf.convert_to_tensor(word_emb), name="emb",
trainable=True)
self.text_encoder = get_text_encoder(encoder_type)(encoder_dim)
self.text_feat_proj = tf.keras.layers.Dense(encoder_dim)
self.img_feat_proj = tf.keras.layers.Dense(encoder_dim)
self.dropout = tf.keras.layers.Dropout(drop_p)
self.margin = margin
self.num_neg_sample = num_neg_sample
self.lambda1 = lambda1
self.lambda2 = lambda2
self.contrastive = contrastive
self.internal = internal
self.norm = norm # normalize the embedding
self.text_outputs = []
def forward_img(self, img_inputs, training):
x = self.img_feat_proj(img_inputs)
if self.norm:
x = tf.nn.l2_normalize(x, axis=-1)
return self.dropout(x, training=training)
def forward_text(self, text_inputs, text_masks, training):
if len(text_inputs.get_shape()) == 2:
x = tf.nn.embedding_lookup(self.word_emb, text_inputs)
else:
x = text_inputs
self.text_outputs.append(mean_pool(x, text_masks))
x = self.text_encoder.forward(x, text_masks)
self.text_outputs.append(x)
x = self.text_feat_proj(x)
if self.norm:
x = tf.nn.l2_normalize(x, axis=-1)
return self.dropout(x, training=training)
def encode(self, img_inputs, text_inputs, text_masks, training):
img_feats = self.forward_img(img_inputs, training)
text_feats = self.forward_text(text_inputs, text_masks, training)
return img_feats, text_feats
def forward(self, img_inputs, text_inputs, text_masks, labels, training):
img_feats, text_feats = self.encode(img_inputs, text_inputs,
text_masks, training)
if self.contrastive:
loss = contrastive_loss(img_feats, text_feats, self.margin)
sent_im_dist = - similarity_fn(text_feats, img_feats)
elif self.internal:
loss = internal_loss(img_feats, text_feats, labels)
sent_im_dist = - similarity_fn(text_feats, img_feats)
else:
loss = embedding_loss(img_feats, text_feats, labels, self.margin,
self.num_neg_sample, self.lambda1, self.lambda2)
sent_im_dist = pdist(text_feats, img_feats)
rec = recall_k(sent_im_dist, labels, ks=[1, 5, 10])
return loss, rec
def order_sim(im, s):
im = tf.expand_dims(im, 0)
s = tf.expand_dims(s, 1)
diff = tf.clip_by_value(s - im, 0, 1e6)
dist = tf.sqrt(tf.reduce_sum(diff ** 2, 2))
scores = -tf.transpose(dist)
return scores
def similarity_fn(im, s, order=False):
if order:
return order_sim(im, s)
return tf.matmul(im, s, transpose_b=True)
def internal_loss(im_embeds, sent_embeds, im_labels):
logits_s = tf.matmul(sent_embeds, im_embeds, transpose_b=True)
cost_s = tf.nn.softmax_cross_entropy_with_logits_v2(im_labels, logits_s)
logits_im = tf.matmul(im_embeds, sent_embeds, transpose_b=True)
cost_im = tf.nn.softmax_cross_entropy_with_logits_v2(tf.transpose(im_labels),
logits_im)
return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
def contrastive_loss(im_embeds, sent_embeds, margin, max_violation=True):
""" modified https://github.com/fartashf/vsepp/blob/master/model.py#L260 """
scores = similarity_fn(im_embeds, sent_embeds)
batch_size = tf.shape(im_embeds)[0]
diagonal = tf.diag_part(scores)
d1 = tf.reshape(diagonal, (batch_size, 1))
d2 = tf.reshape(diagonal, (1, batch_size))
cost_s = tf.clip_by_value(margin + scores - d1, 0, 1e6)
cost_im = tf.clip_by_value(margin + scores - d2, 0, 1e6)
zeros = tf.zeros(batch_size)
cost_s = tf.matrix_set_diag(cost_s, zeros)
cost_im = tf.matrix_set_diag(cost_im, zeros)
if max_violation:
cost_s = tf.reduce_max(cost_s, 1)
cost_im = tf.reduce_max(cost_im, 0)
return tf.reduce_sum(cost_s) + tf.reduce_sum(cost_im)
def pdist(x1, x2):
"""
x1: Tensor of shape (h1, w)
x2: Tensor of shape (h2, w)
Return pairwise distance for each row vector in x1, x2 as
a Tensor of shape (h1, h2)
"""
x1_square = tf.reshape(tf.reduce_sum(x1 * x1, axis=1), [-1, 1])
x2_square = tf.reshape(tf.reduce_sum(x2 * x2, axis=1), [1, -1])
return tf.sqrt(x1_square - 2 * tf.matmul(x1, tf.transpose(x2)) + x2_square +
1e-4)
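# pdist expands ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 row-wise; the small 1e-4
# term guards the sqrt against zero or slightly negative arguments caused by
# floating-point cancellation.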
def embedding_loss(im_embeds, sent_embeds, im_labels, margin, num_neg_sample,
lambda1, lambda2):
"""
im_embeds: (b, 512) image embedding tensors
sent_embeds: (sample_size * b, 512) sentence embedding tensors
        where the order of sentences corresponds to the order of images and
        sentences for the same image are next to each other
im_labels: (sample_size * b, b) boolean tensor, where (i, j) entry is
True if and only if sentence[i], image[j] is a positive pair
"""
im_labels = tf.cast(im_labels, tf.bool)
# compute embedding loss
num_img = tf.shape(im_embeds)[0]
num_sent = tf.shape(sent_embeds)[0]
sent_im_ratio = tf.div(num_sent, num_img)
sent_im_dist = pdist(sent_embeds, im_embeds)
# image loss: sentence, positive image, and negative image
pos_pair_dist = tf.reshape(tf.boolean_mask(sent_im_dist, im_labels),
[num_sent, 1])
neg_pair_dist = tf.reshape(tf.boolean_mask(sent_im_dist, ~im_labels),
[num_sent, -1])
im_loss = tf.clip_by_value(margin + pos_pair_dist - neg_pair_dist,
0, 1e6)
im_loss = tf.reduce_mean(tf.nn.top_k(im_loss, k=num_neg_sample)[0])
# sentence loss: image, positive sentence, and negative sentence
neg_pair_dist = tf.reshape(
tf.boolean_mask(tf.transpose(sent_im_dist), ~tf.transpose(im_labels)),
[num_img, -1])
neg_pair_dist = tf.reshape(
tf.tile(neg_pair_dist, [1, sent_im_ratio]), [num_sent, -1])
sent_loss = tf.clip_by_value(margin + pos_pair_dist - neg_pair_dist, 0, 1e6)
sent_loss = tf.reduce_mean(tf.nn.top_k(sent_loss, k=num_neg_sample)[0])
# sentence only loss (neighborhood-preserving constraints)
sent_sent_dist = pdist(sent_embeds, sent_embeds)
sent_sent_mask = tf.reshape(tf.tile(tf.transpose(im_labels),
[1, sent_im_ratio]),
[num_sent, num_sent])
pos_pair_dist = tf.reshape(tf.boolean_mask(sent_sent_dist, sent_sent_mask),
[-1, sent_im_ratio])
pos_pair_dist = tf.reduce_max(pos_pair_dist, axis=1, keep_dims=True)
neg_pair_dist = tf.reshape(tf.boolean_mask(sent_sent_dist, ~sent_sent_mask),
[num_sent, -1])
sent_only_loss = tf.clip_by_value(margin + pos_pair_dist - neg_pair_dist,
0, 1e6)
sent_only_loss = tf.reduce_mean(tf.nn.top_k(sent_only_loss,
k=num_neg_sample)[0])
loss = im_loss * lambda1 + sent_loss + sent_only_loss * lambda2
return loss
def recall_k(sent_im_dist, im_labels, ks=(1, 5, 10)):
"""
Compute recall at given ks.
"""
im_labels = tf.cast(im_labels, tf.bool)
def retrieval_recall(dist, labels, k):
# Use negative distance to find the index of
# the smallest k elements in each row.
pred = tf.nn.top_k(-dist, k=k)[1]
# Create a boolean mask for each column (k value) in pred,
# s.t. mask[i][j] is 1 iff pred[i][k] = j.
pred_k_mask = lambda topk_idx: tf.one_hot(topk_idx, tf.shape(labels)[1],
on_value=True, off_value=False,
dtype=tf.bool)
# Create a boolean mask for the predicted indices
# by taking logical or of boolean masks for each column,
# s.t. mask[i][j] is 1 iff j is in pred[i].
pred_mask = tf.reduce_any(tf.map_fn(
pred_k_mask, tf.transpose(pred), dtype=tf.bool), axis=0)
# pred_mask = tf.map_fn(create_pred_mask, pred)
# Entry (i, j) is matched iff pred_mask[i][j] and labels[i][j] are 1.
matched = tf.cast(tf.logical_and(pred_mask, labels), dtype=tf.float32)
return tf.reduce_mean(tf.reduce_max(matched, axis=1))
img_sent_recall = [retrieval_recall(tf.transpose(sent_im_dist),
tf.transpose(im_labels), k) for k in ks]
sent_img_recall = [retrieval_recall(sent_im_dist, im_labels, k) for k in ks]
return img_sent_recall + sent_img_recall
|
google/embedding-tests
|
thought/image_text_model.py
|
Python
|
apache-2.0
| 11,824 | 0.008373 |
'''
Created on 11/02/2010
@author: henry@henryjenkins.name
'''
class webInterface(object):
'''
    Writes a static HTML report (a table of per-IP packet and byte counts)
    for the iptables accounting data.
'''
writeFile = None
def __init__(self):
pass
def __openFile(self, fileName):
self.writeFile = open(fileName, 'w')
def closeFile(self):
self.writeFile.close()
def writeHeader(self, title = 'Henry\'s iptables data accounting'):
self.writeFile.write('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n')
self.writeFile.write('<HTML>\n')
self.writeFile.write('<HEAD>\n')
self.writeFile.write('<TITLE>' + title + '</TITLE>\n')
self.writeFile.write('</HEAD>\n')
def writeBody(self, users):
self.writeFile.write('<BODY>\n')
self.writeFile.write('<table border="1">')
self.writeFile.write('<tr>')
self.writeFile.write('<td>IP address</td>')
self.writeFile.write('<td>On-peak Packets</td>')
self.writeFile.write('<td>On-peak Data</td>')
self.writeFile.write('<td>Off-peak Packets</td>')
self.writeFile.write('<td>Off-peak Data</td>')
self.writeFile.write('<td>Total Packets</td>')
self.writeFile.write('<td>Total Data</td>')
self.writeFile.write('</tr>')
usersList = users.keys()
usersList.sort()
for user in usersList:
self.writeFile.write('<tr>')
self.writeFile.write('<td>' + user + '</td>')
self.writeFile.write('<td>' + str(users[user].getUpData('pkts', date=None, peak='other')) + '</td>')
self.writeFile.write('<td>' + self.humanizeNumber(users[user].getUpData('data', date=None, peak='other')) + '</td>')
self.writeFile.write('<td>' + str(users[user].getDownData('pkts', date=None, peak='other')) + '</td>')
self.writeFile.write('<td>' + self.humanizeNumber(users[user].getDownData('data', date=None, peak='other')) + '</td>')
self.writeFile.write('<td>' + str(users[user].getData(type = 'pkts')) + '</td>')
self.writeFile.write('<td>' + self.humanizeNumber(users[user].getData(type = 'data')) + '</td>')
self.writeFile.write('</tr>')
self.writeFile.write('</table>')
self.writeFile.write('</BODY>\n')
def writeFooter(self):
self.writeFile.write('</HTML>\n')
def humanizeNumber(self,number = 0):
if number > 1024*1024*1024:
number = number/(1024*1024*1024)
number = str(number) + ' GBytes'
elif number > 1024*1024:
number = number/(1024*1024)
number = str(number) + ' MBytes'
elif number > 1024:
number = number/1024
number = str(number) + ' KBytes'
else:
number = str(number) + ' Bytes'
return number
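    # e.g. humanizeNumber(1536) returns '1 KBytes' (integer division, so the
    # fractional part is truncated)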
def outputIndex(self,file,users = None):
self.__openFile(file)
self.writeHeader()
self.writeBody(users)
self.writeFooter()
self.closeFile()
|
steakunderscore/Bandwidth-Monitoring
|
src/webInterface.py
|
Python
|
gpl-3.0
| 3,059 | 0.010134 |
import platform
# -----------------------------------------------------------------------------
# Guess platform we are running on
def current_platform():
machine = platform.machine()
if machine == 'armv5tejl':
return 'ev3'
elif machine == 'armv6l':
return 'brickpi'
else:
return 'unsupported'
if current_platform() == 'brickpi':
from .brickpi import *
else:
# Import ev3 by default, so that it is covered by documentation.
from .ev3 import *
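# Illustrative note (not in the original file): importing this module re-exports
# the bindings for the detected platform, e.g. `from ev3dev.auto import *` and
# then use the motor/sensor classes exported by the ev3 or brickpi module.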
|
ddemidov/ev3dev-lang-python-1
|
ev3dev/auto.py
|
Python
|
mit
| 497 | 0.004024 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import requests
import sys
import tempfile
import zipfile
from . import Command
class Deploy(Command):
"""Deploy a module on an Odoo instance"""
def __init__(self):
super(Deploy, self).__init__()
self.session = requests.session()
def deploy_module(self, module_path, url, login, password, db='', force=False):
url = url.rstrip('/')
csrf_token = self.authenticate(url, login, password, db)
module_file = self.zip_module(module_path)
try:
return self.upload_module(url, module_file, force=force, csrf_token=csrf_token)
finally:
os.remove(module_file)
def upload_module(self, server, module_file, force=False, csrf_token=None):
print("Uploading module file...")
url = server + '/base_import_module/upload'
post_data = {'force': '1' if force else ''}
if csrf_token: post_data['csrf_token'] = csrf_token
with open(module_file, 'rb') as f:
res = self.session.post(url, files={'mod_file': f}, data=post_data)
res.raise_for_status()
return res.text
def authenticate(self, server, login, password, db=''):
print("Authenticating on server '%s' ..." % server)
# Fixate session with a given db if any
self.session.get(server + '/web/login', params=dict(db=db))
args = dict(login=login, password=password, db=db)
res = self.session.post(server + '/base_import_module/login', args)
if res.status_code == 404:
raise Exception("The server '%s' does not have the 'base_import_module' installed." % server)
elif res.status_code != 200:
raise Exception(res.text)
return res.headers.get('x-csrf-token')
def zip_module(self, path):
path = os.path.abspath(path)
if not os.path.isdir(path):
raise Exception("Could not find module directory '%s'" % path)
container, module_name = os.path.split(path)
temp = tempfile.mktemp(suffix='.zip')
try:
print("Zipping module directory...")
with zipfile.ZipFile(temp, 'w') as zfile:
for root, dirs, files in os.walk(path):
for file in files:
file_path = os.path.join(root, file)
zfile.write(file_path, file_path.split(container).pop())
return temp
except Exception:
os.remove(temp)
raise
def run(self, cmdargs):
parser = argparse.ArgumentParser(
prog="%s deploy" % sys.argv[0].split(os.path.sep)[-1],
description=self.__doc__
)
parser.add_argument('path', help="Path of the module to deploy")
parser.add_argument('url', nargs='?', help='Url of the server (default=http://localhost:8069)', default="http://localhost:8069")
parser.add_argument('--db', dest='db', help='Database to use if server does not use db-filter.')
parser.add_argument('--login', dest='login', default="admin", help='Login (default=admin)')
parser.add_argument('--password', dest='password', default="admin", help='Password (default=admin)')
parser.add_argument('--verify-ssl', action='store_true', help='Verify SSL certificate')
parser.add_argument('--force', action='store_true', help='Force init even if module is already installed. (will update `noupdate="1"` records)')
if not cmdargs:
sys.exit(parser.print_help())
args = parser.parse_args(args=cmdargs)
if not args.verify_ssl:
self.session.verify = False
try:
if not args.url.startswith(('http://', 'https://')):
args.url = 'https://%s' % args.url
result = self.deploy_module(args.path, args.url, args.login, args.password, args.db, force=args.force)
print(result)
except Exception, e:
sys.exit("ERROR: %s" % e)
|
vileopratama/vitech
|
src/openerp/cli/deploy.py
|
Python
|
mit
| 4,038 | 0.003219 |
#!/usr/bin/python
#
# Urwid html fragment output wrapper for "screen shots"
# Copyright (C) 2004-2007 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
HTML PRE-based UI implementation
"""
from urwid import util
from urwid.main_loop import ExitMainLoop
from urwid.display_common import AttrSpec, BaseScreen
# replace control characters with ?'s
_trans_table = "?" * 32 + "".join([chr(x) for x in range(32, 256)])
_default_foreground = 'black'
_default_background = 'light gray'
class HtmlGeneratorSimulationError(Exception):
pass
class HtmlGenerator(BaseScreen):
# class variables
fragments = []
sizes = []
keys = []
started = True
def __init__(self):
super(HtmlGenerator, self).__init__()
self.colors = 16
self.bright_is_bold = False # ignored
self.has_underline = True # ignored
self.register_palette_entry(None,
_default_foreground, _default_background)
def set_terminal_properties(self, colors=None, bright_is_bold=None,
has_underline=None):
if colors is None:
colors = self.colors
if bright_is_bold is None:
bright_is_bold = self.bright_is_bold
if has_underline is None:
has_underline = self.has_underline
self.colors = colors
self.bright_is_bold = bright_is_bold
self.has_underline = has_underline
def set_mouse_tracking(self, enable=True):
"""Not yet implemented"""
pass
def start(self):
pass
def stop(self):
pass
def set_input_timeouts(self, *args):
pass
def reset_default_terminal_palette(self, *args):
pass
def run_wrapper(self,fn):
"""Call fn."""
return fn()
def draw_screen(self, (cols, rows), r ):
"""Create an html fragment from the render object.
Append it to HtmlGenerator.fragments list.
"""
# collect output in l
l = []
assert r.rows() == rows
if r.cursor is not None:
cx, cy = r.cursor
else:
cx = cy = None
y = -1
for row in r.content():
y += 1
col = 0
for a, cs, run in row:
run = run.translate(_trans_table)
if isinstance(a, AttrSpec):
aspec = a
else:
aspec = self._palette[a][
{1: 1, 16: 0, 88:2, 256:3}[self.colors]]
if y == cy and col <= cx:
run_width = util.calc_width(run, 0,
len(run))
if col+run_width > cx:
l.append(html_span(run,
aspec, cx-col))
else:
l.append(html_span(run, aspec))
col += run_width
else:
l.append(html_span(run, aspec))
l.append("\n")
# add the fragment to the list
self.fragments.append( "<pre>%s</pre>" % "".join(l) )
def clear(self):
"""
Force the screen to be completely repainted on the next
call to draw_screen().
(does nothing for html_fragment)
"""
pass
def get_cols_rows(self):
"""Return the next screen size in HtmlGenerator.sizes."""
if not self.sizes:
raise HtmlGeneratorSimulationError, "Ran out of screen sizes to return!"
return self.sizes.pop(0)
def get_input(self, raw_keys=False):
"""Return the next list of keypresses in HtmlGenerator.keys."""
if not self.keys:
raise ExitMainLoop()
if raw_keys:
return (self.keys.pop(0), [])
return self.keys.pop(0)
_default_aspec = AttrSpec(_default_foreground, _default_background)
(_d_fg_r, _d_fg_g, _d_fg_b, _d_bg_r, _d_bg_g, _d_bg_b) = (
_default_aspec.get_rgb_values())
def html_span(s, aspec, cursor = -1):
fg_r, fg_g, fg_b, bg_r, bg_g, bg_b = aspec.get_rgb_values()
# use real colours instead of default fg/bg
if fg_r is None:
fg_r, fg_g, fg_b = _d_fg_r, _d_fg_g, _d_fg_b
if bg_r is None:
bg_r, bg_g, bg_b = _d_bg_r, _d_bg_g, _d_bg_b
html_fg = "#%02x%02x%02x" % (fg_r, fg_g, fg_b)
html_bg = "#%02x%02x%02x" % (bg_r, bg_g, bg_b)
if aspec.standout:
html_fg, html_bg = html_bg, html_fg
extra = (";text-decoration:underline" * aspec.underline +
";font-weight:bold" * aspec.bold)
def html_span(fg, bg, s):
if not s: return ""
return ('<span style="color:%s;'
'background:%s%s">%s</span>' %
(fg, bg, extra, html_escape(s)))
if cursor >= 0:
c_off, _ign = util.calc_text_pos(s, 0, len(s), cursor)
c2_off = util.move_next_char(s, c_off, len(s))
return (html_span(html_fg, html_bg, s[:c_off]) +
html_span(html_bg, html_fg, s[c_off:c2_off]) +
html_span(html_fg, html_bg, s[c2_off:]))
else:
return html_span(html_fg, html_bg, s)
def html_escape(text):
"""Escape text so that it will be displayed safely within HTML"""
text = text.replace('&','&')
text = text.replace('<','<')
text = text.replace('>','>')
return text
def screenshot_init( sizes, keys ):
"""
Replace curses_display.Screen and raw_display.Screen class with
HtmlGenerator.
Call this function before executing an application that uses
curses_display.Screen to have that code use HtmlGenerator instead.
sizes -- list of ( columns, rows ) tuples to be returned by each call
to HtmlGenerator.get_cols_rows()
keys -- list of lists of keys to be returned by each call to
HtmlGenerator.get_input()
Lists of keys may include "window resize" to force the application to
call get_cols_rows and read a new screen size.
For example, the following call will prepare an application to:
1. start in 80x25 with its first call to get_cols_rows()
2. take a screenshot when it calls draw_screen(..)
3. simulate 5 "down" keys from get_input()
4. take a screenshot when it calls draw_screen(..)
5. simulate keys "a", "b", "c" and a "window resize"
6. resize to 20x10 on its second call to get_cols_rows()
7. take a screenshot when it calls draw_screen(..)
8. simulate a "Q" keypress to quit the application
screenshot_init( [ (80,25), (20,10) ],
[ ["down"]*5, ["a","b","c","window resize"], ["Q"] ] )
"""
try:
for (row,col) in sizes:
assert type(row) == int
assert row>0 and col>0
except (AssertionError, ValueError):
raise Exception, "sizes must be in the form [ (col1,row1), (col2,row2), ...]"
try:
for l in keys:
assert type(l) == list
for k in l:
assert type(k) == str
except (AssertionError, ValueError):
raise Exception, "keys must be in the form [ [keyA1, keyA2, ..], [keyB1, ..], ...]"
import curses_display
curses_display.Screen = HtmlGenerator
import raw_display
raw_display.Screen = HtmlGenerator
HtmlGenerator.sizes = sizes
HtmlGenerator.keys = keys
def screenshot_collect():
"""Return screenshots as a list of HTML fragments."""
l = HtmlGenerator.fragments
HtmlGenerator.fragments = []
return l
|
suizokukan/urwid
|
urwid/html_fragment.py
|
Python
|
lgpl-2.1
| 8,175 | 0.005505 |
from django.contrib import admin
# from models import Agent, ReCa, Accomodation, Beach, Activity, Contact
#
# @admin.register(ReCa, Activity)
# class VenueAdmin(admin.ModelAdmin):
# list_display = ('name', 'internal_rating', 'ready', 'description',)
# list_filter = ('ready', 'internal_rating',)
# search_fields = ['name', 'description', 'address']
# ordering = ['id']
# save_on_top = True
#
#
# @admin.register(Accomodation)
# class AccomodAdmin(VenueAdmin):
# list_display = ('name', 'stars', 'ready', 'description',)
# list_filter = ('ready', 'stars',)
#
#
# @admin.register(Beach)
# class BeachAdmin(admin.ModelAdmin):
# list_display = ('name', 'type', 'description',)
# list_filter = ('name',)
#
#
# admin.site.register(Agent)
# admin.site.register(Contact)
#
#
|
popara/jonny-api
|
matching/admin.py
|
Python
|
mit
| 786 | 0.001272 |
"""Implementation of JSONEncoder
"""
from __future__ import absolute_import
import re
from operator import itemgetter
# Do not import Decimal directly to avoid reload issues
import decimal
from .compat import u, unichr, binary_type, string_types, integer_types, PY3
def _import_speedups():
try:
from . import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
#ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
# This is required because u() will mangle the string and ur'' isn't valid
# python3 syntax
ESCAPE = re.compile(u'[\\x00-\\x1f\\\\"\\b\\f\\n\\r\\t\u2028\u2029]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
for i in [0x2028, 0x2029]:
ESCAPE_DCT.setdefault(unichr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s, _PY3=PY3, _q=u('"')):
"""Return a JSON representation of a Python string
"""
if _PY3:
if isinstance(s, binary_type):
s = s.decode('utf-8')
else:
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return _q + ESCAPE.sub(replace, s) + _q
def py_encode_basestring_ascii(s, _PY3=PY3):
"""Return an ASCII-only JSON representation of a Python string
"""
if _PY3:
if isinstance(s, binary_type):
s = s.decode('utf-8')
else:
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
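# For example, encode_basestring_ascii(u'caf\xe9') yields the ASCII string
# '"caf\u00e9"' (a literal backslash-u escape), while encode_basestring keeps
# the accented character and only adds the surrounding quotes.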
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict, namedtuple | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation
    (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None,
use_decimal=True, namedtuple_as_object=True,
tuple_as_array=True, bigint_as_string=False,
item_sort_key=None, for_json=False, ignore_nan=False,
int_as_string_bitcount=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, separators should be an (item_separator, key_separator)
tuple. The default is (', ', ': ') if *indent* is ``None`` and
(',', ': ') otherwise. To get the most compact JSON representation,
you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
If use_decimal is true (not the default), ``decimal.Decimal`` will
be supported directly by the encoder. For the inverse, decode JSON
with ``parse_float=decimal.Decimal``.
If namedtuple_as_object is true (the default), objects with
``_asdict()`` methods will be encoded as JSON objects.
If tuple_as_array is true (the default), tuple (and subclasses) will
be encoded as JSON arrays.
If bigint_as_string is true (not the default), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise.
If int_as_string_bitcount is a positive number (n), then int of size
greater than or equal to 2**n or lower than or equal to -2**n will be
encoded as strings.
If specified, item_sort_key is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key.
If for_json is true (not the default), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
If *ignore_nan* is true (default: ``False``), then out of range
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized
as ``null`` in compliance with the ECMA-262 specification. If true,
this will override *allow_nan*.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.use_decimal = use_decimal
self.namedtuple_as_object = namedtuple_as_object
self.tuple_as_array = tuple_as_array
self.bigint_as_string = bigint_as_string
self.item_sort_key = item_sort_key
self.for_json = for_json
self.ignore_nan = ignore_nan
self.int_as_string_bitcount = int_as_string_bitcount
if indent is not None and not isinstance(indent, string_types):
indent = indent * ' '
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
elif indent is not None:
self.item_separator = ','
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from simplejson import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, binary_type):
_encoding = self.encoding
if (_encoding is not None and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if isinstance(o, string_types):
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, binary_type):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, ignore_nan=self.ignore_nan,
_repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on
# the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
if type(o) != float:
# See #118, do not trust custom str/repr
o = float(o)
return _repr(o)
if ignore_nan:
text = 'null'
elif not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
key_memo = {}
int_as_string_bitcount = (
53 if self.bigint_as_string else self.int_as_string_bitcount)
if (_one_shot and c_make_encoder is not None
and self.indent is None):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
self.namedtuple_as_object, self.tuple_as_array,
int_as_string_bitcount,
self.item_sort_key, self.encoding, self.for_json,
self.ignore_nan, decimal.Decimal)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot, self.use_decimal,
self.namedtuple_as_object, self.tuple_as_array,
int_as_string_bitcount,
self.item_sort_key, self.encoding, self.for_json,
Decimal=decimal.Decimal)
try:
return _iterencode(o, 0)
finally:
key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
"""An encoder that produces JSON safe to embed in HTML.
To embed JSON content in, say, a script tag on a web page, the
characters &, < and > should be escaped. They cannot be escaped
    with the usual entities (e.g. &amp;) because they are not expanded
within <script> tags.
"""
def encode(self, o):
# Override JSONEncoder.encode because it has hacks for
# performance that make things more complicated.
chunks = self.iterencode(o, True)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
for chunk in chunks:
chunk = chunk.replace('&', '\\u0026')
chunk = chunk.replace('<', '\\u003c')
chunk = chunk.replace('>', '\\u003e')
yield chunk
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
_use_decimal, _namedtuple_as_object, _tuple_as_array,
_int_as_string_bitcount, _item_sort_key,
_encoding,_for_json,
## HACK: hand-optimized bytecode; turn globals into locals
_PY3=PY3,
ValueError=ValueError,
string_types=string_types,
Decimal=None,
dict=dict,
float=float,
id=id,
integer_types=integer_types,
isinstance=isinstance,
list=list,
str=str,
tuple=tuple,
):
if _use_decimal and Decimal is None:
Decimal = decimal.Decimal
if _item_sort_key and not callable(_item_sort_key):
raise TypeError("item_sort_key must be None or callable")
elif _sort_keys and not _item_sort_key:
_item_sort_key = itemgetter(0)
if (_int_as_string_bitcount is not None and
(_int_as_string_bitcount <= 0 or
not isinstance(_int_as_string_bitcount, integer_types))):
raise TypeError("int_as_string_bitcount must be a positive integer")
def _encode_int(value):
skip_quoting = (
_int_as_string_bitcount is None
or
_int_as_string_bitcount < 1
)
if type(value) not in integer_types:
# See #118, do not trust custom str/repr
value = int(value)
if (
skip_quoting or
(-1 << _int_as_string_bitcount)
< value <
(1 << _int_as_string_bitcount)
):
return str(value)
return '"' + str(value) + '"'
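    # Example: with bigint_as_string (bitcount 53), 2**53 - 1 is still emitted
    # as a bare number, while 2**53 becomes "9007199254740992"; integers that
    # JavaScript cannot represent exactly are therefore passed as strings.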
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if (isinstance(value, string_types) or
(_PY3 and isinstance(value, binary_type))):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, integer_types):
yield buf + _encode_int(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield buf + str(value)
else:
yield buf
for_json = _for_json and getattr(value, 'for_json', None)
if for_json and callable(for_json):
chunks = _iterencode(for_json(), _current_indent_level)
elif isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
else:
_asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
if _asdict and callable(_asdict):
chunks = _iterencode_dict(_asdict(),
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _stringify_key(key):
if isinstance(key, string_types): # pragma: no cover
pass
elif isinstance(key, binary_type):
key = key.decode(_encoding)
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, integer_types):
if type(key) not in integer_types:
# See #118, do not trust custom str/repr
key = int(key)
key = str(key)
elif _use_decimal and isinstance(key, Decimal):
key = str(key)
elif _skipkeys:
key = None
else:
raise TypeError("key " + repr(key) + " is not a string")
return key
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _PY3:
iteritems = dct.items()
else:
iteritems = dct.iteritems()
if _item_sort_key:
items = []
for k, v in dct.items():
if not isinstance(k, string_types):
k = _stringify_key(k)
if k is None:
continue
items.append((k, v))
items.sort(key=_item_sort_key)
else:
items = iteritems
for key, value in items:
if not (_item_sort_key or isinstance(key, string_types)):
key = _stringify_key(key)
if key is None:
# _skipkeys must be True
continue
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if (isinstance(value, string_types) or
(_PY3 and isinstance(value, binary_type))):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, integer_types):
yield _encode_int(value)
elif isinstance(value, float):
yield _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield str(value)
else:
for_json = _for_json and getattr(value, 'for_json', None)
if for_json and callable(for_json):
chunks = _iterencode(for_json(), _current_indent_level)
elif isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
else:
_asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
if _asdict and callable(_asdict):
chunks = _iterencode_dict(_asdict(),
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if (isinstance(o, string_types) or
(_PY3 and isinstance(o, binary_type))):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, integer_types):
yield _encode_int(o)
elif isinstance(o, float):
yield _floatstr(o)
else:
for_json = _for_json and getattr(o, 'for_json', None)
if for_json and callable(for_json):
for chunk in _iterencode(for_json(), _current_indent_level):
yield chunk
elif isinstance(o, list):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
else:
_asdict = _namedtuple_as_object and getattr(o, '_asdict', None)
if _asdict and callable(_asdict):
for chunk in _iterencode_dict(_asdict(),
_current_indent_level):
yield chunk
elif (_tuple_as_array and isinstance(o, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
elif _use_decimal and isinstance(o, Decimal):
yield str(o)
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
|
samvarankashyap/googlecloudutility2
|
lib/simplejson/simplejson/encoder.py
|
Python
|
apache-2.0
| 25,806 | 0.001589 |
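A minimal usage sketch for the encoder module above, assuming it is importable as part of the simplejson package; exact output whitespace can vary between versions:

import simplejson

# Pretty-printed, key-sorted encoding.
enc = simplejson.JSONEncoder(sort_keys=True, indent='  ')
print(enc.encode({'b': 1, 'a': [1.5, None, True]}))

# iterencode() yields the document in chunks, useful for streaming large objects.
for chunk in simplejson.JSONEncoder().iterencode({'big': list(range(5))}):
    pass  # e.g. write each chunk to a socket or file

# JSONEncoderForHTML escapes &, < and > so the result can sit inside <script> tags.
print(simplejson.JSONEncoderForHTML().encode({'snippet': '<b>a & b</b>'}))
# -> {"snippet": "\u003cb\u003ea \u0026 b\u003c/b\u003e"}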
from copy import deepcopy
import numpy as np
from menpo.image.base import Image
from skimage.transform import pyramid_gaussian
class BooleanImage(Image):
r"""
A mask image made from binary pixels. The region of the image that is
left exposed by the mask is referred to as the 'masked region'. The
set of 'masked' pixels is those pixels corresponding to a True value in
the mask.
Parameters
-----------
mask_data : (M, N, ..., L) ndarray
The binary mask data. Note that there is no channel axis - a 2D Mask
Image is built from just a 2D numpy array of mask_data.
        Automatically coerced into boolean values.
"""
def __init__(self, mask_data):
# Enforce boolean pixels, and add a channel dim
mask_data = np.asarray(mask_data[..., None], dtype=np.bool)
super(BooleanImage, self).__init__(mask_data)
@classmethod
def _init_with_channel(cls, image_data_with_channel):
r"""
Constructor that always requires the image has a
channel on the last axis. Only used by from_vector. By default,
just calls the constructor. Subclasses with constructors that don't
require channel axes need to overwrite this.
"""
return cls(image_data_with_channel[..., 0])
@classmethod
def blank(cls, shape, fill=True, round='ceil', **kwargs):
r"""
Returns a blank :class:`BooleanImage` of the requested shape
Parameters
----------
shape : tuple or list
The shape of the image. Any floating point values are rounded
according to the ``round`` kwarg.
fill : True or False, optional
The mask value to be set everywhere
Default: True (masked region is the whole image - meaning the whole
image is exposed)
round: {'ceil', 'floor', 'round'}
Rounding function to be applied to floating point shapes.
Default: 'ceil'
Returns
-------
blank_image : :class:`BooleanImage`
A blank mask of the requested size
"""
if round not in ['ceil', 'round', 'floor']:
raise ValueError('round must be either ceil, round or floor')
# Ensure that the '+' operator means concatenate tuples
shape = tuple(getattr(np, round)(shape))
if fill:
mask = np.ones(shape, dtype=np.bool)
else:
mask = np.zeros(shape, dtype=np.bool)
return cls(mask)
@property
def mask(self):
r"""
Returns the pixels of the mask with no channel axis. This is what
should be used to mask any k-dimensional image.
:type: (M, N, ..., L), np.bool ndarray
"""
return self.pixels[..., 0]
@property
def n_true(self):
r"""
The number of ``True`` values in the mask
:type: int
"""
return np.sum(self.pixels)
@property
def n_false(self):
r"""
The number of ``False`` values in the mask
:type: int
"""
return self.n_pixels - self.n_true
@property
def proportion_true(self):
r"""
The proportion of the mask which is ``True``
:type: double
"""
return (self.n_true * 1.0) / self.n_pixels
@property
def proportion_false(self):
r"""
The proportion of the mask which is ``False``
:type: double
"""
return (self.n_false * 1.0) / self.n_pixels
@property
def true_indices(self):
r"""
The indices of pixels that are true.
:type: (``n_dims``, ``n_true``) ndarray
"""
# Ignore the channel axis
return np.vstack(np.nonzero(self.pixels[..., 0])).T
@property
def false_indices(self):
r"""
The indices of pixels that are false.
:type: (``n_dims``, ``n_false``) ndarray
"""
# Ignore the channel axis
return np.vstack(np.nonzero(~self.pixels[..., 0])).T
@property
def all_indices(self):
r"""
Indices into all pixels of the mask, as consistent with
true_indices and false_indices
:type: (``n_dims``, ``n_pixels``) ndarray
"""
return np.indices(self.shape).reshape([self.n_dims, -1]).T
def __str__(self):
return ('{} {}D mask, {:.1%} '
'of which is True '.format(self._str_shape, self.n_dims,
self.proportion_true))
def from_vector(self, flattened):
r"""
Takes a flattened vector and returns a new
:class:`BooleanImage` formed by
reshaping the vector to the correct dimensions. Note that this is
rebuilding a boolean image **itself** from boolean values. The mask
is in no way interpreted in performing the operation, in contrast to
MaskedImage, where only the masked region is used in from_vector()
and as_vector(). Any image landmarks are transferred in the process.
Parameters
----------
flattened : (``n_pixels``,) np.bool ndarray
A flattened vector of all the pixels of a BooleanImage.
Returns
-------
image : :class:`BooleanImage`
New BooleanImage of same shape as this image
"""
mask = BooleanImage(flattened.reshape(self.shape))
mask.landmarks = self.landmarks
return mask
def invert(self):
r"""
Inverts the current mask in place, setting all True values to False,
and all False values to True.
"""
self.pixels = ~self.pixels
def inverted_copy(self):
r"""
Returns a copy of this Boolean image, which is inverted.
Returns
-------
        inverted_image: :class:`BooleanImage`
An inverted copy of this boolean image.
"""
inverse = deepcopy(self)
inverse.invert()
return inverse
def bounds_true(self, boundary=0, constrain_to_bounds=True):
r"""
Returns the minimum to maximum indices along all dimensions that the
mask includes which fully surround the True mask values. In the case
of a 2D Image for instance, the min and max define two corners of a
rectangle bounding the True pixel values.
Parameters
----------
boundary : int, optional
A number of pixels that should be added to the extent. A
negative value can be used to shrink the bounds in.
Default: 0
constrain_to_bounds: bool, optional
If True, the bounding extent is snapped to not go beyond
the edge of the image. If False, the bounds are left unchanged.
Default: True
Returns
--------
min_b : (D,) ndarray
The minimum extent of the True mask region with the boundary
along each dimension. If constrain_to_bounds was True,
is clipped to legal image bounds.
max_b : (D,) ndarray
The maximum extent of the True mask region with the boundary
along each dimension. If constrain_to_bounds was True,
is clipped to legal image bounds.
"""
mpi = self.true_indices
maxes = np.max(mpi, axis=0) + boundary
mins = np.min(mpi, axis=0) - boundary
if constrain_to_bounds:
maxes = self.constrain_points_to_bounds(maxes)
mins = self.constrain_points_to_bounds(mins)
return mins, maxes
def bounds_false(self, boundary=0, constrain_to_bounds=True):
r"""
Returns the minimum to maximum indices along all dimensions that the
mask includes which fully surround the False mask values. In the case
of a 2D Image for instance, the min and max define two corners of a
rectangle bounding the False pixel values.
Parameters
----------
        boundary : int, optional
A number of pixels that should be added to the extent. A
negative value can be used to shrink the bounds in.
Default: 0
constrain_to_bounds: bool, optional
If True, the bounding extent is snapped to not go beyond
the edge of the image. If False, the bounds are left unchanged.
Default: True
Returns
--------
min_b : (D,) ndarray
The minimum extent of the False mask region with the boundary
along each dimension. If constrain_to_bounds was True,
is clipped to legal image bounds.
max_b : (D,) ndarray
The maximum extent of the False mask region with the boundary
along each dimension. If constrain_to_bounds was True,
is clipped to legal image bounds.
"""
return self.inverted_copy().bounds_true(
boundary=boundary, constrain_to_bounds=constrain_to_bounds)
def warp_to(self, template_mask, transform, warp_landmarks=False,
interpolator='scipy', **kwargs):
r"""
Warps this BooleanImage into a different reference space.
Parameters
----------
template_mask : :class:`menpo.image.boolean.BooleanImage`
Defines the shape of the result, and what pixels should be
sampled.
transform : :class:`menpo.transform.base.Transform`
Transform **from the template space back to this image**.
Defines, for each True pixel location on the template, which pixel
location should be sampled from on this image.
warp_landmarks : bool, optional
If ``True``, warped_image will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
Default: ``False``
interpolator : 'scipy' or 'c', optional
The interpolator that should be used to perform the warp.
Default: 'scipy'
kwargs : dict
Passed through to the interpolator. See `menpo.interpolation`
for details.
Returns
-------
warped_image : type(self)
A copy of this image, warped.
"""
# enforce the order as 0, for this boolean data, then call super
manually_set_order = kwargs.get('order', 0)
if manually_set_order != 0:
raise ValueError(
"The order of the interpolation on a boolean image has to be "
"0 (attempted to set {})".format(manually_set_order))
kwargs['order'] = 0
return Image.warp_to(self, template_mask, transform,
warp_landmarks=warp_landmarks,
interpolator=interpolator, **kwargs)
def _build_warped_image(self, template_mask, sampled_pixel_values,
**kwargs):
r"""
Builds the warped image from the template mask and
sampled pixel values. Overridden for BooleanImage as we can't use
the usual from_vector_inplace method.
"""
warped_image = BooleanImage.blank(template_mask.shape)
# As we are a mask image, we have to implement the update a little
# more manually than other image classes.
warped_image.pixels[warped_image.mask] = sampled_pixel_values
return warped_image
|
jabooth/menpo-archive
|
menpo/image/boolean.py
|
Python
|
bsd-3-clause
| 11,453 | 0 |
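A short, hypothetical exercise of the BooleanImage API defined above (the import path follows this repository layout; values are illustrative):

import numpy as np
# from menpo.image.boolean import BooleanImage  # assumed module path

data = np.zeros((4, 6), dtype=np.bool)
data[1:3, 2:5] = True                    # expose a 2x3 region
mask = BooleanImage(data)

print(mask.n_true)                       # 6
print(round(mask.proportion_true, 2))    # 0.25
mins, maxes = mask.bounds_true()         # bounding box of the True region
flipped = mask.inverted_copy()           # True/False swapped, original untouched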
from buildbot.status import tests
from buildbot.process.step import SUCCESS, FAILURE, BuildStep
from buildbot.process.step_twisted import RunUnitTests
from zope.interface import implements
from twisted.python import log, failure
from twisted.spread import jelly
from twisted.pb.tokens import BananaError
from twisted.web.html import PRE
from twisted.web.error import NoResource
class Null: pass
ResultTypes = Null()
ResultTypeNames = ["SKIP",
"EXPECTED_FAILURE", "FAILURE", "ERROR",
"UNEXPECTED_SUCCESS", "SUCCESS"]
try:
from twisted.trial import reporter # introduced in Twisted-1.0.5
# extract the individual result types
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(reporter, name))
except ImportError:
from twisted.trial import unittest # Twisted-1.0.4 has them here
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(unittest, name))
log._keepErrors = 0
from twisted.trial import remote # for trial/jelly parsing
import StringIO
class OneJellyTest(tests.OneTest):
def html(self, request):
tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
t = request.postpath[0] # one of 'short', 'long' #, or 'html'
if isinstance(self.results, failure.Failure):
# it would be nice to remove unittest functions from the
# traceback like unittest.format_exception() does.
if t == 'short':
s = StringIO.StringIO()
self.results.printTraceback(s)
return pptpl % PRE(s.getvalue())
elif t == 'long':
s = StringIO.StringIO()
self.results.printDetailedTraceback(s)
return pptpl % PRE(s.getvalue())
#elif t == 'html':
# return tpl % formatFailure(self.results)
# ACK! source lines aren't stored in the Failure, rather,
# formatFailure pulls them (by filename) from the local
# disk. Feh. Even printTraceback() won't work. Double feh.
return NoResource("No such mode '%s'" % t)
if self.results == None:
return tpl % "No results to show: test probably passed."
# maybe results are plain text?
return pptpl % PRE(self.results)
class TwistedJellyTestResults(tests.TestResults):
oneTestClass = OneJellyTest
def describeOneTest(self, testname):
return "%s: %s\n" % (testname, self.tests[testname][0])
class RunUnitTestsJelly(RunUnitTests):
"""I run the unit tests with the --jelly option, which generates
machine-parseable results as the tests are run.
"""
trialMode = "--jelly"
implements(remote.IRemoteReporter)
ourtypes = { ResultTypes.SKIP: tests.SKIP,
ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
ResultTypes.FAILURE: tests.FAILURE,
ResultTypes.ERROR: tests.ERROR,
ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
ResultTypes.SUCCESS: tests.SUCCESS,
}
def __getstate__(self):
#d = RunUnitTests.__getstate__(self)
d = self.__dict__.copy()
# Banana subclasses are Ephemeral
if d.has_key("decoder"):
del d['decoder']
return d
def start(self):
self.decoder = remote.DecodeReport(self)
# don't accept anything unpleasant from the (untrusted) build slave
# The jellied stream may have Failures, but everything inside should
# be a string
security = jelly.SecurityOptions()
security.allowBasicTypes()
security.allowInstancesOf(failure.Failure)
self.decoder.taster = security
self.results = TwistedJellyTestResults()
RunUnitTests.start(self)
def logProgress(self, progress):
# XXX: track number of tests
BuildStep.logProgress(self, progress)
def addStdout(self, data):
if not self.decoder:
return
try:
self.decoder.dataReceived(data)
except BananaError:
self.decoder = None
log.msg("trial --jelly output unparseable, traceback follows")
log.deferr()
def remote_start(self, expectedTests, times=None):
print "remote_start", expectedTests
def remote_reportImportError(self, name, aFailure, times=None):
pass
def remote_reportStart(self, testClass, method, times=None):
print "reportStart", testClass, method
def remote_reportResults(self, testClass, method, resultType, results,
times=None):
print "reportResults", testClass, method, resultType
which = testClass + "." + method
self.results.addTest(which,
self.ourtypes.get(resultType, tests.UNKNOWN),
results)
def finished(self, rc):
# give self.results to our Build object
self.build.testsFinished(self.results)
total = self.results.countTests()
count = self.results.countFailures()
result = SUCCESS
if total == None:
result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
if count:
result = (FAILURE, ["%d tes%s%s" % (count,
(count == 1 and 't' or 'ts'),
self.rtext(' (%s)'))])
return self.stepComplete(result)
def finishStatus(self, result):
total = self.results.countTests()
count = self.results.countFailures()
color = "green"
text = []
if count == 0:
text.extend(["%d %s" % \
(total,
total == 1 and "test" or "tests"),
"passed"])
else:
text.append("tests")
text.append("%d %s" % \
(count,
count == 1 and "failure" or "failures"))
color = "red"
self.updateCurrentActivity(color=color, text=text)
self.addFileToCurrentActivity("tests", self.results)
#self.finishStatusSummary()
self.finishCurrentActivity()
|
gward/buildbot
|
buildbot/process/step_twisted2.py
|
Python
|
gpl-2.0
| 6,316 | 0.004275 |
#!/usr/bin/env python3
"""
Calculate minor reads coverage.
Minor-read ratio (MRR) is defined as the ratio of reads for the less
covered allele (reference or variant allele) over the total number of reads
covering the position at which the variant was called. (Only applied to heterozygous sites.)
@Author: wavefancy@gmail.com
Usage:
MinorReadsCoverage.py (-o| -f cutoff)
MinorReadsCoverage.py -h | --help | -v | --version
Notes:
1. Read vcf file from stdin.
    2. MRR is only calculated from heterozygous sites.
3. Output results to stdout.
Options:
-o Output MinorReadsCoverage statistics.
    -f cutoff     Filter out sites if MRR < cutoff.
-t tags Comma separated tag list.
-h --help Show this screen.
-v --version Show version.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
def ShowFormat():
'''Input File format example:'''
print('''
''');
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
#print(args)
# if(args['--format']):
# ShowFormat()
# sys.exit(-1)
from pysam import VariantFile
vcfMetaCols=9 #number of colummns for vcf meta information.
tags = ['GT','AD'] #GATK, AD: reads depth for ref and alt allele.
cutoff = 1
if args['-f']:
cutoff = float(args['-f'])
# def depth(geno):
# '''reformat a genotype record'''
# ss = geno.split(':')
# if ss[outGenoArrayIndex[0]][0] != '.' and :
#
#
# try:
# out = [ss[x] for x in outGenoArrayIndex]
# return out
# except IndexError:
# sys.stderr.write('ERROR: Index out of range. geno: %s, out index: %s\n'%(geno, str(outGenoArrayIndex)))
# sys.exit(-1)
outGenoArrayIndex = []
def setoutGenoArrayIndex(oldFormatTags):
outGenoArrayIndex.clear()
ss = oldFormatTags.upper().split(':')
for x in tags:
try:
y = ss.index(x)
outGenoArrayIndex.append(y)
except ValueError:
sys.stderr.write('ERROR: can not find tag: "%s", from input vcf FORMAT field.\n'%(x))
sys.exit(-1)
infile = VariantFile('-', 'r')
if args['-f']:
sys.stdout.write(str(infile.header))
if args['-o']:
sys.stdout.write('#CHROM\tPOS\tREF\tALT\tMRR\n')
for line in infile:
ss = str(line).strip().split()
setoutGenoArrayIndex(ss[8]) #Check format line by line.
ref = 0
alt = 0
for x in ss[vcfMetaCols:]:
#if not outGenoArrayIndex:
# setoutGenoArrayIndex(ss[8])
#out.append(reformat(x))
temp = x.split(':')
if temp[outGenoArrayIndex[0]][0] != '.' and temp[outGenoArrayIndex[0]][0] != temp[outGenoArrayIndex[0]][2]:
ad =[int(y) for y in temp[outGenoArrayIndex[1]].split(',')]
ref += ad[0]
alt += sum(ad[1:])
out = ss[:2] + ss[3:5]
mrc = 1
if ref == 0 and alt == 0:
mrc = 1
else:
minor = min(alt*1.0/(alt + ref), ref*1.0/(alt + ref))
mrc = minor
if args['-o']:
out = ss[:2] + ss[3:5] + ['%.4f'%(mrc)]
sys.stdout.write('%s\n'%('\t'.join(out)))
if args['-f']:
if mrc >= cutoff:
sys.stdout.write('%s'%(str(line)))
infile.close()
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
|
wavefancy/BIDMC-PYTHON
|
Exome/MinorReadsCoverage/MinorReadsCoverage.py
|
Python
|
mit
| 3,687 | 0.006509 |
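A worked example of the ratio computed above, with illustrative AD counts rather than real data: if the summed depths over heterozygous genotypes are 12 reference and 4 alternate reads, the minor-read ratio is min(4/16, 12/16) = 0.25, so the site passes -f 0.2 but is filtered by -f 0.3.

ref, alt = 12, 4                                            # summed AD counts
mrr = min(alt * 1.0 / (alt + ref), ref * 1.0 / (alt + ref))
assert abs(mrr - 0.25) < 1e-9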
# -*- coding: utf-8 -*-
# pylint: disable=no-init
"""
Django settings for home_web project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import re
from celery.schedules import crontab
from configurations import Configuration, values
class CeleryBrokerURLValue(values.Value):
"""
Value subclass that converts 'unix://' scheme to 'redis+socket://'.
"""
def to_python(self, value):
return re.sub(
r'^unix://', 'redis+socket://', super().to_python(value)
)
class Common(Configuration):
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5w$77!lmo&g)e5j6uhl4i2=nffnnj0y1y07(9@-f)@b7*g%+sd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
INTERNAL_IPS = [
'127.0.0.1',
]
# Application definition
INSTALLED_APPS = [
'core.apps.CoreConfig',
'heating.apps.HeatingConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'home_web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'home_web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = values.DatabaseURLValue(
'sqlite:///{}'.format(os.path.join(BASE_DIR, 'db.sqlite3'))
)
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'fr-FR'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
REDIS_URL = values.Value()
CELERY_BROKER_URL = CeleryBrokerURLValue(environ_name='REDIS_URL')
CELERY_TASK_ROUTES = {
'heating.tasks.*': {'queue': 'celery', 'delivery_mode': 'transient'},
}
CELERY_BEAT_SCHEDULE = {
'update-pilotwire-status': {
'task': 'heating.pilotwire.update_status',
'schedule': 60,
},
'set-pilotwire-modes': {
'task': 'heating.pilotwire.set_modes',
'schedule': crontab(minute='*/15'),
},
'weekly-clear-old-derogations': {
'task': 'heating.tasks.clearoldderogations',
'schedule': crontab(minute=0, hour=0, day_of_week='mon'),
'args': (7,),
},
}
CELERY_TIME_ZONE = TIME_ZONE
PILOTWIRE_IP = values.IPValue()
PILOTWIRE_PORT = values.IntegerValue()
class Dev(Common):
"""
The in-development settings and the default configuration
"""
INSTALLED_APPS = Common.INSTALLED_APPS + [
'debug_toolbar',
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
] + Common.MIDDLEWARE
class Test(Common):
"""
The testing settings
"""
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'pilotwire_testing_handler': {
'level': 'INFO',
'class': 'heating.log.PilotwireHandler',
'logLength': 5,
},
},
'loggers': {
'pilotwire_testing_logger': {
'handlers': ['pilotwire_testing_handler'],
'level': 'INFO',
},
},
}
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
ADMINS = [('Test', 'test@example.com')]
class Prod(Common):
"""
The in-production settings
"""
DEBUG = False
SECRET_KEY = values.SecretValue()
ADMINS = values.SingleNestedTupleValue()
ALLOWED_HOSTS = values.ListValue()
DATABASES = values.DatabaseURLValue()
EMAIL = values.EmailURLValue()
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
),
}
STATIC_ROOT = values.PathValue()
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'pilotwire_handler': {
'level': 'INFO',
'class': 'heating.log.PilotwireHandler',
'logLength': 500,
},
},
'loggers': {
'heating.pilotwire': {
'handlers': ['pilotwire_handler'],
'level': 'INFO',
},
},
}
# Authentication
AUTHENTICATION_BACKENDS = [
'core.auth.backends.SettingsBackend',
] + Common.AUTHENTICATION_BACKENDS # pylint: disable=no-member
ADMIN_LOGIN = values.Value()
ADMIN_PASSWORD = values.SecretValue()
|
cailloumajor/home-web
|
backend/home_web/settings.py
|
Python
|
gpl-3.0
| 7,131 | 0 |
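A hypothetical manage.py for this project, showing how django-configurations selects one of the classes above through the DJANGO_CONFIGURATION environment variable; by default, values.Value settings such as REDIS_URL or SECRET_KEY would then be read from DJANGO_-prefixed environment variables (DJANGO_REDIS_URL, DJANGO_SECRET_KEY):

#!/usr/bin/env python
import os
import sys

if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'home_web.settings')
    os.environ.setdefault('DJANGO_CONFIGURATION', 'Dev')  # or Test / Prod

    from configurations.management import execute_from_command_line
    execute_from_command_line(sys.argv)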
#!/usr/bin/env python
import sys
def gen_test(n):
print "CREATE TABLE t (a CHAR(%d));" % (n)
for v in [ 'hi', 'there', 'people' ]:
print "INSERT INTO t VALUES ('%s');" % (v)
for i in range(2,256):
if i < n:
print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
print "--error ER_UNSUPPORTED_EXTENSION"
else:
print "CREATE TABLE ti LIKE t;"
print "ALTER TABLE ti ENGINE=myisam;"
print "INSERT INTO ti SELECT * FROM t;"
print "ALTER TABLE ti CHANGE COLUMN a a CHAR(%d);" % (i)
print "ALTER TABLE t CHANGE COLUMN a a CHAR(%d);" % (i)
if i >= n:
print "let $diff_tables=test.t, test.ti;"
print "source include/diff_tables.inc;"
print "DROP TABLE ti;"
print "DROP TABLE t;"
def main():
print "source include/have_tokudb.inc;"
print "# this test is generated by change_char.py"
print "# test char expansion"
print "--disable_warnings"
print "DROP TABLE IF EXISTS t,ti;"
print "--enable_warnings"
print "SET SESSION DEFAULT_STORAGE_ENGINE=\"TokuDB\";"
print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;"
# all n takes too long to run, so here is a subset of tests
for n in [ 1, 2, 3, 4, 5, 6, 7, 8, 16, 31, 32, 63, 64, 127, 128, 254, 255 ]:
gen_test(n)
return 0
sys.exit(main())
|
tplavcic/percona-xtradb-cluster
|
mysql-test/suite/tokudb/t/change_column_char.py
|
Python
|
gpl-2.0
| 1,383 | 0.006508 |
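For reference, the generator above is meant to be piped into a test file; its output is deterministic, e.g. gen_test(1) begins with the statements below (the output filename is only a suggestion):

# python change_column_char.py > change_column_char.test
#
# CREATE TABLE t (a CHAR(1));
# INSERT INTO t VALUES ('hi');
# INSERT INTO t VALUES ('there');
# INSERT INTO t VALUES ('people');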
from django.contrib import admin
from devilry.devilry_dbcache.models import AssignmentGroupCachedData
@admin.register(AssignmentGroupCachedData)
class AssignmentGroupCachedDataAdmin(admin.ModelAdmin):
list_display = [
'id',
'group',
'first_feedbackset',
'last_feedbackset',
'last_published_feedbackset',
'new_attempt_count',
'public_total_comment_count',
'public_student_comment_count',
'public_examiner_comment_count',
'public_admin_comment_count',
'public_student_file_upload_count',
'examiner_count',
'candidate_count'
]
search_fields = [
'id',
'group__id',
'group__parentnode__id',
'group__parentnode__short_name',
'group__parentnode__long_name',
'group__parentnode__parentnode__id',
'group__parentnode__parentnode__short_name',
'group__parentnode__parentnode__long_name',
'group__parentnode__parentnode__parentnode__id',
'group__parentnode__parentnode__parentnode__short_name',
'group__parentnode__parentnode__parentnode__long_name',
'group__candidates__relatedstudent__candidate_id',
'group__candidates__relatedstudent__candidate_id',
'group__candidates__relatedstudent__user__shortname',
'group__candidates__relatedstudent__user__fullname',
'group__examiners__relatedexaminer__user__shortname',
'group__examiners__relatedexaminer__user__fullname',
]
|
devilry/devilry-django
|
devilry/devilry_dbcache/admin.py
|
Python
|
bsd-3-clause
| 1,516 | 0 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetTimestamp
# Returns the current date and time, expressed as seconds or milliseconds since January 1, 1970 (epoch time).
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetTimestamp(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetTimestamp Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetTimestamp, self).__init__(temboo_session, '/Library/Utilities/Dates/GetTimestamp')
def new_input_set(self):
return GetTimestampInputSet()
def _make_result_set(self, result, path):
return GetTimestampResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetTimestampChoreographyExecution(session, exec_id, path)
class GetTimestampInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetTimestamp
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AddDays(self, value):
"""
Set the value of the AddDays input for this Choreo. ((optional, integer) Adds the specified number of days to the specified date serial number. A negative number will subtract.)
"""
super(GetTimestampInputSet, self)._set_input('AddDays', value)
def set_AddHours(self, value):
"""
Set the value of the AddHours input for this Choreo. ((optional, integer) Adds the specified number of hours to the specified date serial number. A negative number will subtract.)
"""
super(GetTimestampInputSet, self)._set_input('AddHours', value)
def set_AddMinutes(self, value):
"""
Set the value of the AddMinutes input for this Choreo. ((optional, integer) Adds the specified number of minutes to the specified date serial number. A negative number will subtract.)
"""
super(GetTimestampInputSet, self)._set_input('AddMinutes', value)
def set_AddMonths(self, value):
"""
Set the value of the AddMonths input for this Choreo. ((optional, integer) Adds the specified number of months to the specified date serial number. A negative number will subtract.)
"""
super(GetTimestampInputSet, self)._set_input('AddMonths', value)
def set_AddSeconds(self, value):
"""
Set the value of the AddSeconds input for this Choreo. ((optional, integer) Adds the specified number of seconds to the specified date serial number. A negative number will subtract.)
"""
super(GetTimestampInputSet, self)._set_input('AddSeconds', value)
def set_AddYears(self, value):
"""
Set the value of the AddYears input for this Choreo. ((optional, integer) Adds the specified number of years to the specified date serial number. A negative number will subtract.)
"""
super(GetTimestampInputSet, self)._set_input('AddYears', value)
def set_Granularity(self, value):
"""
Set the value of the Granularity input for this Choreo. ((optional, string) Set to "seconds" to return the number of seconds since the epoch. Defaults to "milliseconds".)
"""
super(GetTimestampInputSet, self)._set_input('Granularity', value)
def set_SetDay(self, value):
"""
Set the value of the SetDay input for this Choreo. ((optional, integer) Sets the day of month (1–31) of the specified date serial number.)
"""
super(GetTimestampInputSet, self)._set_input('SetDay', value)
def set_SetHour(self, value):
"""
Set the value of the SetHour input for this Choreo. ((optional, integer) Sets the hours (0–23) of the specified date serial number.)
"""
super(GetTimestampInputSet, self)._set_input('SetHour', value)
def set_SetMinute(self, value):
"""
Set the value of the SetMinute input for this Choreo. ((optional, integer) Sets the minutes (0–59) of the specified date serial number.)
"""
super(GetTimestampInputSet, self)._set_input('SetMinute', value)
def set_SetMonth(self, value):
"""
Set the value of the SetMonth input for this Choreo. ((optional, integer) Sets the month (1–12) of the specified date serial number.)
"""
super(GetTimestampInputSet, self)._set_input('SetMonth', value)
def set_SetSecond(self, value):
"""
Set the value of the SetSecond input for this Choreo. ((optional, integer) Sets the seconds (0–59) of the specified date serial number.)
"""
super(GetTimestampInputSet, self)._set_input('SetSecond', value)
def set_SetYear(self, value):
"""
Set the value of the SetYear input for this Choreo. ((optional, integer) Sets the year (such as 1989) of the specified date serial number.)
"""
super(GetTimestampInputSet, self)._set_input('SetYear', value)
class GetTimestampResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetTimestamp Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Timestamp(self):
"""
Retrieve the value for the "Timestamp" output from this Choreo execution. ((date) A the current timestamp, expressed as the number of seconds or milliseconds since January 1, 1970 (epoch time). The Granularity input is used to indicate seconds or milliseconds.)
"""
return self._output.get('Timestamp', None)
class GetTimestampChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetTimestampResultSet(response, path)
|
willprice/arduino-sphere-project
|
scripts/example_direction_finder/temboo/Library/Utilities/Dates/GetTimestamp.py
|
Python
|
gpl-2.0
| 6,772 | 0.005472 |
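A hypothetical driver for the Choreo above; the TembooSession import path and execute_with_results() call follow the usual Temboo SDK pattern, and the credentials are placeholders:

from temboo.core.session import TembooSession
from temboo.Library.Utilities.Dates.GetTimestamp import GetTimestamp

session = TembooSession('ACCOUNT_NAME', 'APP_NAME', 'APP_KEY')  # placeholders
choreo = GetTimestamp(session)
inputs = choreo.new_input_set()
inputs.set_Granularity('seconds')
inputs.set_AddDays(7)                    # timestamp one week from now
results = choreo.execute_with_results(inputs)
print(results.get_Timestamp())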
import sys, os
def stop(argv):
    pwd = os.getcwd()
    # if argv given, folders = [argv]; else, folders = [pwd]
    folders = [argv] if argv else [pwd]
    # check pwd/folder/temp/pids for pid files; kill -15 & rm files (sketch)
    for folder in folders:
        piddir = os.path.join(pwd, folder, 'temp', 'pids')
        for name in (os.listdir(piddir) if os.path.isdir(piddir) else []):
            pidfile = os.path.join(piddir, name)
            try:
                os.kill(int(open(pidfile).read().strip()), 15)  # SIGTERM
            except (ValueError, OSError):
                pass
            os.remove(pidfile)
def main():
print "Please don't try to run this script separately."
if __name__ == '__main__':
main()
|
modcracker/Tork
|
tork/core/manage/stop.py
|
Python
|
mit
| 347 | 0.054755 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
class TestListInterface(object):
config = """
templates:
global:
disable: [seen]
tasks:
list_get:
entry_list: test_list
list_1_get:
entry_list: list 1
list_2_get:
entry_list: list 2
test_list_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: test_list
list_1_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: list 1
list_2_add:
mock:
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
accept_all: yes
list_add:
- entry_list: list 2
test_multiple_list_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: list 1
- entry_list: list 2
test_list_accept_with_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
test_list_accept_without_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
remove_on_match: no
test_multiple_list_accept_with_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: list 1
- entry_list: list 2
test_multiple_list_accept_without_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: list 1
- entry_list: list 2
remove_on_match: no
test_list_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
accept_all: yes
list_remove:
- entry_list: test_list
test_list_reject:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
action: reject
add_for_list_queue:
mock:
- {title: 'The 5th Wave', url: "", imdb_id: "tt2304933"}
- {title: 'Drumline', url: "", imdb_id: "tt0303933"}
accept_all: yes
list_add:
- movie_list: test_list_queue
test_list_queue:
mock:
- {title: 'Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD', url: "http://mock.url/Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD.torrent", imdb_id: "tt0303933"}
- {title: 'Drumline 2002 720p BluRay DTS-HD MA 5 1 x264-FuzerHD', url: "http://mock.url/Drumline 2002 720p BluRay DTS-HD MA 5 1 x264-FuzerHD.torrent", imdb_id: "tt0303933"}
- {title: 'Drumline 2002 DVDRip x264-FuzerHD', url: "http://mock.url/Drumline 2002 DVDRip x264-FuzerHD.torrent", imdb_id: "tt0303933"}
list_match:
from:
- movie_list: test_list_queue
single_match: yes
get_for_list_queue:
movie_list: test_list_queue
test_list_clear_start:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
test_list_clear_exit:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
phase: exit
test_list_clear_input:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
phase: input
"""
def test_list_add(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
def test_multiple_list_add(self, execute_task):
task = execute_task('test_multiple_list_add')
assert len(task.entries) == 2
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 2
def test_list_accept_with_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_accept_with_remove')
assert len(task.all_entries) == 3
assert len(task.accepted) == 2
task = execute_task('list_get')
assert len(task.entries) == 0
def test_list_accept_without_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_accept_without_remove')
assert len(task.all_entries) == 3
assert len(task.accepted) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
def test_multiple_list_accept_with_remove(self, execute_task):
task = execute_task('list_1_add')
assert len(task.entries) == 2
task = execute_task('list_2_add')
assert len(task.entries) == 1
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
task = execute_task('test_multiple_list_accept_with_remove')
assert len(task.accepted) == 3
task = execute_task('list_1_get')
assert len(task.entries) == 0
task = execute_task('list_2_get')
assert len(task.entries) == 0
def test_multiple_list_accept_without_remove(self, execute_task):
task = execute_task('list_1_add')
assert len(task.entries) == 2
task = execute_task('list_2_add')
assert len(task.entries) == 1
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
task = execute_task('test_multiple_list_accept_without_remove')
assert len(task.accepted) == 3
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
def test_list_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_remove')
assert len(task.accepted) == 1
task = execute_task('list_get')
assert len(task.entries) == 1
def test_list_reject(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_reject')
assert len(task.rejected) == 1
def test_list_queue(self, execute_task):
# List queue test is based off movie_list and not entry_list since it entry_list matching is a
# lot more strict so it doesn't make sense to use it with it
task = execute_task('add_for_list_queue')
assert len(task.entries) == 2
task = execute_task('test_list_queue')
assert len(task.accepted) == 1
assert task.find_entry(title="Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD")
task = execute_task('get_for_list_queue')
assert len(task.entries) == 1
def test_list_clear_start(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_start')
assert len(task.entries) == 0
def test_list_clear_exit(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_exit')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 0
def test_list_clear_input(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_input')
assert len(task.entries) == 0
|
oxc/Flexget
|
flexget/tests/test_list_interface.py
|
Python
|
mit
| 9,931 | 0.000503 |
#!/usr/bin/python
###############################################################################
#
#
# Project: ECOOP, sponsored by The National Science Foundation
# Purpose: this code is part of the Cyberinfrastructure developed for the ECOOP project
# http://tw.rpi.edu/web/project/ECOOP
# from the TWC - Tetherless World Constellation
# at RPI - Rensselaer Polytechnic Institute
# founded by NSF
#
# Author: Massimo Di Stefano , distem@rpi.edu -
# http://tw.rpi.edu/web/person/MassimoDiStefano
#
###############################################################################
# Copyright (c) 2008-2014 Tetherless World Constellation at Rensselaer Polytechnic Institute
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from zipfile import ZipFile, ZIP_DEFLATED
from contextlib import closing
import paramiko
import qrcode
from IPython.core.display import HTML, Image
from IPython.display import display, Javascript
import envoy
from datetime import datetime
class shareUtil():
def zipdir(self, basedir, archivename, rm='no'):
"""
utility function to zip a single file or a directory
usage : zipdir(input, output)
@param basedir: input file or directory
@param archivename: output file.zip
@param rm: [yes, no], remove source file (optional, default=no)
"""
assert os.path.isdir(basedir)
with closing(ZipFile(archivename, "w", ZIP_DEFLATED)) as z:
for root, dirs, files in os.walk(basedir):
#NOTE: ignore empty directories
for fn in files:
#print fn
absfn = os.path.join(root, fn)
zfn = absfn[len(basedir) + len(os.sep):] #XXX: relative path
z.write(absfn, zfn)
if rm != 'no':
instruction = 'rm -rf %s' % basedir
os.system(instruction)
def uploadfile(self, username='epi', password='epi', hostname='localhost', port=22,
inputfile=None, outputfile=None, link=False, apacheroot='/var/www/', zip=False, qr=False):
'''
utility to upload file on remote server using sftp protocol
usage : uploadfile(inputfile, outputfile)
@rtype : str
@param username: str - username on remote server
@param password: str - password to access remote server
@param hostname: str - hostname of remote server (default: localhost)
@param port: port number on remote server (default: 22)
@param inputfile: str - local path to the file to uploaded
@param outputfile: remote path to the file to upload
        @param link: boolean [True, False] default False, print a link to download the file
(remote path needs to be in a web available directory)
@param apacheroot: path to apache root default to '/var/www/' required if link == True
        @param zip: boolean, default False, zip the output
        @param qr: boolean, default False, return qrcode as image
@return: link to uploaded file if link=True or qr image if qr=True & link=True, none if link is set to false
'''
if zip:
#print 'add zipfile'
zipfile = str(inputfile + '.zip')
self.zipdir(inputfile, zipfile)
inputfile = zipfile
#paramiko.util.log_to_file('/var/www/esr/paramiko.log')
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname, username=username, password=password)
transport = paramiko.Transport((hostname, port))
transport.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(transport)
parts = outputfile.split('/')
for n in range(2, len(parts)):
path = '/'.join(parts[:n])
#print 'Path:', path,
sys.stdout.flush()
try:
s = sftp.stat(path)
#print 'mode =', oct(s.st_mode)
except IOError as e:
#print e
#print 'adding dir: ', path
sftp.mkdir(path)
try:
sftp.put(remotepath=outputfile, localpath=inputfile)
sftp.close()
transport.close()
print 'file uploaded'
if qr:
if link:
pass
if not link:
                    print 'WARNING: qrcode not generated, set the option link to True'
if link:
filelink = outputfile.replace(apacheroot, '')
link = 'http://' + os.path.normpath(hostname + '/' + filelink)
raw_html = '<a href="%s" target="_blank">ESR results</a>' % link
print 'results are now available for download at : ', link
image = None
if qr:
imagefile = parts[-1].split('.')[0] + '.jpeg'
qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)
qr.add_data(link)
qr.make(fit=True)
img = qr.make_image()
img.save(imagefile, "JPEG")
print 'alive'
image = Image(imagefile)
return image
if not qr:
return HTML(raw_html)
except IOError:
            print "Error: can't find file or read data; check that the input file exists and the remote location is writable"
def gistit(self, filename, jist='/usr/local/bin/jist', type='notebook'):
'''
use the jist utility to paste a txt file on github as gist and return a link to it
usage : gistit(notebookfile)
@param filename: str - path to the a text file or notebook file (.json)
@param jist: str - path to the executable jist (default=/usr/local/bin/jist)
@param type: str - notebook, text
@return: return a link to gist if type=text, link to nbviewer if type=notebook
'''
try:
with open(filename):
link = None
jist = self.which(jist)
if jist:
try:
r = envoy.run('%s -p %s' % (jist, filename))
if type == 'notebook':
link = r.std_out.replace('\n', '').replace('https://gist.github.com',
'http://nbviewer.ipython.org')
if type == 'text':
link = r.std_out.replace('\n', '')
return link
except:
                    print "can't generate gist, check if jist works by command line with: jist -p filename"
if not jist:
print 'cannot find jist utility, check if it is in your path'
except IOError:
print 'input file %s not found' % filename
def get_id(self, suffix, makedir=True):
'''
generate a directory based on the suffix and a time stamp
output looks like : suffix_Thursday_26_September_2013_06_28_49_PM
usage: getID(suffix)
@param suffix: str - suffix for the directory to be generated,
@return: str - directory name
'''
ID = suffix + '_' + str(datetime.now().utcnow().strftime("%A_%d_%B_%Y_%I_%M_%S_%p"))
if makedir:
self.ensure_dir(ID)
        print 'session data directory ID:', ID
return ID
def ensure_dir(self, dir):
'''
make a directory on the file system if it does not exist
usage: ensure_dir(dir)
@param dir: str - path to a directory existent on the local filesystem
@return: None
'''
if not os.path.exists(dir):
os.makedirs(dir)
def save_notebook(self, ID, notebookname, web=None, notebookdir=None):
"""
Save the notebook file as html and or as gist
@param ID: directory name where to store the saved notebook
@param notebookname: name of the notebook
@param web:
@param notebookdir:
"""
if not notebookdir:
notebookdir = os.getcwd()
display(Javascript("IPython.notebook.save_notebook()"))
notebookfile = os.path.join(notebookdir, notebookname)
savedir = os.path.join(os.getcwd(), ID)
command1 = 'cp %s %s' % (notebookfile, savedir)
newnotebook = os.path.join(savedir, notebookname)
command2 = 'ipython nbconvert %s' % newnotebook
os.system(command1)
os.system(command2)
if web:
try:
self.gistit(notebookfile)
except IOError:
                print "can't generate a gist"
def which(self, program):
"""
Check if a program exist and return the full path
@param program: executable name or path to executable
@return: full path to executable
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def getTime(self):
now = datetime.now()
return now
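# Illustrative usage sketch (not executed): how the helpers above are meant to
# fit together. "EcoopUtil" is a hypothetical name for the enclosing utility
# class; substitute the real class name exported by this module.
#
#     util = EcoopUtil()
#     session_dir = util.get_id('esr')        # e.g. esr_Thursday_26_September_2013_06_28_49_PM
#     util.ensure_dir(os.path.join(session_dir, 'plots'))
#     jist_path = util.which('jist')          # None if jist is not on the PATH
#     if jist_path:
#         util.gistit('analysis.ipynb', jist=jist_path, type='notebook')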
|
epifanio/ecoop
|
ecooputil.py
|
Python
|
lgpl-3.0
| 10,832 | 0.003139 |
#!/usr/bin/env python
""" musings on order of variables, x/y vs. col/row
Everyone agrees that col 2, row 1 is (2,1) which is xy ordered.
This works well with the name.
Remember that the usual iteration (over a list-of-lists)
is outer loop y first."""
from __future__ import absolute_import
import re
import messytables
import os
import six
from six.moves import range
from six.moves import zip
try:
import hamcrest
have_ham = True
except ImportError:
have_ham = False
import sys
if sys.version_info >= (3, 6):
import typing
REGEX_PATTERN_TYPE = typing.Pattern
else:
REGEX_PATTERN_TYPE = re._pattern_type
from collections import defaultdict
from copy import copy
from itertools import product, takewhile
from xypath.contrib import excel as contrib_excel
UP = (0, -1)
RIGHT = (1, 0)
DOWN = (0, 1)
LEFT = (-1, 0)
UP_RIGHT = (1, -1)
DOWN_RIGHT = (1, 1)
UP_LEFT = (-1, -1)
DOWN_LEFT = (-1, 1)
def cmp(x, y):
if x<y:
return -1
if x>y:
return 1
return 0
class XYPathError(Exception):
"""Problems with spreadsheet layouts should raise this or a descendant."""
pass
class JunctionError(RuntimeError, XYPathError):
"""Raised if paranoid _XYCell.junction finds it is returning one of the
input cells - i.e. the input cells are in the same row or column"""
pass
class NoCellsAssertionError(AssertionError, XYPathError):
"""Raised by Bag.assert_one() if the bag contains zero cells."""
pass
class MultipleCellsAssertionError(AssertionError, XYPathError):
"""Raised by Bag.assert_one() if the bag contains multiple cells."""
pass
class LookupConfusionError(AssertionError, XYPathError):
"""Lookup found multiple equally-close headers"""
pass
class NoLookupError(AssertionError, XYPathError):
"""Lookup found no valid header"""
pass
def describe_filter_method(filter_by):
if callable(filter_by):
return "matching a function called {}".format(filter_by.__name__)
if isinstance(filter_by, six.string_types):
return "containing the string {!r}".format(filter_by)
if have_ham and isinstance(filter_by, hamcrest.matcher.Matcher):
return "containing "+str(filter_by)
if isinstance(filter_by, REGEX_PATTERN_TYPE):
return "matching the regex {!r}".format(filter_by.pattern)
else:
return "which we're surprised we found at all"
class _XYCell(object):
"""needs to contain: value, position (x,y), parent bag"""
__slots__ = ['value', 'x', 'y', 'table', 'properties']
def __init__(self, value, x, y, table, properties=None):
self.value = value # of appropriate type
self.x = x # column number
self.y = y # row number
self.table = table
if properties is None:
self.properties = {}
else:
self.properties = properties
def __hash__(self):
"""
In order to make a set of cells (used in Bag), they *must* be hashable.
An _XYCell is uniquely identified (by sets, etc) through its position,
content, and parent table.
Note that `properties` is ignored since dicts are unhashable, and
value may be redundant.
"""
return hash((self.value, self.x, self.y, self.table))
def __eq__(self, rhs):
"""See _XYCell.__hash__ for equality conditions"""
return hash(self) == hash(rhs)
def copy(self, new_table=None):
"""Make a copy of the cell.
Its table will be new_table, if specified"""
if new_table is None:
new_table = self.table
return _XYCell(self.value, self.x, self.y,
new_table, self.properties)
def __repr__(self):
return "_XYCell(%r, %r, %r)" % \
(self.value, self.x, self.y)
def __unicode__(self):
return six.text_type(self.value)
def lookup(self, header_bag, direction, strict=False):
"""
Given a single cell (usually a value), a bag containing the headers
of a particular type for that cell, and the direction in which to
search for the relevant header
e.g. for value cell V, searching up:
[ ] [ ]
[O]
[ ]
---> [ ]
V
[ ]
[ ]
the cell with the arrow will be returned.
Strict restricts the selection to cells in the same row/column as
the value, so O is selected instead."""
def mult(cell):
return cell.x * direction[0] + cell.y * direction[1]
def same_row_col(a, b, direction):
return (a.x - b.x == 0 and direction[0] == 0) or \
(a.y - b.y == 0 and direction[1] == 0)
best_cell = None
second_best_cell = None
for target_cell in header_bag.unordered_cells:
if mult(self) <= mult(target_cell):
if not best_cell or mult(target_cell) <= mult(best_cell):
if not strict or same_row_col(self, target_cell, direction):
second_best_cell = best_cell
best_cell = target_cell
if second_best_cell and mult(best_cell) == mult(second_best_cell):
raise LookupConfusionError("{!r} is as good as {!r} for {!r}".format(
best_cell, second_best_cell, self))
if best_cell is None:
raise NoLookupError("No lookup for {!r}".format(self))
return best_cell
def junction(self, other, direction=DOWN, paranoid=True):
""" gets the lower-right intersection of the row of one, and the
column of the other.
paranoid: should we panic if we're hitting one of our input cells?"""
def junction_coord(cells, direction=DOWN):
"""
Under the hood: given two cells and a favoured direction, get the
position of the cell with the column of one and the row of the
other:
A---->+
| ^
| |
| |
v |
*<----B
Both + and * are candidates for the junction of A and B - we take
the one furthest down by default (specified by direction)
>>> cells_dr = (_XYCell(0,1,2,None), _XYCell(0,3,4,None))
>>> junction_coord(cells_dr, DOWN)
(1, 4)
>>> junction_coord(cells_dr, UP)
(3, 2)
>>> junction_coord(cells_dr, LEFT)
(1, 4)
>>> junction_coord(cells_dr, RIGHT)
(3, 2)
>>> cells_tr = (_XYCell(0,1,4,None), _XYCell(0,3,2,None))
>>> junction_coord(cells_tr, DOWN)
(3, 4)
>>> junction_coord(cells_tr, UP)
(1, 2)
>>> junction_coord(cells_tr, LEFT)
(1, 2)
>>> junction_coord(cells_tr, RIGHT)
(3, 4)
"""
new_cells = (
(cells[0].x, cells[1].y),
(cells[1].x, cells[0].y)
)
for index, value in enumerate(direction):
if value == 0:
continue
if cmp(new_cells[0][index], new_cells[1][index]) == value:
return new_cells[0]
else:
return new_cells[1]
(x, y) = junction_coord((self, other), direction)
if paranoid and (x, y) == (self.x, self.y) or \
(x, y) == (other.x, other.y):
raise JunctionError(
"_XYCell.junction(_XYCell) resulted in a cell which is equal"
" to one of the input cells.\n"
" self: {}\n other: {}\n x: {}\n y: {}".format(
self, other, x, y))
junction_bag = self.table.get_at(x, y)
if len(junction_bag) == 0:
return
self_bag = Bag(self.table)
self_bag.add(self)
other_bag = Bag(self.table)
other_bag.add(other)
yield (self_bag, other_bag, junction_bag)
def shift(self, x=0, y=0):
"""Get the cell which is offset from this cell by x columns, y rows"""
if not isinstance(x, int):
assert y == 0, \
"_XYCell.shift: x=%r not integer and y=%r specified" % (x, y)
return self.shift(x[0], x[1])
return self.table.get_at(self.x + x, self.y + y)._cell
class CoreBag(object):
"""Has a collection of _XYCells"""
def pprint(self, *args, **kwargs):
return contrib_excel.pprint(self, *args, **kwargs)
def as_list(self, *args, **kwargs):
return contrib_excel.as_list(self, *args, **kwargs)
def filter_one(self, filter_by):
return contrib_excel.filter_one(self, filter_by)
def excel_locations(self, *args, **kwargs):
return contrib_excel.excel_locations(self, *args, **kwargs)
def __init__(self, table):
self.__store = set()
self.table = table
def add(self, cell):
"""Add a cell to this bag"""
if not isinstance(cell, _XYCell):
raise TypeError("Can only add _XYCell types to Bags: {}".format(
cell.__class__))
self.__store.add(cell)
def __eq__(self, other):
"""Compare two bags: they are equal if:
* their table are the same table (object)
* they contain the same set of cells"""
if not isinstance(other, CoreBag):
return False
return (self.table is other.table and
self.__store == other.__store)
def __len__(self):
return len(self.__store)
def __repr__(self):
return repr(self.__store)
@classmethod
def singleton(cls, cell, table):
"""
Construct a bag with one cell in it
"""
bag = cls(table=table)
bag.add(cell)
return bag
@property
def unordered(self):
"""
Obtain an unordered iterator over this bag. iter(bag) is sorted on
demand, and therefore inefficient if being done repeatedly where order
does not matter.
"""
return (Bag.singleton(c, table=self.table) for c in self.__store)
@property
def unordered_cells(self):
"""
Analogous to the `unordered` property, except that it returns _XYCells
instead of Bags.
"""
return iter(self.__store)
def __iter__(self):
"""
Return a view of the cells in this bag in left-right, top-bottom order
Note: this is expensive for large bags (when done repeatedly). If you
don't care about order, use `bag.unordered`, which gives an unordered
iterator.
"""
def yx(cell):
return cell.y, cell.x
for cell in sorted(self.__store, key=yx):
yield Bag.singleton(cell, table=self.table)
def __sub__(self, rhs):
"""Bags quack like sets. Implements - operator."""
return self.difference(rhs)
def difference(self, rhs):
"""Bags quack like sets."""
assert self.table is rhs.table,\
"Can't difference bags from separate tables"
new = copy(self)
new.__store = self.__store.difference(rhs.__store)
return new
def __or__(self, rhs):
"""Bags quack like sets. Implements | operator.
For mathematical purity, + (__add__) isn't appropriate"""
return self.union(rhs)
def union(self, rhs):
"""Bags quack like sets."""
assert self.table is rhs.table, "Can't union bags from separate tables"
new = copy(self)
new.__store = self.__store.union(rhs.__store)
return new
def __and__(self, rhs):
return self.intersection(rhs)
def intersection(self, rhs):
assert self.table is rhs.table, \
"Can't take intersection of bags from separate tables"
new = copy(self)
new.__store = self.__store.intersection(rhs.__store)
return new
def select(self, function):
"""Select cells from this bag's table based on the cells in this bag.
e.g.
bag.select(lambda bag_cell, table_cell: bag_cell.y == table_cell.y
and bag_cell.value == table_cell.value)
would give cells in the table with the same name on the same row
as a cell in the bag"""
return self.table.select_other(function, self)
def select_other(self, function, other):
"""A more general version of select, where another bag to select from
is explicitly specified rather than using the original bag's table"""
"""note: self.select(f) = self.table.select_other(f, self)"""
newbag = Bag(table=self.table)
for bag_cell in self.__store:
for other_cell in other.__store:
if function(bag_cell, other_cell):
newbag.add(bag_cell)
break
return newbag
def filter(self, filter_by):
"""
Returns a new bag containing only cells which match
the filter_by predicate.
filter_by can be:
a) a callable, which takes a cell as a parameter
and returns True if the cell should be returned,
such as `lambda cell: cell.value == 'dog'`
b) a string, to match exactly: `u'dog'`
c) a hamcrest match rule: `hamcrest.equal_to("dog")`
(requires hamcrest to be available)
d) a compiled regex: `re.compile("dog")`
"""
if callable(filter_by):
return self._filter_internal(filter_by)
elif isinstance(filter_by, six.string_types):
return self._filter_internal(lambda cell: six.text_type(cell.value).strip() == filter_by)
elif have_ham and isinstance(filter_by, hamcrest.matcher.Matcher):
return self._filter_internal(lambda cell: filter_by.matches(cell.value))
elif isinstance(filter_by, REGEX_PATTERN_TYPE):
return self._filter_internal(
lambda cell: re.match(filter_by, six.text_type(cell.value)))
else:
raise ValueError("filter_by must be function, hamcrest filter, compiled regex or string.")
def _filter_internal(self, function):
newbag = Bag(table=self.table)
for bag_cell in self.unordered_cells:
if function(bag_cell):
newbag.add(bag_cell)
return newbag
def assert_one(self, message="assert_one() : {} cells in bag, not 1"):
"""Chainable: raise an error if the bag contains 0 or 2+ cells.
Otherwise returns the original (singleton) bag unchanged."""
if len(self.__store) == 1:
return self
elif len(self.__store) == 0:
raise NoCellsAssertionError(
message.format(
len(self.__store)
)
)
elif len(self.__store) > 1:
raise MultipleCellsAssertionError(
message.format(
len(self.__store)
)
)
@property
def _cell(self):
"""Under the hood: get the cell inside a singleton bag.
It's an error for it to not contain precisely one cell."""
try:
xycell = list(self.assert_one().__store)[0]
except AssertionError:
l = len(list(self.__store))
raise XYPathError("Can't use multicell bag as cell: (len %r)" % l)
else:
assert isinstance(xycell, _XYCell)
return xycell
@property
def value(self):
"""Getter for singleton's cell value"""
return self._cell.value
@property
def x(self):
"""Getter for singleton's cell column number"""
return self._cell.x
@property
def y(self):
"""Getter for singleton's cell row number"""
return self._cell.y
@property
def properties(self):
"""Getter for singleton's cell properties"""
return self._cell.properties
class Bag(CoreBag):
@staticmethod
def from_list(cells):
"""
Make a non-bag iterable of cells into a Bag. Some magic may be lost,
especially if it's zero length.
TODO: This should probably be part of the core __init__ class.
TODO: Don't do a piece-by-piece insertion, just slap the whole listed
iterable in, because this is slow.
""" # TODO
bag = Bag(table=None)
for i, cell_bag in enumerate(cells):
bag.add(cell_bag._cell)
if i == 0:
bag.table = cell_bag.table
else:
assert bag.table == cell_bag.table
return bag
def expand(self, direction, stop_before=None):
return self.fill(direction, stop_before=stop_before) | self
def fill(self, direction, stop_before=None):
"""Should give the same output as fill, except it
doesn't support non-cardinal directions or stop_before.
Twenty times faster than fill in test_ravel."""
if direction in (UP_RIGHT, DOWN_RIGHT, UP_LEFT,
DOWN_LEFT):
return self._fill(direction, stop_before)
def what_to_get(cell):
"""converts bag coordinates into thing to pass to get_at"""
cell_coord = (cell.x, cell.y)
retval = []
for cell_coord, direction_coord in zip(cell_coord, direction):
if direction_coord != 0:
retval.append(None)
else:
retval.append(cell_coord)
return tuple(retval) # TODO yuck
if direction not in (UP, RIGHT, DOWN, LEFT):
raise ValueError("Must be a cardinal direction!")
### this is what same_row/col should look like!
small_table = None
for cell in self.unordered_cells:
got_rowcol = self.table.get_at(*what_to_get(cell))
if small_table:
small_table = small_table.union(got_rowcol)
else:
small_table = got_rowcol
if small_table is None:
small_table = Bag(table=self.table)
# now we use the small_table as if it was the table.
(left_right, up_down) = direction
bag = small_table.select_other(
lambda table, bag: cmp(table.x, bag.x) == left_right
and cmp(table.y, bag.y) == up_down,
self
)
if stop_before is not None:
return bag.stop_before(stop_before)
else:
return bag
def stop_before(self, stop_function):
"""Assumes the data is:
* in a single row or column
* proceeding either downwards or rightwards
"""
return Bag.from_list(list(
takewhile(lambda c: not stop_function(c), self)))
def _fill(self, direction, stop_before=None):
"""
If the bag contains only one cell, select all cells in the direction
given, excluding the original cell. For example, from a column heading
cell, you can "fill down" to get all the values underneath it.
If you provide a stop_before function, it will be called on each cell
as a stop condition. For example, a stop_before function which tests
cell.value for an empty string would stop the fill before it reaches
the bottom of the sheet.
"""
raise DeprecationWarning("2D fill is deprecated. Yell if you need it.")
if direction not in (UP, RIGHT, DOWN, LEFT, UP_RIGHT, DOWN_RIGHT,
UP_LEFT, DOWN_LEFT):
raise ValueError("Invalid direction! Use one of UP, RIGHT, "
"DOWN_RIGHT etc")
(left_right, up_down) = direction
bag = self.select(
lambda table, bag: cmp(table.x, bag.x) == left_right
and cmp(table.y, bag.y) == up_down
)
if stop_before is not None:
# NOTE(PMF): stop_before is limited to singleton bags, in the DOWN
# or RIGHT direction. This isn't ideal, but with the above "magic"
# cmp code I can't think of an elegant general way of doing this. I
# also can't imagine what it means to run fill in multiple
# directions, or with non singleton bags. TODO: Constrain?
if direction not in (DOWN, RIGHT):
raise ValueError("Oops, stop_before only works down or right!")
self.assert_one("You can't use stop_before for bags with more than"
" one cell inside.")
return Bag.from_list(list(
takewhile(lambda c: not stop_before(c), bag)))
return bag
def junction(self, other, *args, **kwargs):
"""For every combination of pairs of cells from this bag and the other
bag, get the cell that is at the same row as one of them, and column
as the other.
There are two: so we specify a direction to say which one wins (in
the cell-based version of this function) - defaulting to the one
furthest down"""
if not isinstance(other, CoreBag):
raise TypeError(
"Bag.junction() called with invalid type {}, must be "
"(Core)Bag".format(other.__class__))
# Generate ordered lists of dimension cells exactly once (avoid doing
# it in the inner loop because of the sorted() in __iter__)
self_cells = list(self)
other_cells = list(other)
for self_cell in self_cells:
for other_cell in other_cells:
assert self_cell._cell.__class__ == other_cell._cell.__class__
for triple in self_cell._cell.junction(other_cell._cell,
*args, **kwargs):
yield triple
def waffle(self, other, *args, **kwargs):
bag = Bag(table=self.table)
for (selfbag, otherbag, junction_cell) in self.junction(other, *args, **kwargs):
bag.add(junction_cell._cell)
return bag
def shift(self, x=0, y=0):
"""
Return a bag in which each cell is offset from the source bag by the
coordinates specified. Coordinates can be specified as:
Bag.shift(0,2) - full specification
Bag.shift(y=2) - partial specification
Bag.shift((0,2)) - use of tuple for x, unspecified y
"""
if not isinstance(x, int):
assert y == 0, \
"Bag.shift: x=%r not integer and y=%r specified" % (x, y)
return self.shift(x[0], x[1])
bag = Bag(table=self.table)
for b_cell in self.unordered:
t_cell = self.table.get_at(b_cell.x + x, b_cell.y + y).assert_one()
bag.add(t_cell._cell)
return bag
def extrude(self, dx, dy):
"""
Extrude all cells in the bag by (dx, dy): for each cell, also select every
cell between it and its (dx, dy) offset.
For example, given the bag with a cell at (0, 0):
{(0, 0)}
.extrude(2, 0) gives the bag with the cells (to the right):
{(0, 0), (1, 0), (2, 0)}
.extrude(0, -2) gives the bag with the cells (up):
{(0, 0), (0, -1), (0, -2)}
"""
if dx < 0:
dxs = list(range(0, dx - 1, -1))
else:
dxs = list(range(0, dx + 1, +1))
if dy < 0:
dys = list(range(0, dy - 1, -1))
else:
dys = list(range(0, dy + 1, +1))
bag = Bag(table=self.table)
for cell in self.unordered_cells:
for i, j in product(dxs, dys):
bag.add(self.table.get_at(cell.x + i, cell.y + j)._cell)
return bag
def same_row(self, bag):
"""
Select cells in this bag which are in the same
row as a cell in the other `bag`.
"""
# TODO: make less crap - use Table.get_at()
all_y = set()
for cell in bag.unordered_cells:
all_y.add(cell.y)
return self.filter(lambda c: c.y in all_y)
def same_col(self, bag):
"""
Select cells in this bag which are in the same
column as a cell in the other `bag`.
"""
# TODO: make less crap
all_x = set()
for cell in bag.unordered_cells:
all_x.add(cell.x)
return self.filter(lambda c: c.x in all_x)
def __getattr__(self, name):
if name.startswith("is_not_"):
return lambda: self.filter(lambda cell: not cell.properties[name[7:]])
if name.startswith("is_"): # might need additional layer of indirection
return lambda: self.filter(lambda cell: cell.properties[name[3:]])
if name.endswith("_is_not"):
return lambda value: self.filter(lambda cell: not cell.properties[name[:-7]] == value)
if name.endswith("_is"):
return lambda value: self.filter(lambda cell: cell.properties[name[:-3]] == value)
raise AttributeError("Bag has no attribute {!r}".format(name))
class Table(Bag):
"""A bag which represents an entire sheet.
Features indices to speed retrieval by coordinate.
Also includes functions for importing tables into XYPath"""
def __init__(self, name=""):
super(Table, self).__init__(table=self)
self._x_index = defaultdict(lambda: Bag(self))
self._y_index = defaultdict(lambda: Bag(self))
self._max_x = -1
self._max_y = -1
self.sheet = None
self.name = name
def __hash__(self):
return id(self)
def rows(self):
"""Get bags containing each row's cells, in order"""
for row_num in range(0, self._max_y + 1): # inclusive
yield self._y_index[row_num]
def cols(self):
"""Get bags containing each column's cells, in order"""
for col_num in range(0, self._max_x + 1): # inclusive
yield self._x_index[col_num]
def col(self, column):
if isinstance(column, six.string_types):
c_num = contrib_excel.excel_column_number(column, index=0)
return self.col(c_num)
else:
assert isinstance(column, int)
return self._x_index[column]
def add(self, cell):
"""Under the hood: add a cell to a table and the table's indices.
Used in the construction of a table."""
self._x_index[cell.x].add(cell)
self._y_index[cell.y].add(cell)
self._max_x = max(self._max_x, cell.x)
self._max_y = max(self._max_y, cell.y)
super(Table, self).add(cell)
def get_at(self, x=None, y=None):
"""Directly get a singleton bag via indices. Faster than Bag.filter"""
# we use .get() here to avoid new empty Bags being inserted
# into the index stores when a non-existent coordinate is requested.
assert isinstance(x, int) or x is None, "get_at takes integers (got {!r})".format(x)
assert isinstance(y, int) or y is None, "get_at takes integers (got {!r})".format(y)
if x is None and y is None:
raise TypeError('get_at requires at least one x or y value')
if x is None:
return self._y_index.get(y, Bag(self))
if y is None:
return self._x_index.get(x, Bag(self))
return self._y_index.get((y), Bag(self)).filter(lambda cell: cell.x==x)
@staticmethod
def from_filename(filename, table_name=None, table_index=None):
"""Wrapper around from_file_object to handle extension extraction"""
# NOTE: this is a messytables table name
extension = os.path.splitext(filename)[1].strip('.')
with open(filename, 'rb') as f:
return Table.from_file_object(f, extension,
table_name=table_name,
table_index=table_index)
@staticmethod
def from_file_object(fobj, extension='',
table_name=None, table_index=None):
"""Load table from file object, you must specify a table's name
or position number. If you don't know these, try from_messy."""
# NOTE this is a messytables table name
if (table_name is not None and table_index is not None) or \
(table_name is None and table_index is None):
raise TypeError("Must give exactly one of table_name, table_index")
table_set = messytables.any.any_tableset(fobj, extension=extension)
if table_name is not None:
return Table.from_messy(table_set[table_name])
elif table_index is not None:
return Table.from_messy(table_set.tables[table_index])
@staticmethod
def from_messy(messy_rowset):
"""Import a rowset (table) from messytables, e.g. to work with each
table in turn:
tables = messytables.any.any_tableset(fobj)
for mt_table in tables:
xy_table = xypath.Table.from_messy(mt_table)
..."""
assert isinstance(messy_rowset, messytables.core.RowSet),\
"Expected a RowSet, got a %r" % type(messy_rowset)
new_table = Table.from_iterable(
messy_rowset,
value_func=lambda cell: cell.value,
properties_func=lambda cell: cell.properties,
name=messy_rowset.name)
if hasattr(messy_rowset, 'sheet'):
new_table.sheet = messy_rowset.sheet
return new_table
@staticmethod
def from_iterable(table, value_func=lambda cell: cell,
properties_func=lambda cell: {},
name=None):
"""Make a table from a pythonic table structure.
The table must be an iterable which returns rows (in top-to-bottom
order), which in turn are iterables which return cells (in
left-to-right order).
value_func and properties_func specify how the cell maps onto an
_XYCell's value and properties. The defaults assume that you have a
straight-forward list of lists of values."""
new_table = Table(name=name)
for y, row in enumerate(table):
for x, cell in enumerate(row):
new_table.add(
_XYCell(
value_func(cell),
x,
y,
new_table,
properties_func(cell)))
return new_table
@staticmethod
def from_bag(bag, name=None):
"""Make a copy of a bag which is its own table.
Useful when a single imported table is two logical tables"""
if name is None:
name=bag.table.name
new_table = Table(name=name)
for bag_cell in bag.unordered:
new_table.add(bag_cell._cell.copy(new_table))
return new_table
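# Illustrative usage sketch using only names defined in this module: build a
# Table from a plain list of lists, find a header cell, and fill downwards to
# collect the values underneath it.
if __name__ == "__main__":
    data = [["name", "score"],
            ["alice", 3],
            ["bob", 5]]
    demo_table = Table.from_iterable(data)
    header = demo_table.filter("score").assert_one()    # singleton Bag for the header cell
    below = header.fill(DOWN)                           # cells under the header, header excluded
    print(sorted(cell.value for cell in below.unordered_cells))  # [3, 5]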
|
scraperwiki/xypath
|
xypath/xypath.py
|
Python
|
bsd-2-clause
| 31,182 | 0.000994 |
from django.db import models
from stdimage import StdImageField
from django.core.validators import RegexValidator
import datetime
YEAR_CHOICES = []
for r in range(1980, (datetime.datetime.now().year+1)):
YEAR_CHOICES.append((r,r))
S_CHOICE = [('1stYear','1stYear'),('2ndYear','2ndYear'),('3rdYear','3rdYear'),('4thYear','4thYear')]
# Create your models here.
class Hostel(models.Model):
HostelName = models.CharField(max_length=100, primary_key=True)
HostelType = models.CharField(max_length=10)
HostelSeat = models.IntegerField()
HostelImage = StdImageField(upload_to='Hostels/logo/',variations={'large': (675, 300,True)})
HostelAddress = models.CharField(max_length=200)
HostelDescription = models.TextField()
HostelEmail = models.EmailField()
phone_regex = RegexValidator(regex=r'^\+?1?\d{10,13}$', message="Phone number must be entered in the format: '+999999999'. Up to 13 digits allowed.")
HostelPhoneNo = models.CharField(max_length=13,validators=[phone_regex], blank=True)
def __str__(self):
return self.HostelName
class HostelEvents(models.Model):
HostelName = models.ForeignKey(Hostel)
HostelEventsName = models.CharField(max_length=100)
HostelEventDescription = models.TextField()
def __str__(self):
return self.HostelEventsName
class HostelPictureGalary(models.Model):
HostelName = models.ForeignKey(Hostel)
PictureName = models.CharField(max_length=100)
PictureLocation = StdImageField(upload_to='Hostels/galary/',variations={'large': (675, 300,True)})
def __str__(self):
return self.PictureName
class HostelBody(models.Model):
HostelName = models.ForeignKey(Hostel)
HostelbodyRole = models.CharField(max_length=100)
HostelbodyRoleYear = models.IntegerField(choices=YEAR_CHOICES, default=datetime.datetime.now().year)
PersonName = models.CharField (max_length=10)
PersonYear = models.CharField (max_length=7, choices=S_CHOICE,default='NA')
PersonImage = StdImageField(upload_to='Hostels/gb/',variations={'thumbnail': (300, 200,True)})
def __str__(self):
return self.HostelbodyRole
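# Illustrative usage sketch (Django shell, migrations applied); all field
# values below are made up.
#
#     from hostels.models import Hostel, HostelEvents
#     h = Hostel.objects.create(
#         HostelName='Main Hostel', HostelType='Boys', HostelSeat=120,
#         HostelImage='Hostels/logo/main.jpg', HostelAddress='Campus Road',
#         HostelDescription='Primary undergraduate hostel.',
#         HostelEmail='hostel@example.com', HostelPhoneNo='+919999999999')
#     HostelEvents.objects.create(HostelName=h, HostelEventsName='Freshers Meet',
#                                 HostelEventDescription='Welcome event.')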
|
bpain2010/kgecweb
|
hostels/models.py
|
Python
|
gpl-2.0
| 2,053 | 0.032148 |
#!/usr/bin/python2
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# To run this script please copy "out/<build_name>//pyproto/webrtc/modules/
# audio_coding/audio_network_adaptor/debug_dump_pb2.py" to this folder.
# Then you can run this script with:
# "python parse_ana_dump.py -m uplink_bandwidth_bps -f dump_file.dat"
# You can add as many metrics or decisions to the plot as you like.
# For more information call:
# "python parse_ana_dump.py --help"
import struct
from optparse import OptionParser
import matplotlib.pyplot as plt
import debug_dump_pb2
def GetNextMessageSize(file_to_parse):
data = file_to_parse.read(4)
if data == '':
return 0
return struct.unpack('<I', data)[0]
def GetNextMessageFromFile(file_to_parse):
message_size = GetNextMessageSize(file_to_parse)
if message_size == 0:
return None
try:
event = debug_dump_pb2.Event()
event.ParseFromString(file_to_parse.read(message_size))
except IOError:
print 'Invalid message in file'
return None
return event
def InitMetrics():
metrics = {}
event = debug_dump_pb2.Event()
for metric in event.network_metrics.DESCRIPTOR.fields:
metrics[metric.name] = {'time': [], 'value': []}
return metrics
def InitDecisions():
decisions = {}
event = debug_dump_pb2.Event()
for decision in event.encoder_runtime_config.DESCRIPTOR.fields:
decisions[decision.name] = {'time': [], 'value': []}
return decisions
def ParseAnaDump(dump_file_to_parse):
with open(dump_file_to_parse, 'rb') as file_to_parse:
metrics = InitMetrics()
decisions = InitDecisions()
first_time_stamp = None
while True:
event = GetNextMessageFromFile(file_to_parse)
if event == None:
break
if first_time_stamp == None:
first_time_stamp = event.timestamp
if event.type == debug_dump_pb2.Event.ENCODER_RUNTIME_CONFIG:
for decision in event.encoder_runtime_config.DESCRIPTOR.fields:
if event.encoder_runtime_config.HasField(decision.name):
decisions[decision.name]['time'].append(event.timestamp -
first_time_stamp)
decisions[decision.name]['value'].append(
getattr(event.encoder_runtime_config, decision.name))
if event.type == debug_dump_pb2.Event.NETWORK_METRICS:
for metric in event.network_metrics.DESCRIPTOR.fields:
if event.network_metrics.HasField(metric.name):
metrics[metric.name]['time'].append(event.timestamp -
first_time_stamp)
metrics[metric.name]['value'].append(
getattr(event.network_metrics, metric.name))
return (metrics, decisions)
def main():
parser = OptionParser()
parser.add_option(
"-f", "--dump_file", dest="dump_file_to_parse", help="dump file to parse")
parser.add_option(
'-m',
'--metric_plot',
default=[],
type=str,
help='metric key (name of the metric) to plot',
dest='metric_keys',
action='append')
parser.add_option(
'-d',
'--decision_plot',
default=[],
type=str,
help='decision key (name of the decision) to plot',
dest='decision_keys',
action='append')
options = parser.parse_args()[0]
if options.dump_file_to_parse == None:
print "No dump file to parse is set.\n"
parser.print_help()
exit()
(metrics, decisions) = ParseAnaDump(options.dump_file_to_parse)
metric_keys = options.metric_keys
decision_keys = options.decision_keys
plot_count = len(metric_keys) + len(decision_keys)
if plot_count == 0:
print "You have to set at least one metric or decision to plot.\n"
parser.print_help()
exit()
plots = []
if plot_count == 1:
f, mp_plot = plt.subplots()
plots.append(mp_plot)
else:
f, mp_plots = plt.subplots(plot_count, sharex=True)
plots.extend(mp_plots.tolist())
for key in metric_keys:
plot = plots.pop()
plot.grid(True)
plot.set_title(key + " (metric)")
plot.plot(metrics[key]['time'], metrics[key]['value'])
for key in decision_keys:
plot = plots.pop()
plot.grid(True)
plot.set_title(key + " (decision)")
plot.plot(decisions[key]['time'], decisions[key]['value'])
f.subplots_adjust(hspace=0.3)
plt.show()
if __name__ == "__main__":
main()
|
wangcy6/storm_app
|
frame/c++/webrtc-master/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py
|
Python
|
apache-2.0
| 4,718 | 0.010386 |
# coding=utf-8
"""
Write a program, containing subprograms, that:
a) Reads from standard input the dimensions of a two-dimensional matrix: the number of rows and the number of columns;
b) Generates a matrix in which each cell is a randomly generated integer in the range 0 to 9;
c) Prints the matrix to the screen, row by row;
d) Computes and prints the average of all values in the matrix;
e) Prints the content of every row whose values are all above the average computed in (d).
Hint
Use the function random.randint(a, b), available in the API, which returns a random
integer between a and b, inclusive.
Restriction
Programs that use the numpy module will not be accepted for grading.
Input
Two positive integers are read, representing respectively: the number of rows L and the number of columns
C of the matrix to be generated.
Output
Your program must print:
L lines, each with C integers, containing values in the range 0 to 9;
A blank line;
A line with a floating-point number, representing the requested average;
A blank line;
Zero or more lines with C integers each, one for each row with the property requested above
"""
from random import randint
def gera_matriz(linhas, colunas):
matrix = []
for linha in range(linhas):
linha = []
for coluna in range(colunas):
linha.append(randint(0, 9))
matrix.append(linha)
return matrix
def imprime_matriz(matriz):
for linha in matriz:
for coluna in linha:
print(coluna, end=" ")
print()
print()
def media_da_matriz(matriz):
total = 0.0
for linha in matriz:
for coluna in linha:
total += coluna
return total / (len(matriz) * len(matriz[0]))
def imprive_valores_acima_da_media(matriz, media):
for linha in matriz:
for coluna in linha:
if coluna > media:
print(coluna, end=" ")
quantidade_linhas, quantidade_colunas = input().split()
matriz_gerada = gera_matriz(int(quantidade_linhas), int(quantidade_colunas))
imprime_matriz(matriz_gerada)
media_da_matriz = media_da_matriz(matriz_gerada)
print(media_da_matriz)
print()
imprive_valores_acima_da_media(matriz_gerada, media_da_matriz)
|
deyvedvm/cederj
|
fund-prog/2017-2/ap1/questao3.py
|
Python
|
gpl-3.0
| 2,277 | 0.001778 |
#!/usr/bin/env python
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line application that demonstrates basic BigQuery API usage.
This sample queries a public shakespeare dataset and displays the 10 of
Shakespeare's works with the greatest number of distinct words.
This sample is used on this page:
https://cloud.google.com/bigquery/bigquery-api-quickstart
For more information, see the README.md under /bigquery.
"""
# [START all]
import argparse
import googleapiclient.discovery
from googleapiclient.errors import HttpError
def main(project_id):
# [START build_service]
# Construct the service object for interacting with the BigQuery API.
bigquery_service = googleapiclient.discovery.build('bigquery', 'v2')
# [END build_service]
try:
# [START run_query]
query_request = bigquery_service.jobs()
query_data = {
'query': (
'SELECT TOP(corpus, 10) as title, '
'COUNT(*) as unique_words '
'FROM [publicdata:samples.shakespeare];')
}
query_response = query_request.query(
projectId=project_id,
body=query_data).execute()
# [END run_query]
# [START print_results]
print('Query Results:')
for row in query_response['rows']:
print('\t'.join(field['v'] for field in row['f']))
# [END print_results]
except HttpError as err:
print('Error: {}'.format(err.content))
raise err
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('project_id', help='Your Google Cloud Project ID.')
args = parser.parse_args()
main(args.project_id)
# [END all]
|
sharbison3/python-docs-samples
|
bigquery/api/getting_started.py
|
Python
|
apache-2.0
| 2,344 | 0 |
"""
core.mixins - Mixins available to use with models
"""
from django.db.models.signals import post_save
def on_changed(sender, **kwargs):
"""
Calls the `model_changed` method and then resets the state.
"""
instance = kwargs.get("instance")
is_new = kwargs.get("created")
dirty_fields = instance.get_dirty_fields()
instance.model_changed(instance.original_state, dirty_fields, is_new)
instance.original_state = instance.to_dict()
class ModelChangedMixin(object):
"""
Mixin for detecting changes to a model
"""
def __init__(self, *args, **kwargs):
super(ModelChangedMixin, self).__init__(*args, **kwargs)
self.original_state = self.to_dict()
identifier = "{0}_model_changed".format(self.__class__.__name__)
post_save.connect(
on_changed, sender=self.__class__, dispatch_uid=identifier)
def to_dict(self):
"""
Returns the model as a dict
"""
# Get all the field names that are not relations
keys = (f.name for f in self._meta.local_fields if not f.rel)
return {field: getattr(self, field) for field in keys}
def get_dirty_fields(self):
"""
Returns the fields dirty on the model
"""
dirty_fields = {}
current_state = self.to_dict()
for key, value in current_state.items():
if self.original_state[key] != value:
dirty_fields[key] = value
return dirty_fields
def is_dirty(self):
"""
Return whether the model is dirty
An unsaved model is dirty when it has no primary key
or has at least one dirty field.
"""
if not self.pk:
return True
return {} != self.get_dirty_fields()
def model_changed(self, old_fields, new_fields, is_new):
"""
Post-hook for all fields that have been changed.
"""
raise NotImplementedError("Missing method `model_changed`")
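# Illustrative usage sketch: ModelChangedMixin is intended to be mixed into a
# Django model, which then implements model_changed(). The Article model below
# is hypothetical.
#
#     from django.db import models
#     from core.mixins import ModelChangedMixin
#
#     class Article(ModelChangedMixin, models.Model):
#         title = models.CharField(max_length=100)
#         published = models.BooleanField(default=False)
#
#         def model_changed(self, old_fields, new_fields, is_new):
#             if not is_new and 'published' in new_fields:
#                 print('published: %r -> %r' % (old_fields['published'],
#                                                new_fields['published']))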
|
CCI-MOC/GUI-Backend
|
core/mixins.py
|
Python
|
apache-2.0
| 1,993 | 0 |
# pyOCD debugger
# Copyright (c) 2006-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import platform
import collections
from time import sleep
import six
from .interface import Interface
from .common import filter_device_by_usage_page
from ..dap_access_api import DAPAccessIntf
from ....utility.timeout import Timeout
OPEN_TIMEOUT_S = 60.0
LOG = logging.getLogger(__name__)
try:
import pywinusb.hid as hid
except:
if platform.system() == "Windows":
LOG.error("PyWinUSB is required on a Windows Machine")
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
class PyWinUSB(Interface):
"""! @brief CMSIS-DAP USB interface class using pyWinUSB for the backend.
"""
isAvailable = IS_AVAILABLE
def __init__(self):
super(PyWinUSB, self).__init__()
# Vendor page and usage_id = 2
self.report = None
# deque used here instead of synchronized Queue
# since read speeds are ~10-30% faster and are
# comparable to a list based implementation.
self.rcv_data = collections.deque()
self.device = None
# handler called when a report is received
def rx_handler(self, data):
# LOG.debug("rcv<(%d) %s" % (len(data), ' '.join(['%02x' % i for i in data])))
self.rcv_data.append(data[1:])
def open(self):
self.device.set_raw_data_handler(self.rx_handler)
# Attempt to open the device.
# Note - this operation must be retried since
# other instances of pyOCD listing boards can prevent
# opening this device with exclusive access.
with Timeout(OPEN_TIMEOUT_S) as t_o:
while t_o.check():
# Attempt to open the device
try:
self.device.open(shared=False)
break
except hid.HIDError:
pass
# Attempt to open the device in shared mode to make
# sure it is still there
try:
self.device.open(shared=True)
self.device.close()
except hid.HIDError as exc:
# If the device could not be opened in read only mode
# Then it either has been disconnected or is in use
# by another thread/process
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device %s"
% self.serial_number), exc)
else:
# If this timeout has elapsed then another process
# has locked this device in shared mode. This should
# not happen.
raise DAPAccessIntf.DeviceError("timed out attempting to open device %s" % self.serial_number)
@staticmethod
def get_all_connected_interfaces():
"""! @brief Returns all the connected CMSIS-DAP devices
"""
all_devices = hid.find_all_hid_devices()
# find devices with good vid/pid
all_mbed_devices = []
for d in all_devices:
if (d.product_name.find("CMSIS-DAP") >= 0):
all_mbed_devices.append(d)
boards = []
for dev in all_mbed_devices:
try:
dev.open(shared=True)
# Perform device-specific filtering.
if filter_device_by_usage_page(dev.vendor_id, dev.product_id, dev.hid_caps.usage_page):
dev.close()
continue
report = dev.find_output_reports()
if len(report) != 1:
dev.close()
continue
new_board = PyWinUSB()
new_board.report = report[0]
new_board.packet_size = len(new_board.report.get_raw_data()) - 1
new_board.vendor_name = dev.vendor_name
new_board.product_name = dev.product_name
new_board.serial_number = dev.serial_number
new_board.vid = dev.vendor_id
new_board.pid = dev.product_id
new_board.device = dev
dev.close()
boards.append(new_board)
except Exception as e:
if (str(e) != "Failure to get HID pre parsed data"):
LOG.error("Receiving Exception: %s", e)
dev.close()
return boards
def write(self, data):
"""! @brief Write data on the OUT endpoint associated to the HID interface
"""
data.extend([0] * (self.packet_size - len(data)))
# LOG.debug("snd>(%d) %s" % (len(data), ' '.join(['%02x' % i for i in data])))
self.report.send([0] + data)
def read(self, timeout=20.0):
"""! @brief Read data on the IN endpoint associated to the HID interface
"""
with Timeout(timeout) as t_o:
while t_o.check():
if len(self.rcv_data):
break
sleep(0)
else:
# Read operations should typically take ~1-2ms.
# If this exception occurs, then it could indicate
# a problem in one of the following areas:
# 1. Bad usb driver causing either a dropped read or write
# 2. CMSIS-DAP firmware problem cause a dropped read or write
# 3. CMSIS-DAP is performing a long operation or is being
# halted in a debugger
raise DAPAccessIntf.DeviceError("Read timed out")
return self.rcv_data.popleft()
def close(self):
"""! @brief Close the interface
"""
LOG.debug("closing interface")
self.device.close()
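# Illustrative usage sketch (Windows with pywinusb and a CMSIS-DAP probe
# attached); the command byte below is only an example.
#
#     interfaces = PyWinUSB.get_all_connected_interfaces()
#     if interfaces:
#         probe = interfaces[0]
#         probe.open()
#         probe.write([0x00])       # e.g. a DAP_Info request
#         response = probe.read()   # raises DeviceError on timeout
#         probe.close()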
|
mbedmicro/pyOCD
|
pyocd/probe/pydapaccess/interface/pywinusb_backend.py
|
Python
|
apache-2.0
| 6,277 | 0.001912 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectionMonitorParameters(Model):
"""Parameters that define the operation to create a connection monitor.
All required parameters must be populated in order to send to Azure.
:param source: Required.
:type source:
~azure.mgmt.network.v2018_01_01.models.ConnectionMonitorSource
:param destination: Required.
:type destination:
~azure.mgmt.network.v2018_01_01.models.ConnectionMonitorDestination
:param auto_start: Determines if the connection monitor will start
automatically once created. Default value: True .
:type auto_start: bool
:param monitoring_interval_in_seconds: Monitoring interval in seconds.
Default value: 60 .
:type monitoring_interval_in_seconds: int
"""
_validation = {
'source': {'required': True},
'destination': {'required': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'ConnectionMonitorSource'},
'destination': {'key': 'destination', 'type': 'ConnectionMonitorDestination'},
'auto_start': {'key': 'autoStart', 'type': 'bool'},
'monitoring_interval_in_seconds': {'key': 'monitoringIntervalInSeconds', 'type': 'int'},
}
def __init__(self, **kwargs):
super(ConnectionMonitorParameters, self).__init__(**kwargs)
self.source = kwargs.get('source', None)
self.destination = kwargs.get('destination', None)
self.auto_start = kwargs.get('auto_start', True)
self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/connection_monitor_parameters.py
|
Python
|
mit
| 2,077 | 0.001444 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
integree/hello-world
|
manage.py
|
Python
|
mit
| 648 | 0 |
import numpy as np
import pandas as pd
import pickle
# Return True or False based on whether a course fulfills a General Education Requirement
def lookupGenEd(cNum, college):
fileName = "data/Dietrich Gen Eds.csv"
picklepath = "data\\dietrich_gen_eds.p"
try:
with open(picklepath,'rb') as file:
gen_eds = pickle.load(file)
except:
df = pd.read_csv(fileName,names=['Dept','Num','Title','1','2'])
gen_eds = set(df['Dept'].values)
with open(picklepath,'wb') as file:
pickle.dump(gen_eds,file)
return cNum in gen_eds
'''
genEdubility = lookupGenEd(73100, "dietrich")
print("73100")
print('Is Gen Ed?:', genEdubility)
print()
genEdubility = lookupGenEd(70100, "tepper")
print("70100")
print('Is Gen Ed?:', genEdubility)
print()
genEdubility = lookupGenEd(15322, "scs")
print("15322")
print('Is Gen Ed?:', genEdubility)
print()
'''
|
calvinhklui/Schedulize
|
GenEdLookup.py
|
Python
|
mit
| 931 | 0.015038 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 21:16:09 2017
@author: immersinn
"""
import regex as re
def billsummaryaref_matcher(tag):
return tag.name =='a' and hasattr(tag, 'text') and tag.text == 'View Available Bill Summaries'
def extract_links(soup):
"""Extract Bill Text Links from Bill Page"""
billtext_links = []
target_a = soup.find_all(billsummaryaref_matcher)
if len(target_a) == 1:
target_a = target_a[0]
content_table = target_a.parent.parent.parent
for row in content_table.find_all('tr')[2:]:
row_info = {}
arefs = row.find_all('td')[0].find_all('a')
for a in arefs:
if a.text == 'HTML':
row_info['html'] = a['href']
else:
row_info['label'] = a.text.encode('utf8').replace(b'\xc2\xa0', b' ').decode('utf8')
row_info['pdf'] = a['href']
billtext_links.append(row_info)
return billtext_links
def extract_meta(soup):
"""Extract the(select) a bout the Bill Info Page"""
chamber_re = re.compile(r"(?:(?<=Chamber=))(H|S)")
userid_re = re.compile(r"(?:(?<=UserID=))([0-9]+)")
meta = {}
for kw in ["Sponsors","Counties", "Statutes", "Keywords"]:
tr = soup.find('th', text=kw + ':').parent
content = tr.find('td')
if kw=='Sponsors':
spons = content.find_all('a')
spons_list = []
for a in spons:
hr = a['href']
spons_list.append({'userid' : userid_re.findall(hr)[0],
'chamber' : chamber_re.findall(hr)[0]})
meta[kw] = spons_list
elif kw in ['Counties', 'Keywords', 'Statutes']:
meta[kw] = content.text.split(', ')
else:
meta[kw] = content.text
if kw == 'Counties' and \
meta[kw][0].lower().strip() == 'no counties specifically cited':
meta[kw] = None
if kw == 'Statutes' and \
meta[kw][0].lower().strip() == 'no affected general statutes':
meta[kw] = None
return meta
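# Illustrative usage sketch: both helpers expect a BeautifulSoup tree of an
# NCGA bill page; bs4 and requests are assumed to be installed and the URL is
# only a placeholder.
#
#     import requests
#     from bs4 import BeautifulSoup
#
#     html = requests.get('https://www.ncleg.gov/BillLookUp/2017/H1').text
#     soup = BeautifulSoup(html, 'html.parser')
#     links = extract_links(soup)   # [{'label': ..., 'html': ..., 'pdf': ...}, ...]
#     meta = extract_meta(soup)     # {'Sponsors': [...], 'Counties': ..., ...}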
|
immersinn/ncga
|
ncga/extract_billpage_content.py
|
Python
|
mit
| 2,219 | 0.00721 |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import inspect
import os
import sys
import traceback
from time import sleep
scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
maindir = os.path.abspath(os.path.join(scriptdir, '../../'))
sys.path.append(maindir)
transitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))
sys.path.append(transitionsdir)
from oscrypto import *
from encryptstates import *
from Common import *
from CommandExecutor import *
from DiskUtil import *
from transitions import *
class Ubuntu1604EncryptionStateMachine(OSEncryptionStateMachine):
states = [
State(name='uninitialized'),
State(name='prereq', on_enter='on_enter_state'),
State(name='stripdown', on_enter='on_enter_state'),
State(name='unmount_oldroot', on_enter='on_enter_state'),
State(name='split_root_partition', on_enter='on_enter_state'),
State(name='encrypt_block_device', on_enter='on_enter_state'),
State(name='patch_boot_system', on_enter='on_enter_state'),
State(name='completed'),
]
transitions = [
{
'trigger': 'skip_encryption',
'source': 'uninitialized',
'dest': 'completed'
},
{
'trigger': 'enter_prereq',
'source': 'uninitialized',
'dest': 'prereq'
},
{
'trigger': 'enter_stripdown',
'source': 'prereq',
'dest': 'stripdown',
'before': 'on_enter_state',
'conditions': 'should_exit_previous_state'
},
{
'trigger': 'enter_unmount_oldroot',
'source': 'stripdown',
'dest': 'unmount_oldroot',
'before': 'on_enter_state',
'conditions': 'should_exit_previous_state'
},
{
'trigger': 'retry_unmount_oldroot',
'source': 'unmount_oldroot',
'dest': 'unmount_oldroot',
'before': 'on_enter_state'
},
{
'trigger': 'enter_split_root_partition',
'source': 'unmount_oldroot',
'dest': 'split_root_partition',
'before': 'on_enter_state',
'conditions': 'should_exit_previous_state'
},
{
'trigger': 'enter_encrypt_block_device',
'source': 'split_root_partition',
'dest': 'encrypt_block_device',
'before': 'on_enter_state',
'conditions': 'should_exit_previous_state'
},
{
'trigger': 'enter_patch_boot_system',
'source': 'encrypt_block_device',
'dest': 'patch_boot_system',
'before': 'on_enter_state',
'conditions': 'should_exit_previous_state'
},
{
'trigger': 'stop_machine',
'source': 'patch_boot_system',
'dest': 'completed',
'conditions': 'should_exit_previous_state'
},
]
def on_enter_state(self):
super(Ubuntu1604EncryptionStateMachine, self).on_enter_state()
def should_exit_previous_state(self):
# when this is called, self.state is still the "source" state in the transition
return super(Ubuntu1604EncryptionStateMachine, self).should_exit_previous_state()
def __init__(self, hutil, distro_patcher, logger, encryption_environment):
super(Ubuntu1604EncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)
self.state_objs = {
'prereq': PrereqState(self.context),
'stripdown': StripdownState(self.context),
'unmount_oldroot': UnmountOldrootState(self.context),
'split_root_partition': SplitRootPartitionState(self.context),
'encrypt_block_device': EncryptBlockDeviceState(self.context),
'patch_boot_system': PatchBootSystemState(self.context),
}
self.state_machine = Machine(model=self,
states=Ubuntu1604EncryptionStateMachine.states,
transitions=Ubuntu1604EncryptionStateMachine.transitions,
initial='uninitialized')
def start_encryption(self):
proc_comm = ProcessCommunicator()
self.command_executor.Execute(command_to_execute="mount",
raise_exception_on_failure=True,
communicator=proc_comm)
if '/dev/mapper/osencrypt' in proc_comm.stdout:
self.logger.log("OS volume is already encrypted")
self.skip_encryption()
self.log_machine_state()
return
self.log_machine_state()
self.enter_prereq()
self.log_machine_state()
self.enter_stripdown()
self.log_machine_state()
oldroot_unmounted_successfully = False
attempt = 1
while not oldroot_unmounted_successfully:
self.logger.log("Attempt #{0} to unmount /oldroot".format(attempt))
try:
if attempt == 1:
self.enter_unmount_oldroot()
elif attempt > 10:
raise Exception("Could not unmount /oldroot in 10 attempts")
else:
self.retry_unmount_oldroot()
self.log_machine_state()
except Exception as e:
message = "Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}".format(attempt,
e,
traceback.format_exc())
self.logger.log(msg=message)
self.hutil.do_status_report(operation='EnableEncryptionOSVolume',
status=CommonVariables.extension_error_status,
status_code=str(CommonVariables.unmount_oldroot_error),
message=message)
sleep(10)
if attempt > 10:
raise Exception(message)
else:
oldroot_unmounted_successfully = True
finally:
attempt += 1
self.enter_split_root_partition()
self.log_machine_state()
self.enter_encrypt_block_device()
self.log_machine_state()
self.enter_patch_boot_system()
self.log_machine_state()
self.stop_machine()
self.log_machine_state()
self._reboot()
|
Azure/azure-linux-extensions
|
VMEncryption/main/oscrypto/ubuntu_1604/Ubuntu1604EncryptionStateMachine.py
|
Python
|
apache-2.0
| 7,381 | 0.003252 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_ports
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure Cumulus Switch port attributes (ports.conf)
deprecated:
removed_in: "2.5"
why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
alternative: Use M(nclu) instead.
description:
- Set the initial port attributes defined in the Cumulus Linux ports.conf
file. This module does not do any error checking at the moment. Be careful
to not include ports that do not exist on the switch. Carefully read the
original ports.conf file for any exceptions or limitations.
For more details go the Configure Switch Port Attribute Documentation at
U(http://docs.cumulusnetworks.com).
options:
speed_10g:
description:
- List of ports to initially run at 10G.
speed_40g:
description:
- List of ports to initially run at 40G.
speed_4_by_10g:
description:
- List of 40G ports that will be unganged to run as 4 10G ports.
speed_40g_div_4:
description:
- List of 10G ports that will be ganged to form a 40G port.
'''
EXAMPLES = '''
# Use cl_ports module to manage the switch attributes defined in the
# ports.conf file on Cumulus Linux
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1
- swp32
speed_40g:
- swp2-31
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1-3
- swp6
speed_40g:
- swp4-5
- swp7-32
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
removed_module()
|
hryamzik/ansible
|
lib/ansible/modules/network/cumulus/_cl_ports.py
|
Python
|
gpl-3.0
| 2,580 | 0.00155 |
import numpy as np
def gauss(win, sigma):
x = np.arange(0, win, 1, float)
y = x[:,np.newaxis]
x0 = y0 = win // 2
# 2-D Gaussian: the exponent must be negative and divided by 2*sigma**2
g = 1/(2*np.pi*sigma**2)*np.exp(-((x-x0)**2+(y-y0)**2)/(2*sigma**2))
return g
def gaussx(win, sigma):
x = np.arange(0, win, 1, float)
y = x[:,np.newaxis]
x0 = y0 = win // 2
# derivative of the 2-D Gaussian with respect to x
gx = -(x-x0)/(2*np.pi*sigma**4)*np.exp(-((x-x0)**2+(y-y0)**2)/(2*sigma**2))
return gx
def gaussy(win, sigma):
x = np.arange(0, win, 1, float)
y = x[:,np.newaxis]
x0 = y0 = win // 2
# derivative of the 2-D Gaussian with respect to y
gy = -(y-y0)/(2*np.pi*sigma**4)*np.exp(-((x-x0)**2+(y-y0)**2)/(2*sigma**2))
return gy
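# Illustrative usage sketch: build the smoothing kernel and the two derivative
# kernels defined above. Normalising the smoothing kernel to sum to 1 is a
# common extra step, shown here only as an illustration.
if __name__ == "__main__":
    win, sigma = 7, 1.5
    g = gauss(win, sigma)
    g = g / g.sum()                      # normalise so filtering preserves mean intensity
    gx, gy = gaussx(win, sigma), gaussy(win, sigma)
    print("kernel shape %s, sum %.6f" % (str(g.shape), g.sum()))
    print("derivative kernels sum to ~0: %s" % (abs(gx.sum()) < 1e-9 and abs(gy.sum()) < 1e-9))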
|
pranka02/image_processing_py
|
gaussian.py
|
Python
|
mit
| 652 | 0.019939 |
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from barf.core.reil import ReilParser
from barf.core.smt.smtsymbol import BitVec
from barf.core.smt.smtsymbol import Bool
from barf.core.smt.smtsolver import Z3Solver as SmtSolver
# from barf.core.smt.smtsolver import CVC4Solver as SmtSolver
class SmtSolverBitVecTests(unittest.TestCase):
def setUp(self):
self._address_size = 32
self._parser = ReilParser()
self._solver = SmtSolver()
# Arithmetic operations.
def test_add(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x + y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue(x_val + y_val == z_val)
def test_sub(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x - y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue((x_val - y_val) & 0xffffffff == z_val)
def test_mul(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x * y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue((x_val * y_val) & 0xffffffff == z_val)
def test_div(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x / y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue(x_val / y_val == z_val)
def test_mod(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x % y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue(x_val % y_val == z_val)
def test_neg(self):
x = BitVec(32, "x")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("z", z)
self._solver.add(-x == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
z_val = self._solver.get_value(z)
self.assertTrue(-x_val & 0xffffffff == z_val)
# Bitwise operations.
def test_and(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x & y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue(x_val & y_val == z_val)
def test_xor(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x ^ y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue(x_val ^ y_val == z_val)
def test_or(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x | y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue(x_val | y_val == z_val)
def test_lshift(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x << y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue((x_val << y_val) & 0xffffffff == z_val)
def test_rshift(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(x >> y == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self._solver.add(x != y)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue(x_val >> y_val == z_val)
def test_invert(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.declare_fun("z", z)
self._solver.add(~x == z)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
z_val = self._solver.get_value(z)
self.assertTrue(~x_val & 0xffffffff == z_val)
# Comparison operators (signed)
def test_lt(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.add(x < y)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
self.assertTrue(x_val < y_val)
def test_le(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.add(x <= y)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
self.assertTrue(x_val <= y_val)
def test_eq(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.add(x == y)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
self.assertTrue(x_val == y_val)
def test_neq(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.add(x != y)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
self.assertTrue(x_val != y_val)
def test_gt(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.add(x > y)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
self.assertTrue(x_val > y_val)
def test_ge(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
self._solver.declare_fun("x", x)
self._solver.declare_fun("y", y)
self._solver.add(x >= y)
# Add constraints to avoid trivial solutions.
self._solver.add(x > 1)
self._solver.add(y > 1)
self.assertEqual(self._solver.check(), "sat")
x_val = self._solver.get_value(x)
y_val = self._solver.get_value(y)
self.assertTrue(x_val >= y_val)
# Comparison operators (unsigned)
def test_ult(self):
# TODO Implement.
pass
def test_ule(self):
# TODO Implement.
pass
def test_ugt(self):
# TODO Implement.
pass
def test_uge(self):
# TODO Implement.
pass
# Arithmetic operators (unsigned)
def test_udiv(self):
# TODO Implement.
pass
def test_urem(self):
# TODO Implement.
pass
def main():
unittest.main()
if __name__ == '__main__':
main()
|
cnheitman/barf-project
|
tests/core/smt/test_smtsolver.py
|
Python
|
bsd-2-clause
| 13,055 | 0.000153 |
#!/usr/bin/env python
###############################################################################
# $Id: sgi.py 31335 2015-11-04 00:17:39Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: PNM (Portable Anymap) Testing.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Read existing simple 1 band SGI file.
def sgi_1():
tst = gdaltest.GDALTest( 'SGI', 'byte.sgi', 1, 4672 )
return tst.testOpen()
###############################################################################
# Write Test grayscale
def sgi_2():
tst = gdaltest.GDALTest( 'SGI', 'byte.tif', 1, 4672 )
return tst.testCreate()
###############################################################################
# Write Test rgb
def sgi_3():
tst = gdaltest.GDALTest( 'SGI', 'rgbsmall.tif', 2, 21053 )
return tst.testCreate()
gdaltest_list = [
sgi_1,
sgi_2,
sgi_3,
]
if __name__ == '__main__':
gdaltest.setup_run( 'SGI' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
nextgis-extra/tests
|
lib_gdal/gdrivers/sgi.py
|
Python
|
gpl-2.0
| 2,443 | 0.006959 |
import json
import os
import avasdk
from zipfile import ZipFile, BadZipFile
from avasdk.plugins.manifest import validate_manifest
from avasdk.plugins.hasher import hash_plugin
from django import forms
from django.core.validators import ValidationError
from .validators import ZipArchiveValidator
class PluginArchiveField(forms.FileField):
default_validators = [ZipArchiveValidator()]
label = 'Plugin .zip'
def get_prefix(self, archive):
files = archive.namelist()
return os.path.commonpath(files)
def get_manifest(self, archive):
try:
with ZipFile(archive.temporary_file_path()) as plugin:
prefix = self.get_prefix(plugin)
prefix = prefix + '/' if len(prefix) else ''
with plugin.open('{}manifest.json'.format(prefix)) as myfile:
manifest = json.loads(myfile.read())
validate_manifest(manifest)
return manifest
except BadZipFile:
raise ValidationError('Bad .zip format')
except FileNotFoundError:
raise ValidationError('Error with upload, please try again')
except KeyError:
raise ValidationError('No manifest.json found in archive')
except json.JSONDecodeError:
raise ValidationError('Error with manifest.json, bad Json Format')
except avasdk.exceptions.ValidationError as e:
raise ValidationError('Error in manifest.json ({})'.format(e))
def get_readme(self, archive):
try:
with ZipFile(archive.temporary_file_path()) as plugin:
prefix = self.get_prefix(plugin)
prefix = prefix + '/' if len(prefix) else ''
with plugin.open('{}README.md'.format(prefix)) as myfile:
readme = myfile.read()
return readme
except FileNotFoundError:
raise ValidationError('Error with upload, please try again')
except KeyError:
return None
def clean(self, data, initial=None):
f = super().clean(data, initial)
manifest = self.get_manifest(f)
readme = self.get_readme(f)
return {
'zipfile': f,
'manifest': manifest,
'readme': readme,
'checksum': hash_plugin(f.temporary_file_path()),
}
class UploadPluginForm(forms.Form):
archive = PluginArchiveField()
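# A minimal usage sketch, assuming a Django view that receives a multipart POST with a
# field named "archive" and an upload handler that spools to a temporary file (the field
# relies on temporary_file_path()). The view name and response payload are illustrative.
from django.http import HttpResponseBadRequest, JsonResponse

def upload_plugin_view(request):
    form = UploadPluginForm(request.POST, request.FILES)
    if not form.is_valid():
        return HttpResponseBadRequest(form.errors.as_json(), content_type='application/json')
    archive = form.cleaned_data['archive']
    # archive is the dict built in PluginArchiveField.clean():
    # {'zipfile': ..., 'manifest': ..., 'readme': ..., 'checksum': ...}
    return JsonResponse({'name': archive['manifest'].get('name'),
                         'checksum': archive['checksum']})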
|
ava-project/ava-website
|
website/apps/plugins/forms.py
|
Python
|
mit
| 2,440 | 0 |
#author: Tobias Andermann, tobias.andermann@bioenv.gu.se
import os
import sys
import re
import glob
import shutil
import argparse
from Bio import SeqIO
from .utils import CompletePath
# Get arguments
def get_args():
parser = argparse.ArgumentParser(
description="Set the maximum fraction of missing data that you want to allow in an alignment and drop all sequences above this threshold.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--alignment',
required=True,
action=CompletePath,
default=None,
help='The alignment in fasta format.'
)
parser.add_argument(
'--maximum_missing',
type=float,
default=0.8,
help='Define the maximal fraction of missing data that you want to allow. All sequences below this threshold will be exported into a new alignment.'
)
parser.add_argument(
'--output',
required=True,
action=CompletePath,
default=None,
help='The output directory where results will be saved.'
)
return parser.parse_args()
args = get_args()
# Set working directory
out_dir = args.output
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Get other input variables
alignment = args.alignment
max_mis = args.maximum_missing
def manage_homzygous_samples(fasta,threshold,output):
fasta_alignment = SeqIO.parse(open(fasta),'fasta')
with open('%s/cleaned_alignment_all_sequences_less_than_%f_missing_data.fasta' %(output,threshold), 'w') as outfile:
final_seqs = {}
for sample in fasta_alignment:
header = sample.description
sequence = sample.seq
chars = list(sequence)
bad_chars = []
for char in chars:
if char not in ['A','C','T','G','a','c','t','g']:
bad_chars.append(char)
sequence_length = float(len(chars))
count_bad_chars = float(len(bad_chars))
fraction = float(count_bad_chars/sequence_length)
if fraction <= threshold:
final_seqs.setdefault(header,[]).append(sequence)
else:
print("Dropped sequence for", header)
for seqname, seq in final_seqs.items():
sequence = str(seq[0])
outfile.write(">"+seqname+"\n")
outfile.write(sequence+"\n")
manage_homzygous_samples(alignment,max_mis,out_dir)
|
AntonelliLab/seqcap_processor
|
secapr/remove_uninformative_seqs.py
|
Python
|
mit
| 2,167 | 0.032764 |
# Copyright (C)2016 D. Plaindoux.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2, or (at your option) any
# later version.
import unittest
import path_parse_test
import path_match_test
import provider_test
import verb_test
import mime_test
import inspection_test
import wsgi_test
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(path_parse_test.suite())
suite.addTest(path_match_test.suite())
suite.addTest(verb_test.suite())
suite.addTest(mime_test.suite())
suite.addTest(provider_test.suite())
suite.addTest(inspection_test.suite())
suite.addTest(wsgi_test.suite())
unittest.TextTestRunner(verbosity=2).run(suite)
|
d-plaindoux/fluent-rest
|
tests/runall.py
|
Python
|
lgpl-2.1
| 828 | 0 |
from django.apps import AppConfig
class QsiteConfig(AppConfig):
name = 'qsite'
verbose_name = '站点管理'  # "Site management"
|
gzqichang/wa
|
qsite/qsite/apps.py
|
Python
|
mit
| 119 | 0.009009 |
import json
from httpretty import HTTPretty
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test
class YahooOAuth1Test(OAuth1Test):
backend_path = 'social.backends.yahoo.YahooOAuth'
user_data_url = 'https://social.yahooapis.com/v1/user/a-guid/profile?' \
'format=json'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
guid_body = json.dumps({
'guid': {
'uri': 'https://social.yahooapis.com/v1/me/guid',
'value': 'a-guid'
}
})
user_data_body = json.dumps({
'profile': {
'bdRestricted': True,
'memberSince': '2007-12-11T14:40:30Z',
'image': {
'width': 192,
'imageUrl': 'http://l.yimg.com/dh/ap/social/profile/'
'profile_b192.png',
'size': '192x192',
'height': 192
},
'created': '2013-03-18T04:15:08Z',
'uri': 'https://social.yahooapis.com/v1/user/a-guid/profile',
'isConnected': False,
'profileUrl': 'http://profile.yahoo.com/a-guid',
'guid': 'a-guid',
'nickname': 'foobar'
}
})
def test_login(self):
HTTPretty.register_uri(
HTTPretty.GET,
'https://social.yahooapis.com/v1/me/guid?format=json',
status=200,
body=self.guid_body
)
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
GDGLima/contentbox
|
third_party/social/tests/backends/test_yahoo.py
|
Python
|
apache-2.0
| 1,798 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
import unittest2
from robottelo.ui.location import Location
from robottelo.ui.locators import common_locators
from robottelo.ui.locators import locators
if six.PY2:
import mock
else:
from unittest import mock
class LocationTestCase(unittest2.TestCase):
def test_creation_without_parent_and_without_unassigned_host(self):
location = Location(None)
location.click = mock.Mock()
location.assign_value = mock.Mock()
location.wait_until_element = mock.Mock(return_value=None)
location._configure_location = mock.Mock()
location.select = mock.Mock()
location.create('foo')
click_calls = [
mock.call(locators['location.new']),
mock.call(common_locators['submit']),
mock.call(common_locators['submit'])
]
self.assertEqual(3, location.click.call_count)
location.click.assert_has_calls(click_calls, any_order=False)
location.assign_value.assert_called_once_with(
locators['location.name'], 'foo')
# not called if parent is None
location.select.assert_not_called()
location._configure_location.assert_called_once_with(
capsules=None, all_capsules=None, domains=None, envs=None,
hostgroups=None, medias=None, organizations=None, ptables=None,
resources=None, select=True, subnets=None, templates=None,
users=None, params=None
)
def test_creation_with_parent_and_unassigned_host(self):
location = Location(None)
location.click = mock.Mock()
location.assign_value = mock.Mock()
location.wait_until_element = mock.Mock()
location._configure_location = mock.Mock()
location.select = mock.Mock()
configure_arguments = {
arg: arg for arg in
'capsules all_capsules domains hostgroups medias organizations '
'envs ptables resources select subnets templates users params '
'select'.split()
}
location.create('foo', 'parent', **configure_arguments)
click_calls = [
mock.call(locators['location.new']),
mock.call(common_locators['submit']),
mock.call(locators['location.proceed_to_edit']),
mock.call(common_locators['submit'])
]
self.assertEqual(4, location.click.call_count)
location.click.assert_has_calls(click_calls, any_order=False)
location.assign_value.assert_called_once_with(
locators['location.name'], 'foo')
# called only if parent is not None
location.select.assert_called_once_with(
locators['location.parent'], 'parent'
)
location._configure_location.assert_called_once_with(
**configure_arguments)
|
sghai/robottelo
|
tests/robottelo/ui/test_location.py
|
Python
|
gpl-3.0
| 2,887 | 0 |
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from glob import glob
import json
from pathlib import Path
import sys
"""Convert existing intent tests to behave tests."""
TEMPLATE = """
Scenario: {scenario}
Given an english speaking user
When the user says "{utterance}"
Then "{skill}" should reply with dialog from "{dialog_file}.dialog"
"""
def json_files(path):
"""Generator function returning paths of all json files in a folder."""
for json_file in sorted(glob(str(Path(path, '*.json')))):
yield Path(json_file)
def generate_feature(skill, skill_path):
"""Generate a feature file provided a skill name and a path to the skill.
"""
test_path = Path(skill_path, 'test', 'intent')
case = []
if test_path.exists() and test_path.is_dir():
for json_file in json_files(test_path):
with open(str(json_file)) as test_file:
test = json.load(test_file)
if 'utterance' in test and 'expected_dialog' in test:
utt = test['utterance']
dialog = test['expected_dialog']
# Simple handling of multiple accepted dialogfiles
if isinstance(dialog, list):
dialog = dialog[0]
case.append((json_file.name, utt, dialog))
output = ''
if case:
output += 'Feature: {}\n'.format(skill)
for c in case:
output += TEMPLATE.format(skill=skill, scenario=c[0],
utterance=c[1], dialog_file=c[2])
return output
if __name__ == '__main__':
print(generate_feature(*sys.argv[1:]))
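# For illustration (the skill name and intent values below are assumed, not taken from a
# real skill): an intent test file test/intent/sample.json containing
#     {"utterance": "what time is it", "expected_dialog": "time.current"}
# run through generate_feature("mycroft-date-time", <skill_path>) would yield:
#     Feature: mycroft-date-time
#       Scenario: sample.json
#         Given an english speaking user
#         When the user says "what time is it"
#         Then "mycroft-date-time" should reply with dialog from "time.current.dialog"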
|
forslund/mycroft-core
|
test/integrationtests/voight_kampff/generate_feature.py
|
Python
|
apache-2.0
| 2,177 | 0 |
from django import template
import datetime
register = template.Library()
# https://stackoverflow.com/a/8907269/2226755
def strfdelta(tdelta, fmt):
d = {"days": tdelta.days}
d["hours"], rem = divmod(tdelta.seconds, 3600)
d["minutes"], d["seconds"] = divmod(rem, 60)
return fmt.format(**d)
# TODO add unit test
@register.filter("seconds_to_duration")
def seconds_to_duration(value):
"""
Display a human-readable reading-time (or any other duration)
from a duration in seconds.
"""
if value <= 0:
return ""
duration = datetime.timedelta(seconds=value)
if datetime.timedelta(hours=1) > duration:
return strfdelta(duration, "{minutes}m{seconds}s")
else:
return strfdelta(duration, "{hours}h{minutes}m{seconds}s")
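# A minimal test sketch for the TODO above, assuming pytest-style assertions are
# acceptable; the sample durations are illustrative values only.
def test_seconds_to_duration():
    assert seconds_to_duration(0) == ""
    assert seconds_to_duration(90) == "1m30s"      # under an hour: minutes and seconds
    assert seconds_to_duration(3700) == "1h1m40s"  # an hour or more: hours included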
|
ChantyTaguan/zds-site
|
zds/utils/templatetags/seconds_to_duration.py
|
Python
|
gpl-3.0
| 787 | 0 |
import olymap.skill
def test_learn_time():
tests = (
({}, None),
({'SK': {'tl': ['14']}}, '14'),
({'SK': {'an': ['0']}}, None),
({'IT': {'tl': ['1']}}, None),
({'SK': {'an': ['1']}}, None),
)
for box, answer in tests:
assert olymap.skill.get_learn_time(box) == answer
def test_get_required_skill():
tests = (
({}, None),
({'SK': {'rs': ['632']}}, {'id': '632', 'oid': '632', 'name': 'Determine inventory of character'}),
({'SK': {'rs': ['630']}}, {'id': '630', 'oid': '630', 'name': 'Stealth'}),
({'SK': {'re': ['632']}}, None),
({'SL': {'rs': ['632']}}, None),
)
data = {'630': {'firstline': ['630 skill 0'], 'na': ['Stealth'], 'SK': {'tl': ['28'], 'of': ['631', '632', '633', '634', '635'], 're': ['636', '637', '638', '639']}},
'632': {'firstline': ['632 skill 0'], 'na': ['Determine inventory of character'], 'SK': {'tl': ['14'], 'rs': ['630']}}}
for box, answer in tests:
assert olymap.skill.get_required_skill(box, data) == answer
|
olympiag3/olypy
|
tests/unit/test_olymap_skill.py
|
Python
|
apache-2.0
| 1,098 | 0.005464 |
from Model import *
import MemoryDecay
# Note we cannot import TwoConcepts here because that ends up modifying the grammar, ruining it for example loaders
|
joshrule/LOTlib
|
LOTlib/Examples/RationalRules/__init__.py
|
Python
|
gpl-3.0
| 157 | 0.006369 |
# -*- coding: utf-8 -*-
import sys
import os.path
import subprocess
import json
import string
import tempfile
import shutil
import threading
import exceptions
import errno
from collections import defaultdict
from xml.etree import ElementTree
import nixops.statefile
import nixops.backends
import nixops.logger
import nixops.parallel
from nixops.nix_expr import RawValue, Function, Call, nixmerge, py2nix
import re
from datetime import datetime, timedelta
import getpass
import traceback
import glob
import fcntl
import itertools
import platform
from nixops.util import ansi_success
import inspect
import time
class NixEvalError(Exception):
pass
class UnknownBackend(Exception):
pass
debug = False
class Deployment(object):
"""NixOps top-level deployment manager."""
default_description = "Unnamed NixOps network"
name = nixops.util.attr_property("name", None)
nix_exprs = nixops.util.attr_property("nixExprs", [], 'json')
nix_path = nixops.util.attr_property("nixPath", [], 'json')
args = nixops.util.attr_property("args", {}, 'json')
description = nixops.util.attr_property("description", default_description)
configs_path = nixops.util.attr_property("configsPath", None)
rollback_enabled = nixops.util.attr_property("rollbackEnabled", False)
datadog_notify = nixops.util.attr_property("datadogNotify", False, bool)
datadog_event_info = nixops.util.attr_property("datadogEventInfo", "")
datadog_tags = nixops.util.attr_property("datadogTags", [], 'json')
# internal variable to mark if network attribute of network has been evaluated (separately)
network_attr_eval = False
def __init__(self, statefile, uuid, log_file=sys.stderr):
self._statefile = statefile
self._db = statefile._db
self.uuid = uuid
self._last_log_prefix = None
self.extra_nix_path = []
self.extra_nix_flags = []
self.extra_nix_eval_flags = []
self.nixos_version_suffix = None
self._tempdir = None
self.logger = nixops.logger.Logger(log_file)
self._lock_file_path = None
self.expr_path = os.path.realpath(os.path.dirname(__file__) + "/../../../../share/nix/nixops")
if not os.path.exists(self.expr_path):
self.expr_path = os.path.realpath(os.path.dirname(__file__) + "/../../../../../share/nix/nixops")
if not os.path.exists(self.expr_path):
self.expr_path = os.path.dirname(__file__) + "/../nix"
self.resources = {}
with self._db:
c = self._db.cursor()
c.execute("select id, name, type from Resources where deployment = ?", (self.uuid,))
for (id, name, type) in c.fetchall():
r = _create_state(self, type, name, id)
self.resources[name] = r
self.logger.update_log_prefixes()
self.definitions = None
@property
def tempdir(self):
if not self._tempdir:
self._tempdir = nixops.util.SelfDeletingDir(tempfile.mkdtemp(prefix="nixops-tmp"))
return self._tempdir
@property
def machines(self):
return {n: r for n, r in self.resources.items() if is_machine(r)}
@property
def active(self): # FIXME: rename to "active_machines"
return {n: r for n, r in self.resources.items() if is_machine(r) and not r.obsolete}
@property
def active_resources(self):
return {n: r for n, r in self.resources.items() if not r.obsolete}
def get_typed_resource(self, name, type):
res = self.active_resources.get(name, None)
if not res:
raise Exception("resource ‘{0}’ does not exist".format(name))
if res.get_type() != type:
raise Exception("resource ‘{0}’ is not of type ‘{1}’".format(name, type))
return res
def get_machine(self, name):
res = self.active_resources.get(name, None)
if not res:
raise Exception("machine ‘{0}’ does not exist".format(name))
if not is_machine(res):
raise Exception("resource ‘{0}’ is not a machine".format(name))
return res
def _set_attrs(self, attrs):
"""Update deployment attributes in the state file."""
with self._db:
c = self._db.cursor()
for n, v in attrs.iteritems():
if v == None:
c.execute("delete from DeploymentAttrs where deployment = ? and name = ?", (self.uuid, n))
else:
c.execute("insert or replace into DeploymentAttrs(deployment, name, value) values (?, ?, ?)",
(self.uuid, n, v))
def _set_attr(self, name, value):
"""Update one deployment attribute in the state file."""
self._set_attrs({name: value})
def _del_attr(self, name):
"""Delete a deployment attribute from the state file."""
with self._db:
self._db.execute("delete from DeploymentAttrs where deployment = ? and name = ?", (self.uuid, name))
def _get_attr(self, name, default=nixops.util.undefined):
"""Get a deployment attribute from the state file."""
with self._db:
c = self._db.cursor()
c.execute("select value from DeploymentAttrs where deployment = ? and name = ?", (self.uuid, name))
row = c.fetchone()
if row != None: return row[0]
return nixops.util.undefined
def _create_resource(self, name, type):
c = self._db.cursor()
c.execute("select 1 from Resources where deployment = ? and name = ?", (self.uuid, name))
if len(c.fetchall()) != 0:
raise Exception("resource already exists in database!")
c.execute("insert into Resources(deployment, name, type) values (?, ?, ?)",
(self.uuid, name, type))
id = c.lastrowid
r = _create_state(self, type, name, id)
self.resources[name] = r
return r
def export(self):
with self._db:
c = self._db.cursor()
c.execute("select name, value from DeploymentAttrs where deployment = ?", (self.uuid,))
rows = c.fetchall()
res = {row[0]: row[1] for row in rows}
res['resources'] = {r.name: r.export() for r in self.resources.itervalues()}
return res
def import_(self, attrs):
with self._db:
for k, v in attrs.iteritems():
if k == 'resources': continue
self._set_attr(k, v)
for k, v in attrs['resources'].iteritems():
if 'type' not in v: raise Exception("imported resource lacks a type")
r = self._create_resource(k, v['type'])
r.import_(v)
def clone(self):
with self._db:
new = self._statefile.create_deployment()
self._db.execute("insert into DeploymentAttrs (deployment, name, value) " +
"select ?, name, value from DeploymentAttrs where deployment = ?",
(new.uuid, self.uuid))
new.configs_path = None
return new
def _get_deployment_lock(self):
if self._lock_file_path is None:
lock_dir = os.environ.get("HOME", "") + "/.nixops/locks"
if not os.path.exists(lock_dir): os.makedirs(lock_dir, 0700)
self._lock_file_path = lock_dir + "/" + self.uuid
class DeploymentLock(object):
def __init__(self, depl):
self._lock_file_path = depl._lock_file_path
self._logger = depl.logger
self._lock_file = None
def __enter__(self):
self._lock_file = open(self._lock_file_path, "w")
fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
try:
fcntl.flock(self._lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
self._logger.log(
"waiting for exclusive deployment lock..."
)
fcntl.flock(self._lock_file, fcntl.LOCK_EX)
def __exit__(self, exception_type, exception_value, exception_traceback):
self._lock_file.close()
return DeploymentLock(self)
def delete_resource(self, m):
del self.resources[m.name]
with self._db:
self._db.execute("delete from Resources where deployment = ? and id = ?", (self.uuid, m.id))
def delete(self, force=False):
"""Delete this deployment from the state file."""
with self._db:
if not force and len(self.resources) > 0:
raise Exception("cannot delete this deployment because it still has resources")
# Delete the profile, if any.
profile = self.get_profile()
assert profile
for p in glob.glob(profile + "*"):
if os.path.islink(p): os.remove(p)
# Delete the deployment from the database.
self._db.execute("delete from Deployments where uuid = ?", (self.uuid,))
def _nix_path_flags(self):
flags = list(itertools.chain(*[["-I", x] for x in (self.extra_nix_path + self.nix_path)])) + self.extra_nix_flags
flags.extend(["-I", "nixops=" + self.expr_path])
return flags
def _eval_flags(self, exprs):
flags = self._nix_path_flags()
args = {key: RawValue(val) for key, val in self.args.iteritems()}
exprs_ = [RawValue(x) if x[0] == '<' else x for x in exprs]
flags.extend(
["--arg", "networkExprs", py2nix(exprs_, inline=True),
"--arg", "args", py2nix(args, inline=True),
"--argstr", "uuid", self.uuid,
"--argstr", "deploymentName", self.name if self.name else "",
"<nixops/eval-machine-info.nix>"])
return flags
def set_arg(self, name, value):
"""Set a persistent argument to the deployment specification."""
assert isinstance(name, basestring)
assert isinstance(value, basestring)
args = self.args
args[name] = value
self.args = args
def set_argstr(self, name, value):
"""Set a persistent argument to the deployment specification."""
assert isinstance(value, basestring)
self.set_arg(name, py2nix(value, inline=True))
def unset_arg(self, name):
"""Unset a persistent argument to the deployment specification."""
assert isinstance(name, str)
args = self.args
args.pop(name, None)
self.args = args
def evaluate_args(self):
"""Evaluate the NixOps network expression's arguments."""
try:
out = subprocess.check_output(
["nix-instantiate"]
+ self.extra_nix_eval_flags
+ self._eval_flags(self.nix_exprs) +
["--eval-only", "--json", "--strict",
"-A", "nixopsArguments"], stderr=self.logger.log_file)
if debug: print >> sys.stderr, "JSON output of nix-instantiate:\n" + xml
return json.loads(out)
except OSError as e:
raise Exception("unable to run ‘nix-instantiate’: {0}".format(e))
except subprocess.CalledProcessError:
raise NixEvalError
def evaluate_config(self, attr):
try:
# FIXME: use --json
xml = subprocess.check_output(
["nix-instantiate"]
+ self.extra_nix_eval_flags
+ self._eval_flags(self.nix_exprs) +
["--eval-only", "--xml", "--strict",
"--arg", "checkConfigurationOptions", "false",
"-A", attr], stderr=self.logger.log_file)
if debug: print >> sys.stderr, "XML output of nix-instantiate:\n" + xml
except OSError as e:
raise Exception("unable to run ‘nix-instantiate’: {0}".format(e))
except subprocess.CalledProcessError:
raise NixEvalError
tree = ElementTree.fromstring(xml)
# Convert the XML to a more Pythonic representation. This is
# in fact the same as what json.loads() on the output of
# "nix-instantiate --json" would yield.
config = nixops.util.xml_expr_to_python(tree.find("*"))
return (tree, config)
def evaluate_network(self):
if not self.network_attr_eval:
# Extract global deployment attributes.
(_, config) = self.evaluate_config("info.network")
self.description = config.get("description", self.default_description)
self.rollback_enabled = config.get("enableRollback", False)
self.datadog_notify = config.get("datadogNotify", False)
self.datadog_event_info = config.get("datadogEventInfo", "")
self.datadog_tags = config.get("datadogTags", [])
self.datadog_downtime = config.get("datadogDowntime", False)
self.datadog_downtime_seconds = config.get("datadogDowntimeSeconds", 3600)
self.network_attr_eval = True
def evaluate(self):
"""Evaluate the Nix expressions belonging to this deployment into a deployment specification."""
self.definitions = {}
self.evaluate_network()
(tree, config) = self.evaluate_config("info")
# Extract machine information.
for x in tree.findall("attrs/attr[@name='machines']/attrs/attr"):
name = x.get("name")
cfg = config["machines"][name]
defn = _create_definition(x, cfg, cfg["targetEnv"])
self.definitions[name] = defn
# Extract info about other kinds of resources.
for x in tree.findall("attrs/attr[@name='resources']/attrs/attr"):
res_type = x.get("name")
for y in x.findall("attrs/attr"):
name = y.get("name")
defn = _create_definition(y, config["resources"][res_type][name], res_type)
self.definitions[name] = defn
def evaluate_option_value(self, machine_name, option_name, xml=False, include_physical=False):
"""Evaluate a single option of a single machine in the deployment specification."""
exprs = self.nix_exprs
if include_physical:
phys_expr = self.tempdir + "/physical.nix"
with open(phys_expr, 'w') as f:
f.write(self.get_physical_spec())
exprs.append(phys_expr)
try:
return subprocess.check_output(
["nix-instantiate"]
+ self.extra_nix_eval_flags
+ self._eval_flags(exprs) +
["--eval-only", "--strict",
"--arg", "checkConfigurationOptions", "false",
"-A", "nodes.{0}.config.{1}".format(machine_name, option_name)]
+ (["--xml"] if xml else []),
stderr=self.logger.log_file)
except subprocess.CalledProcessError:
raise NixEvalError
def get_arguments(self):
try:
return self.evaluate_args()
except Exception as e:
raise Exception("Could not determine arguments to NixOps deployment.")
def get_physical_spec(self):
"""Compute the contents of the Nix expression specifying the computed physical deployment attributes"""
active_machines = self.active
active_resources = self.active_resources
attrs_per_resource = {m.name: [] for m in active_resources.itervalues()}
authorized_keys = {m.name: [] for m in active_machines.itervalues()}
kernel_modules = {m.name: set() for m in active_machines.itervalues()}
trusted_interfaces = {m.name: set() for m in active_machines.itervalues()}
# Hostnames should be accumulated like this:
#
# hosts[local_name][remote_ip] = [name1, name2, ...]
#
# This makes hosts deterministic and is more in accordance to the
# format in hosts(5), which is like this:
#
# ip_address canonical_hostname [aliases...]
#
# This is critical for example when using host names for access
# control, because the canonical_hostname is returned in reverse
# lookups.
hosts = defaultdict(lambda: defaultdict(list))
def index_to_private_ip(index):
n = 105 + index / 256
assert n <= 255
return "192.168.{0}.{1}".format(n, index % 256)
def do_machine(m):
defn = self.definitions[m.name]
attrs_list = attrs_per_resource[m.name]
# Emit configuration to realise encrypted peer-to-peer links.
for m2 in active_resources.itervalues():
ip = m.address_to(m2)
if ip:
hosts[m.name][ip] += [m2.name, m2.name + "-unencrypted"]
# Always use the encrypted/unencrypted suffixes for aliases rather
# than for the canonical name!
hosts[m.name]["127.0.0.1"].append(m.name + "-encrypted")
for m2_name in defn.encrypted_links_to:
if m2_name not in active_machines:
raise Exception("‘deployment.encryptedLinksTo’ in machine ‘{0}’ refers to an unknown machine ‘{1}’"
.format(m.name, m2_name))
m2 = active_machines[m2_name]
# Don't create two tunnels between a pair of machines.
if m.name in self.definitions[m2.name].encrypted_links_to and m.name >= m2.name:
continue
local_ipv4 = index_to_private_ip(m.index)
remote_ipv4 = index_to_private_ip(m2.index)
local_tunnel = 10000 + m2.index
remote_tunnel = 10000 + m.index
attrs_list.append({
('networking', 'p2pTunnels', 'ssh', m2.name): {
'target': '{0}-unencrypted'.format(m2.name),
'targetPort': m2.ssh_port,
'localTunnel': local_tunnel,
'remoteTunnel': remote_tunnel,
'localIPv4': local_ipv4,
'remoteIPv4': remote_ipv4,
'privateKey': '/root/.ssh/id_charon_vpn',
}
})
# FIXME: set up the authorized_key file such that ‘m’
# can do nothing more than create a tunnel.
authorized_keys[m2.name].append(m.public_vpn_key)
kernel_modules[m.name].add('tun')
kernel_modules[m2.name].add('tun')
hosts[m.name][remote_ipv4] += [m2.name, m2.name + "-encrypted"]
hosts[m2.name][local_ipv4] += [m.name, m.name + "-encrypted"]
trusted_interfaces[m.name].add('tun' + str(local_tunnel))
trusted_interfaces[m2.name].add('tun' + str(remote_tunnel))
private_ipv4 = m.private_ipv4
if private_ipv4:
attrs_list.append({
('networking', 'privateIPv4'): private_ipv4
})
public_ipv4 = m.public_ipv4
if public_ipv4:
attrs_list.append({
('networking', 'publicIPv4'): public_ipv4
})
public_vpn_key = m.public_vpn_key
if public_vpn_key:
attrs_list.append({
('networking', 'vpnPublicKey'): public_vpn_key
})
# Set system.stateVersion if the Nixpkgs version supports it.
if nixops.util.parse_nixos_version(defn.config["nixosRelease"]) >= ["15", "09"]:
attrs_list.append({
('system', 'stateVersion'): Call(RawValue("lib.mkDefault"), m.state_version or defn.config["nixosRelease"])
})
if self.nixos_version_suffix:
attrs_list.append({
('system', 'nixosVersionSuffix'): self.nixos_version_suffix
})
for m in active_machines.itervalues():
do_machine(m)
def emit_resource(r):
config = []
config.extend(attrs_per_resource[r.name])
if is_machine(r):
# Sort the hosts by its canonical host names.
sorted_hosts = sorted(hosts[r.name].iteritems(),
key=lambda item: item[1][0])
# Just to remember the format:
# ip_address canonical_hostname [aliases...]
extra_hosts = ["{0} {1}".format(ip, ' '.join(names))
for ip, names in sorted_hosts]
if authorized_keys[r.name]:
config.append({
('users', 'extraUsers', 'root'): {
('openssh', 'authorizedKeys', 'keys'): authorized_keys[r.name]
},
('services', 'openssh'): {
'extraConfig': "PermitTunnel yes\n"
},
})
config.append({
('boot', 'kernelModules'): list(kernel_modules[r.name]),
('networking', 'firewall'): {
'trustedInterfaces': list(trusted_interfaces[r.name])
},
('networking', 'extraHosts'): '\n'.join(extra_hosts) + "\n"
})
# Add SSH public host keys for all machines in network.
for m2 in active_machines.itervalues():
if hasattr(m2, 'public_host_key') and m2.public_host_key:
# Using references to files in same tempdir for now, until NixOS has support
# for adding the keys directly as string. This way at least it is compatible
# with older versions of NixOS as well.
# TODO: after reasonable amount of time replace with string option
config.append({
('services', 'openssh', 'knownHosts', m2.name): {
'hostNames': [m2.name + "-unencrypted",
m2.name + "-encrypted",
m2.name],
'publicKey': m2.public_host_key,
}
})
merged = reduce(nixmerge, config) if len(config) > 0 else {}
physical = r.get_physical_spec()
if len(merged) == 0 and len(physical) == 0:
return {}
else:
return r.prefix_definition({
r.name: Function("{ config, lib, pkgs, ... }", {
'config': merged,
'imports': [physical],
})
})
return py2nix(reduce(nixmerge, [
emit_resource(r) for r in active_resources.itervalues()
], {})) + "\n"
def get_profile(self):
profile_dir = "/nix/var/nix/profiles/per-user/" + getpass.getuser()
if os.path.exists(profile_dir + "/charon") and not os.path.exists(profile_dir + "/nixops"):
os.rename(profile_dir + "/charon", profile_dir + "/nixops")
return profile_dir + "/nixops/" + self.uuid
def create_profile(self):
profile = self.get_profile()
dir = os.path.dirname(profile)
if not os.path.exists(dir): os.makedirs(dir, 0755)
return profile
def build_configs(self, include, exclude, dry_run=False, repair=False):
"""Build the machine configurations in the Nix store."""
self.logger.log("building all machine configurations...")
# Set the NixOS version suffix, if we're building from Git.
# That way ‘nixos-version’ will show something useful on the
# target machines.
nixos_path = subprocess.check_output(
["nix-instantiate", "--find-file", "nixpkgs/nixos"] + self._nix_path_flags()).rstrip()
get_version_script = nixos_path + "/modules/installer/tools/get-version-suffix"
if os.path.exists(nixos_path + "/.git") and os.path.exists(get_version_script):
self.nixos_version_suffix = subprocess.check_output(["/bin/sh", get_version_script] + self._nix_path_flags()).rstrip()
phys_expr = self.tempdir + "/physical.nix"
p = self.get_physical_spec()
nixops.util.write_file(phys_expr, p)
if debug: print >> sys.stderr, "generated physical spec:\n" + p
selected = [m for m in self.active.itervalues() if should_do(m, include, exclude)]
names = map(lambda m: m.name, selected)
# If we're not running on Linux, then perform the build on the
# target machines. FIXME: Also enable this if we're on 32-bit
# and want to deploy to 64-bit.
if platform.system() != 'Linux' and os.environ.get('NIX_REMOTE') != 'daemon':
if os.environ.get('NIX_REMOTE_SYSTEMS') == None:
remote_machines = []
for m in sorted(selected, key=lambda m: m.index):
key_file = m.get_ssh_private_key_file()
if not key_file: raise Exception("do not know private SSH key for machine ‘{0}’".format(m.name))
# FIXME: Figure out the correct machine type of ‘m’ (it might not be x86_64-linux).
remote_machines.append("root@{0} {1} {2} 2 1\n".format(m.get_ssh_name(), 'i686-linux,x86_64-linux', key_file))
# Use only a single machine for now (issue #103).
break
remote_machines_file = "{0}/nix.machines".format(self.tempdir)
with open(remote_machines_file, "w") as f:
f.write("".join(remote_machines))
os.environ['NIX_REMOTE_SYSTEMS'] = remote_machines_file
else:
self.logger.log("using predefined remote systems file: {0}".format(os.environ['NIX_REMOTE_SYSTEMS']))
# FIXME: Use ‘--option use-build-hook true’ instead of setting
# $NIX_BUILD_HOOK, once Nix supports that.
os.environ['NIX_BUILD_HOOK'] = os.path.dirname(os.path.realpath(nixops.util.which("nix-build"))) + "/../libexec/nix/build-remote.pl"
load_dir = "{0}/current-load".format(self.tempdir)
if not os.path.exists(load_dir): os.makedirs(load_dir, 0700)
os.environ['NIX_CURRENT_LOAD'] = load_dir
try:
configs_path = subprocess.check_output(
["nix-build"]
+ self._eval_flags(self.nix_exprs + [phys_expr]) +
["--arg", "names", py2nix(names, inline=True),
"-A", "machines", "-o", self.tempdir + "/configs"]
+ (["--dry-run"] if dry_run else [])
+ (["--repair"] if repair else []),
stderr=self.logger.log_file).rstrip()
except subprocess.CalledProcessError:
raise Exception("unable to build all machine configurations")
if self.rollback_enabled and not dry_run:
profile = self.create_profile()
if subprocess.call(["nix-env", "-p", profile, "--set", configs_path]) != 0:
raise Exception("cannot update profile ‘{0}’".format(profile))
return configs_path
def copy_closures(self, configs_path, include, exclude, max_concurrent_copy):
"""Copy the closure of each machine configuration to the corresponding machine."""
def worker(m):
if not should_do(m, include, exclude): return
m.logger.log("copying closure...")
m.new_toplevel = os.path.realpath(configs_path + "/" + m.name)
if not os.path.exists(m.new_toplevel):
raise Exception("can't find closure of machine ‘{0}’".format(m.name))
m.copy_closure_to(m.new_toplevel)
nixops.parallel.run_tasks(
nr_workers=max_concurrent_copy,
tasks=self.active.itervalues(), worker_fun=worker)
self.logger.log(ansi_success("{0}> closures copied successfully".format(self.name or "unnamed"), outfile=self.logger._log_file))
def activate_configs(self, configs_path, include, exclude, allow_reboot,
force_reboot, check, sync, always_activate, dry_activate, max_concurrent_activate):
"""Activate the new configuration on a machine."""
def worker(m):
if not should_do(m, include, exclude): return
try:
# Set the system profile to the new configuration.
daemon_var = '' if m.state == m.RESCUE else 'env NIX_REMOTE=daemon '
setprof = daemon_var + 'nix-env -p /nix/var/nix/profiles/system --set "{0}"'
if always_activate or self.definitions[m.name].always_activate:
m.run_command(setprof.format(m.new_toplevel))
else:
# Only activate if the profile has changed.
new_profile_cmd = '; '.join([
'old_gen="$(readlink -f /nix/var/nix/profiles/system)"',
'new_gen="$(readlink -f "{0}")"',
'[ "x$old_gen" != "x$new_gen" ] || exit 111',
setprof
]).format(m.new_toplevel)
ret = m.run_command(new_profile_cmd, check=False)
if ret == 111:
m.log("configuration already up to date")
return
elif ret != 0:
raise Exception("unable to set new system profile")
m.send_keys()
if force_reboot or m.state == m.RESCUE:
switch_method = "boot"
elif dry_activate:
switch_method = "dry-activate"
else:
switch_method = "switch"
# Run the switch script. This will also update the
# GRUB boot loader.
res = m.switch_to_configuration(switch_method, sync)
if dry_activate: return
if res != 0 and res != 100:
raise Exception("unable to activate new configuration")
if res == 100 or force_reboot or m.state == m.RESCUE:
if not allow_reboot and not force_reboot:
raise Exception("the new configuration requires a "
"reboot to take effect (hint: use "
"‘--allow-reboot’)".format(m.name))
m.reboot_sync()
res = 0
# FIXME: should check which systemd services
# failed to start after the reboot.
if res == 0:
m.success("activation finished successfully")
# Record that we switched this machine to the new
# configuration.
m.cur_configs_path = configs_path
m.cur_toplevel = m.new_toplevel
except Exception as e:
# This thread shouldn't throw an exception because
# that will cause NixOps to exit and interrupt
# activation on the other machines.
m.logger.error(traceback.format_exc())
return m.name
return None
res = nixops.parallel.run_tasks(nr_workers=max_concurrent_activate, tasks=self.active.itervalues(), worker_fun=worker)
failed = [x for x in res if x != None]
if failed != []:
raise Exception("activation of {0} of {1} machines failed (namely on {2})"
.format(len(failed), len(res), ", ".join(["‘{0}’".format(x) for x in failed])))
def _get_free_resource_index(self):
index = 0
for r in self.resources.itervalues():
if r.index != None and index <= r.index:
index = r.index + 1
return index
def get_backups(self, include=[], exclude=[]):
self.evaluate_active(include, exclude) # unnecessary?
machine_backups = {}
for m in self.active.itervalues():
if should_do(m, include, exclude):
machine_backups[m.name] = m.get_backups()
# merging machine backups into network backups
backup_ids = [b for bs in machine_backups.values() for b in bs.keys()]
backups = {}
for backup_id in backup_ids:
backups[backup_id] = {}
backups[backup_id]['machines'] = {}
backups[backup_id]['info'] = []
backups[backup_id]['status'] = 'complete'
backup = backups[backup_id]
for m in self.active.itervalues():
if should_do(m, include, exclude):
if backup_id in machine_backups[m.name].keys():
backup['machines'][m.name] = machine_backups[m.name][backup_id]
backup['info'].extend(backup['machines'][m.name]['info'])
# status is always running when one of the backups is still running
if backup['machines'][m.name]['status'] != "complete" and backup['status'] != "running":
backup['status'] = backup['machines'][m.name]['status']
else:
backup['status'] = 'incomplete'
backup['info'].extend(["No backup available for {0}".format(m.name)]);
return backups
def clean_backups(self, keep, keep_days, keep_physical = False):
_backups = self.get_backups()
backup_ids = [b for b in _backups.keys()]
backup_ids.sort()
if keep:
index = len(backup_ids)-keep
tbr = backup_ids[:index]
if keep_days:
cutoff = (datetime.now()- timedelta(days=keep_days)).strftime("%Y%m%d%H%M%S")
print cutoff
tbr = [bid for bid in backup_ids if bid < cutoff]
for backup_id in tbr:
print 'Removing backup {0}'.format(backup_id)
self.remove_backup(backup_id, keep_physical)
def remove_backup(self, backup_id, keep_physical = False):
with self._get_deployment_lock():
def worker(m):
m.remove_backup(backup_id, keep_physical)
nixops.parallel.run_tasks(nr_workers=len(self.active), tasks=self.machines.itervalues(), worker_fun=worker)
def backup(self, include=[], exclude=[]):
self.evaluate_active(include, exclude)
backup_id = datetime.now().strftime("%Y%m%d%H%M%S")
def worker(m):
if not should_do(m, include, exclude): return
if m.state != m.STOPPED:
ssh_name = m.get_ssh_name()
res = subprocess.call(["ssh", "root@" + ssh_name] + m.get_ssh_flags() + ["sync"])
if res != 0:
m.logger.log("running sync failed on {0}.".format(m.name))
m.backup(self.definitions[m.name], backup_id)
nixops.parallel.run_tasks(nr_workers=5, tasks=self.active.itervalues(), worker_fun=worker)
return backup_id
def restore(self, include=[], exclude=[], backup_id=None, devices=[]):
with self._get_deployment_lock():
self.evaluate_active(include, exclude)
def worker(m):
if not should_do(m, include, exclude): return
m.restore(self.definitions[m.name], backup_id, devices)
nixops.parallel.run_tasks(nr_workers=-1, tasks=self.active.itervalues(), worker_fun=worker)
self.start_machines(include=include, exclude=exclude)
self.logger.warn("restore finished; please note that you might need to run ‘nixops deploy’ to fix configuration issues regarding changed IP addresses")
def evaluate_active(self, include=[], exclude=[], kill_obsolete=False):
self.evaluate()
# Create state objects for all defined resources.
with self._db:
for m in self.definitions.itervalues():
if m.name not in self.resources:
self._create_resource(m.name, m.get_type())
self.logger.update_log_prefixes()
to_destroy = []
# Determine the set of active resources. (We can't just
# delete obsolete resources from ‘self.resources’ because they
# contain important state that we don't want to forget about.)
for m in self.resources.values():
if m.name in self.definitions:
if m.obsolete:
self.logger.log("resource ‘{0}’ is no longer obsolete".format(m.name))
m.obsolete = False
else:
self.logger.log("resource ‘{0}’ is obsolete".format(m.name))
if not m.obsolete: m.obsolete = True
if not should_do(m, include, exclude): continue
if kill_obsolete:
to_destroy.append(m.name)
if to_destroy:
self._destroy_resources(include=to_destroy)
def _deploy(self, dry_run=False, plan_only=False, build_only=False, create_only=False, copy_only=False,
include=[], exclude=[], check=False, kill_obsolete=False,
allow_reboot=False, allow_recreate=False, force_reboot=False,
max_concurrent_copy=5, max_concurrent_activate=-1, sync=True,
always_activate=False, repair=False, dry_activate=False):
"""Perform the deployment defined by the deployment specification."""
self.evaluate_active(include, exclude, kill_obsolete)
# Assign each resource an index if it doesn't have one.
for r in self.active_resources.itervalues():
if r.index == None:
r.index = self._get_free_resource_index()
# FIXME: Logger should be able to do coloring without the need
# for an index maybe?
r.logger.register_index(r.index)
self.logger.update_log_prefixes()
# Start or update the active resources. Non-machine resources
# are created first, because machines may depend on them
# (e.g. EC2 machines depend on EC2 key pairs or EBS volumes).
# FIXME: would be nice to have a more fine-grained topological
# sort.
if not dry_run and not build_only:
for r in self.active_resources.itervalues():
defn = self.definitions[r.name]
if r.get_type() != defn.get_type():
raise Exception("the type of resource ‘{0}’ changed from ‘{1}’ to ‘{2}’, which is currently unsupported"
.format(r.name, r.get_type(), defn.get_type()))
r._created_event = threading.Event()
r._errored = False
def plan_worker(r):
if not should_do(r, include, exclude): return
if hasattr(r, 'plan'):
r.plan(self.definitions[r.name])
else:
r.warn("resource type {} doesn't implement a plan operation".format(r.get_type()))
if plan_only:
for r in self.active_resources.itervalues():
plan_worker(r)
return
def worker(r):
try:
if not should_do(r, include, exclude): return
# Sleep until all dependencies of this resource have
# been created.
deps = r.create_after(self.active_resources.itervalues(), self.definitions[r.name])
for dep in deps:
dep._created_event.wait()
# !!! Should we print a message here?
if dep._errored:
r._errored = True
return
# Now create the resource itself.
if not r.creation_time:
r.creation_time = int(time.time())
r.create(self.definitions[r.name], check=check, allow_reboot=allow_reboot, allow_recreate=allow_recreate)
if is_machine(r):
# The first time the machine is created,
# record the state version. We get it from
# /etc/os-release, rather than from the
# configuration's state.systemVersion
# attribute, because the machine may have been
# booted from an older NixOS image.
if not r.state_version:
os_release = r.run_command("cat /etc/os-release", capture_stdout=True)
match = re.search('VERSION_ID="([0-9]+\.[0-9]+).*"', os_release)
if match:
r.state_version = match.group(1)
r.log("setting state version to {0}".format(r.state_version))
else:
r.warn("cannot determine NixOS version")
r.wait_for_ssh(check=check)
r.generate_vpn_key()
except:
r._errored = True
raise
finally:
r._created_event.set()
nixops.parallel.run_tasks(nr_workers=-1, tasks=self.active_resources.itervalues(), worker_fun=worker)
if create_only: return
# Build the machine configurations.
# Record configs_path in the state so that the ‘info’ command
# can show whether machines have an outdated configuration.
self.configs_path = self.build_configs(dry_run=dry_run, repair=repair, include=include, exclude=exclude)
if build_only or dry_run: return
# Copy the closures of the machine configurations to the
# target machines.
self.copy_closures(self.configs_path, include=include, exclude=exclude,
max_concurrent_copy=max_concurrent_copy)
if copy_only: return
        # Activate the configurations.
self.activate_configs(self.configs_path, include=include,
exclude=exclude, allow_reboot=allow_reboot,
force_reboot=force_reboot, check=check,
sync=sync, always_activate=always_activate,
dry_activate=dry_activate, max_concurrent_activate=max_concurrent_activate)
if dry_activate: return
# Trigger cleanup of resources, e.g. disks that need to be detached etc. Needs to be
# done after activation to make sure they are not in use anymore.
def cleanup_worker(r):
if not should_do(r, include, exclude): return
            # Run the resource's post-activation cleanup.
r.after_activation(self.definitions[r.name])
nixops.parallel.run_tasks(nr_workers=-1, tasks=self.active_resources.itervalues(), worker_fun=cleanup_worker)
self.logger.log(ansi_success("{0}> deployment finished successfully".format(self.name or "unnamed"), outfile=self.logger._log_file))
# can generalize notifications later (e.g. emails, for now just hardcode datadog)
def notify_start(self, action):
self.evaluate_network()
nixops.datadog_utils.create_event(self, title='nixops {} started'.format(action), text=self.datadog_event_info, tags=self.datadog_tags)
nixops.datadog_utils.create_downtime(self)
def notify_success(self, action):
nixops.datadog_utils.create_event(self, title='nixops {} succeeded'.format(action), text=self.datadog_event_info, tags=self.datadog_tags)
nixops.datadog_utils.delete_downtime(self)
def notify_failed(self, action, e):
nixops.datadog_utils.create_event(self, title='nixops {} failed'.format(action), text="Error: {}\n\n{}".format(e.message, self.datadog_event_info), tags=self.datadog_tags)
nixops.datadog_utils.delete_downtime(self)
def run_with_notify(self, action, f):
self.notify_start(action)
try:
f()
self.notify_success(action)
except KeyboardInterrupt as e:
self.notify_failed(action, e)
raise
except Exception as e:
self.notify_failed(action, e)
raise
def deploy(self, **kwargs):
with self._get_deployment_lock():
self.run_with_notify('deploy', lambda: self._deploy(**kwargs))
def _rollback(self, generation, include=[], exclude=[], check=False,
allow_reboot=False, force_reboot=False,
max_concurrent_copy=5, max_concurrent_activate=-1, sync=True):
if not self.rollback_enabled:
raise Exception("rollback is not enabled for this network; please set ‘network.enableRollback’ to ‘true’ and redeploy"
)
profile = self.get_profile()
if subprocess.call(["nix-env", "-p", profile, "--switch-generation", str(generation)]) != 0:
raise Exception("nix-env --switch-generation failed")
self.configs_path = os.path.realpath(profile)
assert os.path.isdir(self.configs_path)
names = set()
for filename in os.listdir(self.configs_path):
if not os.path.islink(self.configs_path + "/" + filename): continue
if should_do_n(filename, include, exclude) and filename not in self.machines:
raise Exception("cannot roll back machine ‘{0}’ which no longer exists".format(filename))
names.add(filename)
# Update the set of active machines.
for m in self.machines.values():
if m.name in names:
if m.obsolete:
self.logger.log("machine ‘{0}’ is no longer obsolete".format(m.name))
m.obsolete = False
else:
self.logger.log("machine ‘{0}’ is obsolete".format(m.name))
if not m.obsolete: m.obsolete = True
self.copy_closures(self.configs_path, include=include, exclude=exclude,
max_concurrent_copy=max_concurrent_copy)
self.activate_configs(self.configs_path, include=include,
exclude=exclude, allow_reboot=allow_reboot,
force_reboot=force_reboot, check=check,
sync=sync, always_activate=True,
dry_activate=False, max_concurrent_activate=max_concurrent_activate)
def rollback(self, **kwargs):
with self._get_deployment_lock():
self._rollback(**kwargs)
def _destroy_resources(self, include=[], exclude=[], wipe=False):
for r in self.resources.itervalues():
r._destroyed_event = threading.Event()
r._errored = False
for rev_dep in r.destroy_before(self.resources.itervalues()):
try:
rev_dep._wait_for.append(r)
except AttributeError:
rev_dep._wait_for = [ r ]
def worker(m):
try:
if not should_do(m, include, exclude): return
try:
for dep in m._wait_for:
dep._destroyed_event.wait()
# !!! Should we print a message here?
if dep._errored:
m._errored = True
return
except AttributeError:
pass
if m.destroy(wipe=wipe): self.delete_resource(m)
except:
m._errored = True
raise
finally:
m._destroyed_event.set()
nixops.parallel.run_tasks(nr_workers=-1, tasks=self.resources.values(), worker_fun=worker)
def destroy_resources(self, include=[], exclude=[], wipe=False):
"""Destroy all active and obsolete resources."""
with self._get_deployment_lock():
self.run_with_notify('destroy', lambda: self._destroy_resources(include, exclude, wipe))
# Remove the destroyed machines from the rollback profile.
# This way, a subsequent "nix-env --delete-generations old" or
# "nix-collect-garbage -d" will get rid of the machine
# configurations.
if self.rollback_enabled: # and len(self.active) == 0:
profile = self.create_profile()
attrs = {m.name:
Call(RawValue("builtins.storePath"), m.cur_toplevel)
for m in self.active.itervalues() if m.cur_toplevel}
if subprocess.call(
["nix-env", "-p", profile, "--set", "*", "-I", "nixops=" + self.expr_path,
"-f", "<nixops/update-profile.nix>",
"--arg", "machines", py2nix(attrs, inline=True)]) != 0:
raise Exception("cannot update profile ‘{0}’".format(profile))
def reboot_machines(self, include=[], exclude=[], wait=False,
rescue=False, hard=False):
"""Reboot all active machines."""
def worker(m):
if not should_do(m, include, exclude): return
if rescue:
m.reboot_rescue(hard=hard)
elif wait:
m.reboot_sync(hard=hard)
else:
m.reboot(hard=hard)
nixops.parallel.run_tasks(nr_workers=-1, tasks=self.active.itervalues(), worker_fun=worker)
def stop_machines(self, include=[], exclude=[]):
"""Stop all active machines."""
def worker(m):
if not should_do(m, include, exclude): return
m.stop()
nixops.parallel.run_tasks(nr_workers=-1, tasks=self.active.itervalues(), worker_fun=worker)
def start_machines(self, include=[], exclude=[]):
"""Start all active machines."""
def worker(m):
if not should_do(m, include, exclude): return
m.start()
nixops.parallel.run_tasks(nr_workers=-1, tasks=self.active.itervalues(), worker_fun=worker)
def is_valid_resource_name(self, name):
        p = re.compile(r'^[\w-]+$')
return not p.match(name) is None
def rename(self, name, new_name):
        if name not in self.resources:
raise Exception("resource ‘{0}’ not found".format(name))
if new_name in self.resources:
raise Exception("resource with name ‘{0}’ already exists".format(new_name))
if not self.is_valid_resource_name(new_name):
raise Exception("{0} is not a valid resource identifier".format(new_name))
self.logger.log("renaming resource ‘{0}’ to ‘{1}’...".format(name, new_name))
m = self.resources.pop(name)
self.resources[new_name] = m
with self._db:
self._db.execute("update Resources set name = ? where deployment = ? and id = ?", (new_name, self.uuid, m.id))
def send_keys(self, include=[], exclude=[]):
"""Send LUKS encryption keys to machines."""
def worker(m):
if not should_do(m, include, exclude): return
m.send_keys()
nixops.parallel.run_tasks(nr_workers=-1, tasks=self.active.itervalues(), worker_fun=worker)
def should_do(m, include, exclude):
return should_do_n(m.name, include, exclude)
def should_do_n(name, include, exclude):
if name in exclude: return False
if include == []: return True
return name in include
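# Illustrative examples (not part of nixops) of the include/exclude semantics
# implemented by should_do_n() above; the resource names are invented:
#
#     should_do_n("web1", include=[], exclude=[])              # -> True: no filters, everything is selected
#     should_do_n("web1", include=["db1"], exclude=[])         # -> False: include list given, name not in it
#     should_do_n("web1", include=["web1"], exclude=["web1"])  # -> False: exclude always wins over include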
def is_machine(r):
return isinstance(r, nixops.backends.MachineState)
def is_machine_defn(r):
return isinstance(r, nixops.backends.MachineDefinition)
def _subclasses(cls):
sub = cls.__subclasses__()
return [cls] if not sub else [g for s in sub for g in _subclasses(s)]
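# Illustrative note (not part of nixops): _subclasses() returns only the *leaf*
# classes of a hierarchy. For hypothetical classes A, B(A), C(B), D(A):
#
#     _subclasses(A) == [C, D]   # B is skipped because it has a subclass of its own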
def _create_definition(xml, config, type_name):
"""Create a resource definition object from the given XML representation of the machine's attributes."""
for cls in _subclasses(nixops.resources.ResourceDefinition):
if type_name == cls.get_resource_type():
# FIXME: backward compatibility hack
if len(inspect.getargspec(cls.__init__).args) == 2:
return cls(xml)
else:
return cls(xml, config)
raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type_name))
def _create_state(depl, type, name, id):
"""Create a resource state object of the desired type."""
for cls in _subclasses(nixops.resources.ResourceState):
if type == cls.get_type():
return cls(depl, name, id)
raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type))
# Automatically load all resource types.
def _load_modules_from(dir):
for module in os.listdir(os.path.dirname(__file__) + "/" + dir):
if module[-3:] != '.py' or module == "__init__.py": continue
__import__("nixops." + dir + "." + module[:-3], globals(), locals())
_load_modules_from("backends")
_load_modules_from("resources")
| AmineChikhaoui/nixops | nixops/deployment.py | Python | lgpl-3.0 | 53,483 | 0.004463 |
"""
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.db import models
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import smart_text, force_text
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.admin.utils import (get_model_from_relation,
reverse_field_path, get_limit_choices_to_from_path, prepare_lookup_value)
from django.contrib.admin.options import IncorrectLookupParameters
class ListFilter(object):
title = None # Human-readable title to appear in the right sidebar.
template = 'admin/filter.html'
def __init__(self, request, params, model, model_admin):
# This dictionary will eventually contain the request's query string
# parameters actually used by this filter.
self.used_parameters = {}
if self.title is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'title'." % self.__class__.__name__)
def has_output(self):
"""
Returns True if some choices would be output for this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')
def choices(self, cl):
"""
Returns choices ready to be output in the template.
"""
raise NotImplementedError('subclasses of ListFilter must provide a choices() method')
def queryset(self, request, queryset):
"""
Returns the filtered queryset.
"""
raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')
def expected_parameters(self):
"""
Returns the list of parameter names that are expected from the
request's query string and that will be used by this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
# The parameter that should be used in the query string for that filter.
parameter_name = None
def __init__(self, request, params, model, model_admin):
super(SimpleListFilter, self).__init__(
request, params, model, model_admin)
if self.parameter_name is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'parameter_name'." % self.__class__.__name__)
if self.parameter_name in params:
value = params.pop(self.parameter_name)
self.used_parameters[self.parameter_name] = value
lookup_choices = self.lookups(request, model_admin)
if lookup_choices is None:
lookup_choices = ()
self.lookup_choices = list(lookup_choices)
def has_output(self):
return len(self.lookup_choices) > 0
def value(self):
"""
Returns the value (in string format) provided in the request's
query string for this filter, if any. If the value wasn't provided then
returns None.
"""
return self.used_parameters.get(self.parameter_name, None)
def lookups(self, request, model_admin):
"""
Must be overridden to return a list of tuples (value, verbose value)
"""
raise NotImplementedError(
'The SimpleListFilter.lookups() method must be overridden to '
'return a list of tuples (value, verbose value)')
def expected_parameters(self):
return [self.parameter_name]
def choices(self, cl):
yield {
'selected': self.value() is None,
'query_string': cl.get_query_string({}, [self.parameter_name]),
'display': _('All'),
}
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == force_text(lookup),
'query_string': cl.get_query_string({
self.parameter_name: lookup,
}, []),
'display': title,
}
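# A minimal, hypothetical subclass (not part of Django itself) sketching how the
# SimpleListFilter contract above is typically used from a ModelAdmin's
# "list_filter" option; the "birthday" field and the decade choices are invented:
class ExampleDecadeBornListFilter(SimpleListFilter):
    title = _('decade born')
    parameter_name = 'decade'
    def lookups(self, request, model_admin):
        # Pairs of (query-string value, human-readable label).
        return (
            ('80s', _('in the eighties')),
            ('90s', _('in the nineties')),
        )
    def queryset(self, request, queryset):
        # self.value() is the raw query-string value, or None when "All" is selected.
        if self.value() == '80s':
            return queryset.filter(birthday__gte=datetime.date(1980, 1, 1),
                                   birthday__lte=datetime.date(1989, 12, 31))
        if self.value() == '90s':
            return queryset.filter(birthday__gte=datetime.date(1990, 1, 1),
                                   birthday__lte=datetime.date(1999, 12, 31))
        return queryset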
class FieldListFilter(ListFilter):
_field_list_filters = []
_take_priority_index = 0
def __init__(self, field, request, params, model, model_admin, field_path):
self.field = field
self.field_path = field_path
self.title = getattr(field, 'verbose_name', field_path)
super(FieldListFilter, self).__init__(
request, params, model, model_admin)
for p in self.expected_parameters():
if p in params:
value = params.pop(p)
self.used_parameters[p] = prepare_lookup_value(p, value)
def has_output(self):
return True
def queryset(self, request, queryset):
try:
return queryset.filter(**self.used_parameters)
except ValidationError as e:
raise IncorrectLookupParameters(e)
@classmethod
def register(cls, test, list_filter_class, take_priority=False):
if take_priority:
            # This allows the default filters for certain types of fields to be
            # overridden with custom filters. The first matching entry in the
            # list takes priority.
cls._field_list_filters.insert(
cls._take_priority_index, (test, list_filter_class))
cls._take_priority_index += 1
else:
cls._field_list_filters.append((test, list_filter_class))
@classmethod
def create(cls, field, request, params, model, model_admin, field_path):
for test, list_filter_class in cls._field_list_filters:
if not test(field):
continue
return list_filter_class(field, request, params,
model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
other_model = get_model_from_relation(field)
if hasattr(field, 'rel'):
rel_name = field.rel.get_related_field().name
else:
rel_name = other_model._meta.pk.name
self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
self.lookup_choices = self.field_choices(field, request, model_admin)
super(RelatedFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
if hasattr(field, 'verbose_name'):
self.lookup_title = field.verbose_name
else:
self.lookup_title = other_model._meta.verbose_name
self.title = self.lookup_title
def has_output(self):
if (isinstance(self.field, ForeignObjectRel) and
self.field.field.null or hasattr(self.field, 'rel') and
self.field.null):
extra = 1
else:
extra = 0
return len(self.lookup_choices) + extra > 1
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def field_choices(self, field, request, model_admin):
return field.get_choices(include_blank=False)
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': self.lookup_val is None and not self.lookup_val_isnull,
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
for pk_val, val in self.lookup_choices:
yield {
'selected': self.lookup_val == smart_text(pk_val),
'query_string': cl.get_query_string({
self.lookup_kwarg: pk_val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if (isinstance(self.field, ForeignObjectRel) and
(self.field.field.null or isinstance(self.field.field, ManyToManyField)) or
hasattr(self.field, 'rel') and (self.field.null or isinstance(self.field, ManyToManyField))):
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
FieldListFilter.register(lambda f: (
bool(f.rel) if hasattr(f, 'rel') else
isinstance(f, ForeignObjectRel)), RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s__exact' % field_path
self.lookup_kwarg2 = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg, None)
self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
super(BooleanFieldListFilter, self).__init__(field,
request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg2]
def choices(self, cl):
for lookup, title in (
(None, _('All')),
('1', _('Yes')),
('0', _('No'))):
yield {
'selected': self.lookup_val == lookup and not self.lookup_val2,
'query_string': cl.get_query_string({
self.lookup_kwarg: lookup,
}, [self.lookup_kwarg2]),
'display': title,
}
if isinstance(self.field, models.NullBooleanField):
yield {
'selected': self.lookup_val2 == 'True',
'query_string': cl.get_query_string({
self.lookup_kwarg2: 'True',
}, [self.lookup_kwarg]),
'display': _('Unknown'),
}
FieldListFilter.register(lambda f: isinstance(f,
(models.BooleanField, models.NullBooleanField)), BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s__exact' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
super(ChoicesFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg]
def choices(self, cl):
yield {
'selected': self.lookup_val is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All')
}
for lookup, title in self.field.flatchoices:
yield {
'selected': smart_text(lookup) == self.lookup_val,
'query_string': cl.get_query_string({
self.lookup_kwarg: lookup}),
'display': title,
}
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.field_generic = '%s__' % field_path
self.date_params = {k: v for k, v in params.items()
if k.startswith(self.field_generic)}
now = timezone.now()
# When time zone support is enabled, convert "now" to the user's time
# zone so Django's definition of "Today" matches what the user expects.
if timezone.is_aware(now):
now = timezone.localtime(now)
if isinstance(field, models.DateTimeField):
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
else: # field is a models.DateField
today = now.date()
tomorrow = today + datetime.timedelta(days=1)
if today.month == 12:
next_month = today.replace(year=today.year + 1, month=1, day=1)
else:
next_month = today.replace(month=today.month + 1, day=1)
next_year = today.replace(year=today.year + 1, month=1, day=1)
self.lookup_kwarg_since = '%s__gte' % field_path
self.lookup_kwarg_until = '%s__lt' % field_path
self.links = (
(_('Any date'), {}),
(_('Today'), {
self.lookup_kwarg_since: str(today),
self.lookup_kwarg_until: str(tomorrow),
}),
(_('Past 7 days'), {
self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
self.lookup_kwarg_until: str(tomorrow),
}),
(_('This month'), {
self.lookup_kwarg_since: str(today.replace(day=1)),
self.lookup_kwarg_until: str(next_month),
}),
(_('This year'), {
self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
self.lookup_kwarg_until: str(next_year),
}),
)
super(DateFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg_since, self.lookup_kwarg_until]
def choices(self, cl):
for title, param_dict in self.links:
yield {
'selected': self.date_params == param_dict,
'query_string': cl.get_query_string(
param_dict, [self.field_generic]),
'display': title,
}
FieldListFilter.register(
lambda f: isinstance(f, models.DateField), DateFieldListFilter)
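# Illustrative note (not part of Django): for a hypothetical "pub_date" field and
# a "today" of 2015-03-14, the "This month" link above filters with roughly
#     {'pub_date__gte': '2015-03-01', 'pub_date__lt': '2015-04-01'}
# i.e. a half-open [since, until) range built from lookup_kwarg_since/_until.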
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = field_path
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg, None)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull,
None)
parent_model, reverse_path = reverse_field_path(model, field_path)
# Obey parent ModelAdmin queryset when deciding which options to show
if model == parent_model:
queryset = model_admin.get_queryset(request)
else:
queryset = parent_model._default_manager.all()
        # optional feature: limit choices based on existing relationships
# queryset = queryset.complex_filter(
# {'%s__isnull' % reverse_path: False})
limit_choices_to = get_limit_choices_to_from_path(model, field_path)
queryset = queryset.filter(limit_choices_to)
self.lookup_choices = (queryset
.distinct()
.order_by(field.name)
.values_list(field.name, flat=True))
super(AllValuesFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': (self.lookup_val is None
and self.lookup_val_isnull is None),
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
include_none = False
for val in self.lookup_choices:
if val is None:
include_none = True
continue
val = smart_text(val)
yield {
'selected': self.lookup_val == val,
'query_string': cl.get_query_string({
self.lookup_kwarg: val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if include_none:
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
class RelatedOnlyFieldListFilter(RelatedFieldListFilter):
def field_choices(self, field, request, model_admin):
limit_choices_to = {'pk__in': set(model_admin.get_queryset(request).values_list(field.name, flat=True))}
return field.get_choices(include_blank=False, limit_choices_to=limit_choices_to)
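# Illustrative usage sketch (not part of this module): a hypothetical ModelAdmin
# opting into the filter above for a ForeignKey field named "author":
#
#     class BookAdmin(admin.ModelAdmin):
#         list_filter = (('author', RelatedOnlyFieldListFilter),)
#
# Only authors actually referenced by the admin's queryset are offered as
# choices, via the pk__in restriction in field_choices().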
| Sonicbids/django | django/contrib/admin/filters.py | Python | bsd-3-clause | 17,360 | 0.00121 |
from django.contrib import admin
from rawParser.models import flightSearch
# Register your models here.
admin.site.register(flightSearch)
| anantag/flightsearch | backend/flightsearch/rawParser/admin.py | Python | gpl-2.0 | 139 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| Ircam-Web/mezzanine-organization | organization/shop/management/commands/__init__.py | Python | agpl-3.0 | 846 | 0 |
# -*- coding: utf-8 -*-
#
# powerschool_apps documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'powerschool_apps'
copyright = """2017, Iron County School District"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'powerschool_appsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'powerschool_apps.tex',
'powerschool_apps Documentation',
"""Iron County School District""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'powerschool_apps', 'powerschool_apps Documentation',
["""Iron County School District"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'powerschool_apps', 'powerschool_apps Documentation',
"""Iron County School District""", 'powerschool_apps',
"""PowerSchool customizations written in Django""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
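# Typical usage note (not part of the generated config): this file is consumed by
# Sphinx when building the docs, e.g. from the repository root:
#     sphinx-build -b html docs docs/_build/html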
| IronCountySchoolDistrict/powerschool_apps | docs/conf.py | Python | mit | 8,001 | 0.001125 |
from parabem.pan2d import doublet_2_1
import parabem
import numpy as np
v1 = parabem.PanelVector2(-1, 0)
v2 = parabem.PanelVector2(1, 0)
panel = parabem.Panel2([v2, v1])
vals = ([doublet_2_1(parabem.Vector2(x, 0), panel, True) for x in np.linspace(-2, 2, 20)])
print(vals)
| looooo/panel-method | examples/tests/test_linear_2d_doublet.py | Python | gpl-3.0 | 276 | 0.003623 |
__author__ = 'rls'
class Robot:
"""Represents a robot, with a name."""
# A class variable, counting the number of robots
population = 0
def __init__(self, name):
"""Initializes the data."""
self.name = name
print('(Initializing {})'.format(self.name))
        # When this robot is created, it adds to the population
Robot.population += 1
def die(self):
"""I am dying."""
print("{} is being destroyed!".format(self.name))
Robot.population -= 1
if Robot.population == 0:
print("{} was the last one.".format(self.name))
else:
print("There are still {:d} robots working.".format(Robot.population))
def say_hi(self):
"""Greeting by the robot
Long doc statement."""
print("Greetings, my masters have called me {}".format(self.name))
@classmethod
def how_many(cls):
"""Prints the current population."""
print("We have {:d} robots.".format(cls.population))
droid1 = Robot("R2-D2")
droid1.say_hi()
Robot.how_many()
__version__ = 0.2
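# Expected output when running this script in a fresh interpreter (illustrative):
#
#     (Initializing R2-D2)
#     Greetings, my masters have called me R2-D2
#     We have 1 robots.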
| rlsharpton/byte-of-Python | oop_objvar.py | Python | gpl-2.0 | 1,127 | 0.003549 |
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain, format_fee_satoshis
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register the widget cache to keep memory usage down; the timeout is set to
# forever so the data stays cached.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'fujicoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
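    # Illustrative note (not part of Electrum): get_amount() scales the textual
    # amount by 10 ** decimal_point(). Assuming the configured base unit has
    # decimal_point() == 8, get_amount('0.5 <base_unit>') returns
    # int(Decimal('0.5') * 10 ** 8) == 50000000 units of the smallest denomination.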
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''The current screen orientation of the device.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('fujicoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for fujicoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage):
if storage:
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
wizard.run('new')
elif storage.is_encrypted():
raise Exception("Kivy GUI does not support encrypted wallet files.")
elif storage.requires_upgrade():
wizard.upgrade_storage(storage)
else:
raise Exception("unexpected storage file situation")
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
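        # Editor's note (assumption based on Electrum conventions, not stated in
        # this file): the '!' amount below is the sentinel that
        # make_unsigned_transaction interprets as "spend the maximum available",
        # i.e. all selected coins minus the fee.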
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
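    # Descriptive note: `protected` gates an action behind the PIN prompt. If the
    # wallet has a password it asks for it and then calls f(*args, password);
    # otherwise it calls f(*args, None) directly.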
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
| fujicoin/electrum-fjc | electrum/gui/kivy/main_window.py | Python | mit | 44,226 | 0.003346 |
"""
Order module has been split for its complexity.
Proposed clean hierarchy for GASSupplierOrder that
can be used in many contexts such as:
DES: ChooseSupplier ChooseGAS ChooseReferrer
GAS: ChooseSupplier OneGAS ChooseReferrer
Supplier: OneSupplier ChooseGAS ChooseReferrer
Solidal Pact: OneSupplier OneGAS ChooseReferrer
* BaseOrderForm: base for add and edit
|
|---* AddOrderForm: encapsulate Add logic.
| Just this class is enough if Resource API encapsulate
| logic behind specific resource. Otherwise we need to write
| subclasses XAddOrderForm where X is one of DES, GAS, Supplier, Pact.
|
| It manages:
| * common attributes
| * setting of withdrawal and deliveries
|
----* EditOrderForm
* PlannedAddOrderForm: mix-in class to add planning facilities
#TODO LEFT OUT NOW InterGASAddOrderForm: it requires some considerations and
#TODO LEFT OUT NOW so probably it should be managed as a separated module.
#TODO LEFT OUT NOW P.e: deliveries and withdrawals MUST be always specified.
#TODO LEFT OUT NOW It also would need multiple delivery and withdrawal places,
#TODO LEFT OUT NOW but this will be a FUTURE module update
Factory function `form_class_factory_for_request` is there for:
* composition of final classes
(XAddOrderForm, PlannedAddOrderForm, InterGASAddOrderForm)
* follows GAS configuration options and prepare delivery and withdrawal fields
Where can you find above classes:
* base.BaseOrderForm
* base.AddOrderForm
* base.EditOrderForm
* X.XAddOrderForm (where X can be des,gas,supplier,pact)
* __init__.form_class_factory_for_request
* extra.PlannedAddOrderForm
#TODO LEFT OUT NOW * intergas.InterGASAddOrderForm
There are also some other classes that support order interactions:
* gmo.SingleGASMemberOrderForm
* gmo.BasketGASMemberOrderForm
* gsop.GASSupplierOrderProductForm
"""
from django import forms
from django.utils.translation import ugettext, ugettext_lazy as _
from gf.base.models import Place, Person
from lib.widgets import SplitDateTimeFormatAwareWidget
from gf.gas.forms.order.base import AddOrderForm, EditOrderForm
from gf.gas.forms.order.plan import AddPlannedOrderForm
from gf.gas.forms.order.intergas import AddInterGASOrderForm, AddInterGASPlannedOrderForm
from gf.gas.models import GASSupplierOrder
import copy
import logging
log = logging.getLogger(__name__)
def form_class_factory_for_request(request, base):
"""Return appropriate form class basing on GAS configuration
and other request parameters if needed"""
#log.debug("OrderForm--> form_class_factory_for_request")
fields = copy.deepcopy(base.Meta.fields)
gf_fieldsets = copy.deepcopy(base.Meta.gf_fieldsets)
attrs = {}
gas = request.resource.gas
if gas:
if gas.config.use_withdrawal_place:
gf_fieldsets[0][1]['fields'].append('withdrawal_referrer_person')
attrs.update({
'withdrawal_referrer' : forms.ModelChoiceField(
queryset=Person.objects.none(),
required=False
),
})
if gas.config.can_change_delivery_place_on_each_order:
gf_fieldsets[0][1]['fields'].append(('delivery_city', 'delivery_addr_or_place'))
attrs.update({
'delivery_city' : forms.CharField(required=True,
label=_('Delivery city'),
initial=gas.city
),
'delivery_addr_or_place': forms.CharField(
required=True, label=_('Delivery address or place'),
initial=gas.headquarter
),
})
if gas.config.use_withdrawal_place:
if gas.config.can_change_withdrawal_place_on_each_order:
gf_fieldsets[0][1]['fields'].append((
'withdrawal_datetime', 'withdrawal_city',
'withdrawal_addr_or_place')
)
attrs.update({
'withdrawal_datetime' : forms.SplitDateTimeField(
required=False, label=_('Withdrawal on/at'),
widget=SplitDateTimeFormatAwareWidget
),
'withdrawal_city' : forms.CharField(
required=True, label=_('Withdrawal city'),
initial=gas.city
),
'withdrawal_addr_or_place': forms.CharField(required=True,
label=_('Withdrawal address or place'),
initial=gas.headquarter
),
})
attrs.update(Meta=type('Meta', (), {
'model' : GASSupplierOrder,
'fields' : fields,
'gf_fieldsets' : gf_fieldsets
}))
return type('Custom%s' % base.__name__, (base,), attrs)
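# Illustrative usage sketch (the view-side names below, e.g. the `request.POST`
# handling, are assumptions and not part of this module): a view would
# typically build the concrete form class from the current request and one of
# the imported base classes, e.g.
#
#   FormClass = form_class_factory_for_request(request, base=AddOrderForm)
#   form = FormClass(request.POST or None)
#
# The returned class is named 'CustomAddOrderForm' and carries the extra
# delivery/withdrawal fields enabled by the GAS configuration flags handled
# above.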
| befair/gasistafelice | gasistafelice/gf/gas/forms/order/__init__.py | Python | agpl-3.0 | 5,005 | 0.006194 |
#!/usr/bin/env python
# Copyright (C) 2008,2011 Lanedo GmbH
#
# Author: Tim Janik
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os, re, urllib, csv
pkginstall_configvars = {
'PACKAGE' : 'dummy', 'PACKAGE_NAME' : 'dummy', 'VERSION' : '0.0', 'REVISION' : 'uninstalled',
#@PKGINSTALL_CONFIGVARS_IN24LINES@ # configvars are substituted upon script installation
}
# TODO:
# - support mixing in comments.txt which has "bug# person: task"
bugurls = (
('gb', 'http://bugzilla.gnome.org/buglist.cgi?bug_id='),
('gnome', 'http://bugzilla.gnome.org/buglist.cgi?bug_id='),
('fd', 'https://bugs.freedesktop.org/buglist.cgi?bug_id='),
('freedesktop', 'https://bugs.freedesktop.org/buglist.cgi?bug_id='),
('mb', 'https://bugs.maemo.org/buglist.cgi?bug_id='),
('maemo', 'https://bugs.maemo.org/buglist.cgi?bug_id='),
('nb', 'https://projects.maemo.org/bugzilla/buglist.cgi?bug_id='),
('nokia', 'https://projects.maemo.org/bugzilla/buglist.cgi?bug_id='),
('gcc', 'http://gcc.gnu.org/bugzilla/buglist.cgi?bug_id='),
('libc', 'http://sources.redhat.com/bugzilla/buglist.cgi?bug_id='),
('moz', 'https://bugzilla.mozilla.org/buglist.cgi?bug_id='),
('mozilla', 'https://bugzilla.mozilla.org/buglist.cgi?bug_id='),
('xm', 'http://bugzilla.xamarin.com/buglist.cgi?id='),
('xamarin', 'http://bugzilla.xamarin.com/buglist.cgi?id='),
)
# URL authentication handling
def auth_urls():
import ConfigParser, os, re
cp = ConfigParser.SafeConfigParser()
cp.add_section ('authentication-urls')
cp.set ('authentication-urls', 'urls', '')
cp.read (os.path.expanduser ('~/.urlrc'))
urlstr = cp.get ('authentication-urls', 'urls') # space separated url list
urls = re.split ("\s*", urlstr.strip()) # list urls
urls = [u for u in urls if u] # strip empty urls
global auth_urls; auth_urls = lambda : urls # cache result for the future
return urls
def add_auth (url):
for ai in auth_urls():
prefix = re.sub ('//[^:/@]*:[^:/@]*@', '//', ai)
if url.startswith (prefix):
pl = len (prefix)
return ai + url[pl:]
return url
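# Illustrative example (credentials are placeholders, mirroring the ~/.urlrc
# sample shown in help() below): with
#   urls = https://USERNAME:PASSWORD@projects.maemo.org/bugzilla
# a call like
#   add_auth('https://projects.maemo.org/bugzilla/buglist.cgi?bug_id=1')
# returns 'https://USERNAME:PASSWORD@projects.maemo.org/bugzilla/buglist.cgi?bug_id=1',
# i.e. credentials are re-inserted for any URL whose credential-stripped prefix
# matches a configured authentication URL.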
# carry out online bug queries
def bug_summaries (buglisturl):
if not buglisturl:
return []
# Bugzilla query to use
query = buglisturl + '&ctype=csv' # buglisturl.replace (',', '%2c')
query = add_auth (query)
f = urllib.urlopen (query)
csvdata = f.read()
f.close()
# read CSV lines
reader = csv.reader (csvdata.splitlines (1))
# parse head to interpret columns
col_bug_id = -1
col_description = -1
header = reader.next()
i = 0
for col in header:
col = col.strip()
if col == 'bug_id':
col_bug_id = i
if col == 'short_short_desc':
col_description = i
elif col_description < 0 and col == 'short_desc':
col_description = i
i = i + 1
if col_bug_id < 0:
print >>sys.stderr, 'Failed to identify bug_id from CSV data'
sys.exit (11)
if col_description < 0:
print >>sys.stderr, 'Failed to identify description columns from CSV data'
sys.exit (12)
# parse bug list
result = []
summary = ''
for row in reader:
bug_number = row[col_bug_id]
description = row[col_description]
result += [ (bug_number, description) ]
return result
# parse bug numbers and list bugs
def read_handle_bugs (config, url):
lines = sys.stdin.read()
# print >>sys.stderr, 'Using bugzilla URL: %s' % (bz, url)
for line in [ lines ]:
# find all bug numbers
bugs = re.findall (r'\b[0-9]+\b', line)
# int-convert, dedup and sort bug numbers
ibugs = []
if bugs:
bught = {}
for b in bugs:
b = int (b)
if not b or bught.has_key (b): continue
bught[b] = True
ibugs += [ b ]
del bugs
if config.get ('sort', False):
ibugs.sort()
# construct full query URL
fullurl = url + ','.join ([str (b) for b in ibugs])
# print fullurl
if len (ibugs) and config.get ('show-query', False):
print fullurl
# print bug summaries
if len (ibugs) and config.get ('show-list', False):
bught = {}
for bug in bug_summaries (fullurl):
bught[int (bug[0])] = bug[1] # bug summaries can have random order
for bugid in ibugs: # print bugs in user provided order
iid = int (bugid)
if bught.has_key (iid):
desc = bught[iid]
if len (desc) >= 70:
desc = desc[:67].rstrip() + '...'
print "% 7u - %s" % (iid, desc)
else:
print "% 7u (NOBUG)" % iid
def help (version = False, verbose = False):
print "buglist %s (%s, %s)" % (pkginstall_configvars['VERSION'],
pkginstall_configvars['PACKAGE_NAME'], pkginstall_configvars['REVISION'])
print "Redistributable under GNU GPLv3 or later: http://gnu.org/licenses/gpl.html"
if version: # version *only*
return
print "Usage: %s [options] <BUG-TRACKER> " % os.path.basename (sys.argv[0])
print "List or download bugs from a bug tracker. Bug numbers are read from stdin."
if not verbose:
print "Use the --help option for verbose usage information."
return
# 12345678911234567892123456789312345678941234567895123456789612345678971234567898
print "Options:"
print " -h, --help Print verbose help message."
print " -v, --version Print version information."
print " -U Keep bug list unsorted."
print " --bug-tracker-list List supported bug trackers."
print "Authentication:"
print " An INI-style config file is used to associate bugzilla URLs with account"
print " authentication for secured installations. The file should be unreadable"
print " by others to keep passwords secret, e.g. with: chmod 0600 ~/.urlrc"
print " A sample ~/.urlrc might look like this:"
print "\t# INI-style config file for URLs"
print "\t[authentication-urls]"
print "\turls =\thttps://USERNAME:PASSWORD@projects.maemo.org/bugzilla"
print "\t\thttp://BLOGGER:PASSWORD@blogs.gnome.org/BLOGGER/xmlrpc.php"
def main ():
import getopt
# default configuration
config = {
'sort' : True,
'show-query' : True,
'show-list' : True,
}
# parse options
try:
options, args = getopt.gnu_getopt (sys.argv[1:], 'vhU', [ 'help', 'version', 'bug-tracker-list' ])
except getopt.GetoptError, err:
print >>sys.stderr, "%s: %s" % (os.path.basename (sys.argv[0]), str (err))
help()
sys.exit (126)
for arg, val in options:
if arg == '-h' or arg == '--help': help (verbose=True); sys.exit (0)
if arg == '-v' or arg == '--version': help (version=True); sys.exit (0)
if arg == '-U': config['sort'] = False
if arg == '--bug-tracker-list':
print "Bug Tracker:"
for kv in bugurls:
print " %-20s %s" % kv
sys.exit (0)
if len (args) < 1:
print >>sys.stderr, "%s: Missing bug tracker argument" % os.path.basename (sys.argv[0])
help()
sys.exit (126)
trackerdict = dict (bugurls)
if not trackerdict.has_key (args[0]):
print >>sys.stderr, "%s: Unknown bug tracker: %s" % (os.path.basename (sys.argv[0]), args[0])
sys.exit (10)
# handle bugs
read_handle_bugs (config, trackerdict[args[0]])
if __name__ == '__main__':
main()
| tim-janik/testbit-tools | buglist.py | Python | gpl-3.0 | 7,933 | 0.024707 |
# *-* coding:utf-8 *-*
from functools import partial
# good readability
# range()
print range(0,9,2) # increasing list
for i in xrange(0,9,2): # only used inside for loops
print i
albums = ("Poe","Gaudi","Freud","Poe2")
years = (1976,1987,1990,2003)
for album in sorted(albums):
print album
for album in reversed(albums):
print album
for i,album in enumerate(albums):
print i,album
for album,yr in zip(albums,years):
print album,yr
# list expressions
# 8.12 list comprehensions; a list comprehension is more efficient than map/lambda
def fuc(a):
return a**2
x = range(1,10,1)
print x
print map(fuc,x)
# map applies an operation to every element of a list
# lambda creates a one-line function -- used only once, not a shared function
print map(lambda x:x**2,range(6))
print [x**2 for x in range(6) if x>3]
print filter(lambda x:x%2,range(10))
print [x for x in range(10) if x%2]
# 11.7.2 functional programming
print range(6)
print reduce(lambda x,y:x+y,range(6)) # cumulative sum
# partial functions: simplify code and speed it up
int2 = partial(int,base=2)
print int2('1000')
# closures
# generator expression
g = (x for x in range(10))
print g.next()
print "------"
for n in g:
print n
# anonymous function: no name, assign the function to a variable
f = lambda x:x*x
print f(2)
# decorators
def log():
print 'log'
def now():
print 'time is:','2017-09-14'
# dictionaries
| qiudebo/13learn | code/python/myPython.py | Python | mit | 1,435 | 0.0445 |
# -*- coding: utf-8 -*-
import base64
try:
import simplejson as json
except ImportError:
import json
import logging
import urlparse
import werkzeug.urls
import urllib2
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_paypal.controllers.main import PaypalController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
from openerp import SUPERUSER_ID, tools
_logger = logging.getLogger(__name__)
class AcquirerPaypal(osv.Model):
_inherit = 'payment.acquirer'
def _get_paypal_urls(self, cr, uid, environment, context=None):
""" Paypal URLS """
if environment == 'prod':
return {
'paypal_form_url': 'https://www.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.paypal.com/v1/oauth2/token',
}
else:
return {
'paypal_form_url': 'https://www.sandbox.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.sandbox.paypal.com/v1/oauth2/token',
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerPaypal, self)._get_providers(cr, uid, context=context)
providers.append(['paypal', 'Paypal'])
return providers
_columns = {
'paypal_email_account': fields.char('Paypal Email ID', required_if_provider='paypal', groups='base.group_user'),
'paypal_seller_account': fields.char(
'Paypal Merchant ID', groups='base.group_user',
help='The Merchant ID is used to ensure communications coming from Paypal are valid and secured.'),
'paypal_use_ipn': fields.boolean('Use IPN', help='Paypal Instant Payment Notification', groups='base.group_user'),
# Server 2 server
'paypal_api_enabled': fields.boolean('Use Rest API'),
'paypal_api_username': fields.char('Rest API Username', groups='base.group_user'),
'paypal_api_password': fields.char('Rest API Password', groups='base.group_user'),
'paypal_api_access_token': fields.char('Access Token', groups='base.group_user'),
'paypal_api_access_token_validity': fields.datetime('Access Token Validity', groups='base.group_user'),
}
_defaults = {
'paypal_use_ipn': True,
'fees_active': False,
'fees_dom_fixed': 0.35,
'fees_dom_var': 3.4,
'fees_int_fixed': 0.35,
'fees_int_var': 3.9,
'paypal_api_enabled': False,
}
def _migrate_paypal_account(self, cr, uid, context=None):
""" COMPLETE ME """
cr.execute('SELECT id, paypal_account FROM res_company')
res = cr.fetchall()
for (company_id, company_paypal_account) in res:
if company_paypal_account:
company_paypal_ids = self.search(cr, uid, [('company_id', '=', company_id), ('provider', '=', 'paypal')], limit=1, context=context)
if company_paypal_ids:
self.write(cr, uid, company_paypal_ids, {'paypal_email_account': company_paypal_account}, context=context)
else:
paypal_view = self.pool['ir.model.data'].get_object(cr, uid, 'payment_paypal', 'paypal_acquirer_button')
self.create(cr, uid, {
'name': 'Paypal',
'provider': 'paypal',
'paypal_email_account': company_paypal_account,
'view_template_id': paypal_view.id,
}, context=context)
return True
def paypal_compute_fees(self, cr, uid, id, amount, currency_id, country_id, context=None):
""" Compute paypal fees.
:param float amount: the amount to pay
:param integer country_id: an ID of a res.country, or None. This is
the customer's country, to be compared to
the acquirer company country.
:return float fees: computed fees
"""
acquirer = self.browse(cr, uid, id, context=context)
if not acquirer.fees_active:
return 0.0
country = self.pool['res.country'].browse(cr, uid, country_id, context=context)
if country and acquirer.company_id.country_id.id == country.id:
percentage = acquirer.fees_dom_var
fixed = acquirer.fees_dom_fixed
else:
percentage = acquirer.fees_int_var
fixed = acquirer.fees_int_fixed
fees = (percentage / 100.0 * amount + fixed ) / (1 - percentage / 100.0)
return fees
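    # Worked example (illustrative, using the module defaults above): for a
    # domestic payment of 100.0 with fees_dom_var = 3.4 and fees_dom_fixed = 0.35,
    #   fees = (0.034 * 100.0 + 0.35) / (1 - 0.034) ~= 3.88
    # i.e. the surcharge is grossed up so that Paypal's cut of (amount + fees)
    # still leaves the merchant with approximately the original amount.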
def paypal_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
paypal_tx_values = dict(tx_values)
paypal_tx_values.update({
'cmd': '_xclick',
'business': acquirer.paypal_email_account,
'item_name': '%s: %s' % (acquirer.company_id.name, tx_values['reference']),
'item_number': tx_values['reference'],
'amount': tx_values['amount'],
'currency_code': tx_values['currency'] and tx_values['currency'].name or '',
'address1': partner_values['address'],
'city': partner_values['city'],
'country': partner_values['country'] and partner_values['country'].code or '',
'state': partner_values['state'] and (partner_values['state'].code or partner_values['state'].name) or '',
'email': partner_values['email'],
'zip': partner_values['zip'],
'first_name': partner_values['first_name'],
'last_name': partner_values['last_name'],
'return': '%s' % urlparse.urljoin(base_url, PaypalController._return_url),
'notify_url': '%s' % urlparse.urljoin(base_url, PaypalController._notify_url),
'cancel_return': '%s' % urlparse.urljoin(base_url, PaypalController._cancel_url),
})
if acquirer.fees_active:
paypal_tx_values['handling'] = '%.2f' % paypal_tx_values.pop('fees', 0.0)
if paypal_tx_values.get('return_url'):
paypal_tx_values['custom'] = json.dumps({'return_url': '%s' % paypal_tx_values.pop('return_url')})
return partner_values, paypal_tx_values
def paypal_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_paypal_urls(cr, uid, acquirer.environment, context=context)['paypal_form_url']
def _paypal_s2s_get_access_token(self, cr, uid, ids, context=None):
"""
Note: see # see http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
for explanation why we use Authorization header instead of urllib2
password manager
"""
res = dict.fromkeys(ids, False)
parameters = werkzeug.url_encode({'grant_type': 'client_credentials'})
for acquirer in self.browse(cr, uid, ids, context=context):
tx_url = self._get_paypal_urls(cr, uid, acquirer.environment)['paypal_rest_url']
request = urllib2.Request(tx_url, parameters)
# add other headers (https://developer.paypal.com/webapps/developer/docs/integration/direct/make-your-first-call/)
request.add_header('Accept', 'application/json')
request.add_header('Accept-Language', tools.config.defaultLang)
# add authorization header
base64string = base64.encodestring('%s:%s' % (
acquirer.paypal_api_username,
acquirer.paypal_api_password)
).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
request = urllib2.urlopen(request)
result = request.read()
res[acquirer.id] = json.loads(result).get('access_token')
request.close()
return res
class TxPaypal(osv.Model):
_inherit = 'payment.transaction'
_columns = {
'paypal_txn_id': fields.char('Transaction ID'),
'paypal_txn_type': fields.char('Transaction type'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _paypal_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, txn_id = data.get('item_number'), data.get('txn_id')
if not reference or not txn_id:
error_msg = 'Paypal: received data with missing reference (%s) or txn_id (%s)' % (reference, txn_id)
_logger.error(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use txn_id ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Paypal: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
error_msg += '; multiple order found'
_logger.error(error_msg)
raise ValidationError(error_msg)
return self.browse(cr, uid, tx_ids[0], context=context)
def _paypal_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
_logger.info('Received a notification from Paypal with IPN version %s', data.get('notify_version'))
if data.get('test_ipn'):
_logger.warning(
'Received a notification from Paypal using sandbox'
),
        # TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
if tx.acquirer_reference and data.get('txn_id') != tx.acquirer_reference:
invalid_parameters.append(('txn_id', data.get('txn_id'), tx.acquirer_reference))
        # check what was bought
if float_compare(float(data.get('mc_gross', '0.0')), (tx.amount + tx.fees), 2) != 0:
invalid_parameters.append(('mc_gross', data.get('mc_gross'), '%.2f' % tx.amount)) # mc_gross is amount + fees
if data.get('mc_currency') != tx.currency_id.name:
invalid_parameters.append(('mc_currency', data.get('mc_currency'), tx.currency_id.name))
if 'handling_amount' in data and float_compare(float(data.get('handling_amount')), tx.fees, 2) != 0:
invalid_parameters.append(('handling_amount', data.get('handling_amount'), tx.fees))
# check buyer
if tx.partner_reference and data.get('payer_id') != tx.partner_reference:
invalid_parameters.append(('payer_id', data.get('payer_id'), tx.partner_reference))
# check seller
if data.get('receiver_id') and tx.acquirer_id.paypal_seller_account and data['receiver_id'] != tx.acquirer_id.paypal_seller_account:
invalid_parameters.append(('receiver_id', data.get('receiver_id'), tx.acquirer_id.paypal_seller_account))
if not data.get('receiver_id') or not tx.acquirer_id.paypal_seller_account:
# Check receiver_email only if receiver_id was not checked.
# In Paypal, this is possible to configure as receiver_email a different email than the business email (the login email)
# In Odoo, there is only one field for the Paypal email: the business email. This isn't possible to set a receiver_email
# different than the business email. Therefore, if you want such a configuration in your Paypal, you are then obliged to fill
# the Merchant ID in the Paypal payment acquirer in Odoo, so the check is performed on this variable instead of the receiver_email.
# At least one of the two checks must be done, to avoid fraudsters.
if data.get('receiver_email') != tx.acquirer_id.paypal_email_account:
invalid_parameters.append(('receiver_email', data.get('receiver_email'), tx.acquirer_id.paypal_email_account))
return invalid_parameters
def _paypal_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('payment_status')
data = {
'acquirer_reference': data.get('txn_id'),
'paypal_txn_type': data.get('payment_type'),
'partner_reference': data.get('payer_id')
}
if status in ['Completed', 'Processed']:
_logger.info('Validated Paypal payment for tx %s: set as done' % (tx.reference))
data.update(state='done', date_validate=data.get('payment_date', fields.datetime.now()))
return tx.write(data)
elif status in ['Pending', 'Expired']:
_logger.info('Received notification for Paypal payment %s: set as pending' % (tx.reference))
data.update(state='pending', state_message=data.get('pending_reason', ''))
return tx.write(data)
else:
error = 'Received unrecognized status for Paypal payment %s: %s, set as error' % (tx.reference, status)
_logger.info(error)
data.update(state='error', state_message=error)
return tx.write(data)
# --------------------------------------------------
# SERVER2SERVER RELATED METHODS
# --------------------------------------------------
def _paypal_try_url(self, request, tries=3, context=None):
""" Try to contact Paypal. Due to some issues, internal service errors
seem to be quite frequent. Several tries are done before considering
the communication as failed.
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
done, res = False, None
while (not done and tries):
try:
res = urllib2.urlopen(request)
done = True
except urllib2.HTTPError as e:
res = e.read()
e.close()
if tries and res and json.loads(res)['name'] == 'INTERNAL_SERVICE_ERROR':
_logger.warning('Failed contacting Paypal, retrying (%s remaining)' % tries)
tries = tries - 1
if not res:
pass
# raise openerp.exceptions.
result = res.read()
res.close()
return result
def _paypal_s2s_send(self, cr, uid, values, cc_values, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
tx_id = self.create(cr, uid, values, context=context)
tx = self.browse(cr, uid, tx_id, context=context)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % tx.acquirer_id._paypal_s2s_get_access_token()[tx.acquirer_id.id],
}
data = {
'intent': 'sale',
'transactions': [{
'amount': {
'total': '%.2f' % tx.amount,
'currency': tx.currency_id.name,
},
'description': tx.reference,
}]
}
if cc_values:
data['payer'] = {
'payment_method': 'credit_card',
'funding_instruments': [{
'credit_card': {
'number': cc_values['number'],
'type': cc_values['brand'],
'expire_month': cc_values['expiry_mm'],
'expire_year': cc_values['expiry_yy'],
'cvv2': cc_values['cvc'],
'first_name': tx.partner_name,
'last_name': tx.partner_name,
'billing_address': {
'line1': tx.partner_address,
'city': tx.partner_city,
'country_code': tx.partner_country_id.code,
'postal_code': tx.partner_zip,
}
}
}]
}
else:
# TODO: complete redirect URLs
data['redirect_urls'] = {
# 'return_url': 'http://example.com/your_redirect_url/',
# 'cancel_url': 'http://example.com/your_cancel_url/',
            }
data['payer'] = {
'payment_method': 'paypal',
}
data = json.dumps(data)
request = urllib2.Request('https://api.sandbox.paypal.com/v1/payments/payment', data, headers)
result = self._paypal_try_url(request, tries=3, context=context)
return (tx_id, result)
def _paypal_s2s_get_invalid_parameters(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
invalid_parameters = []
return invalid_parameters
def _paypal_s2s_validate(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
values = json.loads(data)
status = values.get('state')
if status in ['approved']:
_logger.info('Validated Paypal s2s payment for tx %s: set as done' % (tx.reference))
tx.write({
'state': 'done',
                'date_validate': values.get('update_time', fields.datetime.now()),
'paypal_txn_id': values['id'],
})
return True
elif status in ['pending', 'expired']:
_logger.info('Received notification for Paypal s2s payment %s: set as pending' % (tx.reference))
tx.write({
'state': 'pending',
# 'state_message': data.get('pending_reason', ''),
'paypal_txn_id': values['id'],
})
return True
else:
error = 'Received unrecognized status for Paypal s2s payment %s: %s, set as error' % (tx.reference, status)
_logger.info(error)
tx.write({
'state': 'error',
# 'state_message': error,
'paypal_txn_id': values['id'],
})
return False
def _paypal_s2s_get_tx_status(self, cr, uid, tx, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
# TDETODO: check tx.paypal_txn_id is set
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % tx.acquirer_id._paypal_s2s_get_access_token()[tx.acquirer_id.id],
}
url = 'https://api.sandbox.paypal.com/v1/payments/payment/%s' % (tx.paypal_txn_id)
request = urllib2.Request(url, headers=headers)
data = self._paypal_try_url(request, tries=3, context=context)
return self.s2s_feedback(cr, uid, tx.id, data, context=context)
| funkring/fdoo | addons/payment_paypal/models/paypal.py | Python | agpl-3.0 | 19,377 | 0.003716 |
# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
A stub SFTP server for loopback SFTP testing.
"""
import os
from paramiko import ServerInterface, SFTPServerInterface, SFTPServer, SFTPAttributes, \
SFTPHandle, SFTP_OK, AUTH_SUCCESSFUL, AUTH_FAILED, OPEN_SUCCEEDED, RSAKey
from paramiko.common import o666
from pysftpserver.tests.utils import t_path
USERNAME = "test"
PASSWORD = "secret"
RSA_KEY = t_path("id_rsa")
SERVER_ROOT = "server_root"
class StubServer (ServerInterface):
good_pub_key = RSAKey(filename=RSA_KEY)
def check_auth_password(self, username, password):
if username == USERNAME and password == PASSWORD:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_auth_publickey(self, username, key):
if username == USERNAME and key == self.good_pub_key:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_channel_request(self, kind, chanid):
return OPEN_SUCCEEDED
class StubSFTPHandle (SFTPHandle):
def stat(self):
try:
return SFTPAttributes.from_stat(os.fstat(self.readfile.fileno()))
except OSError as e:
return SFTPServer.convert_errno(e.errno)
def chattr(self, attr):
# python doesn't have equivalents to fchown or fchmod, so we have to
# use the stored filename
try:
SFTPServer.set_file_attr(self.filename, attr)
return SFTP_OK
except OSError as e:
return SFTPServer.convert_errno(e.errno)
class StubSFTPServer (SFTPServerInterface):
ROOT = t_path(SERVER_ROOT)
def _realpath(self, path):
return self.ROOT + self.canonicalize(path)
def list_folder(self, path):
path = self._realpath(path)
try:
out = []
flist = os.listdir(path)
for fname in flist:
attr = SFTPAttributes.from_stat(
os.lstat(os.path.join(path, fname)))
attr.filename = fname
out.append(attr)
return out
except OSError as e:
return SFTPServer.convert_errno(e.errno)
def stat(self, path):
path = self._realpath(path)
try:
return SFTPAttributes.from_stat(os.stat(path))
except OSError as e:
return SFTPServer.convert_errno(e.errno)
def lstat(self, path):
path = self._realpath(path)
try:
return SFTPAttributes.from_stat(os.lstat(path))
except OSError as e:
return SFTPServer.convert_errno(e.errno)
def open(self, path, flags, attr):
path = self._realpath(path)
try:
binary_flag = getattr(os, 'O_BINARY', 0)
flags |= binary_flag
mode = getattr(attr, 'st_mode', None)
if mode is not None:
fd = os.open(path, flags, mode)
else:
# os.open() defaults to 0777 which is
# an odd default mode for files
fd = os.open(path, flags, o666)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
if (flags & os.O_CREAT) and (attr is not None):
attr._flags &= ~attr.FLAG_PERMISSIONS
SFTPServer.set_file_attr(path, attr)
if flags & os.O_WRONLY:
if flags & os.O_APPEND:
fstr = 'ab'
else:
fstr = 'wb'
elif flags & os.O_RDWR:
if flags & os.O_APPEND:
fstr = 'a+b'
else:
fstr = 'r+b'
else:
# O_RDONLY (== 0)
fstr = 'rb'
try:
f = os.fdopen(fd, fstr)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
fobj = StubSFTPHandle(flags)
fobj.filename = path
fobj.readfile = f
fobj.writefile = f
return fobj
def remove(self, path):
path = self._realpath(path)
try:
os.remove(path)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def rename(self, oldpath, newpath):
oldpath = self._realpath(oldpath)
newpath = self._realpath(newpath)
try:
os.rename(oldpath, newpath)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def mkdir(self, path, attr):
path = self._realpath(path)
try:
os.mkdir(path)
if attr is not None:
SFTPServer.set_file_attr(path, attr)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def rmdir(self, path):
path = self._realpath(path)
try:
os.rmdir(path)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def chattr(self, path, attr):
path = self._realpath(path)
try:
SFTPServer.set_file_attr(path, attr)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def symlink(self, target_path, path):
path = self._realpath(path)
if (len(target_path) > 0) and (target_path[0] == '/'):
# absolute symlink
target_path = os.path.join(self.ROOT, target_path[1:])
try:
os.symlink(target_path, path)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def readlink(self, path):
path = self._realpath(path)
try:
symlink = os.readlink(path)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
# if it's absolute, remove the root
if os.path.isabs(symlink):
if symlink[:len(self.ROOT)] == self.ROOT:
symlink = symlink[len(self.ROOT):]
if (len(symlink) == 0) or (symlink[0] != '/'):
symlink = '/' + symlink
else:
symlink = '<error>'
return symlink
| rauburtin/pysftpserver | pysftpserver/tests/stub_sftp.py | Python | mit | 6,888 | 0.000145 |
import numpy
import itertools
import random
import math
def convert_spike_list_to_timed_spikes(spike_list, min_idx, max_idx, tmin, tmax, tstep):
times = numpy.array(range(tmin, tmax, tstep))
spike_ids = sorted(spike_list)
possible_neurons = range(min_idx, max_idx)
spikeArray = dict([(neuron, times) for neuron in spike_ids if neuron in possible_neurons])
return spikeArray
def convert_file_to_spikes(input_file_name, min_idx=None, max_idx=None, tmin=None, tmax=None, compatible_input=True):
data = numpy.array(numpy.loadtxt(fname=input_file_name), dtype=int) # get the array from the original text file
if compatible_input: data = numpy.roll(data, 1, axis=1) # swap neuron ID and time if necessary
if min_idx is None: min_idx = numpy.fmin.reduce(data[:,0], 0)
if max_idx is None: max_idx = numpy.fmax.reduce(data[:,0], 0) + 1
if tmin is None: tmin = numpy.fmin.reduce(data[:,1], 0)
if tmax is None: tmax = numpy.fmax.reduce(data[:,1], 0)
data = data[(data[:,1]>=tmin) & (data[:,1]<tmax) & (data[:,0]>=min_idx) & (data[:,0]<max_idx),:] # filter by mins and maxes
if data.shape == (0,): return {} # nothing left: return an empty dict.
sort_keys = numpy.lexsort((data[:,1], data[:,0])) # otherwise sort, grouping by neuron ID then time.
data = data[sort_keys,:]
spiking_neurons = itertools.groupby(data, lambda x: x[0]) # and taking one group at a time#,
spikeArray = dict([(neuron[0], numpy.array([spike_time[1] for spike_time in neuron[1]])) for neuron in spiking_neurons]) # create a dictionary indexed by neuron number of the spike times.
return spikeArray
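# Illustrative usage (hypothetical file name): with compatible_input=True the two
# columns are swapped on load, so each line of the text file is read as
# "<time> <neuron_id>" (and as "<neuron_id> <time>" when compatible_input=False);
#   convert_file_to_spikes('spikes.txt')
# then returns a dict {neuron_id: numpy array of spike times}, filtered by the
# optional neuron-id and time bounds.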
def loop_array(input_array, runtime=0, num_repeats=1, sampletime=0):
spikeArray = {}
for neuron in input_array:
if not sampletime:
sampletime = int(numpy.fmax.reduce(input_array[neuron],0))
last_array = numpy.array([])
if sampletime*num_repeats < runtime or (runtime > 0 and sampletime*num_repeats > runtime):
num_repeats = runtime/sampletime
last_array = input_array[neuron][input_array[neuron] <= (runtime%sampletime)]
spikeArray[neuron] = numpy.concatenate([input_array[neuron]+repeat*sampletime for repeat in range(num_repeats)])
if len(last_array): spikeArray[neuron] = numpy.concatenate([spikeArray[neuron], last_array])
return spikeArray
def splice_arrays(input_arrays, input_times=None, input_neurons=None):
spikeArray = {}
if input_neurons is None: input_neurons = [None]*len(input_arrays)
if input_times is None: input_times = [[(reduce(lambda x, y: min(x, numpy.fmin.reduce(y,0)), input_group.values(), 0), reduce(lambda x, y: max(x, numpy.fmax.reduce(y,0)), input_group.values(), 0))] for input_group in input_arrays]
for in_idx in range(len(input_arrays)):
for neuron in input_arrays[in_idx].items():
if input_neurons[in_idx] is None or neuron[0] in input_neurons[in_idx]:
for time_range in input_times[in_idx]:
if time_range is None: time_range = (reduce(lambda x, y: min(x, numpy.fmin.reduce(y,0)), input_arrays[in_idx].values(), 0), reduce(lambda x, y: max(x, numpy.fmax.reduce(y,0)), input_arrays[in_idx].values(), 0))
if neuron[0] in spikeArray:
spikeArray[neuron[0]].extend([time for time in neuron[1] if time >= time_range[0] and time < time_range[1]])
else:
spikeArray[neuron[0]] = [time for time in neuron[1] if time >= time_range[0] and time < time_range[1]]
for neuron in spikeArray.items():
spikeArray[neuron[0]] = numpy.sort(numpy.unique(numpy.array(neuron[1])))
return spikeArray
def splice_files(input_files, input_times=None, input_neurons=None, compatible_input=True):
# splice_files expects a list of files, a list of lists, one for each file, giving the onset
# and offset times for each file, and a list of neurons relevant to each file, which will be
# spliced together into a single spike list.
spikeArray = {}
    if input_times is None: input_times = [[(None, None)] for file_idx in range(len(input_files))]
    for file_idx in range(len(input_files)):
if input_neurons is None or input_neurons[file_idx] is None:
max_neuron_id = numpy.fmax.reduce(input_files[file_idx].keys(), 0) + 1
min_neuron_id = numpy.fmin.reduce(input_files[file_idx].keys(), 0)
else:
max_neuron_id = numpy.fmax.reduce(input_neurons[file_idx], 0) + 1
min_neuron_id = numpy.fmin.reduce(input_neurons[file_idx], 0)
for time_range in input_times[file_idx]:
for neuron in convert_file_to_spikes(input_file_name=input_files[file_idx], min_idx=min_neuron_id, max_idx=max_neuron_id, tmin=time_range[0], tmax=time_range[1], compatible_input=compatible_input).items():
if neuron[0] in spikeArray:
spikeArray[neuron[0]].append(neuron[1])
else:
spikeArray[neuron[0]] = neuron[1]
for neuron in spikeArray.items():
spikeArray[neuron[0]] = numpy.sort(numpy.unique(numpy.array(neuron[1])))
return spikeArray
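# Illustrative usage (file names, ranges and neuron ids are hypothetical):
#   splice_files(['run1.txt', 'run2.txt'],
#                input_times=[[(0, 1000)], [(1000, 2000)]],
#                input_neurons=[range(0, 64), range(0, 64)])
# takes spikes with times in [0, 1000) from the first file and [1000, 2000) from
# the second, restricted to neurons 0..63, and merges them into one per-neuron
# dict of sorted, de-duplicated spike times.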
def subsample_spikes_by_time(spikeArray, start, stop, step):
subsampledArray = {}
for neuron in spikeArray:
times = numpy.sort(spikeArray[neuron][(spikeArray[neuron] >= start) & (spikeArray[neuron] < stop)])
interval = step/2 + step%2
t_now = times[0]
t_start = times[0]
t_last = len(times)
t_index = 0
subsampled_times = []
while t_index < t_last:
spikes_in_interval = 0
while t_index < t_last and times[t_index] <= t_start + interval:
spikes_in_interval += 1
if spikes_in_interval >= interval:
t_start = times[t_index] + interval
subsampled_times.append(times[t_index])
try:
t_index = next(i for i in range(t_index, t_last) if times[i] >= t_start)
except StopIteration:
t_index = t_last
break
t_index += 1
else:
if t_index < t_last:
t_start = times[t_index]
subsampledArray[neuron] = numpy.array(subsampled_times)
return subsampledArray
def random_skew_times(spikeArray, skewtime, seed=3425670):
random.seed(seed)
#return dict([(neuron, [int(abs(t+random.uniform(-skewtime, skewtime))) for t in spikeArray[neuron]]) for neuron in spikeArray])
spikeDict = dict([(neuron, numpy.array(numpy.fabs(spikeArray[neuron]+numpy.random.uniform(-skewtime, skewtime, len(spikeArray[neuron]))), dtype=int)) for neuron in spikeArray])
#test_out = open('spikeArray.txt', 'w')
#test_out.write('%s' % spikeDict)
#test_out.close()
return spikeDict
def generate_shadow_spikes(spikeArray, dim_x, dim_y, move_velocity):
"""
generate a second set of spikes as if coming from a DVS retina.
imagines that the offset pixels perfectly register with perfect
timing precision.
args: spikeArray, in the format above for an array of Spikes: a
dict of {id: [times]}
dim_x, size of the field in the x-dimension
dim_y, size of the field in the y-dimension
move_velocity: an (s, theta) tuple, where s is the speed
measured in pixels/ms, theta the angle of virtual movement
measured in radians anticlockwise (0 is horizontal movement
to the right). The function will displace the shadow by
s pixels in the reverse of direction indicated for each time
point where spikes are registered. It will add one last set
of spikes at time tmax+1, at position of the source spikes at
time tmax.
"""
motion_x = -move_velocity[0]*math.cos(move_velocity[1])
motion_y = -move_velocity[0]*math.sin(move_velocity[1])
spikeArray_out = dict([(int(motion_x+spike[0]%dim_x)+dim_x*int(motion_y+spike[0]/dim_x), spike[1][1:]) for spike in spikeArray.items() if len(spike[1]) > 1])
spikeArray_out = dict([item for item in spikeArray_out.items() if item[0] >= 0 and item[0] < dim_x*dim_y])
spikeArray_out.update(dict([(spike[0], numpy.append(spikeArray_out.get(spike[0], numpy.array([], dtype=int)), [int(max(spike[1]))])) for spike in spikeArray.items()]))
return spikeArray_out
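# Worked example (illustrative values): for a 128x128 field and
# move_velocity = (1, 0), i.e. 1 pixel/ms moving to the right, motion_x = -1 and
# motion_y = 0, so a source spike at pixel id p is shadowed at pixel p - 1 (one
# pixel to the left) for every spike time after the first; shadows that fall off
# the left edge are dropped by the bounds filter, and a final spike at the
# source position is added at that source's last spike time.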
| dhgarcia/babelModules | pynnModules/Network/spike_file_to_spike_array.py | Python | gpl-3.0 | 8,559 | 0.015189 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
import os
sourcefiles = [ 'fast_likelihood.pyx']
ext_modules = [Extension("fast_likelihood",
sourcefiles,
include_dirs = [np.get_include()],
extra_compile_args=['-O3', '-fopenmp', '-lc++'],
extra_link_args=['-fopenmp'],
language='c++')]
setup(
name = 'fastgmm',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
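# Usage note (assumed typical invocation, not part of the original file): the
# extension is normally built in place with
#   python setup_fast_likelihood.py build_ext --inplace
# which cythonizes fast_likelihood.pyx and compiles it with the -O3/-fopenmp
# flags configured above.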
| jeremy-ma/gmmmc | gmmmc/fastgmm/setup_fast_likelihood.py | Python | mit | 597 | 0.025126 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import *
import struct
class PreviousSpendableOutput(object):
def __init__(self, tx = CTransaction(), n = -1):
self.tx = tx
self.n = n # the output we're spending
'''
This reimplements tests from the bitcoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
'''
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def __init__(self, header=None):
super(CBrokenBlock, self).__init__(header)
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
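        # Editor's note: 0xff followed by an 8-byte little-endian count is the
        # largest CompactSize ("varint") encoding; using it for a small vtx count
        # is the deliberately bloated, non-canonical serialization referred to in
        # the class comment above.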
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
r += tx.serialize()
return r
def normal_serialize(self):
r = b""
r += super(CBrokenBlock, self).serialize()
return r
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do the comparison.
def __init__(self):
super().__init__()
self.num_nodes = 1
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
def add_options(self, parser):
super().add_options(parser)
parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = create_transaction(spend_tx, n, b"", value, script)
return tx
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx, n):
scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, n, value, script)
self.sign_tx(tx, spend_tx, n)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip == None:
base_block_hash = self.genesis_hash
block_time = int(time.time())+1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend == None:
block = create_block(base_block_hash, coinbase, block_time)
else:
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
self.sign_tx(tx, spend.tx, spend.n)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand for functions
block = self.next_block
create_tx = self.create_tx
create_and_sign_tx = self.create_and_sign_transaction
# these must be updated if consensus changes
MAX_BLOCK_SIGOPS = 20000
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
block(1, spend=out[0])
save_spendable_output()
yield accepted()
block(2, spend=out[1])
yield accepted()
save_spendable_output()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out[1])
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
block(4, spend=out[2])
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out[2])
save_spendable_output()
yield rejected()
block(6, spend=out[3])
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out[2])
yield rejected()
block(8, spend=out[4])
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out[3])
yield rejected()
block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out[3])
save_spendable_output()
b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
# Test that a block with too many checksigs is rejected
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out[6])
yield rejected()
block(22, spend=out[5])
yield rejected()
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure it's accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
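# Roughly, the 69 bytes subtracted above are the filler tx's fixed overhead (a
# back-of-the-envelope breakdown, assuming the push is large enough to need OP_PUSHDATA4):
#   4 (version) + 1 (input count) + 36 (outpoint) + 1 (scriptSig length) + 4 (nSequence)
#   + 1 (output count) + 8 (value) + 5 (scriptPubKey length varint) + 5 (OP_PUSHDATA4 header)
#   + 4 (nLockTime) = 69
# so the filler tx adds exactly script_length + 69 bytes, landing the block on the limit.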
yield accepted()
save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = block(27, spend=out[7])
yield rejected(RejectResult(0, b'bad-prevblk'))
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = block(29, spend=out[7])
yield rejected(RejectResult(0, b'bad-prevblk'))
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
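# Worked arithmetic for the edge case, using MAX_BLOCK_SIGOPS = 20000 as defined above:
#   (20000 - 1) // 20 = 999 OP_CHECKMULTISIG ops -> 999 * 20 = 19980 legacy sigops
#   + 19 OP_CHECKSIG                             -> 19999 sigops in the spend script
#   + 1 sigop from the pay-to-pubkey coinbase    -> 20000 == MAX_BLOCK_SIGOPS
# which is exactly what the assert below checks; b32 uses 1000 CHECKMULTISIGs instead and
# therefore lands one sigop over the limit.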
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
b31 = block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
yield accepted()
save_spendable_output()
# this goes over the limit because the coinbase has one sigop
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKMULTISIGVERIFY
tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
block(33, spend=out[9], script=lots_of_multisigs)
yield accepted()
save_spendable_output()
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
block(34, spend=out[10], script=too_many_multisigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKSIGVERIFY
tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = block(35, spend=out[10], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
block(36, spend=out[11], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
tip(35)
b37 = block(37, spend=out[11])
txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
b37 = update_block(37, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
tip(35)
block(38, spend=txout_b37)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
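# Where the 6 sigops per output come from: when one of these P2SH outputs is spent, the
# redeem script executes five OP_CHECKSIGVERIFYs followed by one OP_CHECKSIG (the OP_2DUPs
# only duplicate the sig/pubkey pair on the stack and add no sigops), so each spend
# contributes 5 + 1 = 6 sigops to the block's P2SH sigop count.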
tip(35)
b39 = block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend.tx, spend.n)
tx.rehash()
b39 = update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size=len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = update_block(39, [])
yield accepted()
save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
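# Rough arithmetic behind the numbers above (assuming, as the comment states, that the
# only pre-existing sigop in b40 is the coinbase's single CHECKSIG):
#   numTxes = (20000 - 1) // 6 = 3333 P2SH spends * 6 sigops = 19998
#   19998 + 1 (coinbase) = 19999, so the filler output below adds
#   20000 - 19999 + 1 = 2 OP_CHECKSIGs, giving 20001 -- one over the limit.
# b41 repeats the construction with one fewer filler sigop, landing exactly on 20000.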
tip(39)
b40 = block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes+1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
update_block(40, new_txs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# same as b40, but one less sigop
tip(39)
b41 = block(41, spend=None)
update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
update_block(41, [tx])
yield accepted()
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
tip(39)
block(42, spend=out[12])
yield rejected()
save_spendable_output()
block(43, spend=out[13])
yield accepted()
save_spendable_output()
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
yield accepted()
# A block with a non-coinbase as the first tx
non_coinbase = create_tx(out[15].tx, out[15].n, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
self.tip = b45
self.blocks[45] = b45
yield rejected(RejectResult(16, b'bad-cb-missing'))
# A block with no txns
tip(44)
b46 = CBlock()
b46.nTime = b44.nTime+1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
s = ser_uint256(b46.hashMerkleRoot)
yield rejected(RejectResult(16, b'bad-blk-length'))
# A block with invalid work
tip(44)
b47 = block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.scrypt256 < target: #changed > to <
b47.nNonce += 1
b47.rehash()
yield rejected(RejectResult(16, b'high-hash'))
# A block with timestamp > 2 hrs in the future
tip(44)
b48 = block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
yield rejected(RejectResult(16, b'time-too-new'))
# A block with an invalid merkle hash
tip(44)
b49 = block(49)
b49.hashMerkleRoot += 1
b49.solve()
yield rejected(RejectResult(16, b'bad-txnmrklroot'))
# A block with an incorrect POW limit
tip(44)
b50 = block(50)
b50.nBits = b50.nBits - 1
b50.solve()
yield rejected(RejectResult(16, b'bad-diffbits'))
# A block with two coinbase txns
tip(44)
b51 = block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = update_block(51, [cb2])
yield rejected(RejectResult(16, b'bad-cb-multiple'))
# A block w/ duplicate txns
# Note: txns have to be in the right position in the merkle tree to trigger this error
tip(44)
b52 = block(52, spend=out[15])
tx = create_tx(b52.vtx[1], 0, 1)
b52 = update_block(52, [tx, tx])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
tip(43)
block(53, spend=out[14])
yield rejected() # rejected since b44 is at same height
save_spendable_output()
# invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
b54 = block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
yield rejected(RejectResult(16, b'time-too-old'))
# valid timestamp
tip(53)
b55 = block(55, spend=out[15])
b55.nTime = b35.nTime
update_block(55, [])
yield accepted()
save_spendable_output()
# Test CVE-2012-2459
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): certain sequences of transactions in a block can be
# repeated without affecting the block's merkle root, while still invalidating the block.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
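# Sketch of the malleability being exercised: the merkle computation hashes entries pairwise
# and, whenever a level has an odd number of entries, duplicates the last one. So for a 3-tx
# block [coinbase, tx, tx1] the leaf level is effectively [h(coinbase), h(tx), h(tx1), h(tx1)],
# which is indistinguishable from the leaf level of the 4-tx list [coinbase, tx, tx1, tx1] --
# the root is identical even though the second block contains a duplicate transaction.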
# b57 - a good block with 2 txs, don't submit until end
tip(55)
b57 = block(57)
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
b57 = update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx),3)
b56 = update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# b57p2 - a good block with 6 tx'es, don't submit until end
tip(55)
b57p2 = block("57p2")
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
tx2 = create_tx(tx1, 0, 1)
tx3 = create_tx(tx2, 0, 1)
tx4 = create_tx(tx3, 0, 1)
b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx),6)
b56p2 = update_block("b56p2", [tx3, tx4])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip("57p2")
yield accepted()
tip(57)
yield rejected() #rejected because 57p2 seen first
save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
tip(57)
b58 = block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].tx.vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = update_block(58, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# tx with output value > input value out of range
tip(57)
b59 = block(59)
tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
b59 = update_block(59, [tx])
yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
# reset to good chain
tip(57)
b60 = block(60, spend=out[17])
yield accepted()
save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
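# Why two coinbases can share a txid here: a txid is just the hash of the serialized
# transaction, and once b61's coinbase scriptSig is overwritten with b60's (the outputs
# already match -- same amount, same key), the two coinbases serialize identically, as the
# assert below confirms. b61 therefore repeats an existing, unspent txid and is rejected
# under BIP30.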
tip(60)
b61 = block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
b61.vtx[0].rehash()
b61 = update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
yield rejected(RejectResult(16, b'bad-txns-BIP30'))
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
tip(60)
b62 = block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff #this locktime is non-final
assert(out[18].n < len(out[18].tx.vout))
tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = update_block(62, [tx])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
tip(60)
b63 = block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = update_block(63, [])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64a but w/ canonical varint)
#
tip(60)
regular_block = block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
yield TestInstance([[self.tip, None]])
# comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
self.test.block_store.erase(b64a.sha256)
tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
update_block(64, [])
yield accepted()
save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
tip(64)
b65 = block(65)
tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 0)
update_block(65, [tx1, tx2])
yield accepted()
save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
tip(65)
b66 = block(66)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
update_block(66, [tx2, tx1])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
tip(65)
b67 = block(67)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
tx3 = create_and_sign_tx(tx1, 0, 2)
update_block(67, [tx1, tx2, tx3])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
tip(65)
b68 = block(68, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
update_block(68, [tx])
yield rejected(RejectResult(16, b'bad-cb-amount'))
tip(65)
b69 = block(69, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
update_block(69, [tx])
yield accepted()
save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
tip(69)
block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
update_block(70, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of b72, but re-adds one of its transactions. As a result, it has the same hash as b72.
#
tip(69)
b72 = block(72)
tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
tx2 = create_and_sign_tx(tx1, 0, 1)
b72 = update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
tip(71)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip(72)
yield accepted()
save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,524]: unread data (script_element)
# bytearray[20,525] : OP_CHECKSIG (this puts us over the limit)
#
tip(72)
b73 = block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS+1] = element_size // 256
a[MAX_BLOCK_SIGOPS+2] = 0
a[MAX_BLOCK_SIGOPS+3] = 0
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b73 = update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# b74/75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
#
#
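# Note on the byte layout used below: 0x4e is OP_PUSHDATA4, and the four bytes that follow
# it are read as a little-endian element length. In b74 that length is 0xfffffffe and in
# b75 it is 0xffffffff -- both vastly larger than the bytes actually present -- so the
# element can never be fully parsed and the OP_CHECKSIG bytes after the push are never
# reached by the sigop counter.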
tip(72)
b74 = block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS+1] = 0xfe
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
a[MAX_BLOCK_SIGOPS+4] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b74 = update_block(74, [tx])
yield rejected(RejectResult(16, b'bad-blk-sigops'))
tip(72)
b75 = block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS+1] = 0xff
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b75 = update_block(75, [tx])
yield accepted()
save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
tip(75)
b76 = block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
b76 = update_block(76, [tx])
yield accepted()
save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. They are unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result, the odds are only 50% that the Python code will produce the accepted
# form and that the transaction will make it into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
#
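# Background on the low-S issue mentioned above: for an ECDSA signature (r, s) over
# secp256k1, (r, N - s) verifies equally well (N being the curve order), and node policy
# only treats the "low-S" form (s <= N/2) as standard. The test framework's signer does not
# normalise s, so roughly half of its signatures would be rejected by the mempool -- hence
# the unsigned, OP_TRUE-spending transactions used here.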
tip(76)
block(77)
tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
update_block(77, [tx77])
yield accepted()
save_spendable_output()
block(78)
tx78 = create_tx(tx77, 0, 9*COIN)
update_block(78, [tx78])
yield accepted()
block(79)
tx79 = create_tx(tx78, 0, 8*COIN)
update_block(79, [tx79])
yield accepted()
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
tip(77)
block(80, spend=out[25])
yield rejected()
save_spendable_output()
block(81, spend=out[26])
yield rejected() # other chain is same length
save_spendable_output()
block(82, spend=out[27])
yield accepted() # now this chain is longer, triggers re-org
save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
b83 = block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
update_block(83, [tx1, tx2])
yield accepted()
save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
#
b84 = block(84)
tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29].tx, out[29].n)
tx1.rehash()
tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
update_block(84, [tx1,tx2,tx3,tx4,tx5])
yield accepted()
save_spendable_output()
tip(83)
block(85, spend=out[29])
yield rejected()
block(86, spend=out[30])
yield accepted()
tip(84)
block(87, spend=out[30])
yield rejected()
save_spendable_output()
block(88, spend=out[31])
yield accepted()
save_spendable_output()
# trying to spend the OP_RETURN output is rejected
block("89a", spend=out[32])
tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
update_block("89a", [tx])
yield rejected()
# Test re-org of a week's worth of blocks (1088 blocks)
# This test takes a minute or two and can be accomplished in memory
#
if self.options.runbarelyexpensive:
tip(88)
LARGE_REORG_SIZE = 1088
test1 = TestInstance(sync_every_block=False)
spend=out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
test1.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
spend = get_spendable_output()
yield test1
chain1_tip = i
# now create alt chain of same length
tip(88)
test2 = TestInstance(sync_every_block=False)
for i in range(89, LARGE_REORG_SIZE + 89):
block("alt"+str(i))
test2.blocks_and_transactions.append([self.tip, False])
yield test2
# extend alt chain to trigger re-org
block("alt" + str(chain1_tip + 1))
yield accepted()
# ... and re-org back to the first chain
tip(chain1_tip)
block(chain1_tip + 1)
yield rejected()
block(chain1_tip + 2)
yield accepted()
chain1_tip += 2
if __name__ == '__main__':
FullBlockTest().main()
|
EntropyFactory/creativechain-core
|
qa/rpc-tests/p2p-fullblocktest.py
|
Python
|
mit
| 52,732 | 0.003926 |
# encoding: utf-8
import datetime
import django
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Django 1.5+ compatibility
if django.VERSION >= (1, 5):
from django.contrib.auth import get_user_model
else:
from django.contrib.auth.models import User
def get_user_model():
return User
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Flag'
db.create_table('waffle_flag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
('everyone', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('percent', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
('superusers', self.gf('django.db.models.fields.BooleanField')(default=True)),
('staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('authenticated', self.gf('django.db.models.fields.BooleanField')(default=False)),
('rollout', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('waffle', ['Flag'])
# Adding M2M table for field groups on 'Flag'
db.create_table('waffle_flag_groups', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('flag', models.ForeignKey(orm['waffle.flag'], null=False)),
('group', models.ForeignKey(orm['auth.group'], null=False))
))
db.create_unique('waffle_flag_groups', ['flag_id', 'group_id'])
# Adding M2M table for field users on 'Flag'
db.create_table('waffle_flag_users', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('flag', models.ForeignKey(orm['waffle.flag'], null=False)),
('user', models.ForeignKey(get_user_model(), null=False))
))
db.create_unique('waffle_flag_users', ['flag_id', 'user_id'])
# Adding model 'Switch'
db.create_table('waffle_switch', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('waffle', ['Switch'])
def backwards(self, orm):
# Deleting model 'Flag'
db.delete_table('waffle_flag')
# Removing M2M table for field groups on 'Flag'
db.delete_table('waffle_flag_groups')
# Removing M2M table for field users on 'Flag'
db.delete_table('waffle_flag_users')
# Deleting model 'Switch'
db.delete_table('waffle_switch')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'waffle.flag': {
'Meta': {'object_name': 'Flag'},
'authenticated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'everyone': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
'rollout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'superusers': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'waffle.switch': {
'Meta': {'object_name': 'Switch'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['waffle']
|
mark-adams/django-waffle
|
waffle/south_migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 7,674 | 0.007297 |
# -*- Mode: Python; py-indent-offset: 4 -*-
# pygobject - Python bindings for the GObject library
# Copyright (C) 2006-2012 Johan Dahlin
#
# glib/__init__.py: initialisation file for glib module
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from . import _glib
# Internal API
_PyGLib_API = _glib._PyGLib_API
# Types
GError = _glib.GError
IOChannel = _glib.IOChannel
Idle = _glib.Idle
MainContext = _glib.MainContext
MainLoop = _glib.MainLoop
OptionContext = _glib.OptionContext
OptionGroup = _glib.OptionGroup
Pid = _glib.Pid
PollFD = _glib.PollFD
Source = _glib.Source
Timeout = _glib.Timeout
# Constants
IO_ERR = _glib.IO_ERR
IO_FLAG_APPEND = _glib.IO_FLAG_APPEND
IO_FLAG_GET_MASK = _glib.IO_FLAG_GET_MASK
IO_FLAG_IS_READABLE = _glib.IO_FLAG_IS_READABLE
IO_FLAG_IS_SEEKABLE = _glib.IO_FLAG_IS_SEEKABLE
IO_FLAG_IS_WRITEABLE = _glib.IO_FLAG_IS_WRITEABLE
IO_FLAG_MASK = _glib.IO_FLAG_MASK
IO_FLAG_NONBLOCK = _glib.IO_FLAG_NONBLOCK
IO_FLAG_SET_MASK = _glib.IO_FLAG_SET_MASK
IO_HUP = _glib.IO_HUP
IO_IN = _glib.IO_IN
IO_NVAL = _glib.IO_NVAL
IO_OUT = _glib.IO_OUT
IO_PRI = _glib.IO_PRI
IO_STATUS_AGAIN = _glib.IO_STATUS_AGAIN
IO_STATUS_EOF = _glib.IO_STATUS_EOF
IO_STATUS_ERROR = _glib.IO_STATUS_ERROR
IO_STATUS_NORMAL = _glib.IO_STATUS_NORMAL
OPTION_ERROR = _glib.OPTION_ERROR
OPTION_ERROR_BAD_VALUE = _glib.OPTION_ERROR_BAD_VALUE
OPTION_ERROR_FAILED = _glib.OPTION_ERROR_FAILED
OPTION_ERROR_UNKNOWN_OPTION = _glib.OPTION_ERROR_UNKNOWN_OPTION
OPTION_FLAG_FILENAME = _glib.OPTION_FLAG_FILENAME
OPTION_FLAG_HIDDEN = _glib.OPTION_FLAG_HIDDEN
OPTION_FLAG_IN_MAIN = _glib.OPTION_FLAG_IN_MAIN
OPTION_FLAG_NOALIAS = _glib.OPTION_FLAG_NOALIAS
OPTION_FLAG_NO_ARG = _glib.OPTION_FLAG_NO_ARG
OPTION_FLAG_OPTIONAL_ARG = _glib.OPTION_FLAG_OPTIONAL_ARG
OPTION_FLAG_REVERSE = _glib.OPTION_FLAG_REVERSE
OPTION_REMAINING = _glib.OPTION_REMAINING
PRIORITY_DEFAULT = _glib.PRIORITY_DEFAULT
PRIORITY_DEFAULT_IDLE = _glib.PRIORITY_DEFAULT_IDLE
PRIORITY_HIGH = _glib.PRIORITY_HIGH
PRIORITY_HIGH_IDLE = _glib.PRIORITY_HIGH_IDLE
PRIORITY_LOW = _glib.PRIORITY_LOW
SPAWN_CHILD_INHERITS_STDIN = _glib.SPAWN_CHILD_INHERITS_STDIN
SPAWN_DO_NOT_REAP_CHILD = _glib.SPAWN_DO_NOT_REAP_CHILD
SPAWN_FILE_AND_ARGV_ZERO = _glib.SPAWN_FILE_AND_ARGV_ZERO
SPAWN_LEAVE_DESCRIPTORS_OPEN = _glib.SPAWN_LEAVE_DESCRIPTORS_OPEN
SPAWN_SEARCH_PATH = _glib.SPAWN_SEARCH_PATH
SPAWN_STDERR_TO_DEV_NULL = _glib.SPAWN_STDERR_TO_DEV_NULL
SPAWN_STDOUT_TO_DEV_NULL = _glib.SPAWN_STDOUT_TO_DEV_NULL
USER_DIRECTORY_DESKTOP = _glib.USER_DIRECTORY_DESKTOP
USER_DIRECTORY_DOCUMENTS = _glib.USER_DIRECTORY_DOCUMENTS
USER_DIRECTORY_DOWNLOAD = _glib.USER_DIRECTORY_DOWNLOAD
USER_DIRECTORY_MUSIC = _glib.USER_DIRECTORY_MUSIC
USER_DIRECTORY_PICTURES = _glib.USER_DIRECTORY_PICTURES
USER_DIRECTORY_PUBLIC_SHARE = _glib.USER_DIRECTORY_PUBLIC_SHARE
USER_DIRECTORY_TEMPLATES = _glib.USER_DIRECTORY_TEMPLATES
USER_DIRECTORY_VIDEOS = _glib.USER_DIRECTORY_VIDEOS
# Functions
child_watch_add = _glib.child_watch_add
filename_display_basename = _glib.filename_display_basename
filename_display_name = _glib.filename_display_name
filename_from_utf8 = _glib.filename_from_utf8
find_program_in_path = _glib.find_program_in_path
get_application_name = _glib.get_application_name
get_current_time = _glib.get_current_time
get_prgname = _glib.get_prgname
get_system_config_dirs = _glib.get_system_config_dirs
get_system_data_dirs = _glib.get_system_data_dirs
get_user_cache_dir = _glib.get_user_cache_dir
get_user_config_dir = _glib.get_user_config_dir
get_user_data_dir = _glib.get_user_data_dir
get_user_special_dir = _glib.get_user_special_dir
glib_version = _glib.glib_version
idle_add = _glib.idle_add
io_add_watch = _glib.io_add_watch
main_context_default = _glib.main_context_default
main_depth = _glib.main_depth
markup_escape_text = _glib.markup_escape_text
pyglib_version = _glib.pyglib_version
set_application_name = _glib.set_application_name
set_prgname = _glib.set_prgname
source_remove = _glib.source_remove
spawn_async = _glib.spawn_async
threads_init = _glib.threads_init
timeout_add = _glib.timeout_add
timeout_add_seconds = _glib.timeout_add_seconds
uri_list_extract_uris = _glib.uri_list_extract_uris
|
davidmalcolm/pygobject
|
gi/_glib/__init__.py
|
Python
|
lgpl-2.1
| 4,827 | 0 |
"""
HTMLParser-based link extractor
"""
from HTMLParser import HTMLParser
from urlparse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class HtmlParserLinkExtractor(HTMLParser):
def __init__(self, tag="a", attr="href", process=None, unique=False):
HTMLParser.__init__(self)
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
def _extract_links(self, response_text, response_url, response_encoding):
self.reset()
self.feed(response_text)
self.close()
links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
ret = []
base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
for link in links:
if isinstance(link.url, unicode):
link.url = link.url.encode(response_encoding)
link.url = urljoin(base_url, link.url)
link.url = safe_url_string(link.url, response_encoding)
link.text = link.text.decode(response_encoding)
ret.append(link)
return ret
def extract_links(self, response):
# wrapper needed to allow working directly with text
return self._extract_links(response.body, response.url, response.encoding)
def reset(self):
HTMLParser.reset(self)
self.base_url = None
self.current_link = None
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'base':
self.base_url = dict(attrs).get('href')
if self.scan_tag(tag):
for attr, value in attrs:
if self.scan_attr(attr):
url = self.process_attr(value)
link = Link(url=url)
self.links.append(link)
self.current_link = link
def handle_endtag(self, tag):
self.current_link = None
def handle_data(self, data):
if self.current_link and not self.current_link.text:
self.current_link.text = data.strip()
def matches(self, url):
"""This extractor matches with any url, since
it doesn't contain any patterns"""
return True
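# Illustrative usage (the variable names are assumptions, not part of this module): given a
# scrapy HtmlResponse `response`, the extractor is typically used as
#   extractor = HtmlParserLinkExtractor(tag="a", attr="href", unique=True)
#   links = extractor.extract_links(response)  # list of scrapy.link.Link objects
# with unique=True de-duplicating links by URL before they are resolved against the base URL.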
|
mzdaniel/oh-mainline
|
vendor/packages/scrapy/scrapy/contrib/linkextractors/htmlparser.py
|
Python
|
agpl-3.0
| 2,447 | 0.001635 |
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Sum
from django.urls import reverse
from mptt.models import MPTTModel, TreeForeignKey
from taggit.managers import TaggableManager
from dcim.choices import *
from dcim.constants import *
from dcim.fields import MACAddressField
from extras.models import ObjectChange, TaggedItem
from extras.utils import extras_features
from utilities.fields import NaturalOrderingField
from utilities.mptt import TreeManager
from utilities.ordering import naturalize_interface
from utilities.querysets import RestrictedQuerySet
from utilities.query_functions import CollateAsChar
from utilities.utils import serialize_object
__all__ = (
'BaseInterface',
'CableTermination',
'ConsolePort',
'ConsoleServerPort',
'DeviceBay',
'FrontPort',
'Interface',
'InventoryItem',
'PathEndpoint',
'PowerOutlet',
'PowerPort',
'RearPort',
)
class ComponentModel(models.Model):
"""
An abstract model inherited by any model which has a parent Device.
"""
device = models.ForeignKey(
to='dcim.Device',
on_delete=models.CASCADE,
related_name='%(class)ss'
)
name = models.CharField(
max_length=64
)
_name = NaturalOrderingField(
target_field='name',
max_length=100,
blank=True
)
label = models.CharField(
max_length=64,
blank=True,
help_text="Physical label"
)
description = models.CharField(
max_length=200,
blank=True
)
objects = RestrictedQuerySet.as_manager()
class Meta:
abstract = True
def __str__(self):
if self.label:
return f"{self.name} ({self.label})"
return self.name
def to_objectchange(self, action):
# Annotate the parent Device
try:
device = self.device
except ObjectDoesNotExist:
# The parent Device has already been deleted
device = None
return ObjectChange(
changed_object=self,
object_repr=str(self),
action=action,
related_object=device,
object_data=serialize_object(self)
)
@property
def parent(self):
return getattr(self, 'device', None)
class CableTermination(models.Model):
"""
An abstract model inherited by all models to which a Cable can terminate (certain device components, PowerFeed, and
CircuitTermination instances). The `cable` field indicates the Cable instance which is terminated to this instance.
`_cable_peer` is a GenericForeignKey used to cache the far-end CableTermination on the local instance; this is a
shortcut to referencing `cable.termination_b`, for example. `_cable_peer` is set or cleared by the receivers in
dcim.signals when a Cable instance is created or deleted, respectively.
"""
cable = models.ForeignKey(
to='dcim.Cable',
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
_cable_peer_type = models.ForeignKey(
to=ContentType,
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
_cable_peer_id = models.PositiveIntegerField(
blank=True,
null=True
)
_cable_peer = GenericForeignKey(
ct_field='_cable_peer_type',
fk_field='_cable_peer_id'
)
# Generic relations to Cable. These ensure that an attached Cable is deleted if the terminated object is deleted.
_cabled_as_a = GenericRelation(
to='dcim.Cable',
content_type_field='termination_a_type',
object_id_field='termination_a_id'
)
_cabled_as_b = GenericRelation(
to='dcim.Cable',
content_type_field='termination_b_type',
object_id_field='termination_b_id'
)
class Meta:
abstract = True
def get_cable_peer(self):
return self._cable_peer
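# For illustration (hypothetical objects, not part of this module): if interface_a is cabled
# to interface_b, interface_a.get_cable_peer() returns interface_b directly from the cached
# _cable_peer rather than walking interface_a.cable.termination_b; the cache is populated and
# cleared by the dcim.signals receivers when the Cable is created or deleted.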
class PathEndpoint(models.Model):
"""
An abstract model inherited by any CableTermination subclass which represents the end of a CablePath; specifically,
these include ConsolePort, ConsoleServerPort, PowerPort, PowerOutlet, Interface, PowerFeed, and CircuitTermination.
`_path` references the CablePath originating from this instance, if any. It is set or cleared by the receivers in
dcim.signals in response to changes in the cable path, and complements the `origin` GenericForeignKey field on the
CablePath model. `_path` should not be accessed directly; rather, use the `path` property.
`connected_endpoint()` is a convenience method for returning the destination of the associated CablePath, if any.
"""
_path = models.ForeignKey(
to='dcim.CablePath',
on_delete=models.SET_NULL,
null=True,
blank=True
)
class Meta:
abstract = True
def trace(self):
if self._path is None:
return []
# Construct the complete path
path = [self, *self._path.get_path()]
while (len(path) + 1) % 3:
# Pad to ensure we have complete three-tuples (e.g. for paths that end at a RearPort)
path.append(None)
path.append(self._path.destination)
# Return the path as a list of three-tuples (A termination, cable, B termination)
return list(zip(*[iter(path)] * 3))
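# For illustration (an assumed example, not derived from real data): a direct
# interface-to-interface connection yields something like [(iface_a, cable_1, iface_b)],
# while a path through a patch panel yields something like
# [(iface_a, cable_1, front_port), (rear_port, cable_2, iface_b)] -- each three-tuple
# being (near-side termination, cable, far-side termination).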
@property
def path(self):
return self._path
@property
def connected_endpoint(self):
"""
Caching accessor for the attached CablePath's destination (if any)
"""
if not hasattr(self, '_connected_endpoint'):
self._connected_endpoint = self._path.destination if self._path else None
return self._connected_endpoint
#
# Console ports
#
@extras_features('export_templates', 'webhooks', 'custom_links')
class ConsolePort(CableTermination, PathEndpoint, ComponentModel):
"""
A physical console port within a Device. ConsolePorts connect to ConsoleServerPorts.
"""
type = models.CharField(
max_length=50,
choices=ConsolePortTypeChoices,
blank=True,
help_text='Physical port type'
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:consoleport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.type,
self.description,
)
#
# Console server ports
#
@extras_features('webhooks', 'custom_links')
class ConsoleServerPort(CableTermination, PathEndpoint, ComponentModel):
"""
A physical port within a Device (typically a designated console server) which provides access to ConsolePorts.
"""
type = models.CharField(
max_length=50,
choices=ConsolePortTypeChoices,
blank=True,
help_text='Physical port type'
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:consoleserverport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.type,
self.description,
)
#
# Power ports
#
@extras_features('export_templates', 'webhooks', 'custom_links')
class PowerPort(CableTermination, PathEndpoint, ComponentModel):
"""
A physical power supply (intake) port within a Device. PowerPorts connect to PowerOutlets.
"""
type = models.CharField(
max_length=50,
choices=PowerPortTypeChoices,
blank=True,
help_text='Physical port type'
)
maximum_draw = models.PositiveSmallIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1)],
help_text="Maximum power draw (watts)"
)
allocated_draw = models.PositiveSmallIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1)],
help_text="Allocated power draw (watts)"
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:powerport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.maximum_draw,
self.allocated_draw,
self.description,
)
def clean(self):
super().clean()
if self.maximum_draw is not None and self.allocated_draw is not None:
if self.allocated_draw > self.maximum_draw:
raise ValidationError({
'allocated_draw': f"Allocated draw cannot exceed the maximum draw ({self.maximum_draw}W)."
})
def get_power_draw(self):
"""
Return the allocated and maximum power draw (in watts, matching the field definitions above) and the child PowerOutlet count for this PowerPort.
"""
# Calculate aggregate draw of all child power outlets if no numbers have been defined manually
if self.allocated_draw is None and self.maximum_draw is None:
poweroutlet_ct = ContentType.objects.get_for_model(PowerOutlet)
outlet_ids = PowerOutlet.objects.filter(power_port=self).values_list('pk', flat=True)
utilization = PowerPort.objects.filter(
_cable_peer_type=poweroutlet_ct,
_cable_peer_id__in=outlet_ids
).aggregate(
maximum_draw_total=Sum('maximum_draw'),
allocated_draw_total=Sum('allocated_draw'),
)
ret = {
'allocated': utilization['allocated_draw_total'] or 0,
'maximum': utilization['maximum_draw_total'] or 0,
'outlet_count': len(outlet_ids),
'legs': [],
}
# Calculate per-leg aggregates for three-phase feeds
if getattr(self._cable_peer, 'phase', None) == PowerFeedPhaseChoices.PHASE_3PHASE:
for leg, leg_name in PowerOutletFeedLegChoices:
outlet_ids = PowerOutlet.objects.filter(power_port=self, feed_leg=leg).values_list('pk', flat=True)
utilization = PowerPort.objects.filter(
_cable_peer_type=poweroutlet_ct,
_cable_peer_id__in=outlet_ids
).aggregate(
maximum_draw_total=Sum('maximum_draw'),
allocated_draw_total=Sum('allocated_draw'),
)
ret['legs'].append({
'name': leg_name,
'allocated': utilization['allocated_draw_total'] or 0,
'maximum': utilization['maximum_draw_total'] or 0,
'outlet_count': len(outlet_ids),
})
return ret
# Default to administratively defined values
return {
'allocated': self.allocated_draw or 0,
'maximum': self.maximum_draw or 0,
'outlet_count': PowerOutlet.objects.filter(power_port=self).count(),
'legs': [],
}
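# Editor's note: an illustrative sketch, not part of the original module, of how the dictionary
# returned by get_power_draw() can be read. `power_port` is a hypothetical PowerPort instance.
def _example_power_draw_report(power_port):
    """Illustrative only: summarize a PowerPort's draw figures."""
    draw = power_port.get_power_draw()
    lines = [
        f"{draw['allocated']}W allocated of {draw['maximum']}W across {draw['outlet_count']} outlet(s)"
    ]
    # 'legs' is populated only when the upstream feed is three-phase
    for leg in draw['legs']:
        lines.append(
            f"  leg {leg['name']}: {leg['allocated']}W / {leg['maximum']}W ({leg['outlet_count']} outlet(s))"
        )
    return "\n".join(lines)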
#
# Power outlets
#
@extras_features('webhooks', 'custom_links')
class PowerOutlet(CableTermination, PathEndpoint, ComponentModel):
"""
A physical power outlet (output) within a Device which provides power to a PowerPort.
"""
type = models.CharField(
max_length=50,
choices=PowerOutletTypeChoices,
blank=True,
help_text='Physical port type'
)
power_port = models.ForeignKey(
to='dcim.PowerPort',
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='poweroutlets'
)
feed_leg = models.CharField(
max_length=50,
choices=PowerOutletFeedLegChoices,
blank=True,
help_text="Phase (for three-phase feeds)"
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:poweroutlet', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.power_port.name if self.power_port else None,
self.get_feed_leg_display(),
self.description,
)
def clean(self):
super().clean()
# Validate power port assignment
if self.power_port and self.power_port.device != self.device:
raise ValidationError(
"Parent power port ({}) must belong to the same device".format(self.power_port)
)
#
# Interfaces
#
class BaseInterface(models.Model):
"""
Abstract base class for fields shared by dcim.Interface and virtualization.VMInterface.
"""
enabled = models.BooleanField(
default=True
)
mac_address = MACAddressField(
null=True,
blank=True,
verbose_name='MAC Address'
)
mtu = models.PositiveIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1), MaxValueValidator(65536)],
verbose_name='MTU'
)
mode = models.CharField(
max_length=50,
choices=InterfaceModeChoices,
blank=True
)
class Meta:
abstract = True
def save(self, *args, **kwargs):
# Remove untagged VLAN assignment for non-802.1Q interfaces
if not self.mode:
self.untagged_vlan = None
# Only "tagged" interfaces may have tagged VLANs assigned. ("tagged all" implies all VLANs are assigned.)
if self.pk and self.mode != InterfaceModeChoices.MODE_TAGGED:
self.tagged_vlans.clear()
return super().save(*args, **kwargs)
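# Editor's note: a hedged sketch, not part of the original module, demonstrating the save()
# side effects documented above. `interface` is a hypothetical, already-saved Interface.
def _example_interface_mode_cleanup(interface):
    """Illustrative only: clearing the 802.1Q mode drops VLAN assignments on save."""
    interface.mode = ''   # no longer an 802.1Q interface
    interface.save()      # untagged_vlan is cleared; tagged VLANs are cleared for non-"tagged" modes
    assert interface.untagged_vlan is None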
@extras_features('export_templates', 'webhooks', 'custom_links')
class Interface(CableTermination, PathEndpoint, ComponentModel, BaseInterface):
"""
A network interface within a Device. A physical Interface can connect to exactly one other Interface.
"""
# Override ComponentModel._name to specify naturalize_interface function
_name = NaturalOrderingField(
target_field='name',
naturalize_function=naturalize_interface,
max_length=100,
blank=True
)
lag = models.ForeignKey(
to='self',
on_delete=models.SET_NULL,
related_name='member_interfaces',
null=True,
blank=True,
verbose_name='Parent LAG'
)
type = models.CharField(
max_length=50,
choices=InterfaceTypeChoices
)
mgmt_only = models.BooleanField(
default=False,
verbose_name='Management only',
help_text='This interface is used only for out-of-band management'
)
untagged_vlan = models.ForeignKey(
to='ipam.VLAN',
on_delete=models.SET_NULL,
related_name='interfaces_as_untagged',
null=True,
blank=True,
verbose_name='Untagged VLAN'
)
tagged_vlans = models.ManyToManyField(
to='ipam.VLAN',
related_name='interfaces_as_tagged',
blank=True,
verbose_name='Tagged VLANs'
)
ip_addresses = GenericRelation(
to='ipam.IPAddress',
content_type_field='assigned_object_type',
object_id_field='assigned_object_id',
related_query_name='interface'
)
tags = TaggableManager(through=TaggedItem)
csv_headers = [
'device', 'name', 'label', 'lag', 'type', 'enabled', 'mac_address', 'mtu', 'mgmt_only', 'description', 'mode',
]
class Meta:
ordering = ('device', CollateAsChar('_name'))
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:interface', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier if self.device else None,
self.name,
self.label,
self.lag.name if self.lag else None,
self.get_type_display(),
self.enabled,
self.mac_address,
self.mtu,
self.mgmt_only,
self.description,
self.get_mode_display(),
)
def clean(self):
super().clean()
# Virtual interfaces cannot be connected
if self.type in NONCONNECTABLE_IFACE_TYPES and (
self.cable or getattr(self, 'circuit_termination', False)
):
raise ValidationError({
'type': "Virtual and wireless interfaces cannot be connected to another interface or circuit. "
"Disconnect the interface or choose a suitable type."
})
# An interface's LAG must belong to the same device or virtual chassis
if self.lag and self.lag.device != self.device:
if self.device.virtual_chassis is None:
raise ValidationError({
'lag': f"The selected LAG interface ({self.lag}) belongs to a different device ({self.lag.device})."
})
elif self.lag.device.virtual_chassis != self.device.virtual_chassis:
raise ValidationError({
'lag': f"The selected LAG interface ({self.lag}) belongs to {self.lag.device}, which is not part "
f"of virtual chassis {self.device.virtual_chassis}."
})
# A virtual interface cannot have a parent LAG
if self.type == InterfaceTypeChoices.TYPE_VIRTUAL and self.lag is not None:
raise ValidationError({'lag': "Virtual interfaces cannot have a parent LAG interface."})
# A LAG interface cannot be its own parent
if self.pk and self.lag_id == self.pk:
raise ValidationError({'lag': "A LAG interface cannot be its own parent."})
# Validate untagged VLAN
if self.untagged_vlan and self.untagged_vlan.site not in [self.parent.site, None]:
raise ValidationError({
'untagged_vlan': "The untagged VLAN ({}) must belong to the same site as the interface's parent "
"device, or it must be global".format(self.untagged_vlan)
})
@property
def parent(self):
return self.device
@property
def is_connectable(self):
return self.type not in NONCONNECTABLE_IFACE_TYPES
@property
def is_virtual(self):
return self.type in VIRTUAL_IFACE_TYPES
@property
def is_wireless(self):
return self.type in WIRELESS_IFACE_TYPES
@property
def is_lag(self):
return self.type == InterfaceTypeChoices.TYPE_LAG
@property
def count_ipaddresses(self):
return self.ip_addresses.count()
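# Editor's note: a hedged sketch, not part of the original module, of the constraints enforced
# by Interface.clean() above. `device`, `lag_iface` and `vlan` are hypothetical existing objects,
# and TYPE_1GE_FIXED is assumed to be a valid member of InterfaceTypeChoices (defined elsewhere).
def _example_interface_validation(device, lag_iface, vlan):
    """Illustrative only: build an Interface that satisfies the clean() rules."""
    iface = Interface(
        device=device,
        name='GigabitEthernet0/1',
        type=InterfaceTypeChoices.TYPE_1GE_FIXED,
        lag=lag_iface,          # must belong to the same device or virtual chassis
        untagged_vlan=vlan,     # must be global or belong to the parent device's site
    )
    iface.full_clean()          # raises ValidationError if any rule above is violated
    return iface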
#
# Pass-through ports
#
@extras_features('webhooks', 'custom_links')
class FrontPort(CableTermination, ComponentModel):
"""
A pass-through port on the front of a Device.
"""
type = models.CharField(
max_length=50,
choices=PortTypeChoices
)
rear_port = models.ForeignKey(
to='dcim.RearPort',
on_delete=models.CASCADE,
related_name='frontports'
)
rear_port_position = models.PositiveSmallIntegerField(
default=1,
validators=[
MinValueValidator(REARPORT_POSITIONS_MIN),
MaxValueValidator(REARPORT_POSITIONS_MAX)
]
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = (
('device', 'name'),
('rear_port', 'rear_port_position'),
)
def get_absolute_url(self):
return reverse('dcim:frontport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.rear_port.name,
self.rear_port_position,
self.description,
)
def clean(self):
super().clean()
# Validate rear port assignment
if self.rear_port.device != self.device:
raise ValidationError({
"rear_port": f"Rear port ({self.rear_port}) must belong to the same device"
})
# Validate rear port position assignment
if self.rear_port_position > self.rear_port.positions:
raise ValidationError({
"rear_port_position": f"Invalid rear port position ({self.rear_port_position}): Rear port "
f"{self.rear_port.name} has only {self.rear_port.positions} positions"
})
@extras_features('webhooks', 'custom_links')
class RearPort(CableTermination, ComponentModel):
"""
A pass-through port on the rear of a Device.
"""
type = models.CharField(
max_length=50,
choices=PortTypeChoices
)
positions = models.PositiveSmallIntegerField(
default=1,
validators=[
MinValueValidator(REARPORT_POSITIONS_MIN),
MaxValueValidator(REARPORT_POSITIONS_MAX)
]
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'positions', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:rearport', kwargs={'pk': self.pk})
def clean(self):
super().clean()
# Check that positions count is greater than or equal to the number of associated FrontPorts
frontport_count = self.frontports.count()
if self.positions < frontport_count:
raise ValidationError({
"positions": f"The number of positions cannot be less than the number of mapped front ports "
f"({frontport_count})"
})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.positions,
self.description,
)
#
# Device bays
#
@extras_features('webhooks', 'custom_links')
class DeviceBay(ComponentModel):
"""
An empty space within a Device which can house a child device.
"""
installed_device = models.OneToOneField(
to='dcim.Device',
on_delete=models.SET_NULL,
related_name='parent_bay',
blank=True,
null=True
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'installed_device', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:devicebay', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.installed_device.identifier if self.installed_device else None,
self.description,
)
def clean(self):
super().clean()
# Validate that the parent Device can have DeviceBays
if not self.device.device_type.is_parent_device:
raise ValidationError("This type of device ({}) does not support device bays.".format(
self.device.device_type
))
# Cannot install a device into itself, obviously
if self.device == self.installed_device:
raise ValidationError("Cannot install a device into itself.")
# Check that the installed device is not already installed elsewhere
if self.installed_device:
current_bay = DeviceBay.objects.filter(installed_device=self.installed_device).first()
if current_bay and current_bay != self:
raise ValidationError({
'installed_device': "Cannot install the specified device; device is already installed in {}".format(
current_bay
)
})
#
# Inventory items
#
@extras_features('export_templates', 'webhooks', 'custom_links')
class InventoryItem(MPTTModel, ComponentModel):
"""
An InventoryItem represents a serialized piece of hardware within a Device, such as a line card or power supply.
InventoryItems are used only for inventory purposes.
"""
parent = TreeForeignKey(
to='self',
on_delete=models.CASCADE,
related_name='child_items',
blank=True,
null=True,
db_index=True
)
manufacturer = models.ForeignKey(
to='dcim.Manufacturer',
on_delete=models.PROTECT,
related_name='inventory_items',
blank=True,
null=True
)
part_id = models.CharField(
max_length=50,
verbose_name='Part ID',
blank=True,
help_text='Manufacturer-assigned part identifier'
)
serial = models.CharField(
max_length=50,
verbose_name='Serial number',
blank=True
)
asset_tag = models.CharField(
max_length=50,
unique=True,
blank=True,
null=True,
verbose_name='Asset tag',
help_text='A unique tag used to identify this item'
)
discovered = models.BooleanField(
default=False,
help_text='This item was automatically discovered'
)
tags = TaggableManager(through=TaggedItem)
objects = TreeManager()
csv_headers = [
'device', 'name', 'label', 'manufacturer', 'part_id', 'serial', 'asset_tag', 'discovered', 'description',
]
class Meta:
ordering = ('device__id', 'parent__id', '_name')
unique_together = ('device', 'parent', 'name')
def get_absolute_url(self):
return reverse('dcim:inventoryitem', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.name or '{{{}}}'.format(self.device.pk),
self.name,
self.label,
self.manufacturer.name if self.manufacturer else None,
self.part_id,
self.serial,
self.asset_tag,
self.discovered,
self.description,
)
| digitalocean/netbox | netbox/dcim/models/device_components.py | Python | apache-2.0 | 27,423 | 0.002079 |
'''
GameData.py
Last Updated: 3/16/17
'''
import json, os
import numpy as np
import pygame as pg
from GameAssets import GameAssets as ga
class GameData():
"""
GameData class is used to store game state information.
"""
def __init__(self):
'''
Method initializes game state variables.
'''
self.debug = False
self.game_name = "SpaceManBash"
self.delta_sum = 0
self.running = True
# GameFrame Data
self.frames = []
self.frame_current = None
# Configs
self.screen_dim = (800, 600)
self.controls = {
'LEFT' : pg.K_a,
'RIGHT' : pg.K_d,
'UP' : pg.K_w,
'DOWN' : pg.K_s,
'CROUCH' : pg.K_LALT,
'ATTACK' : pg.K_j,
'ALTATTACK' : pg.K_k,
'JUMP' : pg.K_SPACE,
'SPRINT' : pg.K_LSHIFT,
'PAUSE' : pg.K_ESCAPE,
'ENTER' : pg.K_RETURN,
'HOME' : pg.K_h
}
# Save Data
self.saves = []
self.save_index = None
# Level Data
self.levels = []
self.level_index = 0
self.level_background = None
self.level_midground = None
self.camera_pos = np.array([0.0, 0.0, 0.0, 0.0])
self.camera_limits = [0.0, 0.0, 0.0, 0.0]
self.game_objects = []
self.collisions = {}
self.level_scripts = []
self.script_vars = {}
# Player Data
self.player_pos = np.array([0.0, 0.0])
self.player_health = 100
def switch_frame(self, frame):
'''
Method switches the current frame to the desired frame, instantiating the
frame if it has not been created yet.
Param:
frame ;str class name of the desired GameFrame
'''
for f in self.frames:
if f.__class__.__name__ == frame:
self.frame_current = f
return
module = __import__("GameFrames")
class_ = getattr(module, frame)
instance = class_(self)
self.frames.append(instance)
self.frame_current = self.frames[-1]
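# Editor's note (illustrative, not part of the original file): switching frames by class name
# lazily imports and instantiates the class from the GameFrames module, e.g.:
#
#     game_data = GameData()
#     game_data.switch_frame("MenuFrame")   # "MenuFrame" is a hypothetical GameFrames class
#     game_data.frame_current               # -> the cached MenuFrame instance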
def save_config(self, filename):
'''
Method saves game data configurations to file.
Param:
filename ;str config filename
'''
try:
with open("../data/" + filename, "w") as f:
data = {}
data['controls'] = self.controls
data['screen_dim'] = self.screen_dim
json_dump = json.dumps(data)
f.write(json_dump)
except Exception as e:
print("Could Save Config:", filename)
print(e)
def load_config(self, filename):
'''
Method loads game data configurations from file.
Param:
filename ;str config filename
'''
try:
with open("../data/" + filename, "r") as f:
for json_dump in f:
data = json.loads(json_dump)
self.controls = data['controls']
self.screen_dim = data['screen_dim']
except Exception as e:
print("Could Load Config:", filename)
print(e)
def save_save(self, filename):
'''
Method saves game data state to save file.
Param:
filename ;str save filename
'''
try:
with open("../data/saves/" + filename, "w") as f:
data = {}
data["level_index"] = self.level_index
json_dump = json.dumps(data)
f.write(json_dump + '\n')
except Exception as e:
print("Could Save Save Data:", filename)
print(e)
def load_save(self, filename):
'''
Method loads game data state from save file.
Param:
filename ;str save filename
'''
try:
with open("../data/saves/" + filename, "r") as f:
for json_dump in f:
data = json.loads(json_dump)
self.level_index = data["level_index"]
except Exception as e:
print("Could Load Save Data:", filename)
print(e)
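# Editor's note (illustrative, not part of the original file): the two method pairs above
# persist plain JSON under ../data/, so a typical round trip looks like:
#
#     game_data.save_config("config.json")   # writes controls and screen_dim
#     game_data.load_config("config.json")   # restores them on the next run
#     game_data.save_save("save_0.sav")      # writes level_index
#     game_data.load_save("save_0.sav")      # restores the player's progress
#
# "config.json" is an assumed filename; only "save_0.sav" appears elsewhere in this class.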
def load_game_data(self):
'''
Method loads all game level data from file.
'''
for filename in sorted(os.listdir("../data/levels/")):
if filename.endswith(".lev"):
try:
with open("../data/levels/" + filename, "r") as f:
self.levels.append(f.read())
except Exception as e:
print("Could Load Game Data:", filename)
print(e)
def load_level(self):
'''
Method loads current level.
'''
try:
data = json.loads(self.levels[self.level_index])
self.camera_pos = np.array(data['camera_pos'])
self.camera_limits = np.array(data['camera_limits'])
for go in data['game_objects']:
module = __import__("GameObjects")
class_ = getattr(module, go[0])
instance = class_(go[1:])
self.add_game_object(instance)
pg.mixer.music.load("../data/music/"+data['music'])
pg.mixer.music.set_volume(0.15)
pg.mixer.music.play(loops=3)
self.level_background = getattr(ga, data['background'])
self.level_midground = getattr(ga, data['midground'])
for script in data['scripts']: self.add_level_script(script)
except Exception as e:
print("Couldn't Load Level:", self.level_index)
print(e)
def reset_level(self):
'''
Method resets current level.
'''
self.frame_current.level_loaded = False
self.game_objects = []
self.collisions = {}
self.load_level()
def switch_level(self, index):
'''
Method switches level.
Param:
index ;int index of desired level
'''
self.level_index = index
self.frame_current.level_loaded = False
self.game_objects = []
self.collisions = {}
self.save_save("save_0.sav")
self.load_level()
def add_game_object(self, game_object):
'''
Method adds game object.
Param:
game_object ;GameObject
'''
self.game_objects.append(game_object)
def remove_game_object(self, game_object):
'''
Method removes game object.
Param:
game_object ;GameObject
'''
self.game_objects.remove(game_object)
def add_level_script(self, script):
'''
Method adds a level script to the current level.
Param:
script ;script entry loaded from the level data
'''
self.level_scripts.append(script)
def remove_level_script(self, script):
'''
Method removes a level script from the current level.
Param:
script ;script entry to remove
'''
self.level_scripts.remove(script)
def update_collisions(self):
'''
Method calculates collisions of game objects at current game state.
Collisions are stored in self.collisions dictionary object.
'''
self.collisions = {}
for go in self.game_objects:
temp = []
for goo in self.game_objects:
if go != goo and go.check_collision(goo.rect):
temp.append(goo)
self.collisions[go] = temp
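# Editor's note (illustrative, not part of the original file): after update_collisions() runs,
# self.collisions maps every game object to the list of objects it currently overlaps, e.g.:
#
#     game_data.update_collisions()
#     for go, hits in game_data.collisions.items():
#         for other in hits:
#             go.handle_collision(other)   # hypothetical handler; the GameObject API is not shown here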
def center_camera_on_game_object(self, game_object):
'''
Method updates camera position to be centered on desired game object while
remaining in the self.camera_limits boundaries.
Param:
game_object ;GameObject
'''
x = -(game_object.rect[0] + (game_object.rect[2]/2.0)) + (self.screen_dim[0]/2.0)
y = -(game_object.rect[1] + (game_object.rect[3]/2.0)) + (self.screen_dim[1]/2.0)
if x < self.camera_limits[2] and x > self.camera_limits[0]: self.camera_pos[0] = x
if y < self.camera_limits[3] and y > self.camera_limits[1]: self.camera_pos[1] = y
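# Editor's note (worked example, not part of the original file): with the default 800x600 screen
# and a game object whose rect is (1000, 450, 50, 50), the centering math above yields
# x = -(1000 + 25) + 400 = -625 and y = -(450 + 25) + 300 = -175; each value is applied only if
# it falls strictly within the corresponding camera_limits bounds.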
| rz4/SpaceManBash | src/GameData.py | Python | mit | 8,056 | 0.002731 |
# -*- coding: utf-8 -*-
# Third Party Stuff
from rest_framework.pagination import PageNumberPagination as DrfPageNumberPagination
class PageNumberPagination(DrfPageNumberPagination):
# Client can control the page using this query parameter.
page_query_param = 'page'
# Client can control the page size using this query parameter.
# DRF's default is None; it is set here to 'per_page' to enable client control.
page_size_query_param = 'per_page'
# Set to an integer to limit the maximum page size the client may request.
# Only relevant if 'page_size_query_param' has also been set.
max_page_size = 1000
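# Editor's note: a minimal sketch (assumption, not from the original repository) of how this
# paginator is typically wired into DRF settings; the dotted import path is hypothetical.
#
# REST_FRAMEWORK = {
#     'DEFAULT_PAGINATION_CLASS': 'base.api.pagination.PageNumberPagination',
#     'PAGE_SIZE': 30,
# }
#
# A request such as GET /api/items/?page=2&per_page=50 would then return the second page of
# 50 items, with per_page capped by max_page_size (1000).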
| aniketmaithani/kimani-adserver | Adserver/base/api/pagination.py | Python | gpl-2.0 | 640 | 0.001563 |