repo_name (string, length 5–92) | path (string, length 4–232) | copies (string, 19 classes) | size (string, length 4–7) | content (string, length 721–1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
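The rows that follow use this schema. As a rough illustration of how a table with these columns can be consumed, here is a minimal sketch using the Hugging Face `datasets` library; it assumes the dump comes from such a dataset, and the dataset identifier in the example is a placeholder rather than the real name — only the column names are taken from the header above.

```python
# Minimal sketch: stream rows with the schema above and keep small,
# human-written files under selected licenses.
# NOTE: "some-org/python-source-files" is a placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("some-org/python-source-files", split="train", streaming=True)

for row in ds:
    if row["autogenerated"]:                       # bool column from the header
        continue
    if row["license"] not in ("apache-2.0", "mit"):
        continue
    if int(row["size"]) > 10_000:                  # "size" is stored as a string
        continue
    print(row["repo_name"], row["path"], row["line_max"], row["alpha_frac"])
```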
seibert-media/Highton | highton/models/comment.py | 1 | 1176 |
from highton.models import HightonModel
from highton.highton_constants import HightonConstants
from highton import fields
from highton import call_mixins


class Comment(
    HightonModel,
    call_mixins.DetailCallMixin,
    call_mixins.CreateCallMixin,
    call_mixins.UpdateCallMixin,
    call_mixins.DeleteCallMixin,
):
    """
    :ivar id: fields.IntegerField(name=HightonConstants.ID)
    :ivar parent_id: fields.IntegerField(name=HightonConstants.PARENT_ID)
    :ivar author_id: fields.IntegerField(name=HightonConstants.AUTHOR_ID)
    :ivar created_at: fields.DatetimeField(name=HightonConstants.CREATED_AT)
    :ivar body: fields.StringField(name=HightonConstants.BODY)
    """
    TAG_NAME = HightonConstants.COMMENT
    ENDPOINT = HightonConstants.COMMENTS

    def __init__(self, **kwargs):
        self.parent_id = fields.IntegerField(name=HightonConstants.PARENT_ID, required=True)
        self.body = fields.StringField(name=HightonConstants.BODY, required=True)
        self.author_id = fields.IntegerField(name=HightonConstants.AUTHOR_ID)
        self.created_at = fields.DatetimeField(name=HightonConstants.CREATED_AT)
        super().__init__(**kwargs)
| apache-2.0 | -1,080,316,019,135,606,400 | 35.75 | 92 | 0.743197 | false |
kaosbeat/monaxo | midi.py | 1 | 1847 |
#! /usr/bin/env python
import time, random, threading, monome, sched
import rtmidi
import loop16
from seq16 import Seq16
from monome import Monome
from midiout import *
# try to find a monome (you can skip this if you already know the host/port)
print "looking for a monome..."
host, port = monome.find_any_monome()
print "found!"
m = Monome((host, port))
m.start()
#init the MIDI port
midiout = rtmidi.MidiOut()
available_ports = midiout.get_ports()
print available_ports
if available_ports:
midiout.open_port(1)
else:
midiout.open_virtual_port("My virtual output")
#setup the clock
class Clock:
bpm = 120
s = sched.scheduler(time.time, time.sleep)
print 'clockeventinit'
def runclockedstuff(self):
self.s.enter(500, 1, runclockedstuff, ())
print 'clockevent'
clock = Clock()
#config
m.monomeRowConfig = ['none', 'none', 'none','none', 'loop16', 'toggle16','seq16', 'loop16']
m.r0 = Seq16(m,0,0)
m.r1 = Seq16(m,1,1)
m.r2 = Seq16(m,2,0)
m.r3 = Seq16(m,3,1)
# m.monomeState = [
# ]
# if __name__ == '__main__':
# main()
def keypressed(x, y, s):
if y == 0:
m.r0.dostuff(x,y,s)
m.r0.changestuff()
if y == 1:
m.r1.dostuff(x,y,s)
print 'y = 1'
if y == 2:
m.r3.dostuff(x,y,s)
if y == 3:
m.r4.dostuff(x,y,s)
# rowType = m.monomeRowConfig[y]
# if s == 1:
# if rowType == 'seq16':
# if m.get_led(x,y) == 1:
# m.led_set(x, y, 1 )
# else:
# m.led_set(x, y, 0 )
# if rowType == 'loop16':
# note_on(0x99,60, 112)
# m.led_row(x,y,0xff, 0xff)
# else:
# # if rowType == 'seq16':
# if rowType == '`loop16':
# note_off(0x99, 60)
# m.led_row(x,y,0x00, 0x00)
print(clock.runclockedstuff)
m.grid_key = keypressed
# #repaint monome
# m.led_all(0)
try:
while True:
for i in range(8):
time.sleep(1.0/20)
except KeyboardInterrupt:
# r.panic()
m.led_all(0)
m.close()
| mit | 4,509,165,289,863,783,400 | 16.590476 | 92 | 0.624256 | false |
CtrlC-Root/cse3341 | Core/cse3341/pt/write_statement.py | 1 | 1161 |
from ..token import Token
from .node import Node
from .identifier_list import IdentifierList


class WriteStatement(Node):
    """
    A write statement.
    """

    def __init__(self):
        """
        Create the write statement.
        """
        self.identifiers = IdentifierList()

    @classmethod
    def parse(cls, tokenizer, identifiers):
        """
        Parse the write statement.
        """
        statement = WriteStatement()
        cls.extract_token(tokenizer, Token.WRITE)
        statement.identifiers = IdentifierList.parse(tokenizer, identifiers)
        cls.extract_token(tokenizer, Token.SEMICOLON)
        return statement

    def execute(self, identifiers, data):
        """
        Execute the statement.
        """
        for identifier in self.identifiers:
            print("{0} = {1}".format(
                identifier.name,
                identifiers.get_value(identifier.name)))

    def __str__(self):
        """
        Human-readable string representation.
        """
        return "{0} {1}{2}".format(
            Token.WRITE.value[1],
            self.identifiers,
            Token.SEMICOLON.value[1])
| mit | -2,275,082,178,127,117,300 | 23.1875 | 76 | 0.565891 | false |
makefu/repo-sync | reposync/cli.py | 1 | 4555 |
#! /usr/bin/env python3
""" usage: repo-sync CONFIG
Creates a sync repo in PWD.
Environment:
REPONAME name of the sync repo in workdir (Default: repo-sync.git)
Configuration:
Sync configuration in json format, defines which branch of
"origin.url" will be mirrored to "mirror.url"
$name.origin.ref defaults to "heads/master"
$name.mirror.ref defaults to "heads/${name}"
A special "@latest" entry defines where the ref with the latest
update is pushed to the mirror-url.
@latest.mirror.ref defaults to "heads/master"
Literal example for config file:
{
"$username-repo": {
"origin": {
"url": "http://github.com/username/repo"
"ref": "heads/dev"
},
"mirror": {
"url": "git@internal:mirror-repo",
"ref": "heads/github-mirror-dev"
}
},
...
"@latest": {
"mirror": {
"url": "git@internal:mirror",
"ref": "heads/master"
}
}
}
"""
from git import Repo
import git
from docopt import docopt
import logging
import os
from datetime import datetime
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("repo-sync")
def load_config(fname):
import json
with open(fname) as f:
return json.load(f)
def sync_ref(repo,k,v):
log.info("begin sync of {}".format(k))
oname = k
mname = oname+'-mirror'
ourl = v['origin']['url']
murl = v['mirror']['url']
# it is easier to ask for forgiveness than to ask for permission
try: repo.delete_remote(oname)
except git.exc.GitCommandError: pass
try: repo.delete_remote(mname)
except git.exc.GitCommandError: pass
# Step 1: fetch remote_ref:local_ref
# Step 2: push local_ref:mirror_ref
remote_ref = "refs/" + v['origin']['ref'] if 'ref' in v['origin'] \
else 'refs/heads/master'
local_ref = "refs/remotes/{}/master".format(oname)
refspec = "+{}:{}".format(remote_ref,local_ref)
oremote = repo.create_remote(oname,url=ourl)
log.info("fetching refspec {}".format(refspec))
fetch = oremote.fetch(refspec=refspec)[0]
print("{} - {}".format(fetch.commit.summary,datetime.fromtimestamp(fetch.commit.committed_date)))
mremote = repo.create_remote(mname,murl)
mirror_ref = "refs/" + v['mirror']['ref'] if 'ref' in v['mirror'] \
else "refs/heads/{}".format(oname)
mrefspec = "{}:{}".format(local_ref,mirror_ref)
log.info("pushing refspec {}".format(mrefspec))
mremote.push(refspec=mrefspec,force=True)
return { "mirror_ref": mirror_ref,
"remote_ref": remote_ref,
"local_ref": local_ref,
"remote": oremote }
def push_latest(repo,k,v,local_ref):
""" push the `local_ref` to `v['mirror']['url']`
`k` is the remote name
essentially the second half of sync_ref
"""
try: repo.delete_remote(k)
except git.exc.GitCommandError: pass
remote = repo.create_remote(k,url=v['mirror']['url'])
mirror_ref = "refs/" + v['mirror']['ref'] if 'ref' in v['mirror'] \
else "refs/heads/master"
mrefspec = "{}:{}".format(local_ref,mirror_ref)
log.info("pushing refspec {}".format(mrefspec))
remote.push(refspec=mrefspec,force=True)
def get_latest_change(repo,sync):
""" takes { "name" : { "local_ref": "refs/remotes/name/master" },
"name2": { "local_ref": "..." } }
returns "refs/remotes/name/master" of the entry with the latest change
"""
last_change = sorted(sync,key=lambda k:
git.objects.base.Object.new(repo,sync[k]['local_ref']).committed_date).pop()
log.info("latest change seen in: {}".format(last_change))
return sync[last_change]['local_ref']
def mirror(reponame,cfg):
from os.path import join
log.info("init repo at {}".format(join(os.curdir,reponame)))
repo = Repo.init(reponame,bare=True)
sync = {}
lk = '@latest'
for k,v in cfg.items():
if k == lk: continue
try:
sync[k] = sync_ref(repo,k,v)
except Exception as e:
log.error("failed to sync repo {} because of {}".format(k,e))
if lk in cfg:
log.info("found `latest` entry, starting push")
push_latest(repo,lk,cfg[lk],get_latest_change(repo,sync))
else:
log.debug("no `@latest` entry.")
log.info("finish sync")
def main():
args = docopt(__doc__)
name = os.environ.get("REPONAME","repo-sync.git")
mirror(name,load_config(args['CONFIG']))
if __name__ == "__main__":
main()
| mit | -8,171,029,444,356,945,000 | 28.967105 | 101 | 0.605049 | false |
frmdstryr/enaml-kivy | enamlkv/kv/api.py | 1 | 8801 |
#------------------------------------------------------------------------------
# Copyright (c) 2016, frmdstryr.
#
# Distributed under the terms of the MIT License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
import sys
from types import ModuleType
from enamlkv.kv.kv_factories import get_control
KV_CONTROLS = dict(
# Layouts
BoxLayout=lambda: get_control('kivy.uix.boxlayout.BoxLayout'),
FloatLayout = lambda: get_control('kivy.uix.floatlayout.FloatLayout'),
RelativeLayout = lambda: get_control('kivy.uix.relativelayout.RelativeLayout'),
GridLayout = lambda: get_control('kivy.uix.gridlayout.GridLayout'),
AnchorLayout = lambda: get_control('kivy.uix.anchorlayout.AnchorLayout'),
PageLayout = lambda: get_control('kivy.uix.pagelayout.PageLayout'),
ScatterLayout = lambda: get_control('kivy.uix.scatterlayout.ScatterLayout'),
ScatterPlaneLayout = lambda: get_control('kivy.uix.scatterlayout.ScatterPlaneLayout'),
Spacer = lambda: get_control('kivy.uix.widget.Widget'),
StackLayout = lambda: get_control('kivy.uix.stacklayout.StackLayout'),
# Ux Widgets
Label = lambda: get_control('kivy.uix.label.Label'),
Button = lambda: get_control('kivy.uix.button.Button'),
CheckBox = lambda: get_control('kivy.uix.checkbox.CheckBox'),
Image = lambda: get_control('kivy.uix.image.Image'),
AsyncImage = lambda: get_control('kivy.uix.image.AsyncImage'),
Slider = lambda: get_control('kivy.uix.slider.Slider'),
ProgressBar = lambda: get_control('kivy.uix.progressbar.ProgressBar'),
TextInput = lambda: get_control('kivy.uix.textinput.TextInput',read_only_properties=['keyboard','cursor_pos','cursor_col','cursor_row','minimum_height']),
ToggleButton = lambda: get_control('kivy.uix.togglebutton.ToggleButton'),
Switch = lambda: get_control('kivy.uix.switch.Switch'),
Video = lambda: get_control('kivy.uix.video.Video'),
Camera = lambda: get_control('kivy.uix.camera.Camera',read_only_properties=['norm_image_size']),
Accordion = lambda: get_control("kivy.uix.accordion.Accordion"),
AccordionItem = lambda: get_control("kivy.uix.accordion.AccordionItem"),
ActionBar = lambda: get_control("kivy.uix.actionbar.ActionBar"),
ActionButton = lambda: get_control("kivy.uix.actionbar.ActionButton"),
ActionToggleButton = lambda: get_control("kivy.uix.actionbar.ActionToggleButton"),
ActionCheck = lambda: get_control("kivy.uix.actionbar.ActionCheck"),
ActionSeparator = lambda: get_control("kivy.uix.actionbar.ActionSeparator"),
ActionDropDown = lambda: get_control("kivy.uix.actionbar.ActionDropDown"),
ActionGroup = lambda: get_control("kivy.uix.actionbar.ActionGroup"),
ActionOverflow = lambda: get_control("kivy.uix.actionbar.ActionOverflow"),
ActionView = lambda: get_control("kivy.uix.actionbar.ActionView"),
ActionPrevious = lambda: get_control("kivy.uix.actionbar.ActionPrevious"),
ScreenManager = lambda: get_control("kivy.uix.screenmanager.ScreenManager",read_only_properties=['screen_names']),
Screen = lambda: get_control("kivy.uix.screenmanager.Screen"),
CodeInput = lambda: get_control("kivy.uix.codeinput.CodeInput"),
DropDown = lambda: get_control("kivy.uix.dropdown.DropDown"),
Spinner = lambda: get_control("kivy.uix.spinner.Spinner"),
#SpinnerButton = lambda: get_control("kivy.uix.spinner.SpinnerButton"),
Splitter = lambda: get_control("kivy.uix.splitter.Splitter"),
ColorPicker = lambda: get_control("kivy.uix.colorpicker.ColorPicker",read_only_properties=['wheel']),
Popup = lambda: get_control("kivy.uix.popup.Popup"),
TabbedPanel = lambda: get_control("kivy.uix.tabbedpanel.TabbedPanel",read_only_properties=['content','tab_list','default_tab',"_current_tab","_default_tab"]),
TabbedPanelContent = lambda: get_control("kivy.uix.tabbedpanel.TabbedPanelContent"),
TabbedPanelHeader = lambda: get_control("kivy.uix.tabbedpanel.TabbedPanelHeader"),
TabbedPanelItem = lambda: get_control("kivy.uix.tabbedpanel.TabbedPanelItem"),
TabbedPanelStrip = lambda: get_control("kivy.uix.tabbedpanel.TabbedPanelStrip"),
ScrollView = lambda: get_control("kivy.uix.scrollview.ScrollView",read_only_properties=['hbar','vbar','bbox']),
RstDocument = lambda: get_control("kivy.uix.rst.RstDocument"),
RstVideoPlayer = lambda: get_control("kivy.uix.rst.RstVideoPlayer"),
RstTitle = lambda: get_control("kivy.uix.rst.RstTitle"),
RstParagraph = lambda: get_control("kivy.uix.rst.RstParagraph"),
RstTerm = lambda: get_control("kivy.uix.rst.RstTerm"),
RstBlockQuote = lambda: get_control("kivy.uix.rst.RstBlockQuote"),
RstLiteralBlock = lambda: get_control("kivy.uix.rst.RstLiteralBlock"),
RstList = lambda: get_control("kivy.uix.rst.RstList"),
RstListItem = lambda: get_control("kivy.uix.rst.RstListItem"),
RstListBullet = lambda: get_control("kivy.uix.rst.RstListBullet"),
RstSystemMessage = lambda: get_control("kivy.uix.rst.RstSystemMessage"),
RstWarning = lambda: get_control("kivy.uix.rst.RstWarning"),
RstNote = lambda: get_control("kivy.uix.rst.RstNote"),
RstImage = lambda: get_control("kivy.uix.rst.RstImage"),
RstAsyncImage = lambda: get_control("kivy.uix.rst.RstAsyncImage"),
RstDefinitionList = lambda: get_control("kivy.uix.rst.RstDefinitionList"),
RstDefinition = lambda: get_control("kivy.uix.rst.RstDefinition"),
RstFieldList = lambda: get_control("kivy.uix.rst.RstFieldList"),
RstFieldName = lambda: get_control("kivy.uix.rst.RstFieldName"),
RstFieldBody = lambda: get_control("kivy.uix.rst.RstFieldBody"),
RstGridLayout = lambda: get_control("kivy.uix.rst.RstGridLayout"),
RstTable = lambda: get_control("kivy.uix.rst.RstTable"),
RstEntry = lambda: get_control("kivy.uix.rst.RstEntry"),
RstTransition = lambda: get_control("kivy.uix.rst.RstTransition"),
RstEmptySpace = lambda: get_control("kivy.uix.rst.RstEmptySpace"),
RstDefinitionSpace = lambda: get_control("kivy.uix.rst.RstDefinitionSpace"),
Sandbox = lambda: get_control("kivy.uix.sandbox.Sandbox"),
Scatter = lambda: get_control("kivy.uix.scatter.Scatter",read_only_properties=['bbox']),
ScatterPlane = lambda: get_control("kivy.uix.scatter.ScatterPlane"),
Settings = lambda: get_control("kivy.uix.settings.Settings"),
SettingsWithSidebar = lambda: get_control("kivy.uix.settings.SettingsWithSidebar"),
SettingsWithSpinner = lambda: get_control("kivy.uix.settings.SettingsWithSpinner"),
SettingsWithTabbedPanel = lambda: get_control("kivy.uix.settings.SettingsWithTabbedPanel"),
SettingsWithNoMenu = lambda: get_control("kivy.uix.settings.SettingsWithNoMenu"),
SettingSpacer = lambda: get_control("kivy.uix.settings.SettingSpacer"),
SettingItem = lambda: get_control("kivy.uix.settings.SettingItem"),
SettingBoolean = lambda: get_control("kivy.uix.settings.SettingBoolean"),
SettingString = lambda: get_control("kivy.uix.settings.SettingString"),
SettingPath = lambda: get_control("kivy.uix.settings.SettingPath"),
SettingNumeric = lambda: get_control("kivy.uix.settings.SettingNumeric"),
SettingOptions = lambda: get_control("kivy.uix.settings.SettingOptions"),
SettingTitle = lambda: get_control("kivy.uix.settings.SettingTitle"),
SettingsPanel = lambda: get_control("kivy.uix.settings.SettingsPanel"),
InterfaceWithSidebar = lambda: get_control("kivy.uix.settings.InterfaceWithSidebar"),
InterfaceWithSpinner = lambda: get_control("kivy.uix.settings.InterfaceWithSpinner"),
InterfaceWithNoMenu = lambda: get_control("kivy.uix.settings.InterfaceWithNoMenu"),
InterfaceWithTabbedPanel = lambda: get_control("kivy.uix.settings.InterfaceWithTabbedPanel"),
ContentPanel = lambda: get_control("kivy.uix.settings.ContentPanel"),
MenuSidebar = lambda: get_control("kivy.uix.settings.MenuSidebar"),
SettingSidebarLabel = lambda: get_control("kivy.uix.settings.SettingSidebarLabel"),
StencilView = lambda: get_control("kivy.uix.stencilview.StencilView"),
VKeyboard = lambda: get_control("kivy.uix.vkeyboard.VKeyboard"),
)
class DynamicImporter(ModuleType):
""" Only create widgets that are actually used so that
unused widgets do not need to be imported.
"""
def __getattr__(self,name):
#print("Loading {}".format(name))
return KV_CONTROLS[name]()
old_module = sys.modules[__name__] # So it's not garbage collected
new_module = sys.modules[__name__] = DynamicImporter(__name__)
new_module.__dict__.update({
'__file__': __file__,
'__doc__': __doc__,
'__all__': KV_CONTROLS.keys(),
})
| mit | 530,521,388,394,943,040 | 58.466216 | 162 | 0.709465 | false |
Nefry/taurus | bzt/modules/aggregator.py | 1 | 20567 |
"""
Aggregating results into DataPoints
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
import copy
import logging
import math
import re
from collections import Counter
from bzt.utils import BetterDict
from bzt.engine import EngineModule
from bzt.modules.moves import iteritems
class KPISet(BetterDict):
"""
Main entity in results, contains all KPIs for single label,
capable of merging other KPISet's into it to compose cumulative results
"""
ERRORS = "errors"
SAMPLE_COUNT = "throughput"
CONCURRENCY = "concurrency"
SUCCESSES = "succ"
FAILURES = "fail"
RESP_TIMES = "rt"
AVG_RESP_TIME = "avg_rt"
STDEV_RESP_TIME = "stdev_rt"
AVG_LATENCY = "avg_lt"
AVG_CONN_TIME = "avg_ct"
PERCENTILES = "perc"
RESP_CODES = "rc"
ERRTYPE_ERROR = 0
ERRTYPE_ASSERT = 1
def __init__(self, perc_levels=()):
super(KPISet, self).__init__()
self.sum_rt = 0
self.sum_lt = 0
self.sum_cn = 0
self.perc_levels = perc_levels
# scalars
self.get(self.SAMPLE_COUNT, 0)
self.get(self.CONCURRENCY, 0)
self.get(self.SUCCESSES, 0)
self.get(self.FAILURES, 0)
self.get(self.AVG_RESP_TIME, 0)
self.get(self.STDEV_RESP_TIME, 0)
self.get(self.AVG_LATENCY, 0)
self.get(self.AVG_CONN_TIME, 0)
# vectors
self.get(self.ERRORS, [])
self.get(self.RESP_TIMES, Counter())
self.get(self.RESP_CODES, Counter())
self.get(self.PERCENTILES)
self._concurrencies = BetterDict()
def __deepcopy__(self, memo):
mycopy = KPISet(self.perc_levels)
mycopy.sum_rt = self.sum_rt
mycopy.sum_lt = self.sum_lt
mycopy.sum_cn = self.sum_cn
for key, val in iteritems(self):
mycopy[key] = copy.deepcopy(val, memo)
return mycopy
@staticmethod
def error_item_skel(error, ret_c, cnt, errtype, urls):
"""
:type error: str
:type ret_c: str
:type cnt: int
:type errtype: int
:type urls: Counter
:rtype: dict
"""
return {
"cnt": cnt,
"msg": error,
"rc": ret_c,
"type": errtype,
"urls": urls
}
def add_sample(self, sample):
"""
Add sample, consisting of: cnc, rt, cn, lt, rc, error, trname
:type sample: tuple
"""
# TODO: introduce a flag to not count failed in resp times? or offer it always?
cnc, r_time, con_time, latency, r_code, error, trname = sample
self[self.SAMPLE_COUNT] = self.get(self.SAMPLE_COUNT, 0) + 1
if cnc:
self._concurrencies[trname] = cnc
if r_code is not None:
resp_codes = self.get(self.RESP_CODES)
resp_codes[r_code] = resp_codes.get(r_code, 0) + 1
# count times only if we have RCs
if con_time:
self.sum_cn += con_time
self.sum_lt += latency
self.sum_rt += r_time
if error is not None:
self[self.FAILURES] = self.get(self.FAILURES, 0) + 1
item = self.error_item_skel(error, r_code, 1, KPISet.ERRTYPE_ERROR, Counter())
self.inc_list(self.get(self.ERRORS), ("msg", error), item)
else:
self[self.SUCCESSES] = self.get(self.SUCCESSES, 0) + 1
self.get(self.RESP_TIMES)[r_time] += 1
# TODO: max/min rt? there is percentiles...
# TODO: throughput if interval is not 1s
@staticmethod
def inc_list(values, selector, value):
"""
Increment list item, based on selector criteria
:param values: list to update
:type values: list
:param selector: tuple of 2 values, field name and value to match
:type selector: tuple
:param value: dict to put into list
:type value: dict
"""
found = False
for item in values:
if item[selector[0]] == selector[1]:
item['cnt'] += value['cnt']
item['urls'] += value['urls']
found = True
break
if not found:
values.append(value)
def recalculate(self):
"""
Recalculate averages, stdev and percentiles
:return:
"""
if self[self.SAMPLE_COUNT]:
self[self.AVG_CONN_TIME] = self.sum_cn / self[self.SAMPLE_COUNT]
self[self.AVG_LATENCY] = self.sum_lt / self[self.SAMPLE_COUNT]
self[self.AVG_RESP_TIME] = self.sum_rt / self[self.SAMPLE_COUNT]
if self._concurrencies:
self[self.CONCURRENCY] = sum(self._concurrencies.values())
perc, stdev = self.__perc_and_stdev(self[self.RESP_TIMES], self.perc_levels, self[self.AVG_RESP_TIME])
for level, val in perc:
self[self.PERCENTILES][str(float(level))] = val
self[self.STDEV_RESP_TIME] = stdev
return self
def merge_kpis(self, src, sid=None):
"""
Merge other instance into self
:param sid: source ID to use when summing up concurrency
:type src: KPISet
:return:
"""
src.recalculate()
self.sum_cn += src.sum_cn
self.sum_lt += src.sum_lt
self.sum_rt += src.sum_rt
self[self.SAMPLE_COUNT] += src[self.SAMPLE_COUNT]
self[self.SUCCESSES] += src[self.SUCCESSES]
self[self.FAILURES] += src[self.FAILURES]
# NOTE: should it be average? mind the timestamp gaps
self._concurrencies[sid] = src[self.CONCURRENCY]
self[self.RESP_TIMES].update(src[self.RESP_TIMES])
self[self.RESP_CODES].update(src[self.RESP_CODES])
for src_item in src[self.ERRORS]:
self.inc_list(self[self.ERRORS], ('msg', src_item['msg']), src_item)
@staticmethod
def from_dict(obj):
"""
:type obj: dict
:rtype: KPISet
"""
inst = KPISet()
for key, val in iteritems(obj):
inst[key] = val
inst.sum_cn = obj[inst.AVG_CONN_TIME] * obj[inst.SAMPLE_COUNT]
inst.sum_lt = obj[inst.AVG_LATENCY] * obj[inst.SAMPLE_COUNT]
inst.sum_rt = obj[inst.AVG_RESP_TIME] * obj[inst.SAMPLE_COUNT]
inst.perc_levels = [float(x) for x in inst[inst.PERCENTILES].keys()]
inst[inst.RESP_TIMES] = {float(level): inst[inst.RESP_TIMES][level] for level in inst[inst.RESP_TIMES].keys()}
for error in inst[KPISet.ERRORS]:
error['urls'] = Counter(error['urls'])
return inst
@staticmethod
def __perc_and_stdev(cnts_dict, percentiles_to_calc=(), avg=0):
"""
from http://stackoverflow.com/questions/25070086/percentiles-from-counts-of-values
Returns [(percentile, value)] with nearest rank percentiles.
Percentile 0: <min_value>, 100: <max_value>.
cnts_dict: { <value>: <count> }
percentiles_to_calc: iterable for percentiles to calculate; 0 <= ~ <= 100
upd: added stdev calc to have it in single-pass for mans of efficiency
:type percentiles_to_calc: list(float)
:type cnts_dict: collections.Counter
"""
assert all(0 <= percentile <= 100 for percentile in percentiles_to_calc)
percentiles = []
if not cnts_dict:
return percentiles, 0
num = sum(cnts_dict.values())
cnts = sorted(cnts_dict.items())
curr_cnts_pos = 0 # current position in cnts
curr_pos = cnts[0][1] # sum of freqs up to current_cnts_pos
sqr_diffs = 0
for percentile in sorted(percentiles_to_calc):
if percentile < 100:
percentile_pos = percentile / 100.0 * num
while curr_pos <= percentile_pos and curr_cnts_pos < len(cnts):
sqr_diffs += cnts[curr_cnts_pos][1] * math.pow(cnts[curr_cnts_pos][0] - avg, 2)
curr_cnts_pos += 1
curr_pos += cnts[curr_cnts_pos][1]
percentiles.append((percentile, cnts[curr_cnts_pos][0]))
else:
percentiles.append((percentile, cnts[-1][0])) # we could add a small value
while curr_cnts_pos < len(cnts):
sqr_diffs += cnts[curr_cnts_pos][1] * math.pow(cnts[curr_cnts_pos][0] - avg, 2)
curr_cnts_pos += 1
stdev = math.sqrt(sqr_diffs / len(cnts))
return percentiles, stdev
class DataPoint(BetterDict):
"""
Represents an aggregate data point
:param ts: timestamp of this point
"""
SOURCE_ID = 'id'
TIMESTAMP = "ts"
CURRENT = "current"
CUMULATIVE = "cumulative"
SUBRESULTS = "subresults"
def __init__(self, ts, perc_levels=()):
super(DataPoint, self).__init__()
self.perc_levels = perc_levels
self[self.SOURCE_ID] = None
self[self.TIMESTAMP] = ts
self[self.CUMULATIVE] = BetterDict()
self[self.CURRENT] = BetterDict()
self[self.SUBRESULTS] = []
def __merge_kpis(self, src, dst, sid):
"""
:param src: KPISet
:param dst: KPISet
:param sid: int
:return:
"""
for label, val in iteritems(src):
dest = dst.get(label, KPISet(self.perc_levels))
if not isinstance(val, KPISet):
val = KPISet.from_dict(val)
val.perc_levels = self.perc_levels
dest.merge_kpis(val, sid)
def recalculate(self):
"""
Recalculate all KPISet's
"""
for val in self[self.CURRENT].values():
val.recalculate()
for val in self[self.CUMULATIVE].values():
val.recalculate()
def merge_point(self, src):
"""
:type src: DataPoint
"""
if self[self.TIMESTAMP] != src[self.TIMESTAMP]:
self.log.warning("Tried to merge data for %s and %s", self[self.TIMESTAMP], src[self.TIMESTAMP])
raise ValueError("Cannot merge different timestamps")
self[DataPoint.SUBRESULTS].append(src)
self.__merge_kpis(src[self.CURRENT], self[self.CURRENT], src[DataPoint.SOURCE_ID])
self.__merge_kpis(src[self.CUMULATIVE], self[self.CUMULATIVE], src[DataPoint.SOURCE_ID])
self.recalculate()
class ResultsProvider(object):
"""
:type listeners: list[AggregatorListener]
"""
def __init__(self):
super(ResultsProvider, self).__init__()
self.cumulative = BetterDict()
self.track_percentiles = []
self.listeners = []
def add_listener(self, listener):
"""
Add aggregate results listener
:type listener: AggregatorListener
"""
self.listeners.append(listener)
def __merge_to_cumulative(self, current):
"""
Merge current KPISet to cumulative
:param current: KPISet
:return:
"""
for label, data in iteritems(current):
cumul = self.cumulative.get(label, KPISet(self.track_percentiles))
cumul.merge_kpis(data)
def datapoints(self, final_pass=False):
"""
Generator object that returns datapoints from the reader
:type final_pass: bool
"""
for datapoint in self._calculate_datapoints(final_pass):
current = datapoint[DataPoint.CURRENT]
self.__merge_to_cumulative(current)
datapoint[DataPoint.CUMULATIVE] = copy.deepcopy(self.cumulative)
datapoint.recalculate()
for listener in self.listeners:
listener.aggregated_second(datapoint)
yield datapoint
@abstractmethod
def _calculate_datapoints(self, final_pass=False):
"""
:rtype : tuple
"""
pass
class ResultsReader(ResultsProvider):
"""
Aggregator that reads samples one by one,
supposed to be attached to every executor
"""
label_generalize_regexps = [
(re.compile(r"\b[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\b"), "U"),
(re.compile(r"\b[0-9a-fA-F]{2,}\b"), "U"),
# (re.compile(r"\b[0-9a-fA-F]{32}\b"), "U"), # implied by previous, maybe prev is too wide
(re.compile(r"\b\d{2,}\b"), "N")
]
def __init__(self, perc_levels=()):
super(ResultsReader, self).__init__()
self.generalize_labels = True
self.ignored_labels = []
self.log = logging.getLogger(self.__class__.__name__)
self.buffer = {}
self.buffer_len = 2
self.min_timestamp = 0
self.track_percentiles = perc_levels
def __process_readers(self, final_pass=False):
"""
:param final_pass: True if in post-process stage
:return:
"""
for result in self._read(final_pass):
if result is None:
self.log.debug("No data from reader")
break
elif isinstance(result, list) or isinstance(result, tuple):
t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname = result
if label in self.ignored_labels:
continue
if t_stamp < self.min_timestamp:
self.log.warning("Putting sample %s into %s", t_stamp, self.min_timestamp)
t_stamp = self.min_timestamp
if t_stamp not in self.buffer:
self.buffer[t_stamp] = []
self.buffer[t_stamp].append((label, conc, r_time, con_time, latency, r_code, error, trname))
else:
raise ValueError("Unsupported results from reader: %s" % result)
def __aggreagate_current(self, datapoint, samples):
"""
:param datapoint: DataPoint
:param samples: list of samples
:return:
"""
current = datapoint[DataPoint.CURRENT]
for sample in samples:
label, r_time, concur, con_time, latency, r_code, error, trname = sample
if label == '':
label = '[empty]'
if self.generalize_labels:
label = self.__generalize_label(label)
label = current.get(label, KPISet(self.track_percentiles))
# empty means overall
label.add_sample((r_time, concur, con_time, latency, r_code, error, trname))
overall = KPISet(self.track_percentiles)
for label in current.values():
overall.merge_kpis(label, datapoint[DataPoint.SOURCE_ID])
current[''] = overall
return current
def _calculate_datapoints(self, final_pass=False):
"""
A generator to read available datapoints
:type final_pass: bool
:rtype: DataPoint
"""
self.__process_readers(final_pass)
self.log.debug("Buffer len: %s", len(self.buffer))
if not self.buffer:
return
timestamps = sorted(self.buffer.keys())
while final_pass or (timestamps[-1] >= (timestamps[0] + self.buffer_len)):
timestamp = timestamps.pop(0)
self.min_timestamp = timestamp + 1
self.log.debug("Aggregating: %s", timestamp)
samples = self.buffer.pop(timestamp)
datapoint = self.__get_new_datapoint(timestamp)
self.__aggreagate_current(datapoint, samples)
yield datapoint
if not timestamps:
break
def __get_new_datapoint(self, timestamp):
"""
:rtype: DataPoint
"""
point = DataPoint(timestamp, self.track_percentiles)
point[DataPoint.SOURCE_ID] = id(self)
return point
@abstractmethod
def _read(self, final_pass=False):
"""
:param final_pass: True if called from post-process stage, when reader
should report possible rests of results
:rtype: list
:return: timestamp, label, concurrency, rt, latency, rc, error
"""
yield
def __generalize_label(self, label):
for regexp, replacement in self.label_generalize_regexps:
label = regexp.sub(replacement, label)
return label
class ConsolidatingAggregator(EngineModule, ResultsProvider):
"""
:type underlings: list[bzt.modules.aggregator.ResultsProvider]
"""
# FIXME: it was oscillating with remote test of 100 servers
def __init__(self):
EngineModule.__init__(self)
ResultsProvider.__init__(self)
self.generalize_labels = True
self.ignored_labels = []
self.underlings = []
self.buffer = BetterDict()
self.buffer_len = 2
def prepare(self):
"""
Read aggregation options
"""
super(ConsolidatingAggregator, self).prepare()
self.track_percentiles = self.settings.get("percentiles", self.track_percentiles)
self.buffer_len = self.settings.get("buffer-seconds", self.buffer_len)
self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)
def add_underling(self, underling):
"""
Add source for aggregating
:type underling: ResultsProvider
"""
underling.track_percentiles = self.track_percentiles
if isinstance(underling, ResultsReader):
underling.ignored_labels = self.ignored_labels
underling.generalize_labels = self.generalize_labels
# underling.buffer_len = self.buffer_len # NOTE: is it ok for underling to have the same buffer len?
self.underlings.append(underling)
def check(self):
"""
Check if there is next aggregate data present
:rtype: bool
"""
for point in self.datapoints():
self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
return super(ConsolidatingAggregator, self).check()
def post_process(self):
"""
Process all remaining aggregate data
"""
super(ConsolidatingAggregator, self).post_process()
for point in self.datapoints(True):
self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
def _process_underlings(self, final_pass):
for underling in self.underlings:
for data in [x for x in underling.datapoints(final_pass)]:
tstamp = data[DataPoint.TIMESTAMP]
if self.buffer:
mints = min(self.buffer.keys())
if tstamp < mints:
self.log.warning("Putting datapoint %s into %s", tstamp, mints)
data[DataPoint.TIMESTAMP] = mints
tstamp = mints
self.buffer.get(tstamp, []).append(data)
def _calculate_datapoints(self, final_pass=False):
"""
Override ResultsProvider._calculate_datapoints
"""
self._process_underlings(final_pass)
self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
if not self.buffer:
return
timestamps = sorted(self.buffer.keys())
while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
tstamp = timestamps.pop(0)
self.log.debug("Merging into %s", tstamp)
points_to_consolidate = self.buffer.pop(tstamp)
point = DataPoint(tstamp, self.track_percentiles)
for subresult in points_to_consolidate:
self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
point.merge_point(subresult)
point.recalculate()
yield point
class NoneAggregator(EngineModule, ResultsProvider):
"""
Dummy aggregator
"""
def __init__(self):
EngineModule.__init__(self)
ResultsProvider.__init__(self)
def _calculate_datapoints(self, final_pass=False):
pass
| apache-2.0 | -1,317,869,710,146,653,200 | 32.771757 | 118 | 0.581611 | false |
huguesv/azure-sdk-for-python | azure-mgmt-storage/setup.py | 1 | 2220 |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from setuptools import setup
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
    import azure
    try:
        ver = azure.__version__
        raise Exception(
            'This package is incompatible with azure=={}. '.format(ver) +
            'Uninstall it with "pip uninstall azure".'
        )
    except AttributeError:
        pass
except ImportError:
    pass

setup(
    name='azure-mgmt-storage',
    version='0.20.0',
    description='Microsoft Azure Storage Resource Management Client Library for Python',
    long_description=open('README.rst', 'r').read(),
    license='Apache License 2.0',
    author='Microsoft Corporation',
    author_email='ptvshelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'License :: OSI Approved :: Apache Software License',
    ],
    zip_safe=False,
    packages=[
        'azure',
        'azure.mgmt',
        'azure.mgmt.storage',
    ],
    install_requires=[
        'azure-mgmt-common',
    ],
)
| apache-2.0 | 1,544,755,160,999,081,500 | 33.6875 | 88 | 0.605405 | false |
USGSDenverPychron/pychron | pychron/experiment/tasks/experiment_panes.py | 1 | 26112 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.tasks.traits_dock_pane import TraitsDockPane
from traits.api import Color, Instance, DelegatesTo, List, Any, Property
from traitsui.api import View, Item, UItem, VGroup, HGroup, spring, \
Group, Spring, VFold, Label, InstanceEditor, \
VSplit, TabularEditor, UReadonly, ListEditor, Readonly
from traitsui.editors import TableEditor
from traitsui.table_column import ObjectColumn
from traitsui.tabular_adapter import TabularAdapter
from pychron.core.ui.combobox_editor import ComboboxEditor
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.core.ui.enum_editor import myEnumEditor
from pychron.core.ui.led_editor import LEDEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.experiment.plot_panel import PlotPanel
from pychron.experiment.utilities.identifier import SPECIAL_NAMES
from pychron.pychron_constants import MEASUREMENT_COLOR, EXTRACTION_COLOR, \
NOT_EXECUTABLE_COLOR, SKIP_COLOR, SUCCESS_COLOR, CANCELED_COLOR, \
TRUNCATED_COLOR, FAILED_COLOR, END_AFTER_COLOR
# ===============================================================================
# editing
# ===============================================================================
def spacer(w):
return Spring(width=w, springy=False)
def queue_factory_name(name):
return 'object.queue_factory.{}'.format(name)
def run_factory_name(name):
return 'object.run_factory.{}'.format(name)
def queue_factory_item(name, **kw):
return Item(queue_factory_name(name), **kw)
def run_factory_item(name, **kw):
return Item(run_factory_name(name), **kw)
class ExperimentFactoryPane(TraitsDockPane):
id = 'pychron.experiment.factory'
name = 'Experiment Editor'
info_label = Property(depends_on='model.run_factory.info_label')
def _get_info_label(self):
return '<font color="green"><b>{}</b></font>'.format(self.model.run_factory.info_label)
def traits_view(self):
add_button = icon_button_editor('add_button', 'add',
# enabled_when='ok_add',
tooltip='Add run')
save_button = icon_button_editor('save_button', 'disk',
tooltip='Save queue to file')
edit_button = icon_button_editor('edit_mode_button', 'table_edit',
enabled_when='edit_enabled',
tooltip='Toggle edit mode')
clear_button = icon_button_editor('clear_button',
'table_row_delete',
tooltip='Clear all runs added using "frequency"')
email_grp = VGroup(HGroup(queue_factory_item('use_email',
label='Use Email',
tooltip='Send email notifications'),
queue_factory_item('use_group_email',
tooltip='Email a group of users',
label='Email Group'),
icon_button_editor(queue_factory_name('edit_emails'), 'cog',
tooltip='Edit user group')),
Item(queue_factory_name('email')),
show_border=True,
label='Email')
user_grp = HGroup(UItem(queue_factory_name('username'),
show_label=False,
editor=ComboboxEditor(name=queue_factory_name('usernames'))),
icon_button_editor(queue_factory_name('edit_user'), 'database_edit'),
show_border=True,
label='User')
lgrp = HGroup(queue_factory_item('load_name',
width=150,
label='Load',
editor=ComboboxEditor(name=queue_factory_name('load_names'))),
icon_button_editor('generate_queue_button', 'brick-go',
tooltip='Generate a experiment queue from the selected load',
enabled_when='load_name'),
icon_button_editor('edit_queue_config_button', 'cog',
tooltip='Configure experiment queue generation'))
ms_ed_grp = VGroup(HGroup(queue_factory_item('mass_spectrometer',
show_label=False,
editor=myEnumEditor(
name=queue_factory_name('mass_spectrometers'))),
queue_factory_item('extract_device',
show_label=False,
editor=myEnumEditor(name=queue_factory_name('extract_devices')))),
lgrp,
HGroup(queue_factory_item('queue_conditionals_name',
label='Queue Conditionals',
editor=myEnumEditor(
name=queue_factory_name('available_conditionals')))),
label='Spectrometer/Extract Device',
show_border=True)
delay_grp = VGroup(queue_factory_item('delay_before_analyses'),
queue_factory_item('delay_between_analyses'),
show_border=True,
label='Delays')
queue_grp = VGroup(user_grp, email_grp, ms_ed_grp, delay_grp,
label='Queue')
button_bar = HGroup(save_button,
add_button,
clear_button,
edit_button,
CustomLabel(run_factory_name('edit_mode_label'),
color='red',
width=40),
spring)
button_bar2 = HGroup(Item('auto_increment_id', label='Auto Increment L#'),
Item('auto_increment_position', label='Position'), )
edit_grp = VFold(queue_grp,
VGroup(self._get_info_group(),
self._get_extract_group(),
enabled_when=queue_factory_name('ok_make'),
label='General'),
self._get_script_group(),
self._get_truncate_group())
v = View(VGroup(button_bar,
button_bar2,
UItem('pane.info_label', style='readonly'),
edit_grp,
# style_sheet=load_stylesheet('experiment_factory')
),
kind='live',
width=225)
return v
def _get_info_group(self):
grp = Group(HGroup(run_factory_item('selected_irradiation',
show_label=False,
editor=myEnumEditor(name=run_factory_name('irradiations'))),
run_factory_item('selected_level',
show_label=False,
editor=myEnumEditor(name=run_factory_name('levels')))),
HGroup(run_factory_item('special_labnumber',
show_label=False,
editor=myEnumEditor(values=SPECIAL_NAMES)),
run_factory_item('run_block', show_label=False,
editor=myEnumEditor(name=run_factory_name('run_blocks'))),
icon_button_editor(run_factory_name('edit_run_blocks'), 'cog'),
run_factory_item('frequency_model.frequency_int', width=50),
icon_button_editor(run_factory_name('edit_frequency_button'), 'cog'),
# run_factory_item('freq_before', label='Before'),
# run_factory_item('freq_after', label='After'),
spring),
HGroup(run_factory_item('labnumber',
tooltip='Enter a Labnumber',
width=100,
enabled_when='object.run_factory.special_labnumber == "Special Labnumber"',
editor=myEnumEditor(name=run_factory_name('labnumbers'))),
run_factory_item('aliquot',
width=50),
spring),
HGroup(run_factory_item('repository_identifier',
label='Repository ID',
editor=myEnumEditor(name=run_factory_name('repository_identifiers'))),
icon_button_editor(run_factory_name('add_repository_identifier'), 'add',
tooltip='Add a new repository'),
icon_button_editor(run_factory_name('set_repository_identifier_button'), 'arrow_right',
tooltip='Set select runs repository_identifier to current value'),
icon_button_editor(run_factory_name('clear_repository_identifier_button'), 'clear'),
UItem(run_factory_name('use_project_based_repository_identifier'),
tooltip='Use repository identifier based on project name')),
HGroup(run_factory_item('weight',
label='Weight (mg)',
tooltip='(Optional) Enter the weight of the sample in mg. '
'Will be saved in Database with analysis'),
run_factory_item('comment',
tooltip='(Optional) Enter a comment for this sample. '
'Will be saved in Database with analysis'),
run_factory_item('auto_fill_comment',
show_label=False,
tooltip='Auto fill "Comment" with IrradiationLevel:Hole, e.g A:9'),
icon_button_editor(run_factory_name('edit_comment_template'), 'cog',
tooltip='Edit comment template')),
HGroup(run_factory_item('flux'),
Label(u'\u00b1'),
run_factory_item('flux_error', show_label=False),
icon_button_editor(run_factory_name('save_flux_button'),
'database_save',
tooltip='Save flux to database'),
enabled_when=run_factory_name('labnumber')),
show_border=True,
label='Sample Info')
return grp
def _get_truncate_group(self):
grp = VGroup(HGroup(run_factory_item('use_simple_truncation', label='Use Simple'),
icon_button_editor(run_factory_name('clear_conditionals'),
'delete',
tooltip='Clear Conditionals from selected runs')),
HGroup(run_factory_item('trunc_attr',
editor=myEnumEditor(name=run_factory_name('trunc_attrs')),
show_label=False),
run_factory_item('trunc_comp', show_label=False),
run_factory_item('trunc_crit', show_label=False),
spacer(-10),
run_factory_item('trunc_start', label='Start Count'),
show_border=True,
label='Simple'),
HGroup(run_factory_item('conditionals_path',
editor=myEnumEditor(name=run_factory_name('conditionals')),
label='Path'),
icon_button_editor(run_factory_name('edit_conditionals_button'), 'table_edit',
enabled_when=run_factory_name('conditionals_path'),
tooltip='Edit the selected conditionals file'),
icon_button_editor(run_factory_name('new_conditionals_button'), 'table_add',
tooltip='Add a new conditionals file. Duplicated currently '
'selected file if applicable'),
show_border=True,
label='File'),
enabled_when=queue_factory_name('ok_make'),
label='Run Conditionals')
return grp
def _get_script_group(self):
script_grp = VGroup(run_factory_item('extraction_script', style='custom', show_label=False),
run_factory_item('measurement_script', style='custom', show_label=False),
run_factory_item('post_equilibration_script', style='custom', show_label=False),
run_factory_item('post_measurement_script', style='custom', show_label=False),
run_factory_item('script_options', style='custom', show_label=False),
HGroup(spring,
run_factory_item('default_fits_button',
show_label=False,
enabled_when=run_factory_name('default_fits_enabled'),
label='Default Fits'),
run_factory_item('load_defaults_button',
tooltip='load the default scripts for this analysis type',
show_label=False,
enabled_when=run_factory_name('labnumber'))),
enabled_when=queue_factory_name('ok_make'),
show_border=True,
label='Scripts')
return script_grp
def _get_extract_group(self):
return Group(run_factory_item('factory_view', style='custom', show_label=False))
# ===============================================================================
# execution
# ===============================================================================
class ConnectionStatusPane(TraitsDockPane):
id = 'pychron.experiment.connection_status'
name = 'Connection Status'
def traits_view(self):
cols = [ObjectColumn(name='name', editable=False),
ObjectColumn(name='connected', editable=False)]
v = View(UItem('connectables',
editor=TableEditor(editable=False,
sortable=False,
columns=cols)))
return v
class StatsPane(TraitsDockPane):
id = 'pychron.experiment.stats'
name = 'Stats'
def traits_view(self):
gen_grp = VGroup(HGroup(Readonly('nruns', width=150, label='Total Runs'),
UReadonly('total_time')),
HGroup(Readonly('nruns_finished', width=150, label='Completed'),
UReadonly('elapsed')),
Readonly('remaining', label='Remaining'),
Readonly('etf', label='Est. finish'),
show_border=True, label='General')
cur_grp = VGroup(Readonly('current_run_duration', ),
Readonly('run_elapsed'),
show_border=True,
label='Current')
sel_grp = VGroup(Readonly('start_at'),
Readonly('end_at'),
Readonly('run_duration'),
label='Selection', show_border=True)
v = View(VGroup(gen_grp, cur_grp, sel_grp))
return v
class ControlsPane(TraitsDockPane):
# name = 'Controls'
id = 'pychron.experiment.controls'
movable = False
closable = False
floatable = False
def traits_view(self):
cancel_tt = '''Cancel current run and continue to next run'''
stop_tt = '''Cancel current run and stop queue'''
start_tt = '''Start current experiment queue.
Will continue to next opened queue when completed'''
truncate_tt = '''Stop the current measurement process and continue to
the next step in the measurement script'''
truncate_style_tt = '''Normal= measure_iteration stopped at current step
script continues
Quick= measure_iteration stopped at current step
script continues using abbreviated_count_ratio*counts'''
end_tt = '''Stop the queue and the end of the current run'''
schedule_tt = '''Set a scheduled start time'''
v = View(HGroup(UItem('executing_led', editor=LEDEditor(radius=30)),
spacer(-20),
icon_button_editor('start_button',
'start',
enabled_when='can_start',
tooltip=start_tt),
icon_button_editor('configure_scheduled_button', 'calendar',
enabled_when='can_start',
tooltip=schedule_tt),
icon_button_editor('stop_button', 'stop',
enabled_when='not can_start',
tooltip=stop_tt),
spacer(-20),
Item('end_at_run_completion',
label='Stop at Completion',
tooltip=end_tt),
spacer(-20),
icon_button_editor('abort_run_button', 'cancel',
# enabled_when='can_cancel',
tooltip=cancel_tt),
spacer(-20),
icon_button_editor('truncate_button',
'lightning',
enabled_when='measuring',
tooltip=truncate_tt),
UItem('truncate_style',
enabled_when='measuring',
tooltip=truncate_style_tt),
UItem('show_conditionals_button'),
spacer(-75),
CustomLabel('object.experiment_status.label',
color_name='object.experiment_status.color',
size=24,
weight='bold'), spring))
return v
class ExplanationPane(TraitsDockPane):
id = 'pychron.experiment.explanation'
name = 'Explanation'
measurement = Color(MEASUREMENT_COLOR)
extraction = Color(EXTRACTION_COLOR)
success = Color(SUCCESS_COLOR)
skip = Color(SKIP_COLOR)
canceled = Color(CANCELED_COLOR)
truncated = Color(TRUNCATED_COLOR)
failed = Color(FAILED_COLOR)
not_executable = Color(NOT_EXECUTABLE_COLOR)
end_after = Color(END_AFTER_COLOR)
def set_colors(self, cd):
for k, v in cd.iteritems():
if hasattr(self, k):
setattr(self, k, v)
def traits_view(self):
v = View(VGroup(HGroup(Label('Extraction'), spring,
UReadonly('extraction', )),
HGroup(Label('Measurement'), spring,
UReadonly('measurement', )),
HGroup(Label('Skip'), spring,
UReadonly('skip', )),
HGroup(Label('Success'), spring,
UReadonly('success', )),
HGroup(Label('Truncated'), spring,
UReadonly('truncated', )),
HGroup(Label('Canceled'), spring,
UReadonly('canceled', )),
HGroup(Label('Failed'), spring,
UReadonly('failed', )),
HGroup(Label('Not Executable'), spring,
UReadonly('not_executable', )),
HGroup(Label('End After'), spring,
UReadonly('end_after', ))))
return v
class IsotopeEvolutionPane(TraitsDockPane):
id = 'pychron.experiment.isotope_evolution'
name = 'Isotope Evolutions'
plot_panel = Instance(PlotPanel, ())
is_peak_hop = DelegatesTo('plot_panel')
def traits_view(self):
v = View(VSplit(UItem('object.plot_panel.graph_container',
style='custom',
height=0.75),
VGroup(HGroup(Spring(springy=False, width=-5),
Item('object.plot_panel.ncycles', label='Cycles',
tooltip='Set the number of measurement cycles',
visible_when='is_peak_hop',
width=-50),
Spring(springy=False, width=-10),
CustomLabel('object.plot_panel.current_cycle',
color='blue',
color_name='object.plot_panel.current_color',
# width=2,
visible_when='is_peak_hop'),
Spring(springy=False, width=-10),
Item('object.plot_panel.ncounts', label='Counts',
tooltip='Set the number of measurement points'),
Spring(springy=False, width=-10),
CustomLabel('object.plot_panel.display_counts',
color='red',
size=14,
width=50),
Spring(springy=False, width=-5)),
UItem('object.plot_panel.analysis_view',
style='custom',
height=0.25))))
return v
class SummaryPane(TraitsDockPane):
id = 'pychron.experiment.summary'
name = 'Summary'
plot_panel = Instance('pychron.experiment.plot_panel.PlotPanel')
def traits_view(self):
v = View(UItem('plot_panel', editor=InstanceEditor(view='summary_view'),
style='custom'))
return v
class AnalysisHealthAdapter(TabularAdapter):
columns = [('Isotope', 'name'),
('Min.', 'health_min'),
('Health', 'health'),
('Max.', 'health_max')]
class AnalysisHealthPane(TraitsDockPane):
id = 'pychron.experiment.analysis_health'
name = 'Health'
def traits_view(self):
v = View(UItem('analysis_type', style='readonly'),
Item('isotopes', editor=TabularEditor(adapter=AnalysisHealthAdapter())))
return v
class LoggerPane(TraitsDockPane):
loggers = List
selected = Any
name = 'Logger'
id = 'pychron.experiment.logger'
def __init__(self, *args, **kw):
super(LoggerPane, self).__init__(*args, **kw)
from pychron.displays.gdisplays import gWarningDisplay, gLoggerDisplay
self.loggers = [gLoggerDisplay, gWarningDisplay]
def traits_view(self):
v = View(UItem('loggers',
editor=ListEditor(use_notebook=True,
page_name='.title',
selected='selected'),
style='custom'))
return v
# ============= EOF =============================================
| apache-2.0 | 1,861,105,749,135,323,400 | 50 | 119 | 0.455078 | false |
Som-Energia/invoice-janitor | CUPS/sips_comparator.py | 1 | 3601 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import tqdm
import click
from erppeek import Client
from datetime import datetime, date
import configdb
ATR_CASES = ['C2']
ATR_STEPS = ['01']
def create_file(c, from_date, file_output):
atr_ids = c.GiscedataSwitching.search([('create_date','>=', from_date),('proces_id.name', 'in', ATR_CASES),('step_id.name','in',ATR_STEPS)])
print "{} contracts found from date {}".format(len(atr_ids), from_date)
print "Dumping data to {}".format(file_output)
if not atr_ids:
print "No ATR cases found"
return
polisses = c.GiscedataSwitching.read(atr_ids, ['cups_polissa_id','user_id'])
polisses_ids = set([polissa['cups_polissa_id'][0] for polissa in polisses if polissa['cups_polissa_id']])
polisses_with_resp = dict( [polissa['cups_polissa_id'][0],polissa['user_id'][1]] for polissa in polisses if polissa['cups_polissa_id'] and polissa['user_id'] )
with open(file_output, 'w') as csvfile:
fields = ['contrato', 'cups', 'data_alta', 'adr_cups', 'adr_sips', 'poblacio_sips', 'titular', 'titular_email', 'responsable', 'idioma']
csvwriter = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC ,quotechar ='\"', delimiter=';')
csvwriter.writerow(fields)
p_fields = ['name', 'data_alta', 'cups', 'titular']
for p_data in tqdm.tqdm(c.GiscedataPolissa.read(list(polisses_ids), p_fields)):
contract_name = p_data['name']
contract_id = p_data['id']
cups_id = p_data['cups'][0]
titular_id = p_data['titular'][0]
titular_name = p_data['titular'][1]
cups_name = p_data['cups'][1]
c_data = c.GiscedataCupsPs.read(cups_id, ['name', 'direccio'])
t_data = c.ResPartner.read(titular_id, ['name', 'lang', 'vat'])
pa_ids = c.ResPartnerAddress.search([('partner_id', '=', titular_id)])
email = ''
for pa_data in c.ResPartnerAddress.read(pa_ids, ['email']):
if pa_data['email']:
email = pa_data['email']
break
sips_ids = c.GiscedataSipsPs.search(
[('name', 'in', [cups_name, cups_name[:20]])]
)
data = [
contract_name, cups_name, p_data['data_alta'],
c_data['direccio'].encode('utf-8')
]
if sips_ids:
sips_id = int(sips_ids[0])
sips_data = c.GiscedataSipsPs.read(
sips_id, ['poblacio', 'direccio']
)
sips_dir = sips_data.get('direccio', u'').encode('utf-8')
sips_poblacio = sips_data.get('poblacio', u'').encode('utf-8')
extra_sips = [sips_dir, sips_poblacio]
else:
extra_sips = ["No SIPS Found", ""]
data.extend(extra_sips)
resp = polisses_with_resp[p_data['id']].encode('utf-8')
data.extend([titular_name.encode('utf-8'), email, resp, t_data['lang']])
csvwriter.writerow(data)
@click.command()
@click.option('-f', '--file-output', default='/tmp/sips_comparator.csv',
help='Destination file path')
@click.option('-d', '--from-date',
default=datetime.today().date().strftime('%Y-%m-%d'),
help='Contract start date to compare [YYYY-MM-DD]')
def main(**kwargs):
c = Client(**configdb.erppeek)
print "connected to: {}".format(c._server)
create_file(c, kwargs['from_date'], kwargs['file_output'])
if __name__ == '__main__':
main()
| agpl-3.0 | -8,360,267,966,215,296,000 | 40.390805 | 163 | 0.556234 | false |
akrherz/iem | htdocs/plotting/auto/scripts100/p120.py | 1 | 5184 |
"""last spring temp"""
import datetime
from pandas.io.sql import read_sql
import pandas as pd
import matplotlib.dates as mdates
from pyiem.plot import figure_axes
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc["report"] = True
desc[
"description"
] = """This chart presents the accumulated frequency of
having the last spring temperature at or below a given threshold."""
desc["arguments"] = [
dict(
type="station",
name="station",
default="IATDSM",
label="Select Station",
network="IACLIMATE",
),
dict(type="int", name="t1", default=32, label="First Threshold (F)"),
dict(type="int", name="t2", default=28, label="Second Threshold (F)"),
dict(type="int", name="t3", default=26, label="Third Threshold (F)"),
dict(type="int", name="t4", default=22, label="Fourth Threshold (F)"),
dict(
type="year",
name="syear",
min=1880,
label="Potential (if data exists) minimum year",
default=1880,
),
dict(
type="year",
name="eyear",
min=1880,
label="Potential (if data exists) exclusive maximum year",
default=datetime.date.today().year,
),
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn("coop")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
thresholds = [ctx["t1"], ctx["t2"], ctx["t3"], ctx["t4"]]
table = "alldata_%s" % (station[:2],)
# Load up dict of dates..
df = pd.DataFrame(
{
"dates": pd.date_range("2000/01/29", "2000/06/30"),
"%scnts" % (thresholds[0],): 0,
"%scnts" % (thresholds[1],): 0,
"%scnts" % (thresholds[2],): 0,
"%scnts" % (thresholds[3],): 0,
},
index=range(29, 183),
)
df.index.name = "doy"
for base in thresholds:
# Query Last doy for each year in archive
df2 = read_sql(
f"""
select year,
max(case when low <= %s then extract(doy from day)
else 0 end) as doy from {table}
WHERE month < 7 and station = %s and year > %s and year < %s
GROUP by year
""",
pgconn,
params=(base, station, ctx["syear"], ctx["eyear"]),
index_col=None,
)
for _, row in df2.iterrows():
if row["doy"] == 0:
continue
df.loc[0 : row["doy"], "%scnts" % (base,)] += 1
df["%sfreq" % (base,)] = (
df["%scnts" % (base,)] / len(df2.index) * 100.0
)
bs = ctx["_nt"].sts[station]["archive_begin"]
if bs is None:
raise NoDataFound("No metadata found.")
res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: Daryl Herzmann akrherz@iastate.edu 515.294.5978
# Low Temperature exceedance probabilities
# (On a certain date, what is the chance a temperature below a certain
# threshold would be observed again that spring season)
DOY Date <%s <%s <%s <%s
""" % (
datetime.date.today().strftime("%d %b %Y"),
max([bs.date(), datetime.date(ctx["syear"], 1, 1)]),
min([datetime.date.today(), datetime.date(ctx["eyear"] - 1, 12, 31)]),
station,
ctx["_nt"].sts[station]["name"],
thresholds[0] + 1,
thresholds[1] + 1,
thresholds[2] + 1,
thresholds[3] + 1,
)
fcols = ["%sfreq" % (s,) for s in thresholds]
mindate = None
for doy, row in df.iterrows():
if doy % 2 != 0:
continue
if row[fcols[3]] < 100 and mindate is None:
mindate = row["dates"] - datetime.timedelta(days=5)
res += (" %3s %s %3i %3i %3i %3i\n") % (
row["dates"].strftime("%-j"),
row["dates"].strftime("%b %d"),
row[fcols[0]],
row[fcols[1]],
row[fcols[2]],
row[fcols[3]],
)
title = "Frequency of Last Spring Temperature"
subtitle = "%s %s (%s-%s)" % (
station,
ctx["_nt"].sts[station]["name"],
max([bs.date(), datetime.date(ctx["syear"], 1, 1)]),
min([datetime.date.today(), datetime.date(ctx["eyear"] - 1, 12, 31)]),
)
(fig, ax) = figure_axes(title=title, subtitle=subtitle)
for base in thresholds:
ax.plot(
df["dates"].values,
df["%sfreq" % (base,)],
label="%s" % (base,),
lw=2,
)
ax.legend(loc="best")
ax.set_xlim(mindate)
ax.xaxis.set_major_locator(mdates.DayLocator([1, 7, 14, 21]))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%-d\n%b"))
ax.grid(True)
df.reset_index(inplace=True)
return fig, df, res
if __name__ == "__main__":
plotter(dict())
|
mit
| -6,493,876,069,943,078,000 | 31.198758 | 78 | 0.52527 | false |
devgi/bpf
|
src/bpf/validate.py
|
1
|
5558
|
import struct
from bpf.opcodes import (BPF_CLASS, BPF_LD, BPF_LDX, BPF_MODE, BPF_MEMWORDS,
BPF_IMM, BPF_IND, BPF_MSH, BPF_ABS, BPF_LEN, BPF_MEM,
BPF_ST, BPF_STX, BPF_ALU, BPF_OP, BPF_ADD, BPF_SUB,
BPF_MUL, BPF_OR, BPF_AND, BPF_RSH, BPF_LSH, BPF_NEG,
BPF_DIV, BPF_SRC, BPF_K, BPF_JMP, BPF_JA, BPF_JEQ,
BPF_JGT,
BPF_JGE, BPF_JSET, BPF_RET, BPF_MISC
)
# /*
# * Return true if the 'fcode' is a valid filter program.
# * The constraints are that each jump be forward and to a valid
# * code, that memory accesses are within valid ranges (to the
# * extent that this can be checked statically; loads of packet
# * data have to be, and are, also checked at run time), and that
# * the code terminates with either an accept or reject.
# *
# * The kernel needs to be able to verify an application's filter code.
# * Otherwise, a bogus program could easily crash the system.
# */
BPF_MAXINSNS = 4096
def validate_opcode_field(opcode, fieldname, fieldtype):
try:
struct.pack(fieldtype, getattr(opcode, fieldname))
except struct.error:
raise ValueError("Opcode field %s contains illegal value." % fieldname)
def validate_opcode(opcode):
# u_short code;
# u_char jt;
# u_char jf;
# long k
validate_opcode_field(opcode, "code", "!H")
validate_opcode_field(opcode, "jt", "!B")
validate_opcode_field(opcode, "jf", "!B")
validate_opcode_field(opcode, "k", "!L")
def bpf_validate(program):
proglen = len(program.instructions)
if (proglen < 1):
raise ValueError("program is empty?")
if (proglen > BPF_MAXINSNS):
raise ValueError("program is too long.")
for i, opcode in enumerate(program.instructions):
# call validate opcode.
validate_opcode(opcode)
code = opcode.code
if BPF_CLASS(code) in (BPF_LD, BPF_LDX):
if BPF_MODE(code) in (BPF_ABS, BPF_IND, BPF_MSH, BPF_LEN, BPF_IMM):
# nothing to do here.
pass
elif BPF_MODE(code) == BPF_MEM:
if opcode.k >= BPF_MEMWORDS:
raise ValueError(
"Attempt to load value from non exists memory cell.")
else:
raise ValueError("Invalid mode for class LD/LDX.")
elif (BPF_CLASS(code) in (BPF_ST, BPF_STX) and
opcode.k >= BPF_MEMWORDS):
raise ValueError(
"Attempt to store value in non exists memory cell.")
elif BPF_CLASS(code) == BPF_ALU:
if BPF_OP(code) in (
BPF_ADD, BPF_SUB, BPF_MUL, BPF_OR, BPF_AND, BPF_LSH,
BPF_RSH,
BPF_NEG):
# nothing to do here.
pass
elif BPF_OP(code) == BPF_DIV and (
BPF_SRC(code) == BPF_K and opcode.k == 0):
# Check for constant division by 0.
raise ValueError("Attempt to divide by 0.")
else:
raise ValueError("Invalid bpf op for class ALU.")
elif BPF_CLASS(code) == BPF_JMP:
# Check that jumps are within the code block,
# and that unconditional branches don't go
# backwards as a result of an overflow.
# Unconditional branches have a 32-bit offset,
# so they could overflow; we check to make
# sure they don't. Conditional branches have
# an 8-bit offset, and the from address is <=
# BPF_MAXINSNS, and we assume that BPF_MAXINSNS
# is sufficiently small that adding 255 to it
# won't overflow.
#
# We know that len is <= BPF_MAXINSNS, and we
# assume that BPF_MAXINSNS is < the maximum size
# of a u_int, so that i + 1 doesn't overflow.
#
# For userland, we don't know that the from
# or len are <= BPF_MAXINSNS, but we know that
# from <= len, and, except on a 64-bit system,
# it's unlikely that len, if it truly reflects
# the size of the program we've been handed,
# will be anywhere near the maximum size of
# a u_int. We also don't check for backward
# branches, as we currently support them in
# userland for the protochain operation.
#
jump_from = i + 1
if BPF_OP(code) == BPF_JA:
if jump_from + opcode.k >= proglen:
raise ValueError(
"Attempt to jump outside the program scope.")
elif BPF_OP(code) in (BPF_JEQ, BPF_JGT, BPF_JGE, BPF_JSET):
if (jump_from + opcode.jt >= proglen or
jump_from + opcode.jf >= proglen):
raise ValueError(
"Attempt to jump outside the program scope.")
else:
raise ValueError("Invalid bpf op for class JMP.")
elif BPF_CLASS(code) == BPF_RET:
# nothing to do here
pass
elif BPF_CLASS(code) == BPF_MISC:
# nothing to do here
pass
else:
raise ValueError("Invalid code class.")
# verify that the last opcode is ret.
if BPF_CLASS(program.instructions[proglen - 1].code) != BPF_RET:
raise ValueError("Program should end with RET.")
return True
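# ----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original module).
# bpf_validate() only needs an object exposing `.instructions`, where each
# instruction carries `code`, `jt`, `jf` and `k` attributes, so plain
# namedtuples are enough to exercise it. The two-instruction program below is
# assumed to mean "load a packet word, then return 0 (reject)".
if __name__ == '__main__':
    from collections import namedtuple
    Opcode = namedtuple('Opcode', 'code jt jf k')
    Program = namedtuple('Program', 'instructions')
    demo_program = Program(instructions=[
        Opcode(code=BPF_LD | BPF_ABS, jt=0, jf=0, k=0),  # ld [0]
        Opcode(code=BPF_RET, jt=0, jf=0, k=0),           # ret #0
    ])
    print(bpf_validate(demo_program))  # expected: True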
|
mit
| 7,272,572,571,908,061,000 | 37.867133 | 79 | 0.54516 | false |
ParkJinSang/Logle
|
learning/svm/libsvm/svm.py
|
1
|
9604
|
#!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
from os import path
import sys
__all__ = ['libsvm', 'svm_problem', 'svm_parameter',
'toPyModel', 'gen_svm_nodearray', 'print_null', 'svm_node', 'C_SVC',
'EPSILON_SVR', 'LINEAR', 'NU_SVC', 'NU_SVR', 'ONE_CLASS',
'POLY', 'PRECOMPUTED', 'PRINT_STRING_FUN', 'RBF',
'SIGMOID', 'c_double', 'svm_model']
try:
dirname = path.dirname(path.abspath(__file__))
if sys.platform == 'win32':
libsvm = CDLL(path.join(dirname, r'.\windows\libsvm.dll'))
else:
libsvm = CDLL(path.join(dirname, '../libsvm.so.2'))
except:
# For unix the prefix 'lib' is not considered.
if find_library('svm'):
libsvm = CDLL(find_library('svm'))
elif find_library('libsvm'):
libsvm = CDLL(find_library('libsvm'))
else:
raise Exception('LIBSVM library not found.')
C_SVC = 0
NU_SVC = 1
ONE_CLASS = 2
EPSILON_SVR = 3
NU_SVR = 4
LINEAR = 0
POLY = 1
RBF = 2
SIGMOID = 3
PRECOMPUTED = 4
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class svm_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def __str__(self):
return '%d:%g' % (self.index, self.value)
def gen_svm_nodearray(xi, feature_max=None, isKernel=None):
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
if not isKernel:
xi = [0] + xi # idx should start from 1
index_range = range(len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if not isKernel:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (svm_node * (len(index_range)+1))()
ret[-1].index = -1
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range:
max_idx = index_range[-1]
return ret, max_idx
class svm_problem(Structure):
_names = ["l", "y", "x"]
_types = [c_int, POINTER(c_double), POINTER(POINTER(svm_node))]
_fields_ = genFields(_names, _types)
def __init__(self, y, x, isKernel=None):
if len(y) != len(x):
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_svm_nodearray(xi,isKernel=isKernel)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_double * l)()
for i, yi in enumerate(y): self.y[i] = yi
self.x = (POINTER(svm_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
class svm_parameter(Structure):
_names = ["svm_type", "kernel_type", "degree", "gamma", "coef0",
"cache_size", "eps", "C", "nr_weight", "weight_label", "weight",
"nu", "p", "shrinking", "probability"]
_types = [c_int, c_int, c_int, c_double, c_double,
c_double, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double),
c_double, c_double, c_int, c_int]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
if options == None:
options = ''
self.parse_options(options)
def __str__(self):
s = ''
attrs = svm_parameter._names + list(self.__dict__.keys())
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
s += (' %s: %s\n' % (attr, val))
s = s.strip()
return s
def set_to_default_values(self):
self.svm_type = C_SVC;
self.kernel_type = RBF
self.degree = 3
self.gamma = 0
self.coef0 = 0
self.nu = 0.5
self.cache_size = 100
self.C = 1
self.eps = 0.001
self.p = 0.1
self.shrinking = 1
self.probability = 0
self.nr_weight = 0
self.weight_label = (c_int*0)()
self.weight = (c_double*0)()
self.cross_validation = False
self.nr_fold = 0
self.print_func = cast(None, PRINT_STRING_FUN)
def parse_options(self, options):
if isinstance(options, list):
argv = options
elif isinstance(options, str):
argv = options.split()
else:
raise TypeError("arg 1 should be a list or a str.")
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv):
if argv[i] == "-s":
i = i + 1
self.svm_type = int(argv[i])
elif argv[i] == "-t":
i = i + 1
self.kernel_type = int(argv[i])
elif argv[i] == "-d":
i = i + 1
self.degree = int(argv[i])
elif argv[i] == "-g":
i = i + 1
self.gamma = float(argv[i])
elif argv[i] == "-r":
i = i + 1
self.coef0 = float(argv[i])
elif argv[i] == "-n":
i = i + 1
self.nu = float(argv[i])
elif argv[i] == "-m":
i = i + 1
self.cache_size = float(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-p":
i = i + 1
self.p = float(argv[i])
elif argv[i] == "-h":
i = i + 1
self.shrinking = int(argv[i])
elif argv[i] == "-b":
i = i + 1
self.probability = int(argv[i])
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2:
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
nr_weight = self.nr_weight
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
else:
raise ValueError("Wrong options")
i += 1
libsvm.svm_set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
class svm_model(Structure):
_names = ['param', 'nr_class', 'l', 'SV', 'sv_coef', 'rho',
'probA', 'probB', 'sv_indices', 'label', 'nSV', 'free_sv']
_types = [svm_parameter, c_int, c_int, POINTER(POINTER(svm_node)),
POINTER(POINTER(c_double)), POINTER(c_double),
POINTER(c_double), POINTER(c_double), POINTER(c_int),
POINTER(c_int), POINTER(c_int), c_int]
_fields_ = genFields(_names, _types)
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
libsvm.svm_free_and_destroy_model(pointer(self))
def get_svm_type(self):
return libsvm.svm_get_svm_type(self)
def get_nr_class(self):
return libsvm.svm_get_nr_class(self)
def get_svr_probability(self):
return libsvm.svm_get_svr_probability(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
libsvm.svm_get_labels(self, labels)
return labels[:nr_class]
def get_sv_indices(self):
total_sv = self.get_nr_sv()
sv_indices = (c_int * total_sv)()
libsvm.svm_get_sv_indices(self, sv_indices)
return sv_indices[:total_sv]
def get_nr_sv(self):
return libsvm.svm_get_nr_sv(self)
def is_probability_model(self):
return (libsvm.svm_check_probability_model(self) == 1)
def get_sv_coef(self):
return [tuple(self.sv_coef[j][i] for j in xrange(self.nr_class - 1))
for i in xrange(self.l)]
def get_SV(self):
result = []
for sparse_sv in self.SV[:self.l]:
row = dict()
i = 0
while True:
row[sparse_sv[i].index] = sparse_sv[i].value
if sparse_sv[i].index == -1:
break
i += 1
result.append(row)
return result
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> svm_model
Convert a ctypes POINTER(svm_model) to a Python svm_model
"""
if bool(model_ptr) == False:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
fillprototype(libsvm.svm_train, POINTER(svm_model), [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_cross_validation, None, [POINTER(svm_problem), POINTER(svm_parameter), c_int, POINTER(c_double)])
fillprototype(libsvm.svm_save_model, c_int, [c_char_p, POINTER(svm_model)])
fillprototype(libsvm.svm_load_model, POINTER(svm_model), [c_char_p])
fillprototype(libsvm.svm_get_svm_type, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_nr_class, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_labels, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_sv_indices, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_nr_sv, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_svr_probability, c_double, [POINTER(svm_model)])
fillprototype(libsvm.svm_predict_values, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_predict, c_double, [POINTER(svm_model), POINTER(svm_node)])
fillprototype(libsvm.svm_predict_probability, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_free_model_content, None, [POINTER(svm_model)])
fillprototype(libsvm.svm_free_and_destroy_model, None, [POINTER(POINTER(svm_model))])
fillprototype(libsvm.svm_destroy_param, None, [POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_parameter, c_char_p, [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_probability_model, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_set_print_string_function, None, [PRINT_STRING_FUN])
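# ----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of upstream libsvm).
# A tiny end-to-end call through the raw ctypes bindings; real code normally
# goes through the higher-level svmutil helpers instead. The toy data and the
# parameter string below are made up.
if __name__ == '__main__':
    toy_y = [1, -1]
    toy_x = [{1: 1.0, 3: -0.5}, {1: -1.0, 3: 0.5}]
    prob = svm_problem(toy_y, toy_x)
    param = svm_parameter('-s 0 -t 2 -g 0.5 -c 1 -q')
    model = toPyModel(libsvm.svm_train(prob, param))
    print(model.get_nr_class())  # expected: 2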
|
mit
| 596,766,394,582,458,200 | 28.280488 | 122 | 0.637755 | false |
peteboyd/lammps_interface
|
lammps_interface/dreiding.py
|
1
|
2689
|
"""
Parameters for DREIDING force field.
"""
DREIDING_DATA = {
# Atom, R1, theta, R0, D0, phi, S
"H_": (0.33, 180.0, 3.195, 0.0152, 0.0, 12.382),
"H__HB": (0.33, 180.0, 3.195, 0.0001, 0.0, 12.0),
"H__b": (0.510, 90.0, 3.195, 0.0152, 0.0, 12.382),
"B_3": (0.880, 109.471, 4.02, 0.095, 0.0, 14.23),
"B_2": (0.790, 120.0, 4.02, 0.095, 0.0, 14.23),
"C_3": (0.770, 109.471, 3.8983, 0.0951, 0.0, 14.034),
"C_R": (0.700, 120.0, 3.8983, 0.0951, 0.0, 14.034),
"C_2": (0.670, 120.0, 3.8983, 0.0951, 0.0, 14.034),
"C_1": (0.602, 180.0, 3.8983, 0.0951, 0.0, 14.034),
"N_3": (0.702, 106.7, 3.6621, 0.0774, 0.0, 13.843),
"N_R": (0.650, 120.0, 3.6621, 0.0774, 0.0, 13.843),
"N_2": (0.615, 120.0, 3.6621, 0.0744, 0.0, 13.843),
"N_1": (0.556, 180.0, 3.6621, 0.0744, 0.0, 13.843),
"O_3": (0.660, 104.51, 3.4046, 0.0957, 0.0, 13.483),
"O_R": (0.660, 120.0, 3.4046, 0.0957, 0.0, 13.483),
"O_2": (0.560, 120.0, 3.4046, 0.0957, 0.0, 13.483),
"O_1": (0.528, 180.0, 3.4046, 0.0957, 0.0, 13.483),
"F_": (0.611, 180.0, 3.4720, 0.0725, 0.0, 14.444),
"Al3": (1.047, 109.471, 4.39, 0.31, 0.0, 12.0),
"Si3": (0.937, 109.471, 4.27, 0.31, 0.0, 12.0),
"P_3": (0.890, 93.3, 4.15, 0.32, 0.0, 12.0),
"S_3": (1.040, 92.1, 4.03, 0.344, 0.0, 12.0),
"Cl": (0.997, 180.0, 3.9503, 0.2833,0.0, 13.861),
"Ga3": (1.210, 109.471, 4.39, 0.4, 0.0, 12.0),
"Ge3": (1.210, 109.471, 4.27, 0.4, 0.0, 12.0),
"As3": (1.210, 92.1, 4.15, 0.41, 0.0, 12.0),
"Se3": (1.210, 90.6, 4.03, 0.43, 0.0, 12.0),
"Br": (1.167, 180.0, 3.95, 0.37, 0.0, 12.0),
"In3": (1.390, 109.471, 4.59, 0.55, 0.0, 12.0),
"Sn3": (1.373, 109.471, 4.47, 0.55, 0.0, 12.0),
"Sb3": (1.432, 91.6, 4.35, 0.55, 0.0, 12.0),
"Te3": (1.280, 90.3, 4.23, 0.57, 0.0, 12.0),
"I_": (1.360, 180.0, 4.15, 0.51, 0.0, 12.0),
"Na": (1.860, 90.0, 3.144, 0.5, 0.0, 12.0),
"Ca": (1.940, 90.0, 3.472, 0.05, 0.0, 12.0),
"Fe": (1.285, 90.0, 4.54, 0.055, 0.0, 12.0),
"Zn": (1.330, 109.471, 4.54, 0.055, 0.0, 12.0),
"Cu": (1.302, 90.0, 4.54, 0.055, 0.0, 12.0), # R1 taken from UFF, the rest are just a copy of DREIDING Zn
"Ni": (1.164, 90.0, 4.54, 0.055, 0.0, 12.0), # R1 taken from UFF, the rest are just a copy of DREIDING Zn
"Mg": (1.421, 90.0, 4.54, 0.055, 0.0, 12.0), # R1 taken from UFF, the rest are just a copy of DREIDING Zn
# Implicit hydrogens
"C_R1": (0.700, 120.0, 4.23, 0.1356, 0.0, 14.034),
"C_34": (0.770, 109.471, 4.2370, 0.3016, 0.0, 12.0),
"C_33": (0.770, 109.471, 4.1524, 0.25, 0.0, 12.0),
"C_32": (0.770, 109.471, 4.0677, 0.1984, 0.0, 12.0),
"C_31": (0.770, 109.471, 3.983, 0.1467, 54.74, 12.0)
}
|
mit
| 7,240,741,347,678,713,000 | 49.735849 | 109 | 0.486798 | false |
wtpayne/hiai
|
a3_src/h70_internal/da/check/pytype.py
|
1
|
3135
|
# -*- coding: utf-8 -*-
"""
Module containing mypy type checking coroutines.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import itertools
import os
import subprocess
import da.lwc.file
import da.util
# -----------------------------------------------------------------------------
@da.util.coroutine
def coro(dirpath_lwc_root, build_monitor):
"""
    Send errors to build_monitor for supplied files that fail mypy type checking.
"""
filepath_mypy = da.lwc.env.cli_path(
dependency_id = 'mypy',
application_name = 'mypy',
dirpath_lwc_root = dirpath_lwc_root)
# TODO: Figure out a way of correctly
# installing MyPy so that we do
# not need this typeshed path
# hack business.
dirpath_typeshed = os.path.abspath(
os.path.join(
os.path.dirname(filepath_mypy),
'../lib/mypy/typeshed'))
    # Run main file-processing loop: receive file
# paths from outside the coroutine and send
# them one at a time to mypy.
#
while True:
build_unit = (yield)
filepath = build_unit['filepath']
# Ignore non-python design documents.
if not da.lwc.file.is_python_file(filepath):
continue
# Ignore experimental design documents.
if da.lwc.file.is_experimental(filepath):
continue
# Call mypy
#
# --junit-xml=FILE
mypy_command = [
filepath_mypy,
'--silent-imports',
'--show-column-numbers',
'--custom-typeshed-dir={dir}'.format(dir = dirpath_typeshed),
filepath]
process = subprocess.Popen(mypy_command,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
out, err = process.communicate()
exitcode = process.returncode
        # Temporarily disabled while I figure out how
# to parse MyPy output / Use the MyPy API.
#
if exitcode is None:
for line in itertools.chain(out, err):
build_monitor.report_nonconformity(
tool = 'da.check.pytype',
msg_id = '999',
msg = line,
path = filepath)
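# -----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original module).
# The stub monitor below is an assumption standing in for the real build
# monitor, and we assume the da.util.coroutine decorator primes the generator
# so that send() can be called straight away.
if __name__ == '__main__':
    class _StubBuildMonitor(object):
        def report_nonconformity(self, **kwargs):
            print(kwargs)
    checker = coro(dirpath_lwc_root='.', build_monitor=_StubBuildMonitor())
    checker.send({'filepath': __file__})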
|
apache-2.0
| 4,167,324,877,451,305,000 | 28.857143 | 79 | 0.556619 | false |
dcalacci/Interactive_estimation
|
game/control/models.py
|
1
|
1069
|
from django.db import models
from django.conf import settings
# from game.round.models import Round
# Create your models here.
class Control(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, unique=True)
start_time = models.DateTimeField(auto_now_add=True, null=True)
end_time = models.DateTimeField(null=True)
score = models.DecimalField(max_digits=8, decimal_places=4, default=0.00)
instruction = models.BooleanField(default=False)
exist_survey = models.BooleanField(default=False)
check = models.PositiveIntegerField(default=0)
check_done = models.BooleanField(default=False)
def __str__(self):
return self.user.username
class Survey(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
game = models.OneToOneField(Control)
age = models.PositiveSmallIntegerField(null=True)
gender = models.TextField(null=True)
feedback = models.TextField(null=True)
def __str__(self):
return self.user.username
|
mit
| 7,463,442,458,747,134,000 | 33.483871 | 96 | 0.733396 | false |
abisee/pointer-generator
|
data.py
|
1
|
11262
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to read the train/eval/test data from file and process it, and read the vocab data from file and process it"""
import glob
import random
import struct
import csv
from tensorflow.core.example import example_pb2
# <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids.
SENTENCE_START = '<s>'
SENTENCE_END = '</s>'
PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence
UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words
START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence
STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences
# Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file.
class Vocab(object):
"""Vocabulary class for mapping between words and ids (integers)"""
def __init__(self, vocab_file, max_size):
"""Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file.
Args:
vocab_file: path to the vocab file, which is assumed to contain "<word> <frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though.
max_size: integer. The maximum size of the resulting Vocabulary."""
self._word_to_id = {}
self._id_to_word = {}
self._count = 0 # keeps track of total number of words in the Vocab
# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.
for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
# Read the vocab file and add words up to max_size
with open(vocab_file, 'r') as vocab_f:
for line in vocab_f:
pieces = line.split()
if len(pieces) != 2:
print 'Warning: incorrectly formatted line in vocabulary file: %s\n' % line
continue
w = pieces[0]
if w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
raise Exception('<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % w)
if w in self._word_to_id:
raise Exception('Duplicated word in vocabulary file: %s' % w)
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
if max_size != 0 and self._count >= max_size:
print "max_size of vocab was specified as %i; we now have %i words. Stopping reading." % (max_size, self._count)
break
print "Finished constructing vocabulary of %i total words. Last word added: %s" % (self._count, self._id_to_word[self._count-1])
def word2id(self, word):
"""Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV."""
if word not in self._word_to_id:
return self._word_to_id[UNKNOWN_TOKEN]
return self._word_to_id[word]
def id2word(self, word_id):
"""Returns the word (string) corresponding to an id (integer)."""
if word_id not in self._id_to_word:
raise ValueError('Id not found in vocab: %d' % word_id)
return self._id_to_word[word_id]
def size(self):
"""Returns the total size of the vocabulary"""
return self._count
def write_metadata(self, fpath):
"""Writes metadata file for Tensorboard word embedding visualizer as described here:
https://www.tensorflow.org/get_started/embedding_viz
Args:
fpath: place to write the metadata file
"""
print "Writing word embedding metadata file to %s..." % (fpath)
with open(fpath, "w") as f:
fieldnames = ['word']
writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
for i in xrange(self.size()):
writer.writerow({"word": self._id_to_word[i]})
def example_generator(data_path, single_pass):
"""Generates tf.Examples from data files.
Binary data format: <length><blob>. <length> represents the byte size
of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains
the tokenized article text and summary.
Args:
data_path:
Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
single_pass:
Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
Yields:
Deserialized tf.Example.
"""
while True:
filelist = glob.glob(data_path) # get the list of datafiles
assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty
if single_pass:
filelist = sorted(filelist)
else:
random.shuffle(filelist)
for f in filelist:
reader = open(f, 'rb')
while True:
len_bytes = reader.read(8)
if not len_bytes: break # finished reading this file
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
yield example_pb2.Example.FromString(example_str)
if single_pass:
print "example_generator completed reading all datafiles. No more data."
break
def article2ids(article_words, vocab):
"""Map the article words to their ids. Also return a list of OOVs in the article.
Args:
article_words: list of words (strings)
vocab: Vocabulary object
Returns:
ids:
A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
oovs:
A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers."""
ids = []
oovs = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in article_words:
i = vocab.word2id(w)
if i == unk_id: # If w is OOV
if w not in oovs: # Add to list of OOVs
oovs.append(w)
oov_num = oovs.index(w) # This is 0 for the first article OOV, 1 for the second article OOV...
ids.append(vocab.size() + oov_num) # This is e.g. 50000 for the first article OOV, 50001 for the second...
else:
ids.append(i)
return ids, oovs
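# ----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original file).
# Builds a throwaway three-word vocab file, in the "<word> <frequency>" format
# described in the Vocab docstring, to show how every occurrence of the same
# in-article OOV word maps to one temporary id just past vocab.size().
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.vocab', delete=False) as tmp:
        tmp.write('the 100\nstorm 40\nhits 25\n')
    demo_vocab = Vocab(tmp.name, max_size=0)
    demo_ids, demo_oovs = article2ids('midwinter storm hits midwinter'.split(), demo_vocab)
    print(demo_ids)   # both 'midwinter' tokens share the temporary id vocab.size()
    print(demo_oovs)  # ['midwinter']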
def abstract2ids(abstract_words, vocab, article_oovs):
"""Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers.
Args:
abstract_words: list of words (strings)
vocab: Vocabulary object
article_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers
Returns:
ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers. Out-of-article OOV words are mapped to the UNK token id."""
ids = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in abstract_words:
i = vocab.word2id(w)
if i == unk_id: # If w is an OOV word
if w in article_oovs: # If w is an in-article OOV
vocab_idx = vocab.size() + article_oovs.index(w) # Map to its temporary article OOV number
ids.append(vocab_idx)
else: # If w is an out-of-article OOV
ids.append(unk_id) # Map to the UNK token id
else:
ids.append(i)
return ids
def outputids2words(id_list, vocab, article_oovs):
"""Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string (applicable in pointer-generator mode).
Args:
id_list: list of ids (integers)
vocab: Vocabulary object
article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids (that have been assigned in pointer-generator mode), or None (in baseline mode)
Returns:
words: list of words (strings)
"""
words = []
for i in id_list:
try:
w = vocab.id2word(i) # might be [UNK]
except ValueError as e: # w is OOV
assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
article_oov_idx = i - vocab.size()
try:
w = article_oovs[article_oov_idx]
except ValueError as e: # i doesn't correspond to an article oov
raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs)))
words.append(w)
return words
def abstract2sents(abstract):
"""Splits abstract text from datafile into list of sentences.
Args:
abstract: string containing <s> and </s> tags for starts and ends of sentences
Returns:
sents: List of sentence strings (no tags)"""
cur = 0
sents = []
while True:
try:
start_p = abstract.index(SENTENCE_START, cur)
end_p = abstract.index(SENTENCE_END, start_p + 1)
cur = end_p + len(SENTENCE_END)
sents.append(abstract[start_p+len(SENTENCE_START):end_p])
except ValueError as e: # no more sentences
return sents
def show_art_oovs(article, vocab):
"""Returns the article string, highlighting the OOVs by placing __underscores__ around them"""
unk_token = vocab.word2id(UNKNOWN_TOKEN)
words = article.split(' ')
words = [("__%s__" % w) if vocab.word2id(w)==unk_token else w for w in words]
out_str = ' '.join(words)
return out_str
def show_abs_oovs(abstract, vocab, article_oovs):
"""Returns the abstract string, highlighting the article OOVs with __underscores__.
If a list of article_oovs is provided, non-article OOVs are differentiated like !!__this__!!.
Args:
abstract: string
vocab: Vocabulary object
article_oovs: list of words (strings), or None (in baseline mode)
"""
unk_token = vocab.word2id(UNKNOWN_TOKEN)
words = abstract.split(' ')
new_words = []
for w in words:
if vocab.word2id(w) == unk_token: # w is oov
if article_oovs is None: # baseline mode
new_words.append("__%s__" % w)
else: # pointer-generator mode
if w in article_oovs:
new_words.append("__%s__" % w)
else:
new_words.append("!!__%s__!!" % w)
else: # w is in-vocab word
new_words.append(w)
out_str = ' '.join(new_words)
return out_str
|
apache-2.0
| 8,850,772,225,426,416,000 | 39.804348 | 216 | 0.666223 | false |
cherry-wb/viper
|
viper/common/network.py
|
1
|
1068
|
import socket
import urllib2
import socks
#import lib.socks as socks
from viper.common.out import print_error
def download(url, tor=False):
def create_connection(address, timeout=None, source_address=None):
sock = socks.socksocket()
sock.connect(address)
return sock
import socket
if tor:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', 9050)
socket.socket = socks.socksocket
socket.create_connection = create_connection
try:
req = urllib2.Request(url)
req.add_header('User-agent', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)')
res = urllib2.urlopen(req)
data = res.read()
except urllib2.HTTPError as e:
print_error(e)
except urllib2.URLError as e:
if tor and e.reason.errno == 111:
print_error("Connection refused, maybe Tor is not running?")
else:
print_error(e)
except Exception as e:
print_error("Failed download: {0}".format(e))
else:
return data
|
bsd-3-clause
| 8,534,516,328,192,233,000 | 27.105263 | 103 | 0.632022 | false |
DBrianKimmel/PyHouse
|
Project/src/Modules/Core/Drivers/Serial/_test/test_Serial_driver.py
|
1
|
2989
|
"""
@name: Modules/Core/Drivers/Serial/_test/test_Serial_driver.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013_2019 by D. Brian Kimmel
@license: MIT License
@note: Created on May 4, 2013
@summary: This module is for testing local node data.
Passed all 9 tests - DBK - 2019-10-13
"""
__updated__ = '2019-10-13'
# Import system type stuff
from twisted.trial import unittest
from ruamel.yaml import YAML
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.Core.Config.config_tools import Api as configApi
from Modules.House.Lighting.controllers import
from Modules.Core.Drivers.Serial import Serial_driver
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
TEST_YAML = """\
Controllers:
- Name: TestPlm
Comment: Portable, Goes where I do.
Family:
Name: Insteon
Type: Plm
Address: 49.F9.E7
Interface:
Type: Serial
Baud: 19200,8,N,1
Port: /dev/ttyUSB0
Host: Laptop-05
"""
class SetupMixin:
"""
"""
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
self.m_api = configApi(self.m_pyhouse_obj)
l_yaml = YAML()
self.m_test_config = l_yaml.load(TEST_YAML)
class A0(unittest.TestCase):
def test_00_Print(self):
_x = PrettyFormatAny.form('_test', 'title') # so it is defined when printing is cleaned up.
print('Id: test_Serial_driver')
class A1_setup(SetupMixin, unittest.TestCase):
""" Test SetupMixin
"""
def test_00(self):
print('A1-00')
pass
def test_01_yaml(self):
l_yaml = self.m_test_config
# print('A1-01-A - Yaml: {}'.format(l_yaml))
self.assertIsNotNone(l_yaml)
class B1_Config(SetupMixin, unittest.TestCase):
"""
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_yaml = self.m_test_config['Controllers'][0]['Interface']
def test_00(self):
print('B1-00')
pass
def test_01_Interface(self):
""" Test we have Interface: section
"""
# print('B1-01-A - Yaml {}'.format(self.m_yaml))
self.assertEqual(self.m_yaml['Type'], 'Serial')
def test_02_Baud(self):
"""
"""
l_baud = self.m_yaml['Baud']
print('B1-02-A - Yaml {}'.format(l_baud))
self.assertEqual(l_baud, '19200,8,N,1')
class C1_Parsed(SetupMixin, unittest.TestCase):
"""
"""
def setUp(self):
SetupMixin.setUp(self)
def test_00(self):
print('C1-00')
pass
def test_01_Port(self):
""" test find_port
"""
l_port = Serial_driver.FindPort().get_port()
print(PrettyFormatAny.form(l_port, 'Port'))
class D1_Driver(SetupMixin, unittest.TestCase):
"""
"""
def setUp(self):
SetupMixin.setUp(self)
def test_00(self):
print('D1-00')
pass
# ## END
|
mit
| -977,156,327,731,897,900 | 21.816794 | 100 | 0.602543 | false |
frossigneux/python-kwstandbyclient
|
kwstandbyclient/utils.py
|
1
|
5680
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import re
import six
from kwstandbyclient.openstack.common import timeutils
from kwstandbyclient import exception
from kwstandbyclient.openstack.common.gettextutils import _ # noqa
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
ELAPSED_TIME_REGEX = '^(\d+)([s|m|h|d])$'
def env(*args, **kwargs):
"""Returns the first environment variable set.
if none are non-empty, defaults to '' or keyword arg default.
"""
for v in args:
value = os.environ.get(v)
if value:
return value
return kwargs.get('default', '')
def to_primitive(value):
if isinstance(value, list) or isinstance(value, tuple):
o = []
for v in value:
o.append(to_primitive(v))
return o
elif isinstance(value, dict):
o = {}
for k, v in six.iteritems(value):
o[k] = to_primitive(v)
return o
elif isinstance(value, datetime.datetime):
return str(value)
elif hasattr(value, 'iteritems'):
return to_primitive(dict(six.iteritems(value)))
elif hasattr(value, '__iter__'):
return to_primitive(list(value))
else:
return value
def dumps(value, indent=None):
try:
return json.dumps(value, indent=indent)
except TypeError:
pass
return json.dumps(to_primitive(value))
def get_item_properties(item, fields, mixed_case_fields=None, formatters=None):
"""Return a tuple containing the item properties.
:param item: a single item resource (e.g. Server, Tenant, etc)
:param fields: tuple of strings with the desired field names
:param mixed_case_fields: tuple of field names to preserve case
:param formatters: dictionary mapping field names to callables
to format the values
"""
row = []
if mixed_case_fields is None:
mixed_case_fields = []
if formatters is None:
formatters = {}
for field in fields:
if field in formatters:
row.append(formatters[field](item))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
if not hasattr(item, field_name) and isinstance(item, dict):
data = item[field_name]
else:
data = getattr(item, field_name, '')
if data is None:
data = ''
row.append(data)
return tuple(row)
def find_resource_id_by_name_or_id(client, resource, name_or_id):
resource_manager = getattr(client, resource)
is_id = re.match(UUID_PATTERN, name_or_id)
if is_id:
resources = resource_manager.list()
for resource in resources:
if resource['id'] == name_or_id:
return name_or_id
raise exception.KwstandbyClientException('No resource found with ID %s' %
name_or_id)
return _find_resource_id_by_name(client, resource, name_or_id)
def _find_resource_id_by_name(client, resource, name):
resource_manager = getattr(client, resource)
resources = resource_manager.list()
named_resources = []
for resource in resources:
if resource['name'] == name:
named_resources.append(resource['id'])
if len(named_resources) > 1:
raise exception.NoUniqueMatch(message="There are more than one "
"appropriate resources for the "
"name '%s' and type '%s'" %
(name, resource))
elif named_resources:
return named_resources[0]
else:
message = "Unable to find resource with name '%s'" % name
raise exception.KwstandbyClientException(message=message,
status_code=404)
def from_elapsed_time_to_seconds(elapsed_time):
"""Return the amount of seconds based on the time_option parameter
:param: time_option: a string that matches ELAPSED_TIME_REGEX
"""
is_elapsed_time = re.match(ELAPSED_TIME_REGEX, elapsed_time)
if is_elapsed_time is None:
raise exception.KwstandbyClientException(_("Invalid time "
"format for option."))
elapsed_time_value = int(is_elapsed_time.group(1))
elapsed_time_option = is_elapsed_time.group(2)
seconds = {
's': lambda x:
timeutils.total_seconds(datetime.timedelta(seconds=x)),
'm': lambda x:
timeutils.total_seconds(datetime.timedelta(minutes=x)),
'h': lambda x:
timeutils.total_seconds(datetime.timedelta(hours=x)),
'd': lambda x:
timeutils.total_seconds(datetime.timedelta(days=x)),
}[elapsed_time_option](elapsed_time_value)
# the above code returns a "float"
return int(seconds)
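# Illustrative sketch (added for exposition; not part of the original module):
# the "<number><unit>" strings accepted by ELAPSED_TIME_REGEX, with s/m/h/d
# units, converted to seconds.
if __name__ == '__main__':
    for sample in ('45s', '10m', '2h', '1d'):
        print('%s -> %d seconds' % (sample, from_elapsed_time_to_seconds(sample)))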
|
apache-2.0
| 1,058,510,070,111,552,500 | 33.634146 | 81 | 0.601761 | false |
jonhadfield/ansible-lookups
|
aws_subnet_ids_from_names.py
|
1
|
1411
|
# (c) 2017, Jon Hadfield <jon@lessknown.co.uk>
"""
Description: This lookup takes an AWS region and a list of one or more
subnet names and returns a list of matching subnet ids.
Example Usage:
{{ lookup('aws_subnet_ids_from_names', ('eu-west-1', ['subnet1', 'subnet2'])) }}
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
try:
import boto3
import botocore
except ImportError:
raise AnsibleError("aws_subnet_ids_from_names lookup cannot be run without boto installed")
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if isinstance(terms, basestring):
terms = [terms]
subnet_ids = []
region = terms[0][0]
subnet_names = terms[0][1]
session = boto3.session.Session(region_name=region)
try:
ec2_client = session.client('ec2')
except botocore.exceptions.NoRegionError:
raise AnsibleError("AWS region not specified.")
subnet_filter = [{'Name': 'tag:Name', 'Values': subnet_names}]
result = ec2_client.describe_subnets(Filters=subnet_filter)
subnets = result.get('Subnets')
if subnets:
for subnet in subnets:
subnet_ids.append(subnet.get('SubnetId').encode('utf-8'))
return subnet_ids
|
mit
| -5,266,085,152,718,669,000 | 33.414634 | 95 | 0.651311 | false |
sysid/kg
|
quora/Ensemble_CNN_TD_Quora.py
|
1
|
12948
|
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# In[2]:
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
# In[3]:
train.head()
# In[4]:
test.head()
# In[5]:
print(train.shape)
print(test.shape)
# In[6]:
print(train.isnull().sum())
print(test.isnull().sum())
# In[7]:
train = train.fillna('empty')
test = test.fillna('empty')
# In[8]:
print(train.isnull().sum())
print(test.isnull().sum())
# In[9]:
test.head()
# In[10]:
for i in range(6):
print(train.question1[i])
print(train.question2[i])
print()
# In[17]:
def text_to_wordlist(text, remove_stopwords=False, stem_words=False):
# Clean the text, with the option to remove stopwords and to stem words.
# Convert words to lower case and split them
text = text.lower().split()
# Optionally remove stop words (true by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
text = [w for w in text if not w in stops]
text = " ".join(text)
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\'+-=]", " ", text)
text = re.sub(r"\'s", " 's ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", " cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"\s{2,}", " ", text)
# Shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
# Return a list of words
return(text)
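# In[ ]:
# Illustrative check (added for exposition; not part of the original notebook):
# a quick look at what the cleaning step does to a single question.
print(text_to_wordlist("What's the best way to learn C++?"))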
# In[18]:
def process_questions(question_list, questions, question_list_name, dataframe):
# function to transform questions and display progress
for question in questions:
question_list.append(text_to_wordlist(question))
if len(question_list) % 100000 == 0:
progress = len(question_list)/len(dataframe) * 100
print("{} is {}% complete.".format(question_list_name, round(progress, 1)))
# In[19]:
train_question1 = []
process_questions(train_question1, train.question1, 'train_question1', train)
# In[35]:
train_question2 = []
process_questions(train_question2, train.question2, 'train_question2', train)
# In[36]:
test_question1 = []
process_questions(test_question1, test.question1, 'test_question1', test)
# In[37]:
test_question2 = []
process_questions(test_question2, test.question2, 'test_question2', test)
# # Using Keras
# In[38]:
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import datetime, time, json
from keras.models import Sequential
from keras.layers import Embedding, Dense, Dropout, Reshape, Merge, BatchNormalization, TimeDistributed, Lambda, Activation, LSTM, Flatten, Bidirectional, Convolution1D, GRU, MaxPooling1D, Convolution2D
from keras.regularizers import l2
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras import backend as K
from sklearn.model_selection import train_test_split
from keras.optimizers import SGD
from collections import defaultdict
# In[39]:
# Count how many times each distinct word appears across all of the questions
word_count = defaultdict(int)
for question in train_question1:
    for word in question.split():
        word_count[word] += 1
print("train_question1 is complete.")
for question in train_question2:
    for word in question.split():
        word_count[word] += 1
print("train_question2 is complete.")
for question in test_question1:
    for word in question.split():
        word_count[word] += 1
print("test_question1 is complete.")
for question in test_question2:
    for word in question.split():
        word_count[word] += 1
print("test_question2 is complete.")
print("Total number of unique words:", len(word_count))
# In[40]:
# Find the length of questions
lengths = []
for question in train_question1:
lengths.append(len(question.split()))
for question in train_question2:
lengths.append(len(question.split()))
# Create a dataframe so that the values can be inspected
lengths = pd.DataFrame(lengths, columns=['counts'])
# In[41]:
lengths.counts.describe()
# In[42]:
np.percentile(lengths.counts, 99.5)
# In[43]:
num_words = 200000
train_questions = train_question1 + train_question2
tokenizer = Tokenizer(nb_words = num_words)
tokenizer.fit_on_texts(train_questions)
print("Fitting is compelte.")
train_question1_word_sequences = tokenizer.texts_to_sequences(train_question1)
print("train_question1 is complete.")
train_question2_word_sequences = tokenizer.texts_to_sequences(train_question2)
print("train_question2 is complete")
# In[44]:
test_question1_word_sequences = tokenizer.texts_to_sequences(test_question1)
print("test_question1 is complete.")
test_question2_word_sequences = tokenizer.texts_to_sequences(test_question2)
print("test_question2 is complete.")
# In[45]:
word_index = tokenizer.word_index
print("Words in index: %d" % len(word_index))
# In[46]:
# Pad the questions so that they all have the same length.
max_question_len = 37
train_q1 = pad_sequences(train_question1_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("train_q1 is complete.")
train_q2 = pad_sequences(train_question2_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("train_q2 is complete.")
# In[47]:
test_q1 = pad_sequences(test_question1_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("test_q1 is complete.")
test_q2 = pad_sequences(test_question2_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("test_q2 is complete.")
# In[48]:
y_train = train.is_duplicate
# In[49]:
# Load GloVe to use pretrained vectors
# From this link: https://nlp.stanford.edu/projects/glove/
embeddings_index = {}
with open('glove.840B.300d.txt', encoding='utf-8') as f:
for line in f:
values = line.split(' ')
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = embedding
print('Word embeddings:', len(embeddings_index))
# In[50]:
# Need to use 300 for embedding dimensions to match GloVe vectors.
embedding_dim = 300
nb_words = len(word_index)
word_embedding_matrix = np.zeros((nb_words + 1, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
word_embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
# In[66]:
units = 150
dropout = 0.25
nb_filter = 32
filter_length = 3
embedding_dim = 300
model1 = Sequential()
model1.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model1.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Flatten())
model2 = Sequential()
model2.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model2.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Flatten())
model3 = Sequential()
model3.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model3.add(TimeDistributed(Dense(embedding_dim)))
model3.add(BatchNormalization())
model3.add(Activation('relu'))
model3.add(Dropout(dropout))
model3.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, )))
model4 = Sequential()
model4.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model4.add(TimeDistributed(Dense(embedding_dim)))
model4.add(BatchNormalization())
model4.add(Activation('relu'))
model4.add(Dropout(dropout))
model4.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, )))
modela = Sequential()
modela.add(Merge([model1, model2], mode='concat'))
modela.add(Dense(units))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))
modela.add(Dense(units))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))
modelb = Sequential()
modelb.add(Merge([model3, model4], mode='concat'))
modelb.add(Dense(units))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))
modelb.add(Dense(units))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))
model = Sequential()
model.add(Merge([modela, modelb], mode='concat'))
model.add(Dense(units))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(units))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(1))
model.add(BatchNormalization())
model.add(Activation('sigmoid'))
#sgd = SGD(lr=0.01, decay=5e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[67]:
save_best_weights = 'question_pairs_weights.h5'
t0 = time.time()
callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True),
EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')]
history = model.fit([train_q1, train_q2],
y_train,
batch_size=200,
nb_epoch=100,
validation_split=0.1,
verbose=True,
shuffle=True,
callbacks=callbacks)
t1 = time.time()
print("Minutes elapsed: %f" % ((t1 - t0) / 60.))
# In[68]:
summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ],
'train_acc': history.history['acc'],
'valid_acc': history.history['val_acc'],
'train_loss': history.history['loss'],
'valid_loss': history.history['val_loss']})
# In[69]:
summary_stats
# In[70]:
plt.plot(summary_stats.train_loss)
plt.plot(summary_stats.valid_loss)
plt.show()
# In[71]:
min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss']))
print('Minimum loss at epoch', '{:d}'.format(idx+1), '=', '{:.4f}'.format(min_loss))
min_loss = round(min_loss, 4)
# In[72]:
model.load_weights(save_best_weights)
predictions = model.predict([test_q1, test_q2], verbose = True)
# In[73]:
#Create submission
submission = pd.DataFrame(predictions, columns=['is_duplicate'])
submission.insert(0, 'test_id', test.test_id)
file_name = 'submission_{}.csv'.format(min_loss)
submission.to_csv(file_name, index=False)
# In[74]:
submission.head(10)
|
mit
| -2,846,399,667,729,057,300 | 24.588933 | 252 | 0.631063 | false |
tsuna/tcollector
|
tcollector.py
|
1
|
58569
|
#!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2010 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
#
# tcollector.py
#
"""Simple manager for collection scripts that run and gather data.
The tcollector gathers the data and sends it to the TSD for storage."""
#
# by Mark Smith <msmith@stumbleupon.com>.
#
import atexit
import errno
import fcntl
import logging
import os
import random
import re
import signal
import socket
import subprocess
import sys
import threading
import time
import json
import urllib2
import base64
from logging.handlers import RotatingFileHandler
from Queue import Queue
from Queue import Empty
from Queue import Full
from optparse import OptionParser
# global variables.
COLLECTORS = {}
GENERATION = 0
DEFAULT_LOG = '/var/log/tcollector.log'
LOG = logging.getLogger('tcollector')
ALIVE = True
# If the SenderThread catches more than this many consecutive uncaught
# exceptions, something is not right and tcollector will shutdown.
# Hopefully some kind of supervising daemon will then restart it.
MAX_UNCAUGHT_EXCEPTIONS = 100
DEFAULT_PORT = 4242
MAX_REASONABLE_TIMESTAMP = 1600000000 # Good until September 2020 :)
# How long to wait for datapoints before assuming
# a collector is dead and restarting it
ALLOWED_INACTIVITY_TIME = 600 # seconds
MAX_SENDQ_SIZE = 10000
MAX_READQ_SIZE = 100000
def register_collector(collector):
"""Register a collector with the COLLECTORS global"""
assert isinstance(collector, Collector), "collector=%r" % (collector,)
# store it in the global list and initiate a kill for anybody with the
# same name that happens to still be hanging around
if collector.name in COLLECTORS:
col = COLLECTORS[collector.name]
if col.proc is not None:
LOG.error('%s still has a process (pid=%d) and is being reset,'
' terminating', col.name, col.proc.pid)
col.shutdown()
COLLECTORS[collector.name] = collector
class ReaderQueue(Queue):
"""A Queue for the reader thread"""
def nput(self, value):
"""A nonblocking put, that simply logs and discards the value when the
queue is full, and returns false if we dropped."""
try:
self.put(value, False)
except Full:
LOG.error("DROPPED LINE: %s", value)
return False
return True
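# Illustrative sketch (added for exposition; not part of upstream tcollector):
# nput() never blocks -- once the queue is full it logs the offending line and
# drops it, returning False so the caller can account for the drop.
def _demo_reader_queue():
    q = ReaderQueue(maxsize=2)
    print(q.nput("proc.loadavg.1min 1479496100 0.42 host=demo"))   # True
    print(q.nput("proc.loadavg.5min 1479496100 0.40 host=demo"))   # True
    print(q.nput("proc.loadavg.15min 1479496100 0.38 host=demo"))  # False (dropped)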
class Collector(object):
"""A Collector is a script that is run that gathers some data
and prints it out in standard TSD format on STDOUT. This
class maintains all of the state information for a given
collector and gives us utility methods for working with
it."""
def __init__(self, colname, interval, filename, mtime=0, lastspawn=0):
"""Construct a new Collector."""
self.name = colname
self.interval = interval
self.filename = filename
self.lastspawn = lastspawn
self.proc = None
self.nextkill = 0
self.killstate = 0
self.dead = False
self.mtime = mtime
self.generation = GENERATION
self.buffer = ""
self.datalines = []
# Maps (metric, tags) to (value, repeated, line, timestamp) where:
# value: Last value seen.
# repeated: boolean, whether the last value was seen more than once.
# line: The last line that was read from that collector.
# timestamp: Time at which we saw the value for the first time.
# This dict is used to keep track of and remove duplicate values.
# Since it might grow unbounded (in case we see many different
# combinations of metrics and tags) someone needs to regularly call
# evict_old_keys() to remove old entries.
self.values = {}
self.lines_sent = 0
self.lines_received = 0
self.lines_invalid = 0
self.last_datapoint = int(time.time())
def read(self):
"""Read bytes from our subprocess and store them in our temporary
line storage buffer. This needs to be non-blocking."""
# we have to use a buffer because sometimes the collectors
# will write out a bunch of data points at one time and we
# get some weird sized chunk. This read call is non-blocking.
# now read stderr for log messages, we could buffer here but since
# we're just logging the messages, I don't care to
try:
out = self.proc.stderr.read()
if out:
LOG.debug('reading %s got %d bytes on stderr',
self.name, len(out))
for line in out.splitlines():
LOG.warning('%s: %s', self.name, line)
except IOError, (err, msg):
if err != errno.EAGAIN:
raise
except:
LOG.exception('uncaught exception in stderr read')
# we have to use a buffer because sometimes the collectors will write
# out a bunch of data points at one time and we get some weird sized
# chunk. This read call is non-blocking.
try:
self.buffer += self.proc.stdout.read()
if len(self.buffer):
LOG.debug('reading %s, buffer now %d bytes',
self.name, len(self.buffer))
except IOError, (err, msg):
if err != errno.EAGAIN:
raise
except AttributeError:
# sometimes the process goes away in another thread and we don't
# have it anymore, so log an error and bail
LOG.exception('caught exception, collector process went away while reading stdout')
except:
LOG.exception('uncaught exception in stdout read')
return
# iterate for each line we have
while self.buffer:
idx = self.buffer.find('\n')
if idx == -1:
break
# one full line is now found and we can pull it out of the buffer
line = self.buffer[0:idx].strip()
if line:
self.datalines.append(line)
self.last_datapoint = int(time.time())
self.buffer = self.buffer[idx+1:]
def collect(self):
"""Reads input from the collector and returns the lines up to whomever
is calling us. This is a generator that returns a line as it
becomes available."""
while self.proc is not None:
self.read()
if not len(self.datalines):
return
while len(self.datalines):
yield self.datalines.pop(0)
def shutdown(self):
"""Cleanly shut down the collector"""
if not self.proc:
return
try:
if self.proc.poll() is None:
kill(self.proc)
for attempt in range(5):
if self.proc.poll() is not None:
return
LOG.info('Waiting %ds for PID %d (%s) to exit...'
% (5 - attempt, self.proc.pid, self.name))
time.sleep(1)
kill(self.proc, signal.SIGKILL)
self.proc.wait()
except:
# we really don't want to die as we're trying to exit gracefully
LOG.exception('ignoring uncaught exception while shutting down')
def evict_old_keys(self, cut_off):
"""Remove old entries from the cache used to detect duplicate values.
Args:
cut_off: A UNIX timestamp. Any value that's older than this will be
removed from the cache.
"""
for key in self.values.keys():
time = self.values[key][3]
if time < cut_off:
del self.values[key]
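# Added illustrative sketch (not part of the original tcollector code): the shape
# of the Collector.values de-dupe cache and how evict_old_keys() trims it. Keys
# are (metric, tags) pairs, values are (value, repeated, line, timestamp) tuples.
def _example_collector_eviction():
    col = Collector('example', 15, '/bin/true')
    key = ('proc.loadavg.1min', ' host=web1')
    col.values[key] = ('0.36', False, 'proc.loadavg.1min 100 0.36 host=web1', 100)
    col.evict_old_keys(cut_off=200)  # the cached timestamp (100) is older than 200
    assert key not in col.values     # ...so the entry has been evicted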
class StdinCollector(Collector):
"""A StdinCollector simply reads from STDIN and provides the
data. This collector presents a uniform interface for the
ReaderThread, although unlike a normal collector, read()/collect()
will be blocking."""
def __init__(self):
super(StdinCollector, self).__init__('stdin', 0, '<stdin>')
# hack to make this work. nobody else will rely on self.proc
# except as a test in the stdin mode.
self.proc = True
def read(self):
"""Read lines from STDIN and store them. We allow this to
be blocking because there should only ever be one
StdinCollector and no normal collectors, so the ReaderThread
is only serving us and we're allowed to block it."""
global ALIVE
line = sys.stdin.readline()
if line:
self.datalines.append(line.rstrip())
else:
ALIVE = False
def shutdown(self):
pass
class ReaderThread(threading.Thread):
"""The main ReaderThread is responsible for reading from the collectors
and assuring that we always read from the input no matter what.
All data read is put into the self.readerq Queue, which is
consumed by the SenderThread."""
def __init__(self, dedupinterval, evictinterval):
"""Constructor.
Args:
dedupinterval: If a metric sends the same value over successive
intervals, suppress sending the same value to the TSD until
this many seconds have elapsed. This helps graphs over narrow
time ranges still see timeseries with suppressed datapoints.
evictinterval: In order to implement the behavior above, the
code needs to keep track of the last value seen for each
combination of (metric, tags). Values older than
evictinterval will be removed from the cache to save RAM.
Invariant: evictinterval > dedupinterval
"""
assert evictinterval > dedupinterval, "%r <= %r" % (evictinterval,
dedupinterval)
super(ReaderThread, self).__init__()
self.readerq = ReaderQueue(MAX_READQ_SIZE)
self.lines_collected = 0
self.lines_dropped = 0
self.dedupinterval = dedupinterval
self.evictinterval = evictinterval
def run(self):
"""Main loop for this thread. Just reads from collectors,
does our input processing and de-duping, and puts the data
into the queue."""
LOG.debug("ReaderThread up and running")
lastevict_time = 0
# we loop every second for now. ideally we'll setup some
# select or other thing to wait for input on our children,
# while breaking out every once in a while to setup selects
# on new children.
while ALIVE:
for col in all_living_collectors():
for line in col.collect():
self.process_line(col, line)
if self.dedupinterval != 0: # if 0 we do not use dedup
now = int(time.time())
if now - lastevict_time > self.evictinterval:
lastevict_time = now
now -= self.evictinterval
for col in all_collectors():
col.evict_old_keys(now)
# and here is the loop that we really should get rid of, this
# just prevents us from spinning right now
time.sleep(1)
def process_line(self, col, line):
"""Parses the given line and appends the result to the reader queue."""
self.lines_collected += 1
col.lines_received += 1
if len(line) >= 1024: # Limit in net.opentsdb.tsd.PipelineFactory
LOG.warning('%s line too long: %s', col.name, line)
col.lines_invalid += 1
return
parsed = re.match('^([-_./a-zA-Z0-9]+)\s+' # Metric name.
'(\d+\.?\d+)\s+' # Timestamp.
'(\S+?)' # Value (int or float).
'((?:\s+[-_./a-zA-Z0-9]+=[-_./a-zA-Z0-9]+)*)$', # Tags
line)
if parsed is None:
LOG.warning('%s sent invalid data: %s', col.name, line)
col.lines_invalid += 1
return
metric, timestamp, value, tags = parsed.groups()
timestamp = int(timestamp)
# If there are more than 11 digits we're dealing with a timestamp
# with millisecond precision
if len(str(timestamp)) > 11:
global MAX_REASONABLE_TIMESTAMP
MAX_REASONABLE_TIMESTAMP = MAX_REASONABLE_TIMESTAMP * 1000
# De-dupe detection... To reduce the number of points we send to the
# TSD, we suppress sending values of metrics that don't change to
# only once every 10 minutes (which is also when TSD changes rows
# and how much extra time the scanner adds to the beginning/end of a
# graph interval in order to correctly calculate aggregated values).
# When the values do change, we want to first send the previous value
# with what the timestamp was when it first became that value (to keep
# slopes of graphs correct).
#
if self.dedupinterval != 0: # if 0 we do not use dedup
key = (metric, tags)
if key in col.values:
# if the timestamp isn't > than the previous one, ignore this value
if timestamp <= col.values[key][3]:
LOG.error("Timestamp out of order: metric=%s%s,"
" old_ts=%d >= new_ts=%d - ignoring data point"
" (value=%r, collector=%s)", metric, tags,
col.values[key][3], timestamp, value, col.name)
col.lines_invalid += 1
return
elif timestamp >= MAX_REASONABLE_TIMESTAMP:
LOG.error("Timestamp is too far out in the future: metric=%s%s"
" old_ts=%d, new_ts=%d - ignoring data point"
" (value=%r, collector=%s)", metric, tags,
col.values[key][3], timestamp, value, col.name)
return
# if this data point is repeated, store it but don't send.
# store the previous timestamp, so when/if this value changes
# we send the timestamp when this metric first became the current
# value instead of the last. Fall through if we reach
# the dedup interval so we can print the value.
if (col.values[key][0] == value and
(timestamp - col.values[key][3] < self.dedupinterval)):
col.values[key] = (value, True, line, col.values[key][3])
return
# we might have to append two lines if the value has been the same
# for a while and we've skipped one or more values. we need to
# replay the last value we skipped (if changed) so the jumps in
# our graph are accurate.
if ((col.values[key][1] or
(timestamp - col.values[key][3] >= self.dedupinterval))
and col.values[key][0] != value):
col.lines_sent += 1
if not self.readerq.nput(col.values[key][2]):
self.lines_dropped += 1
# now we can reset for the next pass and send the line we actually
# want to send
# col.values is a dict of tuples, with the key being the metric and
# tags (essentially the same as what TSD uses for the row key).
# The array consists of:
# [ the metric's value, if this value was repeated, the line of data,
# the value's timestamp that it last changed ]
col.values[key] = (value, False, line, timestamp)
col.lines_sent += 1
if not self.readerq.nput(line):
self.lines_dropped += 1
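# Added illustrative sketch (not part of the original tcollector code): examples
# of the input line format process_line() accepts, matching the regular
# expression above -- "<metric> <timestamp> <value> [tag1=value1 ...]".
_EXAMPLE_INPUT_LINES = [
    "proc.loadavg.1min 1288946927 0.36 host=web1 cpu=0",  # with tags
    "df.bytes.free 1288946927 12345678",                  # without tags
    "proc.loadavg.1min 1288946927000 0.36 host=web1",     # millisecond timestamp
]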
class SenderThread(threading.Thread):
"""The SenderThread is responsible for maintaining a connection
to the TSD and sending the data we're getting over to it. This
thread is also responsible for doing any sort of emergency
buffering we might need to do if we can't establish a connection
and we need to spool to disk. That isn't implemented yet."""
def __init__(self, reader, dryrun, hosts, self_report_stats, tags,
reconnectinterval=0, http=False, http_username=None,
http_password=None, ssl=False, maxtags=8):
"""Constructor.
Args:
reader: A reference to a ReaderThread instance.
dryrun: If true, data points will be printed on stdout instead of
being sent to the TSD.
hosts: List of (host, port) tuples defining list of TSDs
self_report_stats: If true, the reader thread will insert its own
stats into the metrics reported to TSD, as if those metrics had
been read from a collector.
http: A boolean that controls whether or not the http endpoint is used.
ssl: A boolean that controls whether or not the http endpoint uses ssl.
tags: A dictionary of tags to append for every data point.
"""
super(SenderThread, self).__init__()
self.dryrun = dryrun
self.reader = reader
self.tags = sorted(tags.items()) # dictionary transformed to list
self.http = http
self.http_username = http_username
self.http_password = http_password
self.ssl = ssl
self.hosts = hosts # A list of (host, port) pairs.
# Randomize hosts to help even out the load.
random.shuffle(self.hosts)
self.blacklisted_hosts = set() # The 'bad' (host, port) pairs.
self.current_tsd = -1 # Index in self.hosts where we're at.
self.host = None # The current TSD host we've selected.
self.port = None # The port of the current TSD.
self.tsd = None # The socket connected to the aforementioned TSD.
self.last_verify = 0
self.reconnectinterval = reconnectinterval # in seconds.
self.time_reconnect = 0 # if reconnectinterval > 0, used to track the time.
self.sendq = []
self.self_report_stats = self_report_stats
self.maxtags = maxtags # The maximum number of tags TSD will accept.
def pick_connection(self):
"""Picks up a random host/port connection."""
# Try to get the next host from the list, until we find a host that
# isn't in the blacklist, or until we run out of hosts (i.e. they
# are all blacklisted, which typically happens when we lost our
# connectivity to the outside world).
for self.current_tsd in xrange(self.current_tsd + 1, len(self.hosts)):
hostport = self.hosts[self.current_tsd]
if hostport not in self.blacklisted_hosts:
break
else:
LOG.info('No more healthy hosts, retry with previously blacklisted')
random.shuffle(self.hosts)
self.blacklisted_hosts.clear()
self.current_tsd = 0
hostport = self.hosts[self.current_tsd]
self.host, self.port = hostport
LOG.info('Selected connection: %s:%d', self.host, self.port)
def blacklist_connection(self):
"""Marks the current TSD host we're trying to use as blacklisted.
Blacklisted hosts will get another chance to be elected once there
will be no more healthy hosts."""
# FIXME: Enhance this naive strategy.
LOG.info('Blacklisting %s:%s for a while', self.host, self.port)
self.blacklisted_hosts.add((self.host, self.port))
def run(self):
"""Main loop. A simple scheduler. Loop waiting for 5
seconds for data on the queue. If there's no data, just
loop and make sure our connection is still open. If there
is data, wait 5 more seconds and grab all of the pending data and
send it. A little better than sending every line as its
own packet."""
errors = 0 # How many uncaught exceptions in a row we got.
while ALIVE:
try:
self.maintain_conn()
try:
line = self.reader.readerq.get(True, 5)
except Empty:
continue
self.sendq.append(line)
time.sleep(5) # Wait for more data
while True:
# prevents self.sendq fast growing in case of sending fails
# in send_data()
if len(self.sendq) > MAX_SENDQ_SIZE:
break
try:
line = self.reader.readerq.get(False)
except Empty:
break
self.sendq.append(line)
if ALIVE:
self.send_data()
errors = 0 # We managed to do a successful iteration.
except (ArithmeticError, EOFError, EnvironmentError, LookupError,
ValueError), e:
errors += 1
if errors > MAX_UNCAUGHT_EXCEPTIONS:
shutdown()
raise
LOG.exception('Uncaught exception in SenderThread, ignoring')
time.sleep(1)
continue
except:
LOG.exception('Uncaught exception in SenderThread, going to exit')
shutdown()
raise
def verify_conn(self):
"""Periodically verify that our connection to the TSD is OK
and that the TSD is alive/working."""
# http connections don't need this
if self.http:
return True
if self.tsd is None:
return False
# if the last verification was less than a minute ago, don't re-verify
if self.last_verify > time.time() - 60:
return True
# in case reconnect is activated, check if it's time to reconnect
if self.reconnectinterval > 0 and self.time_reconnect < time.time() - self.reconnectinterval:
# closing the connection and indicating that we need to reconnect.
try:
self.tsd.close()
except socket.error, msg:
pass # not handling that
self.time_reconnect = time.time()
return False
# we use the version command as it is very low effort for the TSD
# to respond
LOG.debug('verifying our TSD connection is alive')
try:
self.tsd.sendall('version\n')
except socket.error, msg:
self.tsd = None
self.blacklist_connection()
return False
bufsize = 4096
while ALIVE:
# try to read as much data as we can. at some point this is going
# to block, but we have set the timeout low when we made the
# connection
try:
buf = self.tsd.recv(bufsize)
except socket.error, msg:
self.tsd = None
self.blacklist_connection()
return False
# If we don't get a response to the `version' request, the TSD
# must be dead or overloaded.
if not buf:
self.tsd = None
self.blacklist_connection()
return False
# Woah, the TSD has a lot of things to tell us... Let's make
# sure we read everything it sent us by looping once more.
if len(buf) == bufsize:
continue
# If everything is good, send out our meta stats. This
# helps to see what is going on with the tcollector.
# TODO need to fix this for http
if self.self_report_stats:
strs = [
('reader.lines_collected',
'', self.reader.lines_collected),
('reader.lines_dropped',
'', self.reader.lines_dropped)
]
for col in all_living_collectors():
strs.append(('collector.lines_sent', 'collector='
+ col.name, col.lines_sent))
strs.append(('collector.lines_received', 'collector='
+ col.name, col.lines_received))
strs.append(('collector.lines_invalid', 'collector='
+ col.name, col.lines_invalid))
ts = int(time.time())
strout = ["tcollector.%s %d %d %s"
% (x[0], ts, x[2], x[1]) for x in strs]
for string in strout:
self.sendq.append(string)
break # TSD is alive.
# if we get here, we assume the connection is good
self.last_verify = time.time()
return True
def maintain_conn(self):
"""Safely connect to the TSD and ensure that it's up and
running and that we're not talking to a ghost connection
(no response)."""
# dry runs and http are always good
if self.dryrun or self.http:
return
# connection didn't verify, so create a new one. we might be in
# this method for a long time while we sort this out.
try_delay = 1
while ALIVE:
if self.verify_conn():
return
# increase the try delay by some amount and some random value,
# in case the TSD is down for a while. delay at most
# approximately 10 minutes.
try_delay *= 1 + random.random()
if try_delay > 600:
try_delay *= 0.5
LOG.debug('SenderThread blocking %0.2f seconds', try_delay)
time.sleep(try_delay)
# Now actually try the connection.
self.pick_connection()
try:
addresses = socket.getaddrinfo(self.host, self.port,
socket.AF_UNSPEC,
socket.SOCK_STREAM, 0)
except socket.gaierror, e:
# Don't croak on transient DNS resolution issues.
if e[0] in (socket.EAI_AGAIN, socket.EAI_NONAME,
socket.EAI_NODATA):
LOG.debug('DNS resolution failure: %s: %s', self.host, e)
continue
raise
for family, socktype, proto, canonname, sockaddr in addresses:
try:
self.tsd = socket.socket(family, socktype, proto)
self.tsd.settimeout(15)
self.tsd.connect(sockaddr)
# if we get here it connected
LOG.debug('Connection to %s was successful'%(str(sockaddr)))
break
except socket.error, msg:
LOG.warning('Connection attempt failed to %s:%d: %s',
self.host, self.port, msg)
self.tsd.close()
self.tsd = None
if not self.tsd:
LOG.error('Failed to connect to %s:%d', self.host, self.port)
self.blacklist_connection()
def add_tags_to_line(self, line):
for tag, value in self.tags:
if ' %s=' % tag not in line:
line += ' %s=%s' % (tag, value)
return line
def send_data(self):
"""Sends outstanding data in self.sendq to the TSD in one operation."""
if self.http:
return self.send_data_via_http()
# construct the output string
out = ''
# in case of logging we use less efficient variant
if LOG.level == logging.DEBUG:
for line in self.sendq:
line = "put %s" % self.add_tags_to_line(line)
out += line + "\n"
LOG.debug('SENDING: %s', line)
else:
out = "".join("put %s\n" % self.add_tags_to_line(line) for line in self.sendq)
if not out:
LOG.debug('send_data no data?')
return
# try sending our data. if an exception occurs, just error and
# try sending again next time.
try:
if self.dryrun:
print out
else:
self.tsd.sendall(out)
self.sendq = []
except socket.error, msg:
LOG.error('failed to send data: %s', msg)
try:
self.tsd.close()
except socket.error:
pass
self.tsd = None
self.blacklist_connection()
# FIXME: we should be reading the result at some point to drain
# the packets out of the kernel's queue
def send_data_via_http(self):
"""Sends outstanding data in self.sendq to TSD in one HTTP API call."""
metrics = []
for line in self.sendq:
# print " %s" % line
parts = line.split(None, 3)
# not all metrics have metric-specific tags
if len(parts) == 4:
(metric, timestamp, value, raw_tags) = parts
else:
(metric, timestamp, value) = parts
raw_tags = ""
# process the tags
metric_tags = {}
for tag in raw_tags.strip().split():
(tag_key, tag_value) = tag.split("=", 1)
metric_tags[tag_key] = tag_value
metric_entry = {}
metric_entry["metric"] = metric
metric_entry["timestamp"] = long(timestamp)
metric_entry["value"] = float(value)
metric_entry["tags"] = dict(self.tags).copy()
if len(metric_tags) + len(metric_entry["tags"]) > self.maxtags:
metric_tags_orig = set(metric_tags)
subset_metric_keys = frozenset(list(metric_tags)[:self.maxtags - len(metric_entry["tags"])])
metric_tags = dict((k, v) for k, v in metric_tags.iteritems() if k in subset_metric_keys)
LOG.error("Exceeding maximum permitted metric tags - removing %s for metric %s",
str(metric_tags_orig - set(metric_tags)), metric)
metric_entry["tags"].update(metric_tags)
metrics.append(metric_entry)
if self.dryrun:
print "Would have sent:\n%s" % json.dumps(metrics,
sort_keys=True,
indent=4)
return
self.pick_connection()
# print "Using server: %s:%s" % (self.host, self.port)
# url = "http://%s:%s/api/put?details" % (self.host, self.port)
# print "Url is %s" % url
LOG.debug("Sending metrics to http://%s:%s/api/put?details",
self.host, self.port)
if self.ssl:
protocol = "https"
else:
protocol = "http"
req = urllib2.Request("%s://%s:%s/api/put?details" % (
protocol, self.host, self.port))
if self.http_username and self.http_password:
req.add_header("Authorization", "Basic %s"
% base64.b64encode("%s:%s" % (self.http_username, self.http_password)))
req.add_header("Content-Type", "application/json")
try:
response = urllib2.urlopen(req, json.dumps(metrics))
LOG.debug("Received response %s", response.getcode())
# clear out the sendq
self.sendq = []
# print "Got response code: %s" % response.getcode()
# print "Content:"
# for line in response:
# print line,
# print
except urllib2.HTTPError, e:
LOG.error("Got error %s", e)
# for line in http_error:
# print line,
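# Added illustrative sketch (not part of the original tcollector code): the JSON
# body that send_data_via_http() POSTs to the /api/put endpoint -- one object per
# data point, with the global tags merged into each point's tag dictionary.
_EXAMPLE_HTTP_PAYLOAD = [
    {
        "metric": "proc.loadavg.1min",
        "timestamp": 1288946927,
        "value": 0.36,
        "tags": {"host": "web1"},
    },
]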
def setup_logging(logfile=DEFAULT_LOG, max_bytes=None, backup_count=None):
"""Sets up logging and associated handlers."""
LOG.setLevel(logging.INFO)
if backup_count is not None and max_bytes is not None:
assert backup_count > 0
assert max_bytes > 0
ch = RotatingFileHandler(logfile, 'a', max_bytes, backup_count)
else: # Setup stream handler.
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d] '
'%(levelname)s: %(message)s'))
LOG.addHandler(ch)
def parse_cmdline(argv):
"""Parses the command-line."""
# get arguments
default_cdir = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
'collectors')
parser = OptionParser(description='Manages collectors which gather '
'data and report back.')
parser.add_option('-c', '--collector-dir', dest='cdir', metavar='DIR',
default=default_cdir,
help='Directory where the collectors are located.')
parser.add_option('-d', '--dry-run', dest='dryrun', action='store_true',
default=False,
help='Don\'t actually send anything to the TSD, '
'just print the datapoints.')
parser.add_option('-D', '--daemonize', dest='daemonize', action='store_true',
default=False, help='Run as a background daemon.')
parser.add_option('-H', '--host', dest='host', default='localhost',
metavar='HOST',
help='Hostname to use to connect to the TSD.')
parser.add_option('-L', '--hosts-list', dest='hosts', default=False,
metavar='HOSTS',
help='List of host:port to connect to tsd\'s (comma separated).')
parser.add_option('--no-tcollector-stats', dest='no_tcollector_stats',
default=False, action='store_true',
help='Prevent tcollector from reporting its own stats to TSD')
parser.add_option('-s', '--stdin', dest='stdin', action='store_true',
default=False,
help='Run once, read and dedup data points from stdin.')
parser.add_option('-p', '--port', dest='port', type='int',
default=DEFAULT_PORT, metavar='PORT',
help='Port to connect to the TSD instance on. '
'default=%default')
parser.add_option('-v', dest='verbose', action='store_true', default=False,
help='Verbose mode (log debug messages).')
parser.add_option('-t', '--tag', dest='tags', action='append',
default=[], metavar='TAG',
help='Tags to append to all timeseries we send, '
'e.g.: -t TAG=VALUE -t TAG2=VALUE')
parser.add_option('-P', '--pidfile', dest='pidfile',
default='/var/run/tcollector.pid',
metavar='FILE', help='Write our pidfile')
parser.add_option('--dedup-interval', dest='dedupinterval', type='int',
default=300, metavar='DEDUPINTERVAL',
help='Number of seconds in which successive duplicate '
'datapoints are suppressed before sending to the TSD. '
'Use zero to disable. '
'default=%default')
parser.add_option('--evict-interval', dest='evictinterval', type='int',
default=6000, metavar='EVICTINTERVAL',
help='Number of seconds after which to remove cached '
'values of old data points to save memory. '
'default=%default')
parser.add_option('--allowed-inactivity-time', dest='allowed_inactivity_time', type='int',
default=ALLOWED_INACTIVITY_TIME, metavar='ALLOWEDINACTIVITYTIME',
help='How long to wait for datapoints before assuming '
'a collector is dead and restart it. '
'default=%default')
parser.add_option('--remove-inactive-collectors', dest='remove_inactive_collectors', action='store_true',
default=False, help='Remove collectors not sending data '
'in the max allowed inactivity interval')
parser.add_option('--max-bytes', dest='max_bytes', type='int',
default=64 * 1024 * 1024,
help='Maximum bytes per a logfile.')
parser.add_option('--backup-count', dest='backup_count', type='int',
default=0, help='Maximum number of logfiles to backup.')
parser.add_option('--logfile', dest='logfile', type='str',
default=DEFAULT_LOG,
help='Filename where logs are written to.')
parser.add_option('--reconnect-interval',dest='reconnectinterval', type='int',
default=0, metavar='RECONNECTINTERVAL',
help='Number of seconds after which the connection to'
'the TSD hostname reconnects itself. This is useful'
'when the hostname is a multiple A record (RRDNS).'
)
parser.add_option('--max-tags', dest='maxtags', type=int, default=8,
help='The maximum number of tags to send to our TSD Instances')
parser.add_option('--http', dest='http', action='store_true', default=False,
help='Send the data via the http interface')
parser.add_option('--http-username', dest='http_username', default=False,
help='Username to use for HTTP Basic Auth when sending the data via HTTP')
parser.add_option('--http-password', dest='http_password', default=False,
help='Password to use for HTTP Basic Auth when sending the data via HTTP')
parser.add_option('--ssl', dest='ssl', action='store_true', default=False,
help='Enable SSL - used in conjunction with http')
(options, args) = parser.parse_args(args=argv[1:])
if options.dedupinterval < 0:
parser.error('--dedup-interval must be at least 0 seconds')
if options.evictinterval <= options.dedupinterval:
parser.error('--evict-interval must be strictly greater than '
'--dedup-interval')
if options.reconnectinterval < 0:
parser.error('--reconnect-interval must be at least 0 seconds')
# We cannot write to stdout when we're a daemon.
if (options.daemonize or options.max_bytes) and not options.backup_count:
options.backup_count = 1
return (options, args)
def daemonize():
"""Performs the necessary dance to become a background daemon."""
if os.fork():
os._exit(0)
os.chdir("/")
os.umask(022)
os.setsid()
os.umask(0)
if os.fork():
os._exit(0)
stdin = open(os.devnull)
stdout = open(os.devnull, 'w')
os.dup2(stdin.fileno(), 0)
os.dup2(stdout.fileno(), 1)
os.dup2(stdout.fileno(), 2)
stdin.close()
stdout.close()
os.umask(022)
for fd in xrange(3, 1024):
try:
os.close(fd)
except OSError: # This FD wasn't opened...
pass # ... ignore the exception.
def setup_python_path(collector_dir):
"""Sets up PYTHONPATH so that collectors can easily import common code."""
mydir = os.path.dirname(collector_dir)
libdir = os.path.join(mydir, 'collectors', 'lib')
if not os.path.isdir(libdir):
return
pythonpath = os.environ.get('PYTHONPATH', '')
if pythonpath:
pythonpath += ':'
pythonpath += mydir
os.environ['PYTHONPATH'] = pythonpath
LOG.debug('Set PYTHONPATH to %r', pythonpath)
def main(argv):
"""The main tcollector entry point and loop."""
options, args = parse_cmdline(argv)
if options.daemonize:
daemonize()
setup_logging(options.logfile, options.max_bytes or None,
options.backup_count or None)
if options.verbose:
LOG.setLevel(logging.DEBUG) # up our level
if options.pidfile:
write_pid(options.pidfile)
# validate everything
tags = {}
for tag in options.tags:
if re.match('^[-_.a-z0-9]+=\S+$', tag, re.IGNORECASE) is None:
assert False, 'Tag string "%s" is invalid.' % tag
k, v = tag.split('=', 1)
if k in tags:
assert False, 'Tag "%s" already declared.' % k
tags[k] = v
if not 'host' in tags and not options.stdin:
tags['host'] = socket.gethostname()
LOG.warning('Tag "host" not specified, defaulting to %s.', tags['host'])
options.cdir = os.path.realpath(options.cdir)
if not os.path.isdir(options.cdir):
LOG.fatal('No such directory: %s', options.cdir)
return 1
modules = load_etc_dir(options, tags)
setup_python_path(options.cdir)
# gracefully handle death for normal termination paths and abnormal
atexit.register(shutdown)
for sig in (signal.SIGTERM, signal.SIGINT):
signal.signal(sig, shutdown_signal)
# at this point we're ready to start processing, so start the ReaderThread
# so we can have it running and pulling in data for us
reader = ReaderThread(options.dedupinterval, options.evictinterval)
reader.start()
# prepare list of (host, port) of TSDs given on CLI
if not options.hosts:
options.hosts = [(options.host, options.port)]
else:
def splitHost(hostport):
if ":" in hostport:
# Check if we have an IPv6 address.
if hostport[0] == "[" and "]:" in hostport:
host, port = hostport.split("]:")
host = host[1:]
else:
host, port = hostport.split(":")
return (host, int(port))
return (hostport, DEFAULT_PORT)
options.hosts = [splitHost(host) for host in options.hosts.split(",")]
if options.host != "localhost" or options.port != DEFAULT_PORT:
options.hosts.append((options.host, options.port))
# and setup the sender to start writing out to the tsd
sender = SenderThread(reader, options.dryrun, options.hosts,
not options.no_tcollector_stats, tags, options.reconnectinterval,
options.http, options.http_username,
options.http_password, options.ssl, options.maxtags)
sender.start()
LOG.info('SenderThread startup complete')
# if we're in stdin mode, build a stdin collector and just join on the
# reader thread since there's nothing else for us to do here
if options.stdin:
register_collector(StdinCollector())
stdin_loop(options, modules, sender, tags)
else:
sys.stdin.close()
main_loop(options, modules, sender, tags)
# We're exiting, make sure we don't leave any collector behind.
for col in all_living_collectors():
col.shutdown()
LOG.debug('Shutting down -- joining the reader thread.')
reader.join()
LOG.debug('Shutting down -- joining the sender thread.')
sender.join()
def stdin_loop(options, modules, sender, tags):
"""The main loop of the program that runs when we are in stdin mode."""
global ALIVE
next_heartbeat = int(time.time() + 600)
while ALIVE:
time.sleep(15)
reload_changed_config_modules(modules, options, sender, tags)
now = int(time.time())
if now >= next_heartbeat:
LOG.info('Heartbeat (%d collectors running)'
% sum(1 for col in all_living_collectors()))
next_heartbeat = now + 600
def main_loop(options, modules, sender, tags):
"""The main loop of the program that runs when we're not in stdin mode."""
next_heartbeat = int(time.time() + 600)
while ALIVE:
populate_collectors(options.cdir)
reload_changed_config_modules(modules, options, sender, tags)
reap_children()
check_children(options)
spawn_children()
time.sleep(15)
now = int(time.time())
if now >= next_heartbeat:
LOG.info('Heartbeat (%d collectors running)'
% sum(1 for col in all_living_collectors()))
next_heartbeat = now + 600
def list_config_modules(etcdir):
"""Returns an iterator that yields the name of all the config modules."""
if not os.path.isdir(etcdir):
return iter(()) # Empty iterator.
return (name for name in os.listdir(etcdir)
if (name.endswith('.py')
and os.path.isfile(os.path.join(etcdir, name))))
def load_etc_dir(options, tags):
"""Loads any Python module from tcollector's own 'etc' directory.
Returns: A dict of path -> (module, timestamp).
"""
etcdir = os.path.join(options.cdir, 'etc')
sys.path.append(etcdir) # So we can import modules from the etc dir.
modules = {} # path -> (module, timestamp)
for name in list_config_modules(etcdir):
path = os.path.join(etcdir, name)
module = load_config_module(name, options, tags)
modules[path] = (module, os.path.getmtime(path))
return modules
def load_config_module(name, options, tags):
"""Imports the config module of the given name
The 'name' argument can be a string, in which case the module will be
loaded by name, or it can be a module object, in which case the module
will get reloaded.
If the module has an 'onload' function, calls it.
Returns: the reference to the module loaded.
"""
if isinstance(name, str):
LOG.info('Loading %s', name)
d = {}
# Strip the trailing .py
module = __import__(name[:-3], d, d)
else:
module = reload(name)
onload = module.__dict__.get('onload')
if callable(onload):
try:
onload(options, tags)
except:
LOG.fatal('Exception while loading %s', name)
raise
return module
def reload_changed_config_modules(modules, options, sender, tags):
"""Reloads any changed modules from the 'etc' directory.
Args:
cdir: The path to the 'collectors' directory.
modules: A dict of path -> (module, timestamp).
Returns: whether or not anything has changed.
"""
etcdir = os.path.join(options.cdir, 'etc')
current_modules = set(list_config_modules(etcdir))
current_paths = set(os.path.join(etcdir, name)
for name in current_modules)
changed = False
# Reload any module that has changed.
for path, (module, timestamp) in modules.iteritems():
if path not in current_paths: # Module was removed.
continue
mtime = os.path.getmtime(path)
if mtime > timestamp:
LOG.info('Reloading %s, file has changed', path)
module = load_config_module(module, options, tags)
modules[path] = (module, mtime)
changed = True
# Remove any module that has been removed.
for path in set(modules).difference(current_paths):
LOG.info('%s has been removed, tcollector should be restarted', path)
del modules[path]
changed = True
# Check for any modules that may have been added.
for name in current_modules:
path = os.path.join(etcdir, name)
if path not in modules:
module = load_config_module(name, options, tags)
modules[path] = (module, os.path.getmtime(path))
changed = True
return changed
def write_pid(pidfile):
"""Write our pid to a pidfile."""
f = open(pidfile, "w")
try:
f.write(str(os.getpid()))
finally:
f.close()
def all_collectors():
"""Generator to return all collectors."""
return COLLECTORS.itervalues()
# collectors that are not marked dead
def all_valid_collectors():
"""Generator to return all defined collectors that haven't been marked
dead in the past hour, allowing temporarily broken collectors a
chance at redemption."""
now = int(time.time())
for col in all_collectors():
if not col.dead or (now - col.lastspawn > 3600):
yield col
# collectors that have a process attached (currently alive)
def all_living_collectors():
"""Generator to return all defined collectors that have
an active process."""
for col in all_collectors():
if col.proc is not None:
yield col
def shutdown_signal(signum, frame):
"""Called when we get a signal and need to terminate."""
LOG.warning("shutting down, got signal %d", signum)
shutdown()
def kill(proc, signum=signal.SIGTERM):
os.killpg(proc.pid, signum)
def shutdown():
"""Called by atexit and when we receive a signal, this ensures we properly
terminate any outstanding children."""
global ALIVE
# prevent repeated calls
if not ALIVE:
return
# notify threads of program termination
ALIVE = False
LOG.info('shutting down children')
# tell everyone to die
for col in all_living_collectors():
col.shutdown()
LOG.info('exiting')
sys.exit(1)
def reap_children():
"""When a child process dies, we have to determine why it died and whether
or not we need to restart it. This method manages that logic."""
for col in all_living_collectors():
now = int(time.time())
# FIXME: this is not robust. the asyncproc module joins on the
# reader threads when you wait if that process has died. this can cause
# slow dying processes to hold up the main loop. good for now though.
status = col.proc.poll()
if status is None:
continue
col.proc = None
# behavior based on status. a code 0 is normal termination, code 13
# is used to indicate that we don't want to restart this collector.
# any other status code is an error and is logged.
if status == 13:
LOG.info('removing %s from the list of collectors (by request)',
col.name)
col.dead = True
elif status != 0:
LOG.warning('collector %s terminated after %d seconds with '
'status code %d, marking dead',
col.name, now - col.lastspawn, status)
col.dead = True
else:
register_collector(Collector(col.name, col.interval, col.filename,
col.mtime, col.lastspawn))
def check_children(options):
"""When a child process hasn't received a datapoint in a while,
assume it's died in some fashion and restart it."""
for col in all_living_collectors():
now = int(time.time())
if col.last_datapoint < (now - options.allowed_inactivity_time):
# It's too old, kill it
LOG.warning('Terminating collector %s after %d seconds of inactivity',
col.name, now - col.last_datapoint)
col.shutdown()
if not options.remove_inactive_collectors:
register_collector(Collector(col.name, col.interval, col.filename,
col.mtime, col.lastspawn))
def set_nonblocking(fd):
"""Sets the given file descriptor to non-blocking mode."""
fl = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, fl)
def spawn_collector(col):
"""Takes a Collector object and creates a process for it."""
LOG.info('%s (interval=%d) needs to be spawned', col.name, col.interval)
# FIXME: do custom integration of Python scripts into memory/threads
# if re.search('\.py$', col.name) is not None:
# ... load the py module directly instead of using a subprocess ...
try:
col.proc = subprocess.Popen(col.filename, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
preexec_fn=os.setsid)
except OSError, e:
LOG.error('Failed to spawn collector %s: %s' % (col.filename, e))
return
# The following line needs to move below this line because it is used in
# other logic and it makes no sense to update the last spawn time if the
# collector didn't actually start.
col.lastspawn = int(time.time())
set_nonblocking(col.proc.stdout.fileno())
set_nonblocking(col.proc.stderr.fileno())
if col.proc.pid > 0:
col.dead = False
LOG.info('spawned %s (pid=%d)', col.name, col.proc.pid)
return
# FIXME: handle errors better
LOG.error('failed to spawn collector: %s', col.filename)
def spawn_children():
"""Iterates over our defined collectors and performs the logic to
determine if we need to spawn, kill, or otherwise take some
action on them."""
if not ALIVE:
return
for col in all_valid_collectors():
now = int(time.time())
if col.interval == 0:
if col.proc is None:
spawn_collector(col)
elif col.interval <= now - col.lastspawn:
if col.proc is None:
spawn_collector(col)
continue
# I'm not very satisfied with this path. It seems fragile and
# overly complex, maybe we should just rely on the asyncproc
# terminate method, but that would make the main tcollector
# block until it dies... :|
if col.nextkill > now:
continue
if col.killstate == 0:
LOG.warning('warning: %s (interval=%d, pid=%d) overstayed '
'its welcome, SIGTERM sent',
col.name, col.interval, col.proc.pid)
kill(col.proc)
col.nextkill = now + 5
col.killstate = 1
elif col.killstate == 1:
LOG.error('error: %s (interval=%d, pid=%d) still not dead, '
'SIGKILL sent',
col.name, col.interval, col.proc.pid)
kill(col.proc, signal.SIGKILL)
col.nextkill = now + 5
col.killstate = 2
else:
LOG.error('error: %s (interval=%d, pid=%d) needs manual '
'intervention to kill it',
col.name, col.interval, col.proc.pid)
col.nextkill = now + 300
def populate_collectors(coldir):
"""Maintains our internal list of valid collectors. This walks the
collector directory and looks for files. In subsequent calls, this
also looks for changes to the files -- new, removed, or updated files,
and takes the right action to bring the state of our running processes
in line with the filesystem."""
global GENERATION
GENERATION += 1
# get numerics from scriptdir, we're only setup to handle numeric paths
# which define intervals for our monitoring scripts
for interval in os.listdir(coldir):
if not interval.isdigit():
continue
interval = int(interval)
for colname in os.listdir('%s/%d' % (coldir, interval)):
if colname.startswith('.'):
continue
filename = '%s/%d/%s' % (coldir, interval, colname)
if os.path.isfile(filename) and os.access(filename, os.X_OK):
mtime = os.path.getmtime(filename)
# if this collector is already 'known', then check if it's
# been updated (new mtime) so we can kill off the old one
# (but only if it's interval 0, else we'll just get
# it next time it runs)
if colname in COLLECTORS:
col = COLLECTORS[colname]
# if we get a dupe, then ignore the one we're trying to
# add now. there is probably a more robust way of doing
# this...
if col.interval != interval:
LOG.error('two collectors with the same name %s and '
'different intervals %d and %d',
colname, interval, col.interval)
continue
# we have to increase the generation or we will kill
# this script again
col.generation = GENERATION
if col.mtime < mtime:
LOG.info('%s has been updated on disk', col.name)
col.mtime = mtime
if not col.interval:
col.shutdown()
LOG.info('Respawning %s', col.name)
register_collector(Collector(colname, interval,
filename, mtime))
else:
register_collector(Collector(colname, interval, filename,
mtime))
# now iterate over everybody and look for old generations
to_delete = []
for col in all_collectors():
if col.generation < GENERATION:
LOG.info('collector %s removed from the filesystem, forgetting',
col.name)
col.shutdown()
to_delete.append(col.name)
for name in to_delete:
del COLLECTORS[name]
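# Added illustrative sketch (not part of the original tcollector code): the
# collector directory layout populate_collectors() expects. Sub-directory names
# are the collection interval in seconds (0 means a long-running collector);
# the file names below are made up for illustration.
#
#   collectors/
#       0/ifstat.py      # long-running, respawned only if it dies
#       15/iostat.sh     # respawned every 15 seconds
#       etc/config.py    # config modules picked up by load_etc_dir()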
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
gpl-3.0
| -8,030,747,697,064,389,000 | 39.957343 | 117 | 0.56716 | false |
louargantb/onectl
|
onectl-plugins/neighbors/communities.py
|
1
|
6515
|
#!/usr/bin/python -u
# Name: fqn.plugin.name
from includes import pluginClass
from includes import regexp
import os
import sys
import re
import subprocess
import inspect
class PluginControl(pluginClass.Base):
def setOptions(self):
''' Create additional argument parser options
specific to the plugin '''
dic = []
### OPTION: set
opt = {}
opt['name'] = '--set'
opt['metavar'] = 'HOST'
opt['action'] = 'store'
opt['nargs'] = '+'
opt['help'] = 'Create communities plugins.'
dic.append(opt)
### OPTION: add
opt = {}
opt['name'] = '--add'
opt['metavar'] = 'HOST'
opt['action'] = 'store'
opt['nargs'] = '+'
opt['help'] = 'Add a new community.'
dic.append(opt)
### OPTION: remove
opt = {}
opt['name'] = '--remove'
opt['metavar'] = 'HOST'
opt['action'] = 'store'
opt['nargs'] = '+'
opt['help'] = 'Remove a community.'
dic.append(opt)
return dic
def info(self):
title = self.PluginName+" configuration"
msg = "Create or remove hosts communities.\n"
msg += "--set COM1 COM2 ... \n"
msg += " : List of communities to be created.\n"
msg += " eg: --set mgnt private \n"
msg += " \n"
msg += "--add COM_NAME : Add community or list of communities to the existing ones.\n"
msg += "--remove COM_NAME : Remove community(ies) plugin(s).\n"
msg += "\n"
msg += " NB: After creating a community plugin, configuration is set via neighbors.conf.communities. plugins.\n"
self.output.help(title, msg)
def inputValidation(self, data):
try:
if not data:
return data
input_list = list(set(data))
input_list = sorted(input_list)
for hostname in input_list:
if len(hostname) > 255:
return False
pattern=re.compile(regexp.HOSTNAME, re.VERBOSE | re.IGNORECASE)
if pattern.match(hostname) is None:
raise ValueError('Invalid hostname %s' %hostname)
except:
self.printError("Validation failure for "+self.PluginName+" : ")
return None
return input_list
def get_active(self):
''' Get configured communities '''
file = inspect.getfile(self.__class__)
base = re.sub('/plugins.*', '', file)
path = os.path.join(base, "plugins/neighbors/conf/communities")
communities = []
if os.path.exists(path):
communities = os.listdir(path)
communities = sorted(communities)
return communities
def get(self):
try:
''' Print list of communities '''
# Get the configured communities
communities = self.get_active()
self.output.title("Configured communities:")
self.output.info(' '.join(communities))
except:
self.printError("Getting "+self.PluginName+" : ")
return 1
return 0
def set(self, data):
''' Create the given communities and remove any existing ones not in the list '''
try:
#neighbors.neighbors.communities -set COMM1 COMM2 COMM3
# validate and transform input to list
in_data = self.validate_input_data(data)
# find the existing aliases on the system
com_config = self.get_active()
for acom in com_config:
if acom not in in_data:
self.removePlugin("neighbors.conf.communities."+acom+".members")
self.removePlugin("neighbors.conf.communities."+acom+".ssh")
for community in in_data:
self.createPlugin("neighbors/community_members.py", "neighbors.conf.communities." + community + ".members")
self.createPlugin("neighbors/community_ssh.py", "neighbors.conf.communities." + community + ".ssh")
self.output.info("Created plugins for communities: " + ' '.join(in_data))
except:
self.printError("Set failure for "+self.PluginName+" : ")
return 1
return 0
def check(self):
''' Overwrite the check function. Needed for view diff. Checks against the OnBoot setup;
newlines are removed from the result of the get function '''
data_list = self.get_active()
view_output = ' '.join(data_list)
self._check(info_get=view_output)
def add(self, data=''):
''' Add new community plugins to already existing config
Takes the current config, changes it and calls the set function
Return 0 for OK and 1 for error
'''
try:
input_data = self.validate_input_data(data)
toadd = input_data
plugin = self.PluginFqn
curr_config = self.get_current_config(self.PluginFqn)
#if the plugin is called for the first time nothing is saved yet, so load the current config
is_db_config = True
if not curr_config:
is_db_config = False
curr_config = self.get_active()
for item in list(toadd):
# if item is already in the current config remove it from list for adding
if item in curr_config:
toadd = list(filter(lambda curr: curr!= item, toadd))
# if the list of elements to be added is empty and a db config already exists, exit
# if the db is empty, fall through so that set() saves the current config
if not toadd and is_db_config:
self.output.info("Value(s) " + ' '.join(input_data) + " for plugin " +self.PluginName + " already configured")
return 0;
# add the new elements to the current config
curr_config.extend(toadd)
# set new values
res = self.set(curr_config)
# if set was ok
if res == 0:
self.output.info("Added plugins " + ' '.join(toadd))
except:
self.printError("Adding "+self.PluginName+" " + ' '.join(toadd)+": ")
return 1
return 0
def remove(self, data=''):
''' Delete community plugins
Return 0 for OK and 1 for error
'''
plugin = self.PluginFqn
try:
# Check input and transform it to a list
# remove duplicate values
input_data = self.validate_input_data(data)
todel = input_data
#Get the configured servers
current_config = self.get_current_config(self.PluginFqn)
if not current_config:
self.output.info("No " + self.PluginName + "(s) configured to be deleted")
return 0
bEntryRemoved=False
for entry in todel:
# if the entry is in current config remove it
if entry in current_config:
# iterate through the current config and filter out the entry given on the command line
current_config = list(filter(lambda curr: curr!= entry, current_config))
bEntryRemoved=True
# if no entries were removed show a message and exit
if not bEntryRemoved:
self.output.info("Value(s) " + ' '.join(input_data) + " for plugin " + self.PluginName + " is(are) not configured.")
return 0
res = self.set(current_config)
if res == 0:
self.output.info("Deleted plugins " + ' '.join(todel))
except:
self.printError("Removing "+self.PluginName+" " + ' '.join(todel)+": ")
return 1
return 0
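# Added usage sketch (illustrative; the exact onectl command-line syntax is an
# assumption, not taken from this file -- the plugin fqn comes from the comment
# in set() above):
#
#   onectl neighbors.neighbors.communities --set mgnt private
#   onectl neighbors.neighbors.communities --add backup
#   onectl neighbors.neighbors.communities --remove private
#
# Each community created this way gets a pair of sub-plugins,
# neighbors.conf.communities.<NAME>.members and neighbors.conf.communities.<NAME>.ssh.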
|
gpl-2.0
| 183,517,812,310,186,270 | 28.748858 | 121 | 0.650038 | false |
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/virtual_network_gateway_connection_list_entity_py3.py
|
1
|
7889
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkGatewayConnectionListEntity(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param authorization_key: The authorizationKey.
:type authorization_key: str
:param virtual_network_gateway1: Required. The reference to virtual
network gateway resource.
:type virtual_network_gateway1:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkConnectionGatewayReference
:param virtual_network_gateway2: The reference to virtual network gateway
resource.
:type virtual_network_gateway2:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkConnectionGatewayReference
:param local_network_gateway2: The reference to local network gateway
resource.
:type local_network_gateway2:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkConnectionGatewayReference
:param connection_type: Required. Gateway connection type. Possible values
are: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values
include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
:type connection_type: str or
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionType
:param routing_weight: The routing weight.
:type routing_weight: int
:param shared_key: The IPSec shared key.
:type shared_key: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values are 'Unknown', 'Connecting', 'Connected' and
'NotConnected'. Possible values include: 'Unknown', 'Connecting',
'Connected', 'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionStatus
:ivar tunnel_connection_status: Collection of all tunnels' connection
health status.
:vartype tunnel_connection_status:
list[~azure.mgmt.network.v2017_09_01.models.TunnelConnectionHealth]
:ivar egress_bytes_transferred: The egress bytes transferred in this
connection.
:vartype egress_bytes_transferred: long
:ivar ingress_bytes_transferred: The ingress bytes transferred in this
connection.
:vartype ingress_bytes_transferred: long
:param peer: The reference to peerings resource.
:type peer: ~azure.mgmt.network.v2017_09_01.models.SubResource
:param enable_bgp: EnableBgp flag
:type enable_bgp: bool
:param use_policy_based_traffic_selectors: Enable policy-based traffic
selectors.
:type use_policy_based_traffic_selectors: bool
:param ipsec_policies: The IPSec Policies to be considered by this
connection.
:type ipsec_policies:
list[~azure.mgmt.network.v2017_09_01.models.IpsecPolicy]
:param resource_guid: The resource GUID property of the
VirtualNetworkGatewayConnection resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_gateway1': {'required': True},
'connection_type': {'required': True},
'connection_status': {'readonly': True},
'tunnel_connection_status': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkConnectionGatewayReference'},
'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
'peer': {'key': 'properties.peer', 'type': 'SubResource'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, virtual_network_gateway1, connection_type, id: str=None, location: str=None, tags=None, authorization_key: str=None, virtual_network_gateway2=None, local_network_gateway2=None, routing_weight: int=None, shared_key: str=None, peer=None, enable_bgp: bool=None, use_policy_based_traffic_selectors: bool=None, ipsec_policies=None, resource_guid: str=None, etag: str=None, **kwargs) -> None:
super(VirtualNetworkGatewayConnectionListEntity, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.authorization_key = authorization_key
self.virtual_network_gateway1 = virtual_network_gateway1
self.virtual_network_gateway2 = virtual_network_gateway2
self.local_network_gateway2 = local_network_gateway2
self.connection_type = connection_type
self.routing_weight = routing_weight
self.shared_key = shared_key
self.connection_status = None
self.tunnel_connection_status = None
self.egress_bytes_transferred = None
self.ingress_bytes_transferred = None
self.peer = peer
self.enable_bgp = enable_bgp
self.use_policy_based_traffic_selectors = use_policy_based_traffic_selectors
self.ipsec_policies = ipsec_policies
self.resource_guid = resource_guid
self.provisioning_state = None
self.etag = etag
|
mit
| -7,734,023,908,964,083,000 | 50.901316 | 412 | 0.675624 | false |
lcc755/WorkTerm1
|
3IfStatements/1Sounds/Code/playNote.py
|
1
|
1297
|
from nanpy import (ArduinoApi, SerialManager, Tone)
from time import sleep
#Connect to Arduino. Automatically finds serial port.
connection = SerialManager()
speaker = 14 #A0 on Arduino
tone = Tone(speaker, connection) #Setting "tone" to mean using the "speaker" pin on the "connection". See tone library in Nanpy
a = ArduinoApi(connection)
buttonC = 5
buttonD = 6
buttonE = 7
#Definitions for notes are in tone library in Nanpy, can easily change names to simply "c", "d", etc. if needed
c = Tone.NOTE_C6
d = Tone.NOTE_D6
e = Tone.NOTE_E6
a.pinMode(speaker, a.OUTPUT) #Setup speaker pin
a.pinMode(buttonC, a.INPUT) #Setup buttonC
a.pinMode(buttonD, a.INPUT) #Setup buttonD
a.pinMode(buttonE, a.INPUT) #Setup buttonE
while True:
bc = a.digitalRead(buttonC) #Reading at buttonC
bd = a.digitalRead(buttonD) #Reading at buttonD
be = a.digitalRead(buttonE) #Reading at buttonE
#If buttonC is pressed:
if (bc == 1):
tone.play(c, 1) #Play note c
#If buttonD is pressed:
elif (bd == 1):
tone.play(d, 1) #Play note d
#If buttonE is pressed:
elif (be == 1):
tone.play(e, 1) #Play note e
tone.stop() #Stop note
|
apache-2.0
| 5,221,883,863,522,235,000 | 29.880952 | 135 | 0.624518 | false |
Instanssi/Instanssi.org
|
Instanssi/store/utils/paytrail.py
|
1
|
1289
|
# -*- coding: utf-8 -*-
import hashlib
import requests
class PaytrailException(Exception):
pass
def validate_failure(order_no, timestamp, authcode, secret):
m = hashlib.md5()
m.update('{}|{}|{}'.format(order_no, timestamp, secret).encode('UTF-8'))
return authcode == m.hexdigest().upper()
def validate_success(order_no, timestamp, paid, method, authcode, secret):
m = hashlib.md5()
m.update('{}|{}|{}|{}|{}'.format(order_no, timestamp, paid, method, secret).encode('UTF-8'))
return authcode == m.hexdigest().upper()
def request(rid, secret, data):
req = requests.post(
'https://payment.paytrail.com/api-payment/create',
auth=(rid, secret),
json=data,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Verkkomaksut-Api-Version': '1',
})
# Send request, receive response
message = req.json()
# Paytrail responded with error
if req.status_code == 401:
raise PaytrailException(message['errorMessage'], message['errorCode'])
# No response from paytrail (other error)
if req.status_code != 201:
raise PaytrailException('HTTP request failure.', req.status_code)
# Return parsed JSON
return message
|
mit
| 5,314,636,159,189,205,000 | 27.021739 | 96 | 0.626067 | false |
miracle2k/stgit
|
stgit/stack.py
|
1
|
40808
|
"""Basic quilt-like functionality
"""
__copyright__ = """
Copyright (C) 2005, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os, re
from email.Utils import formatdate
from stgit.exception import *
from stgit.utils import *
from stgit.out import *
from stgit.run import *
from stgit import git, basedir, templates
from stgit.config import config
from shutil import copyfile
from stgit.lib import git as libgit, stackupgrade
# stack exception class
class StackException(StgException):
pass
class FilterUntil:
def __init__(self):
self.should_print = True
def __call__(self, x, until_test, prefix):
if until_test(x):
self.should_print = False
if self.should_print:
return x[0:len(prefix)] != prefix
return False
#
# Functions
#
__comment_prefix = 'STG:'
__patch_prefix = 'STG_PATCH:'
def __clean_comments(f):
"""Removes lines marked for status in a commit file
"""
f.seek(0)
# remove status-prefixed lines
lines = f.readlines()
patch_filter = FilterUntil()
until_test = lambda t: t == (__patch_prefix + '\n')
lines = [l for l in lines if patch_filter(l, until_test, __comment_prefix)]
# remove empty lines at the end
while len(lines) != 0 and lines[-1] == '\n':
del lines[-1]
f.seek(0); f.truncate()
f.writelines(lines)
# TODO: move this out of the stgit.stack module, it is really for
# higher level commands to handle the user interaction
def edit_file(series, line, comment, show_patch = True):
fname = '.stgitmsg.txt'
tmpl = templates.get_template('patchdescr.tmpl')
f = file(fname, 'w+')
if line:
print >> f, line
elif tmpl:
print >> f, tmpl,
else:
print >> f
print >> f, __comment_prefix, comment
print >> f, __comment_prefix, \
'Lines prefixed with "%s" will be automatically removed.' \
% __comment_prefix
print >> f, __comment_prefix, \
'Trailing empty lines will be automatically removed.'
if show_patch:
print >> f, __patch_prefix
# series.get_patch(series.get_current()).get_top()
diff_str = git.diff(rev1 = series.get_patch(series.get_current()).get_bottom())
f.write(diff_str)
#Vim modeline must be near the end.
print >> f, __comment_prefix, 'vi: set textwidth=75 filetype=diff nobackup:'
f.close()
call_editor(fname)
f = file(fname, 'r+')
__clean_comments(f)
f.seek(0)
result = f.read()
f.close()
os.remove(fname)
return result
#
# Classes
#
class StgitObject:
"""An object with stgit-like properties stored as files in a directory
"""
def _set_dir(self, dir):
self.__dir = dir
def _dir(self):
return self.__dir
def create_empty_field(self, name):
create_empty_file(os.path.join(self.__dir, name))
def _get_field(self, name, multiline = False):
id_file = os.path.join(self.__dir, name)
if os.path.isfile(id_file):
line = read_string(id_file, multiline)
if line == '':
return None
else:
return line
else:
return None
def _set_field(self, name, value, multiline = False):
fname = os.path.join(self.__dir, name)
if value and value != '':
write_string(fname, value, multiline)
elif os.path.isfile(fname):
os.remove(fname)
class Patch(StgitObject):
"""Basic patch implementation
"""
def __init_refs(self):
self.__top_ref = self.__refs_base + '/' + self.__name
self.__log_ref = self.__top_ref + '.log'
def __init__(self, name, series_dir, refs_base):
self.__series_dir = series_dir
self.__name = name
self._set_dir(os.path.join(self.__series_dir, self.__name))
self.__refs_base = refs_base
self.__init_refs()
def create(self):
os.mkdir(self._dir())
def delete(self, keep_log = False):
if os.path.isdir(self._dir()):
for f in os.listdir(self._dir()):
os.remove(os.path.join(self._dir(), f))
os.rmdir(self._dir())
else:
out.warn('Patch directory "%s" does not exist' % self._dir())
try:
# the reference might not exist if the repository was corrupted
git.delete_ref(self.__top_ref)
except git.GitException, e:
out.warn(str(e))
if not keep_log and git.ref_exists(self.__log_ref):
git.delete_ref(self.__log_ref)
def get_name(self):
return self.__name
def rename(self, newname):
olddir = self._dir()
old_top_ref = self.__top_ref
old_log_ref = self.__log_ref
self.__name = newname
self._set_dir(os.path.join(self.__series_dir, self.__name))
self.__init_refs()
git.rename_ref(old_top_ref, self.__top_ref)
if git.ref_exists(old_log_ref):
git.rename_ref(old_log_ref, self.__log_ref)
os.rename(olddir, self._dir())
def __update_top_ref(self, ref):
git.set_ref(self.__top_ref, ref)
self._set_field('top', ref)
self._set_field('bottom', git.get_commit(ref).get_parent())
def __update_log_ref(self, ref):
git.set_ref(self.__log_ref, ref)
def get_old_bottom(self):
return git.get_commit(self.get_old_top()).get_parent()
def get_bottom(self):
return git.get_commit(self.get_top()).get_parent()
def get_old_top(self):
return self._get_field('top.old')
def get_top(self):
return git.rev_parse(self.__top_ref)
def set_top(self, value, backup = False):
if backup:
curr_top = self.get_top()
self._set_field('top.old', curr_top)
self._set_field('bottom.old', git.get_commit(curr_top).get_parent())
self.__update_top_ref(value)
def restore_old_boundaries(self):
top = self._get_field('top.old')
if top:
self.__update_top_ref(top)
return True
else:
return False
def get_description(self):
return self._get_field('description', True)
def set_description(self, line):
self._set_field('description', line, True)
def get_authname(self):
return self._get_field('authname')
def set_authname(self, name):
self._set_field('authname', name or git.author().name)
def get_authemail(self):
return self._get_field('authemail')
def set_authemail(self, email):
self._set_field('authemail', email or git.author().email)
def get_authdate(self):
date = self._get_field('authdate')
if not date:
return date
if re.match('[0-9]+\s+[+-][0-9]+', date):
# Unix time (seconds) + time zone
secs_tz = date.split()
date = formatdate(int(secs_tz[0]))[:-5] + secs_tz[1]
return date
def set_authdate(self, date):
self._set_field('authdate', date or git.author().date)
def get_commname(self):
return self._get_field('commname')
def set_commname(self, name):
self._set_field('commname', name or git.committer().name)
def get_commemail(self):
return self._get_field('commemail')
def set_commemail(self, email):
self._set_field('commemail', email or git.committer().email)
def get_log(self):
return self._get_field('log')
def set_log(self, value, backup = False):
self._set_field('log', value)
self.__update_log_ref(value)
class PatchSet(StgitObject):
def __init__(self, name = None):
try:
if name:
self.set_name (name)
else:
self.set_name (git.get_head_file())
self.__base_dir = basedir.get()
except git.GitException, ex:
raise StackException, 'GIT tree not initialised: %s' % ex
self._set_dir(os.path.join(self.__base_dir, 'patches', self.get_name()))
def get_name(self):
return self.__name
def set_name(self, name):
self.__name = name
def _basedir(self):
return self.__base_dir
def get_head(self):
"""Return the head of the branch
"""
crt = self.get_current_patch()
if crt:
return crt.get_top()
else:
return self.get_base()
def get_protected(self):
return os.path.isfile(os.path.join(self._dir(), 'protected'))
def protect(self):
protect_file = os.path.join(self._dir(), 'protected')
if not os.path.isfile(protect_file):
create_empty_file(protect_file)
def unprotect(self):
protect_file = os.path.join(self._dir(), 'protected')
if os.path.isfile(protect_file):
os.remove(protect_file)
def __branch_descr(self):
return 'branch.%s.description' % self.get_name()
def get_description(self):
return config.get(self.__branch_descr()) or ''
def set_description(self, line):
if line:
config.set(self.__branch_descr(), line)
else:
config.unset(self.__branch_descr())
def head_top_equal(self):
"""Return true if the head and the top are the same
"""
crt = self.get_current_patch()
if not crt:
# we don't care, no patches applied
return True
return git.get_head() == crt.get_top()
def is_initialised(self):
"""Checks if series is already initialised
"""
return config.get(stackupgrade.format_version_key(self.get_name())
) != None
def shortlog(patches):
log = ''.join(Run('git', 'log', '--pretty=short',
p.get_top(), '^%s' % p.get_bottom()).raw_output()
for p in patches)
return Run('git', 'shortlog').raw_input(log).raw_output()
class Series(PatchSet):
"""Class including the operations on series
"""
def __init__(self, name = None):
"""Takes a series name as the parameter.
"""
PatchSet.__init__(self, name)
# Update the branch to the latest format version if it is
# initialized, but don't touch it if it isn't.
stackupgrade.update_to_current_format_version(
libgit.Repository.default(), self.get_name())
self.__refs_base = 'refs/patches/%s' % self.get_name()
self.__applied_file = os.path.join(self._dir(), 'applied')
self.__unapplied_file = os.path.join(self._dir(), 'unapplied')
self.__hidden_file = os.path.join(self._dir(), 'hidden')
# where this series keeps its patches
self.__patch_dir = os.path.join(self._dir(), 'patches')
# trash directory
self.__trash_dir = os.path.join(self._dir(), 'trash')
def __patch_name_valid(self, name):
"""Raise an exception if the patch name is not valid.
"""
if not name or re.search('[^\w.-]', name):
raise StackException, 'Invalid patch name: "%s"' % name
def get_patch(self, name):
"""Return a Patch object for the given name
"""
return Patch(name, self.__patch_dir, self.__refs_base)
def get_current_patch(self):
"""Return a Patch object representing the topmost patch, or
None if there is no such patch."""
crt = self.get_current()
if not crt:
return None
return self.get_patch(crt)
def get_current(self):
"""Return the name of the topmost patch, or None if there is
no such patch."""
try:
applied = self.get_applied()
except StackException:
# No "applied" file: branch is not initialized.
return None
try:
return applied[-1]
except IndexError:
# No patches applied.
return None
def get_applied(self):
if not os.path.isfile(self.__applied_file):
raise StackException, 'Branch "%s" not initialised' % self.get_name()
return read_strings(self.__applied_file)
def set_applied(self, applied):
write_strings(self.__applied_file, applied)
def get_unapplied(self):
if not os.path.isfile(self.__unapplied_file):
raise StackException, 'Branch "%s" not initialised' % self.get_name()
return read_strings(self.__unapplied_file)
def set_unapplied(self, unapplied):
write_strings(self.__unapplied_file, unapplied)
def get_hidden(self):
if not os.path.isfile(self.__hidden_file):
return []
return read_strings(self.__hidden_file)
def get_base(self):
# Return the parent of the bottommost patch, if there is one.
if os.path.isfile(self.__applied_file):
bottommost = file(self.__applied_file).readline().strip()
if bottommost:
return self.get_patch(bottommost).get_bottom()
# No bottommost patch, so just return HEAD
return git.get_head()
def get_parent_remote(self):
value = config.get('branch.%s.remote' % self.get_name())
if value:
return value
elif 'origin' in git.remotes_list():
out.note(('No parent remote declared for stack "%s",'
' defaulting to "origin".' % self.get_name()),
('Consider setting "branch.%s.remote" and'
' "branch.%s.merge" with "git config".'
% (self.get_name(), self.get_name())))
return 'origin'
else:
raise StackException, 'Cannot find a parent remote for "%s"' % self.get_name()
def __set_parent_remote(self, remote):
value = config.set('branch.%s.remote' % self.get_name(), remote)
def get_parent_branch(self):
value = config.get('branch.%s.stgit.parentbranch' % self.get_name())
if value:
return value
elif git.rev_parse('heads/origin'):
out.note(('No parent branch declared for stack "%s",'
' defaulting to "heads/origin".' % self.get_name()),
('Consider setting "branch.%s.stgit.parentbranch"'
' with "git config".' % self.get_name()))
return 'heads/origin'
else:
raise StackException, 'Cannot find a parent branch for "%s"' % self.get_name()
def __set_parent_branch(self, name):
if config.get('branch.%s.remote' % self.get_name()):
# Never set merge if remote is not set to avoid
# possibly-erroneous lookups into 'origin'
config.set('branch.%s.merge' % self.get_name(), name)
config.set('branch.%s.stgit.parentbranch' % self.get_name(), name)
def set_parent(self, remote, localbranch):
if localbranch:
if remote:
self.__set_parent_remote(remote)
self.__set_parent_branch(localbranch)
# We'll enforce this later
# else:
# raise StackException, 'Parent branch (%s) should be specified for %s' % localbranch, self.get_name()
def __patch_is_current(self, patch):
return patch.get_name() == self.get_current()
def patch_applied(self, name):
"""Return true if the patch exists in the applied list
"""
return name in self.get_applied()
def patch_unapplied(self, name):
"""Return true if the patch exists in the unapplied list
"""
return name in self.get_unapplied()
def patch_hidden(self, name):
"""Return true if the patch is hidden.
"""
return name in self.get_hidden()
def patch_exists(self, name):
"""Return true if there is a patch with the given name, false
otherwise."""
return self.patch_applied(name) or self.patch_unapplied(name) \
or self.patch_hidden(name)
def init(self, create_at=False, parent_remote=None, parent_branch=None):
"""Initialises the stgit series
"""
if self.is_initialised():
raise StackException, '%s already initialized' % self.get_name()
for d in [self._dir()]:
if os.path.exists(d):
raise StackException, '%s already exists' % d
if (create_at!=False):
git.create_branch(self.get_name(), create_at)
os.makedirs(self.__patch_dir)
self.set_parent(parent_remote, parent_branch)
self.create_empty_field('applied')
self.create_empty_field('unapplied')
config.set(stackupgrade.format_version_key(self.get_name()),
str(stackupgrade.FORMAT_VERSION))
def rename(self, to_name):
"""Renames a series
"""
to_stack = Series(to_name)
if to_stack.is_initialised():
raise StackException, '"%s" already exists' % to_stack.get_name()
patches = self.get_applied() + self.get_unapplied()
git.rename_branch(self.get_name(), to_name)
for patch in patches:
git.rename_ref('refs/patches/%s/%s' % (self.get_name(), patch),
'refs/patches/%s/%s' % (to_name, patch))
git.rename_ref('refs/patches/%s/%s.log' % (self.get_name(), patch),
'refs/patches/%s/%s.log' % (to_name, patch))
if os.path.isdir(self._dir()):
rename(os.path.join(self._basedir(), 'patches'),
self.get_name(), to_stack.get_name())
# Rename the config section
for k in ['branch.%s', 'branch.%s.stgit']:
config.rename_section(k % self.get_name(), k % to_name)
self.__init__(to_name)
def clone(self, target_series):
"""Clones a series
"""
try:
# allow cloning of branches not under StGIT control
base = self.get_base()
except:
base = git.get_head()
Series(target_series).init(create_at = base)
new_series = Series(target_series)
# generate an artificial description file
new_series.set_description('clone of "%s"' % self.get_name())
# clone self's entire series as unapplied patches
try:
# allow cloning of branches not under StGIT control
applied = self.get_applied()
unapplied = self.get_unapplied()
patches = applied + unapplied
patches.reverse()
except:
patches = applied = unapplied = []
for p in patches:
patch = self.get_patch(p)
newpatch = new_series.new_patch(p, message = patch.get_description(),
can_edit = False, unapplied = True,
bottom = patch.get_bottom(),
top = patch.get_top(),
author_name = patch.get_authname(),
author_email = patch.get_authemail(),
author_date = patch.get_authdate())
if patch.get_log():
out.info('Setting log to %s' % patch.get_log())
newpatch.set_log(patch.get_log())
else:
out.info('No log for %s' % p)
# fast forward the cloned series to self's top
new_series.forward_patches(applied)
        # Clone parent information
value = config.get('branch.%s.remote' % self.get_name())
if value:
config.set('branch.%s.remote' % target_series, value)
value = config.get('branch.%s.merge' % self.get_name())
if value:
config.set('branch.%s.merge' % target_series, value)
value = config.get('branch.%s.stgit.parentbranch' % self.get_name())
if value:
config.set('branch.%s.stgit.parentbranch' % target_series, value)
def delete(self, force = False):
"""Deletes an stgit series
"""
if self.is_initialised():
patches = self.get_unapplied() + self.get_applied() + \
self.get_hidden();
if not force and patches:
raise StackException, \
'Cannot delete: the series still contains patches'
for p in patches:
self.get_patch(p).delete()
# remove the trash directory if any
if os.path.exists(self.__trash_dir):
for fname in os.listdir(self.__trash_dir):
os.remove(os.path.join(self.__trash_dir, fname))
os.rmdir(self.__trash_dir)
# FIXME: find a way to get rid of those manual removals
# (move functionality to StgitObject ?)
if os.path.exists(self.__applied_file):
os.remove(self.__applied_file)
if os.path.exists(self.__unapplied_file):
os.remove(self.__unapplied_file)
if os.path.exists(self.__hidden_file):
os.remove(self.__hidden_file)
if os.path.exists(self._dir()+'/orig-base'):
os.remove(self._dir()+'/orig-base')
if not os.listdir(self.__patch_dir):
os.rmdir(self.__patch_dir)
else:
out.warn('Patch directory %s is not empty' % self.__patch_dir)
try:
os.removedirs(self._dir())
except OSError:
raise StackException('Series directory %s is not empty'
% self._dir())
try:
git.delete_branch(self.get_name())
except git.GitException:
out.warn('Could not delete branch "%s"' % self.get_name())
config.remove_section('branch.%s' % self.get_name())
config.remove_section('branch.%s.stgit' % self.get_name())
def refresh_patch(self, files = None, message = None, edit = False,
empty = False,
show_patch = False,
cache_update = True,
author_name = None, author_email = None,
author_date = None,
committer_name = None, committer_email = None,
backup = True, sign_str = None, log = 'refresh',
notes = None, bottom = None):
"""Generates a new commit for the topmost patch
"""
patch = self.get_current_patch()
if not patch:
raise StackException, 'No patches applied'
descr = patch.get_description()
if not (message or descr):
edit = True
descr = ''
elif message:
descr = message
# TODO: move this out of the stgit.stack module, it is really
# for higher level commands to handle the user interaction
if not message and edit:
descr = edit_file(self, descr.rstrip(), \
'Please edit the description for patch "%s" ' \
'above.' % patch.get_name(), show_patch)
if not author_name:
author_name = patch.get_authname()
if not author_email:
author_email = patch.get_authemail()
if not committer_name:
committer_name = patch.get_commname()
if not committer_email:
committer_email = patch.get_commemail()
descr = add_sign_line(descr, sign_str, committer_name, committer_email)
if not bottom:
bottom = patch.get_bottom()
if empty:
tree_id = git.get_commit(bottom).get_tree()
else:
tree_id = None
commit_id = git.commit(files = files,
message = descr, parents = [bottom],
cache_update = cache_update,
tree_id = tree_id,
set_head = True,
allowempty = True,
author_name = author_name,
author_email = author_email,
author_date = author_date,
committer_name = committer_name,
committer_email = committer_email)
patch.set_top(commit_id, backup = backup)
patch.set_description(descr)
patch.set_authname(author_name)
patch.set_authemail(author_email)
patch.set_authdate(author_date)
patch.set_commname(committer_name)
patch.set_commemail(committer_email)
if log:
self.log_patch(patch, log, notes)
return commit_id
def new_patch(self, name, message = None, can_edit = True,
unapplied = False, show_patch = False,
top = None, bottom = None, commit = True,
author_name = None, author_email = None, author_date = None,
committer_name = None, committer_email = None,
before_existing = False, sign_str = None):
"""Creates a new patch, either pointing to an existing commit object,
or by creating a new commit object.
"""
assert commit or (top and bottom)
assert not before_existing or (top and bottom)
assert not (commit and before_existing)
assert (top and bottom) or (not top and not bottom)
assert commit or (not top or (bottom == git.get_commit(top).get_parent()))
if name != None:
self.__patch_name_valid(name)
if self.patch_exists(name):
raise StackException, 'Patch "%s" already exists' % name
# TODO: move this out of the stgit.stack module, it is really
# for higher level commands to handle the user interaction
def sign(msg):
return add_sign_line(msg, sign_str,
committer_name or git.committer().name,
committer_email or git.committer().email)
if not message and can_edit:
descr = edit_file(
self, sign(''),
'Please enter the description for the patch above.',
show_patch)
else:
descr = sign(message)
head = git.get_head()
if name == None:
name = make_patch_name(descr, self.patch_exists)
patch = self.get_patch(name)
patch.create()
patch.set_description(descr)
patch.set_authname(author_name)
patch.set_authemail(author_email)
patch.set_authdate(author_date)
patch.set_commname(committer_name)
patch.set_commemail(committer_email)
if before_existing:
insert_string(self.__applied_file, patch.get_name())
elif unapplied:
patches = [patch.get_name()] + self.get_unapplied()
write_strings(self.__unapplied_file, patches)
set_head = False
else:
append_string(self.__applied_file, patch.get_name())
set_head = True
if commit:
if top:
top_commit = git.get_commit(top)
else:
bottom = head
top_commit = git.get_commit(head)
# create a commit for the patch (may be empty if top == bottom);
# only commit on top of the current branch
assert(unapplied or bottom == head)
commit_id = git.commit(message = descr, parents = [bottom],
cache_update = False,
tree_id = top_commit.get_tree(),
allowempty = True, set_head = set_head,
author_name = author_name,
author_email = author_email,
author_date = author_date,
committer_name = committer_name,
committer_email = committer_email)
# set the patch top to the new commit
patch.set_top(commit_id)
else:
patch.set_top(top)
self.log_patch(patch, 'new')
return patch
def delete_patch(self, name, keep_log = False):
"""Deletes a patch
"""
self.__patch_name_valid(name)
patch = self.get_patch(name)
if self.__patch_is_current(patch):
self.pop_patch(name)
elif self.patch_applied(name):
raise StackException, 'Cannot remove an applied patch, "%s", ' \
'which is not current' % name
elif not name in self.get_unapplied():
raise StackException, 'Unknown patch "%s"' % name
# save the commit id to a trash file
write_string(os.path.join(self.__trash_dir, name), patch.get_top())
patch.delete(keep_log = keep_log)
unapplied = self.get_unapplied()
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
def forward_patches(self, names):
"""Try to fast-forward an array of patches.
On return, patches in names[0:returned_value] have been pushed on the
stack. Apply the rest with push_patch
"""
unapplied = self.get_unapplied()
forwarded = 0
top = git.get_head()
for name in names:
assert(name in unapplied)
patch = self.get_patch(name)
head = top
bottom = patch.get_bottom()
top = patch.get_top()
# top != bottom always since we have a commit for each patch
if head == bottom:
# reset the backup information. No logging since the
# patch hasn't changed
patch.set_top(top, backup = True)
else:
head_tree = git.get_commit(head).get_tree()
bottom_tree = git.get_commit(bottom).get_tree()
if head_tree == bottom_tree:
# We must just reparent this patch and create a new commit
# for it
descr = patch.get_description()
author_name = patch.get_authname()
author_email = patch.get_authemail()
author_date = patch.get_authdate()
committer_name = patch.get_commname()
committer_email = patch.get_commemail()
top_tree = git.get_commit(top).get_tree()
top = git.commit(message = descr, parents = [head],
cache_update = False,
tree_id = top_tree,
allowempty = True,
author_name = author_name,
author_email = author_email,
author_date = author_date,
committer_name = committer_name,
committer_email = committer_email)
patch.set_top(top, backup = True)
self.log_patch(patch, 'push(f)')
else:
top = head
# stop the fast-forwarding, must do a real merge
break
forwarded+=1
unapplied.remove(name)
if forwarded == 0:
return 0
git.switch(top)
append_strings(self.__applied_file, names[0:forwarded])
write_strings(self.__unapplied_file, unapplied)
return forwarded
def merged_patches(self, names):
"""Test which patches were merged upstream by reverse-applying
them in reverse order. The function returns the list of
patches detected to have been applied. The state of the tree
is restored to the original one
"""
patches = [self.get_patch(name) for name in names]
patches.reverse()
merged = []
for p in patches:
if git.apply_diff(p.get_top(), p.get_bottom()):
merged.append(p.get_name())
merged.reverse()
git.reset()
return merged
def push_empty_patch(self, name):
"""Pushes an empty patch on the stack
"""
unapplied = self.get_unapplied()
assert(name in unapplied)
# patch = self.get_patch(name)
head = git.get_head()
append_string(self.__applied_file, name)
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
self.refresh_patch(bottom = head, cache_update = False, log = 'push(m)')
def push_patch(self, name):
"""Pushes a patch on the stack
"""
unapplied = self.get_unapplied()
assert(name in unapplied)
patch = self.get_patch(name)
head = git.get_head()
bottom = patch.get_bottom()
top = patch.get_top()
# top != bottom always since we have a commit for each patch
if head == bottom:
# A fast-forward push. Just reset the backup
# information. No need for logging
patch.set_top(top, backup = True)
git.switch(top)
append_string(self.__applied_file, name)
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
return False
        # Need to create a new commit and merge in the old patch
ex = None
modified = False
# Try the fast applying first. If this fails, fall back to the
# three-way merge
if not git.apply_diff(bottom, top):
# if git.apply_diff() fails, the patch requires a diff3
# merge and can be reported as modified
modified = True
# merge can fail but the patch needs to be pushed
try:
git.merge_recursive(bottom, head, top)
except git.GitException, ex:
out.error('The merge failed during "push".',
'Revert the operation with "stg undo".')
append_string(self.__applied_file, name)
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
if not ex:
# if the merge was OK and no conflicts, just refresh the patch
# The GIT cache was already updated by the merge operation
if modified:
log = 'push(m)'
else:
log = 'push'
self.refresh_patch(bottom = head, cache_update = False, log = log)
else:
# we make the patch empty, with the merged state in the
# working tree.
self.refresh_patch(bottom = head, cache_update = False,
empty = True, log = 'push(c)')
raise StackException, str(ex)
return modified
def pop_patch(self, name, keep = False):
"""Pops the top patch from the stack
"""
applied = self.get_applied()
applied.reverse()
assert(name in applied)
patch = self.get_patch(name)
if git.get_head_file() == self.get_name():
if keep and not git.apply_diff(git.get_head(), patch.get_bottom(),
check_index = False):
raise StackException(
'Failed to pop patches while preserving the local changes')
git.switch(patch.get_bottom(), keep)
else:
git.set_branch(self.get_name(), patch.get_bottom())
# save the new applied list
idx = applied.index(name) + 1
popped = applied[:idx]
popped.reverse()
unapplied = popped + self.get_unapplied()
write_strings(self.__unapplied_file, unapplied)
del applied[:idx]
applied.reverse()
write_strings(self.__applied_file, applied)
def empty_patch(self, name):
"""Returns True if the patch is empty
"""
self.__patch_name_valid(name)
patch = self.get_patch(name)
bottom = patch.get_bottom()
top = patch.get_top()
if bottom == top:
return True
elif git.get_commit(top).get_tree() \
== git.get_commit(bottom).get_tree():
return True
return False
def rename_patch(self, oldname, newname):
self.__patch_name_valid(newname)
applied = self.get_applied()
unapplied = self.get_unapplied()
if oldname == newname:
raise StackException, '"To" name and "from" name are the same'
if newname in applied or newname in unapplied:
raise StackException, 'Patch "%s" already exists' % newname
if oldname in unapplied:
self.get_patch(oldname).rename(newname)
unapplied[unapplied.index(oldname)] = newname
write_strings(self.__unapplied_file, unapplied)
elif oldname in applied:
self.get_patch(oldname).rename(newname)
applied[applied.index(oldname)] = newname
write_strings(self.__applied_file, applied)
else:
raise StackException, 'Unknown patch "%s"' % oldname
def log_patch(self, patch, message, notes = None):
"""Generate a log commit for a patch
"""
top = git.get_commit(patch.get_top())
old_log = patch.get_log()
if message is None:
# replace the current log entry
if not old_log:
raise StackException, \
'No log entry to annotate for patch "%s"' \
% patch.get_name()
replace = True
log_commit = git.get_commit(old_log)
msg = log_commit.get_log().split('\n')[0]
log_parent = log_commit.get_parent()
if log_parent:
parents = [log_parent]
else:
parents = []
else:
# generate a new log entry
replace = False
msg = '%s\t%s' % (message, top.get_id_hash())
if old_log:
parents = [old_log]
else:
parents = []
if notes:
msg += '\n\n' + notes
log = git.commit(message = msg, parents = parents,
cache_update = False, tree_id = top.get_tree(),
allowempty = True)
patch.set_log(log)
def hide_patch(self, name):
"""Add the patch to the hidden list.
"""
unapplied = self.get_unapplied()
if name not in unapplied:
# keep the checking order for backward compatibility with
# the old hidden patches functionality
if self.patch_applied(name):
raise StackException, 'Cannot hide applied patch "%s"' % name
elif self.patch_hidden(name):
raise StackException, 'Patch "%s" already hidden' % name
else:
raise StackException, 'Unknown patch "%s"' % name
if not self.patch_hidden(name):
# check needed for backward compatibility with the old
# hidden patches functionality
append_string(self.__hidden_file, name)
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
def unhide_patch(self, name):
"""Remove the patch from the hidden list.
"""
hidden = self.get_hidden()
if not name in hidden:
if self.patch_applied(name) or self.patch_unapplied(name):
raise StackException, 'Patch "%s" not hidden' % name
else:
raise StackException, 'Unknown patch "%s"' % name
hidden.remove(name)
write_strings(self.__hidden_file, hidden)
if not self.patch_applied(name) and not self.patch_unapplied(name):
# check needed for backward compatibility with the old
# hidden patches functionality
append_string(self.__unapplied_file, name)
|
gpl-2.0
| 5,813,231,224,454,919,000 | 33.819113 | 114 | 0.545824 | false |
ActiveState/code
|
recipes/Python/212565_copickle__module_serialize_code/recipe-212565.py
|
1
|
1544
|
""" Create portable serialized representations of Python <code> Objects"""
import new
import pickle
def co_dumps(s):
"""pickles a code object,arg s is the string with code
returns the code object pickled as a string"""
co = compile(s,'<string>','exec')
co_tup=[co.co_argcount,co.co_nlocals, co.co_stacksize,co.co_flags,
co.co_code,co.co_consts,co.co_names,co.co_varnames,co.co_filename,
co.co_name,co.co_firstlineno,co.co_lnotab]
return pickle.dumps(co_tup)
def co_dumpf(s,f):
"""similar to co_dumps() but instead of returning the string, writes
the pickled object to an opened file(f) to be retrieved with co_loadf()"""
co = compile(s,'<string>','exec')
co_tup=[co.co_argcount,co.co_nlocals, co.co_stacksize,co.co_flags,
co.co_code,co.co_consts,co.co_names,co.co_varnames,co.co_filename,
co.co_name,co.co_firstlineno,co.co_lnotab]
pickle.dump(co_tup,f)
def co_loads(s):
"""loads a code object pickled with co_dumps()
return a code object ready for exec()"""
r = pickle.loads(s)
return new.code(r[0],r[1],r[2],r[3],r[4],r[5],r[6],r[7],r[8],r[9],r[10],r[11])
def co_loadf(f):
"""loads a code object from a file"""
r = pickle.load(f)
return new.code(r[0],r[1],r[2],r[3],r[4],r[5],r[6],r[7],r[8],r[9],r[10],r[11])
def test():
string_with_code = 'print "hello co_pickle"'
pickled_code_object = co_dumps(string_with_code)
#print code_object
recovered_code_object = co_loads(pickled_code_object)
exec(recovered_code_object)
|
mit
| -668,232,421,904,555,500 | 34.090909 | 82 | 0.649611 | false |
wkschwartz/django
|
tests/update/tests.py
|
1
|
11045
|
import unittest
from django.core.exceptions import FieldError
from django.db import IntegrityError, connection, transaction
from django.db.models import CharField, Count, F, IntegerField, Max
from django.db.models.functions import Abs, Concat, Lower
from django.test import TestCase
from django.test.utils import register_lookup
from .models import (
A, B, Bar, D, DataPoint, Foo, RelatedPoint, UniqueNumber,
UniqueNumberChild,
)
class SimpleTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = A.objects.create()
cls.a2 = A.objects.create()
for x in range(20):
B.objects.create(a=cls.a1)
D.objects.create(a=cls.a1)
def test_nonempty_update(self):
"""
Update changes the right number of rows for a nonempty queryset
"""
num_updated = self.a1.b_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update(self):
"""
Update changes the right number of rows for an empty queryset
"""
num_updated = self.a2.b_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_nonempty_update_with_inheritance(self):
"""
        Update changes the right number of rows for a nonempty queryset
when the update affects only a base table
"""
num_updated = self.a1.d_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update_with_inheritance(self):
"""
Update changes the right number of rows for an empty queryset
when the update affects only a base table
"""
num_updated = self.a2.d_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_foreign_key_update_with_id(self):
"""
Update works using <field>_id for foreign keys
"""
num_updated = self.a1.d_set.update(a_id=self.a2)
self.assertEqual(num_updated, 20)
self.assertEqual(self.a2.d_set.count(), 20)
class AdvancedTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.d0 = DataPoint.objects.create(name="d0", value="apple")
cls.d2 = DataPoint.objects.create(name="d2", value="banana")
cls.d3 = DataPoint.objects.create(name="d3", value="banana")
cls.r1 = RelatedPoint.objects.create(name="r1", data=cls.d3)
def test_update(self):
"""
Objects are updated by first filtering the candidates into a queryset
and then calling the update() method. It executes immediately and
returns nothing.
"""
resp = DataPoint.objects.filter(value="apple").update(name="d1")
self.assertEqual(resp, 1)
resp = DataPoint.objects.filter(value="apple")
self.assertEqual(list(resp), [self.d0])
def test_update_multiple_objects(self):
"""
We can update multiple objects at once.
"""
resp = DataPoint.objects.filter(value='banana').update(value='pineapple')
self.assertEqual(resp, 2)
self.assertEqual(DataPoint.objects.get(name="d2").value, 'pineapple')
def test_update_fk(self):
"""
Foreign key fields can also be updated, although you can only update
the object referred to, not anything inside the related object.
"""
resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0)
self.assertEqual(resp, 1)
resp = RelatedPoint.objects.filter(data__name="d0")
self.assertEqual(list(resp), [self.r1])
def test_update_multiple_fields(self):
"""
Multiple fields can be updated at once
"""
resp = DataPoint.objects.filter(value="apple").update(
value="fruit", another_value="peach")
self.assertEqual(resp, 1)
d = DataPoint.objects.get(name="d0")
self.assertEqual(d.value, 'fruit')
self.assertEqual(d.another_value, 'peach')
def test_update_all(self):
"""
In the rare case you want to update every instance of a model, update()
is also a manager method.
"""
self.assertEqual(DataPoint.objects.update(value='thing'), 3)
resp = DataPoint.objects.values('value').distinct()
self.assertEqual(list(resp), [{'value': 'thing'}])
def test_update_slice_fail(self):
"""
We do not support update on already sliced query sets.
"""
method = DataPoint.objects.all()[:2].update
msg = 'Cannot update a query once a slice has been taken.'
with self.assertRaisesMessage(AssertionError, msg):
method(another_value='another thing')
def test_update_respects_to_field(self):
"""
Update of an FK field which specifies a to_field works.
"""
a_foo = Foo.objects.create(target='aaa')
b_foo = Foo.objects.create(target='bbb')
bar = Bar.objects.create(foo=a_foo)
self.assertEqual(bar.foo_id, a_foo.target)
bar_qs = Bar.objects.filter(pk=bar.pk)
self.assertEqual(bar_qs[0].foo_id, a_foo.target)
bar_qs.update(foo=b_foo)
self.assertEqual(bar_qs[0].foo_id, b_foo.target)
def test_update_m2m_field(self):
msg = (
'Cannot update model field '
'<django.db.models.fields.related.ManyToManyField: m2m_foo> '
'(only non-relations and foreign keys permitted).'
)
with self.assertRaisesMessage(FieldError, msg):
Bar.objects.update(m2m_foo='whatever')
def test_update_transformed_field(self):
A.objects.create(x=5)
A.objects.create(x=-6)
with register_lookup(IntegerField, Abs):
A.objects.update(x=F('x__abs'))
self.assertCountEqual(A.objects.values_list('x', flat=True), [5, 6])
def test_update_annotated_queryset(self):
"""
Update of a queryset that's been annotated.
"""
# Trivial annotated update
qs = DataPoint.objects.annotate(alias=F('value'))
self.assertEqual(qs.update(another_value='foo'), 3)
# Update where annotation is used for filtering
qs = DataPoint.objects.annotate(alias=F('value')).filter(alias='apple')
self.assertEqual(qs.update(another_value='foo'), 1)
# Update where annotation is used in update parameters
qs = DataPoint.objects.annotate(alias=F('value'))
self.assertEqual(qs.update(another_value=F('alias')), 3)
# Update where aggregation annotation is used in update parameters
qs = DataPoint.objects.annotate(max=Max('value'))
msg = (
'Aggregate functions are not allowed in this query '
'(another_value=Max(Col(update_datapoint, update.DataPoint.value))).'
)
with self.assertRaisesMessage(FieldError, msg):
qs.update(another_value=F('max'))
def test_update_annotated_multi_table_queryset(self):
"""
Update of a queryset that's been annotated and involves multiple tables.
"""
# Trivial annotated update
qs = DataPoint.objects.annotate(related_count=Count('relatedpoint'))
self.assertEqual(qs.update(value='Foo'), 3)
# Update where annotation is used for filtering
qs = DataPoint.objects.annotate(related_count=Count('relatedpoint'))
self.assertEqual(qs.filter(related_count=1).update(value='Foo'), 1)
# Update where aggregation annotation is used in update parameters
qs = RelatedPoint.objects.annotate(max=Max('data__value'))
msg = 'Joined field references are not permitted in this query'
with self.assertRaisesMessage(FieldError, msg):
qs.update(name=F('max'))
def test_update_with_joined_field_annotation(self):
msg = 'Joined field references are not permitted in this query'
with register_lookup(CharField, Lower):
for annotation in (
F('data__name'),
F('data__name__lower'),
Lower('data__name'),
Concat('data__name', 'data__value'),
):
with self.subTest(annotation=annotation):
with self.assertRaisesMessage(FieldError, msg):
RelatedPoint.objects.annotate(
new_name=annotation,
).update(name=F('new_name'))
@unittest.skipUnless(
connection.vendor == 'mysql',
'UPDATE...ORDER BY syntax is supported on MySQL/MariaDB',
)
class MySQLUpdateOrderByTest(TestCase):
"""Update field with a unique constraint using an ordered queryset."""
@classmethod
def setUpTestData(cls):
UniqueNumber.objects.create(number=1)
UniqueNumber.objects.create(number=2)
def test_order_by_update_on_unique_constraint(self):
tests = [
('-number', 'id'),
(F('number').desc(), 'id'),
(F('number') * -1, 'id'),
]
for ordering in tests:
with self.subTest(ordering=ordering), transaction.atomic():
updated = UniqueNumber.objects.order_by(*ordering).update(
number=F('number') + 1,
)
self.assertEqual(updated, 2)
def test_order_by_update_on_unique_constraint_annotation(self):
# Ordering by annotations is omitted because they cannot be resolved in
# .update().
with self.assertRaises(IntegrityError):
UniqueNumber.objects.annotate(
number_inverse=F('number').desc(),
).order_by('number_inverse').update(
number=F('number') + 1,
)
def test_order_by_update_on_parent_unique_constraint(self):
# Ordering by inherited fields is omitted because joined fields cannot
# be used in the ORDER BY clause.
UniqueNumberChild.objects.create(number=3)
UniqueNumberChild.objects.create(number=4)
with self.assertRaises(IntegrityError):
UniqueNumberChild.objects.order_by('number').update(
number=F('number') + 1,
)
def test_order_by_update_on_related_field(self):
# Ordering by related fields is omitted because joined fields cannot be
# used in the ORDER BY clause.
data = DataPoint.objects.create(name='d0', value='apple')
related = RelatedPoint.objects.create(name='r0', data=data)
with self.assertNumQueries(1) as ctx:
updated = RelatedPoint.objects.order_by('data__name').update(name='new')
sql = ctx.captured_queries[0]['sql']
self.assertNotIn('ORDER BY', sql)
self.assertEqual(updated, 1)
related.refresh_from_db()
self.assertEqual(related.name, 'new')
|
bsd-3-clause
| 3,419,285,657,485,618,700 | 38.873646 | 84 | 0.616569 | false |
cackharot/fbeazt
|
src/foodbeazt/resources/store_order_report.py
|
1
|
2259
|
from datetime import datetime
from dateutil import parser as dtparser, tz
from bson import ObjectId, json_util
from flask import g, request
from flask_restful import Resource
from service.StoreOrderService import StoreOrderService
from service.StoreService import StoreService
from foodbeazt.fapp import mongo, store_admin_permission
import logging
class StoreOrderReportApi(Resource):
def __init__(self):
self.log = logging.getLogger(__name__)
self.service = StoreOrderService(mongo.db)
self.storeService = StoreService(mongo.db)
self.report_map = {
'day_orders': self.service.generate_report,
'order_trends': self.service.order_trend
}
def get(self, store_id):
if store_id == '' or store_id == '-1':
return dict(status="error", message="store_id is required"), 446
if not store_admin_permission.can():
return dict(status="error", message="Unauthorized! You cannot view store order reports!"), 403
report_type = request.args.get('report_type', 'day_orders')
if report_type in self.report_map:
return self.exec_report(report_type, store_id)
return []
def exec_report(self, report_type, store_id):
result = {}
try:
today = datetime.now()
tenant_id = g.user.tenant_id
today_iso = today.isoformat()
start_date = None
end_date = None
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
if 'start_date' in request.args:
start_date = dtparser.parse(request.args.get('start_date'),ignoretz=True)
start_date = start_date.replace(tzinfo=from_zone).astimezone(to_zone).date()
if 'end_date' in request.args:
end_date = dtparser.parse(request.args.get('end_date'),ignoretz=True)
end_date = end_date.replace(tzinfo=from_zone).astimezone(to_zone).date()
result = self.report_map[report_type](tenant_id, store_id, start_date, end_date)
except Exception as e:
self.log.exception(e)
return dict(status="error", message="Error while generating store order reports"), 447
return result
|
apache-2.0
| 4,817,031,853,769,750,000 | 40.833333 | 106 | 0.626826 | false |
zhiyu-he/algorithm-trip
|
growth/oj/leet_code/algorithms/122-best-time-to-buy-and-sell-stock-ii.py
|
1
|
1289
|
# -*- coding: utf-8 -*-
'''
TIPS:
1. each transaction is a (buy, sell) operation
2. for each (buy, sell), we find the longest increasing price sequence
3. why does this work? any profitable transaction can be split into consecutive
   daily price increases, so summing every positive day-to-day gain is optimal
'''
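# A quick worked check of the greedy idea above, using the sample input
# exercised in the __main__ block below: for prices = [7, 1, 5, 3, 6, 4]
# the positive day-to-day gains are (5 - 1) + (6 - 3) = 7 (buy at 1, sell
# at 5, then buy at 3, sell at 6), which is the maximum achievable profit.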
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if prices is None or len(prices) == 0:
return 0
profit = 0
longest_incr = None
buy_in = prices[0]
i = 1
while i < len(prices):
if prices[i] > prices[i-1]:
longest_incr = prices[i]
i += 1
else:
_profit = longest_incr - buy_in if longest_incr else 0
if _profit > 0:
profit += _profit
buy_in = prices[i]
longest_incr = None
i += 1
if longest_incr and longest_incr - buy_in > 0:
profit += longest_incr - buy_in
return profit
if __name__ == '__main__':
obj = Solution()
print obj.maxProfit(None)
print obj.maxProfit([])
print obj.maxProfit([3,3])
print obj.maxProfit([1,2,3,3,3,4,5])
print obj.maxProfit([7,1,5,3,6,4])
print obj.maxProfit([1,2,3,4,5])
print obj.maxProfit([7,6,4,3,1])
print obj.maxProfit([1,5,3,6])
print obj.maxProfit([1,2,3,6])
|
gpl-2.0
| 8,956,034,449,981,051,000 | 25.306122 | 73 | 0.534523 | false |
ltucker/radarpost
|
radarpost/tests/test_feeds.py
|
1
|
6884
|
from helpers import *
def test_feed_basic():
"""
test_feed_basic
create a mailbox
subscribe to a feed
update the subscription
assert expected items are in the mailbox
"""
from radarpost.feed import FeedSubscription, update_feed_subscription, parse, AtomEntry
from radarpost.mailbox import Message
# create a random feed
ff, entries = random_feed_info_and_entries(10)
feed_doc = create_atom_feed(ff, entries)
url = ff['url']
# create a mailbox
mb = create_test_mailbox()
# make sure there are no items in the mailbox
count = 0
for r in mb.view(Message.by_timestamp, group=False):
count += r.value
assert count == 0
# subscribe to our feed and update the subscription
sub = FeedSubscription(url=url)
sub.store(mb)
feed = parse(feed_doc, url)
update_feed_subscription(mb, sub, feed)
# check that each item in the random feed is in the
# mailbox and only items from the random feed are in there.
seen_ids = []
for ni in AtomEntry.view(mb, Message.by_timestamp, include_docs=True, reduce=False):
seen_ids.append(ni.entry_id)
expected_ids = set([e['id'] for e in entries])
assert len(seen_ids) == len(entries)
for iid in seen_ids:
assert iid in expected_ids
def test_feed_update():
"""
create a mailbox
subscribe to a feed
update the subscription
assert expected items are in the mailbox
add new items to feed
update subscription
assert expected items are in the mailbox
assert that old items are not repeated
"""
from radarpost.feed import FeedSubscription, update_feed_subscription, parse, AtomEntry
from radarpost.mailbox import Message
# create two versions of a random feed.
# the second version with additional items.
ff, entries = random_feed_info_and_entries(20)
url = ff['url']
ff1 = dict(ff)
ff2 = dict(ff)
entries1 = entries[10:] # last 10 only
entries2 = entries # all entries
ff1['timestamp'] = entries2[0]['timestamp']
feed_doc1 = create_atom_feed(ff1, entries1)
feed_doc2 = create_atom_feed(ff2, entries2)
# create a mailbox
mb = create_test_mailbox()
# make sure there are no items in the mailbox
count = 0
for r in mb.view(Message.by_timestamp, group=False):
count += r.value
assert count == 0
# subscribe to our feed and update the subscription
sub = FeedSubscription(url=url)
sub.store(mb)
# update with the first feed (first 10 items only)
feed = parse(feed_doc1, url)
update_feed_subscription(mb, sub, feed)
# check that each item in the feed is in the
# mailbox and only items from the feed are in there.
seen_ids = []
for ni in AtomEntry.view(mb, Message.by_timestamp,
include_docs=True, reduce=False):
seen_ids.append(ni.entry_id)
expected_ids = set([e['id'] for e in entries1])
assert len(seen_ids) == len(entries1)
for iid in seen_ids:
assert iid in expected_ids
# now update with the whole set of items
feed = parse(feed_doc2, url)
update_feed_subscription(mb, sub, feed)
    # check that all items are now in the mailbox, that only items
    # from the feed are in there, and that each appears exactly once.
seen_ids = []
for ni in AtomEntry.view(mb, Message.by_timestamp,
include_docs=True, reduce=False):
seen_ids.append(ni.entry_id)
expected_ids = set([e['id'] for e in entries2])
assert len(seen_ids) == len(entries2)
for iid in seen_ids:
assert iid in expected_ids
def test_feed_delete_sticks():
"""
make sure that an item deleted from a mailbox does not
reappear if it still exists in the source feed.
"""
from radarpost.feed import FeedSubscription, update_feed_subscription, parse, AtomEntry
from radarpost.mailbox import Message
# create two versions of a random feed.
# the second version with additional items.
ff, entries = random_feed_info_and_entries(20)
url = ff['url']
ff1 = dict(ff)
ff2 = dict(ff)
entries1 = entries[10:] # last 10 only
entries2 = entries # all entries
ff1['timestamp'] = entries2[0]['timestamp']
feed_doc1 = create_atom_feed(ff1, entries1)
feed_doc2 = create_atom_feed(ff2, entries2)
# create a mailbox
mb = create_test_mailbox()
# make sure there are no items in the mailbox
count = 0
for r in mb.view(Message.by_timestamp, group=False):
count += r.value
assert count == 0
# subscribe to our feed and update the subscription
sub = FeedSubscription(url=url)
sub.store(mb)
# update with the first feed (first 10 items only)
feed = parse(feed_doc1, url)
update_feed_subscription(mb, sub, feed)
# check that each item in the feed is in the
# mailbox and only items from the feed are in there.
seen_ids = []
news_items = []
for ni in AtomEntry.view(mb, Message.by_timestamp,
include_docs=True, reduce=False):
seen_ids.append(ni.entry_id)
news_items.append(ni)
expected_ids = set([e['id'] for e in entries1])
assert len(seen_ids) == len(entries1)
for iid in seen_ids:
assert iid in expected_ids
# delete one of the items
killed_item = news_items[0]
del mb[killed_item.id]
assert killed_item.id not in mb
# update with the same info
update_feed_subscription(mb, sub, feed)
# item should not have reappeared
assert killed_item.id not in mb
# now update with the whole set of items
feed = parse(feed_doc2, url)
update_feed_subscription(mb, sub, feed)
# item should not have reappeared.
assert killed_item.id not in mb
    # check that all other expected items are now in the mailbox, that only
    # items from the feed are in there, and that each appears exactly once.
seen_ids = []
for ni in AtomEntry.view(mb, Message.by_timestamp,
include_docs=True, reduce=False):
seen_ids.append(ni.entry_id)
expected_ids = set([e['id'] for e in entries2])
expected_ids.remove(killed_item.entry_id)
assert len(seen_ids) == len(expected_ids)
for iid in seen_ids:
assert iid in expected_ids
def test_feeds_design_doc():
"""
tests that the feeds design document is
added to mailboxes.
"""
# create a mailbox
mb = create_test_mailbox()
from radarpost.feed import FeedSubscription
url = 'http://example.com/feed.xml'
sub = FeedSubscription(url=url)
sub.store(mb)
# lookup by url
for ss in mb.view(FeedSubscription.by_url, startkey=url, endkey=url):
assert ss.id == sub.id
|
gpl-2.0
| 4,724,863,857,700,947,000 | 30.295455 | 91 | 0.641488 | false |
nelsyeung/half-metals-analysis
|
lib/compounds/five_elements.py
|
1
|
2465
|
""" Class for generate 5 elements compounds """
import os
import sys
import inspect
baseLibDir = os.path.join(os.path.realpath(os.path.dirname(
inspect.getfile(inspect.currentframe()))), '..')
sys.path.append(baseLibDir)
import nmod
from compound import Compound
class FiveElements(Compound):
""" Five elements compound child class """
def __init__(self, jobsDir, elements, potFile, alat):
numElements = len(elements.split())
if numElements != 5:
            print('Expecting 5 elements, but ' + str(numElements)
+ ' (' + elements + ') were inputted.')
nmod.nexit()
Compound.__init__(self, jobsDir, elements, potFile, alat)
def generateConcentrations(self, num, **kwargs):
""" Generate the required permutations of concentrations """
if self.potFile == 'sc_5_elements_b2':
a, b, c, d, e = 1.0, 0.5, 0.0, 0.5, 0.0
step = b / (num - 1)
precision = len(str(step).split('.')[1])
conc = [None]*5
conc[0] = nmod.float2str(precision, a)
for i in range(0, num * num):
x, y = i % num, int(i / num)
conc[1] = nmod.float2str(precision, b - x * step)
conc[2] = nmod.float2str(precision, c + x * step)
conc[3] = nmod.float2str(precision, d - y * step)
conc[4] = nmod.float2str(precision, e + y * step)
self.create(conc[0] + '_' + conc[1] + '_' + conc[2]
+ '_' + conc[3] + '_' + conc[4], **kwargs)
elif self.potFile == 'fcc_5_elements_l21':
a, b, c, d, e = 1.0, 1.0, 0.0, 1.0, 0.0
step = b / (num - 1)
precision = len(str(step).split('.')[1])
conc = [None]*5
conc[0] = nmod.float2str(precision, a)
for i in range(0, num * num):
x, y = i % num, int(i / num)
conc[1] = nmod.float2str(precision, b - x * step)
conc[2] = nmod.float2str(precision, c + x * step)
conc[3] = nmod.float2str(precision, d - y * step)
conc[4] = nmod.float2str(precision, e + y * step)
self.create(conc[0] + '_' + conc[1] + '_' + conc[2]
+ '_' + conc[3] + '_' + conc[4], **kwargs)
else:
print(self.potFile + ' has not yet been implemented.')
nmod.nexit()
|
mit
| 3,695,346,537,015,509,500 | 41.5 | 70 | 0.494118 | false |
wangjiezhe/FetchNovels
|
novel/sources/tianna.py
|
1
|
1167
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib.parse import urljoin
from pyquery import PyQuery
from novel import serial, utils
BASE_URL = 'http://www.tianna.la/tnw{}/'
class Tianna(serial.SerialNovel):
def __init__(self, tid):
super().__init__(utils.base_to_url(BASE_URL, tid), '#content',
intro_sel='#intro',
tid=tid)
def get_title_and_author(self):
name = self.doc('meta').filter(
lambda i, e: PyQuery(e).attr('property') == 'og:novel:book_name'
).attr('content')
author = self.doc('meta').filter(
lambda i, e: PyQuery(e).attr('property') == 'og:novel:author'
).attr('content')
return name, author
@property
def chapter_list(self):
clist = self.doc('#list')('dt:eq(1)').next_all('dd').filter(
lambda i, e: PyQuery(e)('a').attr('href')
).map(
lambda i, e: (i,
urljoin(utils.get_base_url(self.url),
PyQuery(e)('a').attr('href')),
PyQuery(e).text())
)
return clist
|
gpl-3.0
| 3,946,425,903,942,329,300 | 27.463415 | 76 | 0.502999 | false |
tarasane/h2o-3
|
h2o-py/tests/h2o_pyunit.py
|
1
|
1766
|
import urllib2
import sys
sys.path.insert(1, "..")
import h2o
from tests import utils
"""
Here is some testing infrastructure for running the pyunit tests in conjunction with run.py.
run.py issues an ip and port as a string: "<ip>:<port>".
The expected value of sys_args[1] is "<ip>:<port>"
All tests MUST have the following structure:
import sys
sys.path.insert(1, "..") # may vary depending on this test's position relative to h2o-py
import h2o, tests
def my_test(ip=None, port=None):
...test filling...
if __name__ == "__main__":
tests.run_test(sys.argv, my_test)
So each test must have an ip and port
"""
def run_test(sys_args, test_to_run):
# import pkg_resources
# ver = pkg_resources.get_distribution("h2o").version
# print "H2O PYTHON PACKAGE VERSION: " + str(ver)
ip, port = sys_args[2].split(":")
h2o.init(ip,port,strict_version_check=False)
h2o.log_and_echo("------------------------------------------------------------")
h2o.log_and_echo("")
h2o.log_and_echo("STARTING TEST: "+str(h2o.ou()))
h2o.log_and_echo("")
h2o.log_and_echo("------------------------------------------------------------")
num_keys = h2o.store_size()
try:
if len(sys_args) > 3 and sys_args[3] == "--ipynb": utils.ipy_notebook_exec(sys_args[4],save_and_norun=False)
else: test_to_run()
finally:
h2o.remove_all()
if h2o.keys_leaked(num_keys): print "Leaked Keys!"
# HDFS helpers
def get_h2o_internal_hdfs_name_node():
return "172.16.2.176"
def is_running_internal_to_h2o():
url = "http://{0}:50070".format(get_h2o_internal_hdfs_name_node())
try:
urllib2.urlopen(urllib2.Request(url))
internal = True
except:
internal = False
return internal
|
apache-2.0
| -1,662,615,443,775,424,500 | 29.448276 | 116 | 0.601359 | false |
tsl143/addons-server
|
src/olympia/api/tests/test_jwt_auth.py
|
1
|
6575
|
from datetime import datetime, timedelta
from django.conf import settings
import jwt
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_jwt.settings import api_settings
from olympia.amo.tests import TestCase
from olympia.api import jwt_auth
from olympia.api.models import APIKey, SYMMETRIC_JWT_TYPE
from olympia.users.models import UserProfile
class JWTAuthKeyTester(TestCase):
def create_api_key(self, user, key='some-user-key', is_active=True,
secret='some-shared-secret', **kw):
return APIKey.objects.create(type=SYMMETRIC_JWT_TYPE,
user=user, key=key, secret=secret,
is_active=is_active, **kw)
def auth_token_payload(self, user, issuer):
"""Creates a JWT payload as a client would."""
issued_at = datetime.utcnow()
return {
# The JWT issuer must match the 'key' field of APIKey
'iss': issuer,
'iat': issued_at,
'exp': issued_at + timedelta(
seconds=settings.MAX_APIKEY_JWT_AUTH_TOKEN_LIFETIME)
}
def encode_token_payload(self, payload, secret):
"""Encodes a JWT payload as a client would."""
token = jwt.encode(payload, secret, api_settings.JWT_ALGORITHM)
return token.decode('utf-8')
def create_auth_token(self, user, issuer, secret):
payload = self.auth_token_payload(user, issuer)
return self.encode_token_payload(payload, secret)
class TestJWTKeyAuthDecodeHandler(JWTAuthKeyTester):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestJWTKeyAuthDecodeHandler, self).setUp()
self.user = UserProfile.objects.get(email='del@icio.us')
def test_report_unknown_issuer(self):
token = self.create_auth_token(self.user, 'non-existant-issuer',
'some-secret')
with self.assertRaises(AuthenticationFailed) as ctx:
jwt_auth.jwt_decode_handler(token)
assert ctx.exception.detail == 'Unknown JWT iss (issuer).'
def test_report_token_without_issuer(self):
payload = self.auth_token_payload(self.user, 'some-issuer')
del payload['iss']
token = self.encode_token_payload(payload, 'some-secret')
with self.assertRaises(AuthenticationFailed) as ctx:
jwt_auth.jwt_decode_handler(token)
assert ctx.exception.detail == 'JWT iss (issuer) claim is missing.'
def test_decode_garbage_token(self):
with self.assertRaises(jwt.DecodeError) as ctx:
jwt_auth.jwt_decode_handler('}}garbage{{')
assert str(ctx.exception) == 'Not enough segments'
def test_decode_invalid_non_ascii_token(self):
with self.assertRaises(jwt.DecodeError) as ctx:
jwt_auth.jwt_decode_handler(u'Ivan Krsti\u0107')
assert str(ctx.exception) == 'Not enough segments'
def test_incorrect_signature(self):
api_key = self.create_api_key(self.user)
token = self.create_auth_token(api_key.user, api_key.key,
api_key.secret)
decoy_api_key = self.create_api_key(
self.user, key='another-issuer', secret='another-secret')
with self.assertRaises(jwt.DecodeError) as ctx:
jwt_auth.jwt_decode_handler(
token, get_api_key=lambda **k: decoy_api_key)
assert str(ctx.exception) == 'Signature verification failed'
def test_expired_token(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
payload['exp'] = (datetime.utcnow() -
timedelta(seconds=10))
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(jwt.ExpiredSignatureError):
jwt_auth.jwt_decode_handler(token)
def test_missing_issued_at_time(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
del payload['iat']
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
jwt_auth.jwt_decode_handler(token)
assert (ctx.exception.detail ==
'Invalid JWT: Token is missing the "iat" claim.')
def test_invalid_issued_at_time(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
# Simulate clock skew...
payload['iat'] = (
datetime.utcnow() +
timedelta(seconds=settings.JWT_AUTH['JWT_LEEWAY'] + 10))
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
jwt_auth.jwt_decode_handler(token)
assert ctx.exception.detail.startswith(
'JWT iat (issued at time) is invalid.')
def test_invalid_issued_at_time_not_number(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
# Simulate clock skew...
payload['iat'] = 'thisisnotanumber'
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
jwt_auth.jwt_decode_handler(token)
assert ctx.exception.detail.startswith(
'JWT iat (issued at time) is invalid.')
def test_missing_expiration(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
del payload['exp']
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
jwt_auth.jwt_decode_handler(token)
assert (ctx.exception.detail ==
'Invalid JWT: Token is missing the "exp" claim.')
def test_disallow_long_expirations(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
payload['exp'] = (
datetime.utcnow() +
timedelta(seconds=settings.MAX_APIKEY_JWT_AUTH_TOKEN_LIFETIME) +
timedelta(seconds=1)
)
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
jwt_auth.jwt_decode_handler(token)
assert ctx.exception.detail == 'JWT exp (expiration) is too long.'
|
bsd-3-clause
| 715,969,105,728,098,300 | 37.450292 | 76 | 0.630266 | false |
anthonysandrin/kafka-utils
|
kafka_utils/kafka_cluster_manager/cluster_info/topic.py
|
1
|
1887
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This class contains information for a topic object.
Useful as part of reassignment project when deciding upon moving
partitions of same topic over different brokers.
"""
import logging
class Topic(object):
"""Information of a topic object.
:params
id: Name of the given topic
replication_factor: replication factor of a given topic
partitions: List of Partition objects
"""
def __init__(self, id, replication_factor=0, partitions=None):
self._id = id
self._replication_factor = replication_factor
self._partitions = partitions or set([])
self.log = logging.getLogger(self.__class__.__name__)
@property
def id(self):
return self._id
@property
def replication_factor(self):
return self._replication_factor
@property
def partitions(self):
return self._partitions
@property
def weight(self):
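        # Total topic weight: each partition contributes its weight once per replica.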
return sum(
partition.weight * partition.replication_factor
for partition in self._partitions
)
def add_partition(self, partition):
self._partitions.add(partition)
def __str__(self):
return "{0}".format(self._id)
def __repr__(self):
return "{0}".format(self)
|
apache-2.0
| 2,210,050,034,550,098,700 | 28.484375 | 74 | 0.659247 | false |
jrg365/gpytorch
|
gpytorch/utils/pivoted_cholesky.py
|
1
|
3313
|
#!/usr/bin/env python3
import torch
from .. import settings
def pivoted_cholesky(matrix, max_iter, error_tol=None):
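    # Partial pivoted Cholesky: returns a low-rank factor L of shape
    # (*batch_shape, n, k) with k <= max_iter such that L @ L.transpose(-1, -2)
    # approximates `matrix`; iteration stops early once the residual diagonal
    # norm drops below `error_tol` (or after `max_iter` steps).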
from ..lazy import lazify, LazyTensor
batch_shape = matrix.shape[:-2]
matrix_shape = matrix.shape[-2:]
if error_tol is None:
error_tol = settings.preconditioner_tolerance.value()
# Need to get diagonals. This is easy if it's a LazyTensor, since
# LazyTensor.diag() operates in batch mode.
matrix = lazify(matrix)
matrix_diag = matrix._approx_diag()
# Make sure max_iter isn't bigger than the matrix
max_iter = min(max_iter, matrix_shape[-1])
# What we're returning
L = torch.zeros(*batch_shape, max_iter, matrix_shape[-1], dtype=matrix.dtype, device=matrix.device)
orig_error = torch.max(matrix_diag, dim=-1)[0]
errors = torch.norm(matrix_diag, 1, dim=-1) / orig_error
# The permutation
permutation = torch.arange(0, matrix_shape[-1], dtype=torch.long, device=matrix_diag.device)
permutation = permutation.repeat(*batch_shape, 1)
# Get batch indices
batch_iters = [
torch.arange(0, size, dtype=torch.long, device=matrix_diag.device)
.unsqueeze_(-1)
.repeat(torch.Size(batch_shape[:i]).numel(), torch.Size(batch_shape[i + 1 :]).numel())
.view(-1)
for i, size in enumerate(batch_shape)
]
m = 0
while (m == 0) or (m < max_iter and torch.max(errors) > error_tol):
permuted_diags = torch.gather(matrix_diag, -1, permutation[..., m:])
max_diag_values, max_diag_indices = torch.max(permuted_diags, -1)
max_diag_indices = max_diag_indices + m
# Swap pi_m and pi_i in each row, where pi_i is the element of the permutation
# corresponding to the max diagonal element
old_pi_m = permutation[..., m].clone()
permutation[..., m].copy_(permutation.gather(-1, max_diag_indices.unsqueeze(-1)).squeeze_(-1))
permutation.scatter_(-1, max_diag_indices.unsqueeze(-1), old_pi_m.unsqueeze(-1))
pi_m = permutation[..., m].contiguous()
L_m = L[..., m, :] # Will be all zeros -- should we use torch.zeros?
L_m.scatter_(-1, pi_m.unsqueeze(-1), max_diag_values.sqrt().unsqueeze_(-1))
row = matrix[(*batch_iters, pi_m.view(-1), slice(None, None, None))]
if isinstance(row, LazyTensor):
row = row.evaluate()
row = row.view(*batch_shape, matrix_shape[-1])
if m + 1 < matrix_shape[-1]:
pi_i = permutation[..., m + 1 :].contiguous()
L_m_new = row.gather(-1, pi_i)
if m > 0:
L_prev = L[..., :m, :].gather(-1, pi_i.unsqueeze(-2).repeat(*(1 for _ in batch_shape), m, 1))
update = L[..., :m, :].gather(-1, pi_m.view(*pi_m.shape, 1, 1).repeat(*(1 for _ in batch_shape), m, 1))
L_m_new -= torch.sum(update * L_prev, dim=-2)
L_m_new /= L_m.gather(-1, pi_m.unsqueeze(-1))
L_m.scatter_(-1, pi_i, L_m_new)
matrix_diag_current = matrix_diag.gather(-1, pi_i)
matrix_diag.scatter_(-1, pi_i, matrix_diag_current - L_m_new ** 2)
L[..., m, :] = L_m
errors = torch.norm(matrix_diag.gather(-1, pi_i), 1, dim=-1) / orig_error
m = m + 1
return L[..., :m, :].transpose(-1, -2).contiguous()
|
mit
| -3,026,770,446,488,931,300 | 38.440476 | 119 | 0.582554 | false |
snapcore/snapcraft
|
tests/unit/sources/test_checksum.py
|
1
|
2784
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import os
import zipfile
from testtools.matchers import Equals
from snapcraft.internal.sources import errors
from snapcraft.internal.sources._checksum import verify_checksum
from tests import unit
class TestChecksum(unit.TestCase):
def setUp(self):
super().setUp()
def test_invalid_checksum(self):
# Create a file for testing
os.makedirs(os.path.join("src"))
dummy_file = os.path.join("src", "test")
open(dummy_file, "w").close()
self.assertRaises(AttributeError, verify_checksum, "456/abcde", dummy_file)
def test_correct_checksum(self):
# Create zip file for testing
os.makedirs(os.path.join("src"))
file_to_zip = os.path.join("src", "test.txt")
open(file_to_zip, "w").close()
zip_file = zipfile.ZipFile(os.path.join("src", "test.zip"), "w")
zip_file.write(file_to_zip)
zip_file.close()
calculated_checksum = hashlib.new(
"md5", open(os.path.join("src", "test.zip"), "rb").read()
)
calculated_checksum = calculated_checksum.hexdigest()
verify_checksum("md5/" + calculated_checksum, "src/test.zip")
def test_incorrect_checksum(self):
# Create zip file for testing
os.makedirs(os.path.join("src"))
file_to_zip = os.path.join("src", "test.txt")
open(file_to_zip, "w").close()
zip_file = zipfile.ZipFile(os.path.join("src", "test.zip"), "w")
zip_file.write(file_to_zip)
zip_file.close()
incorrect_checksum = "fe049cfba688aa1af88bc78191d7f904"
calculated_checksum = hashlib.new(
"md5", open(os.path.join("src", "test.zip"), "rb").read()
)
calculated_checksum = calculated_checksum.hexdigest()
raised = self.assertRaises(
errors.DigestDoesNotMatchError,
verify_checksum,
"md5/" + incorrect_checksum,
"src/test.zip",
)
self.assertThat(raised.expected, Equals(incorrect_checksum))
self.assertThat(raised.calculated, Equals(calculated_checksum))
|
gpl-3.0
| 8,382,165,105,227,043,000 | 33.8 | 83 | 0.650862 | false |
nomad-mystic/nomadmystic
|
fileSystem/school-projects/development/softwaredesignandcomputerlogiccis122/cis122lab2/python/Lab2.py
|
1
|
2465
|
# file = Lab2.py
# programmer = Keith Murphy
# date created = 1-15-2014
# last mod = 1-25-2014
#
# input_list:
# width_1, length_1, width_2, length_2
# output_list:
# rectangle_1_area, rectangle_2_area, average_area_of_rectangles
#
# Variables
# Declare real width_1
# Declare real length_1
# Declare real width_2
# Declare real length_2
# Declare real rectangle_1_area
# Declare real rectangle_2_area
# Declare real average_area_of_rectangles
#
# Display 'Welcome Message'
#
# This is for the first rectangle:
# Display 'Please enter the width of you first rectangle: '
# Input Real width_1
# Display 'Please enter the length of you first rectangle: '
# Input Real length_1
#
# This is for the second rectangle:
# Display 'What is the width of you second rectangle '
# Input Real width_2
# Display 'What is the length of the second rectangle '
# Input Real length_2
#
# Set rectangle_1_area = width_1 * length_1
# Set rectangle_2_area = width_2 * length_2
# Set average_area_of_rectangles = (rectangle_1_area + rectangle_2_area) / 2
#
# Display 'This is the area of your first rectangle = ', rectangle_1_area, 'and this is the area of your second
# rectangle = ', 'Your area for the second rectangle is ', rectangle_2_area
# Display 'The average of the two rectangles is ', average_area_of_rectangles
print("Welcome to your favorite two rectangle's areas and average finder.")
# Input from user to determine the area of the rectangles
# Finding the area of the first rectangle
width_1 = float(input('Please enter the width of you first rectangle: '))
length_1 = float(input('Please enter the length of the first rectangle: '))
# Finding the area of the second rectangle
width_2 = float(input('Please enter the width of you second rectangle: '))
length_2 = float(input('Please enter the length of the second rectangle: '))
# Calculate the area of the two rectangles individually and together
rectangle_1_area = width_1 * length_1
rectangle_2_area = width_2 * length_2
average_area_of_rectangles = (rectangle_1_area + rectangle_2_area) / 2
# Displaying the output of the two areas and the average between them
print(
'This is the area of your first rectangle =', str('{:.2f}'.format(rectangle_1_area)) +
' and this is the area of your second rectangle =', str('{:.2f}'.format(rectangle_2_area)) + '.')
print('The average between the two rectangles is', str('{:.3f}'.format(average_area_of_rectangles)) + '.')
|
mit
| -3,367,064,113,916,565,000 | 37.515625 | 111 | 0.711562 | false |
iconoeugen/rb-xmpp-notification
|
rbxmppnotification/admin_urls.py
|
1
|
1484
|
#
# admin_urls.py
#
# Copyright (c) 2013 Horatiu Eugen Vlad
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from django.conf.urls import patterns
from rbxmppnotification.extension import RBXmppNotification
from rbxmppnotification.forms import RBXmppNotificationSettingsForm
urlpatterns = patterns('',
(r'^$', 'reviewboard.extensions.views.configure_extension',
{'ext_class': RBXmppNotification,
'form_class': RBXmppNotificationSettingsForm,
}),
)
|
mit
| -7,802,793,473,282,647,000 | 39.108108 | 73 | 0.772237 | false |
rubgombar1/sharing-cars
|
sharingcars/announcements/views.py
|
1
|
9234
|
from django.views.generic.edit import CreateView
from django.core.urlresolvers import reverse_lazy, reverse
from django.views.generic.list import ListView
from django.views.generic import DeleteView
from django.db.models import Q
from django.shortcuts import redirect
from django.views.generic.detail import DetailView
from django.contrib import messages
from django.views.generic.edit import UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from announcements.models import Announcement, ApplyAnnouncement, StopAnnouncement, CommentAnnouncement
from common.models import User
from announcements.forms import (AnnouncementCreateForm, ApplyAnnouncementCreateForm, StopAnnouncementForm,
CommentAnnouncementCreateForm)
class AnnouncementCreateView(CreateView):
model = Announcement
template_name = 'announcements/announcement/create.html'
success_url = reverse_lazy('index')
form_class = AnnouncementCreateForm
def get_success_url(self):
return self.success_url.format()
def form_valid(self, form):
instance = form.save(commit=False)
user = User.objects.get(user_account__id=self.request.user.id)
instance.user = user
return super(AnnouncementCreateView, self).form_valid(form)
class AnnouncementListView(ListView):
model = Announcement
template_name = 'announcements/announcement/list.html'
def get_queryset(self):
return Announcement.objects.filter(visibility=True)
class AnnouncementUserListView(ListView):
model = Announcement
template_name = 'announcements/announcement/list.html'
def get_queryset(self):
return Announcement.objects.filter(user__user_account__id=self.request.user.id)
class ApplyAnnouncementsReceivedUser(ListView):
model = ApplyAnnouncement
template_name = 'announcements/apply/list.html'
def get_queryset(self):
return ApplyAnnouncement.objects.filter(~Q(state='rejected'),
announcement__user__user_account__id=self.request.user.id)
def get_context_data(self, **kwargs):
context = super(ApplyAnnouncementsReceivedUser, self).get_context_data(**kwargs)
context['title'] = u'Solicitudes de anuncios recibidas'
context['kind_apply'] = u'Ir al anuncio'
context['received'] = True
return context
class AnnouncementApplyCreate(LoginRequiredMixin, CreateView):
model = ApplyAnnouncement
template_name = 'common/form.html'
form_class = ApplyAnnouncementCreateForm
def form_valid(self, form):
instance = form.save(commit=False)
user = User.objects.get(user_account__id=self.request.user.id)
announcement = Announcement.objects.get(pk=self.kwargs['pk'])
instance.user = user
instance.announcement = announcement
return super(AnnouncementApplyCreate, self).form_valid(form)
def dispatch(self, request, *args, **kwargs):
if not self.request.user.is_anonymous:
user = User.objects.get(user_account__id=self.request.user.id)
announcement = Announcement.objects.get(pk=self.kwargs['pk'])
if announcement.applyannouncement_set.filter(user=user).exists():
previous_url = self.request.META.get('HTTP_REFERER', None)
messages.add_message(self.request, messages.ERROR,
'Ya tiene una solicitud para este anuncio')
if previous_url:
return redirect(previous_url)
else:
return redirect('announcement-all')
return super(AnnouncementApplyCreate, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
return reverse('details-announcement', kwargs={'pk': self.kwargs['pk']})
@login_required
def resolve_apply(request, pk, action):
apply_announcement = ApplyAnnouncement.objects.get(pk=pk)
if action == 'approach':
if apply_announcement.announcement.get_seats_free() > 0:
apply_announcement.state = 'approach'
else:
previous_url = request.META.get('HTTP_REFERER', None)
messages.add_message(request, messages.ERROR, 'No puede aceptar esta solicitud porque no hay asientos libres')
return redirect(previous_url)
elif action == 'reject':
apply_announcement.state = 'rejected'
apply_announcement.save()
apply_announcement.announcement.check_applies()
previous_url = request.META.get('HTTP_REFERER', None)
if previous_url:
return redirect(previous_url)
else:
return redirect('details-announcement', pk=apply_announcement.announcement.pk)
class AnnouncementDetailsView(DetailView):
template_name = 'announcements/announcement/show.html'
model = Announcement
def get_queryset(self):
return Announcement.objects.filter(Q(visibility=True) | Q(user__user_account__pk=self.request.user.pk))
class EditAnnouncementView(UpdateView):
model = Announcement
template_name = 'announcements/announcement/create.html'
success_url = reverse_lazy('index')
form_class = AnnouncementCreateForm
def get_success_url(self):
return self.success_url.format()
class StopAnnouncementCreateView(CreateView):
model = StopAnnouncement
template_name = 'announcements/stop/create.html'
success_url = reverse_lazy('index')
form_class = StopAnnouncementForm
def get_success_url(self):
return self.success_url.format()
def form_valid(self, form):
instance = form.save(commit=False)
announcement = Announcement.objects.get(pk=self.kwargs['pk'])
instance.announcement = announcement
return super(StopAnnouncementCreateView, self).form_valid(form)
class StopAnnouncementUpdateView(UpdateView):
model = StopAnnouncement
template_name = 'announcements/stop/create.html'
success_url = reverse_lazy('index')
form_class = StopAnnouncementForm
def get_success_url(self):
return self.success_url.format()
class ApplyAnnouncementsPerformedUser(ListView):
model = ApplyAnnouncement
template_name = 'announcements/apply/list.html'
def get_queryset(self):
return ApplyAnnouncement.objects.filter(user__user_account__id=self.request.user.id)
def get_context_data(self, **kwargs):
context = super(ApplyAnnouncementsPerformedUser, self).get_context_data(**kwargs)
context['title'] = u'Solicitudes de anuncios realizadas'
context['kind_apply'] = u'Ir al anuncio'
return context
class AnnouncementApplyDelete(DeleteView):
model = ApplyAnnouncement
success_url = reverse_lazy('index')
def get_queryset(self):
qs = super(AnnouncementApplyDelete, self).get_queryset()
return qs.filter(~Q(state='approach'), user__user_account=self.request.user)
class AnnouncementUserRecommendationsListView(ListView):
model = Announcement
template_name = 'announcements/announcement/list.html'
def get_context_data(self):
context = super(AnnouncementUserRecommendationsListView, self).get_context_data()
userId = self.request.user.id
userAnnouncements = Announcement.objects.filter(user__user_account__id=userId)
announcements = Announcement.objects.filter(~Q(user__user_account__id=userId), visibility=1)
recommendations = set()
for userAnnouncement in userAnnouncements:
for announcement in announcements:
if userAnnouncement.origin == announcement.origin and userAnnouncement.destination == announcement.destination:
recommendations.add(announcement)
stop_origin = announcement.stopannouncement_set.filter(stop=userAnnouncement.origin)
stop_destination = announcement.stopannouncement_set.filter(stop=userAnnouncement.destination)
if stop_origin and userAnnouncement.destination == announcement.destination:
recommendations.add(announcement)
if stop_origin and stop_destination and (stop_origin[0].sequence < stop_destination[0].sequence):
recommendations.add(announcement)
if userAnnouncement.origin == announcement.origin and stop_destination:
recommendations.add(announcement)
context['object_list'] = recommendations
return context
class CommentAnnouncementCreateView(LoginRequiredMixin, CreateView):
model = CommentAnnouncement
template_name = 'announcements/comments/create.html'
form_class = CommentAnnouncementCreateForm
def get_success_url(self):
return reverse('details-announcement', kwargs={'pk': self.kwargs['announcement_pk']})
def form_valid(self, form):
instance = form.save(commit=False)
user = User.objects.get(user_account__id=self.request.user.id)
announcement = Announcement.objects.get(pk=self.kwargs['announcement_pk'])
instance.user = user
instance.announcement = announcement
return super(CommentAnnouncementCreateView, self).form_valid(form)
|
gpl-3.0
| -7,470,523,588,939,033,000 | 39.682819 | 127 | 0.700563 | false |
seecr/seecr-test
|
seecr/test/calltrace.py
|
1
|
6216
|
## begin license ##
#
# "Seecr Test" provides test tools.
#
# Copyright (C) 2005-2009 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2012, 2019-2021 Seecr (Seek You Too B.V.) https://seecr.nl
#
# This file is part of "Seecr Test"
#
# "Seecr Test" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Seecr Test" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Seecr Test"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from re import compile
def emptyGenerator():
return
yield
class CallTrace:
def __init__(self, name="CallTrace", verbose=False, returnValues=None, ignoredAttributes=[], methods=None, onlySpecifiedMethods=False, emptyGeneratorMethods=[]):
self.calledMethods = CalledMethods()
self.returnValues = returnValues or {}
self.methods = methods or {}
self.exceptions = {}
self._verbose = verbose
self._name = name
self.ignoredAttributes = ignoredAttributes or []
self.onlySpecifiedMethods = onlySpecifiedMethods
self.emptyGeneratorMethods = emptyGeneratorMethods
def calledMethodNames(self):
return [m.name for m in self.calledMethods]
def __getattr__(self, attrname):
if attrname.startswith('__') and attrname.endswith('__') and not attrname in self.returnValues:
return object.__getattr__(self, attrname)
if attrname in self.ignoredAttributes:
raise AttributeError("'CallTrace' is instructed to not have an attribute called '%s'" % attrname)
if self.onlySpecifiedMethods and not attrname in (list(self.returnValues.keys()) + list(self.methods.keys()) + self.emptyGeneratorMethods):
raise AttributeError("'CallTrace' does not support '%s' as it is instructed to only allow specified methods." % attrname)
return CallTraceMethod(attrname, self)
def __calltrace__(self):
return list(map(str, self.calledMethods))
def __bool__(self):
return True
def __repr__(self):
        #TODO: have __repr__ also show up in the calltrace
return "<CallTrace: %s>" % self._name
def __str__(self):
        #TODO: have __str__ also show up in the calltrace
return self.__repr__()
class CallTraceMethod:
def __init__(self, methodName, callTrace):
self.name = methodName
self._callTrace = callTrace
def __call__(self, *args, **kwargs):
return TracedCall(self.name, self._callTrace)(*args, **kwargs)
def __repr__(self):
return "<bound method %s of %s>" % (self.name, self._callTrace)
class TracedCall:
def __init__(self, methodName, callTrace):
self.name = methodName
self._callTrace = callTrace
#inits are necessary to make __repr__ calls before __call__ calls possible
self.args = ()
self.kwargs = {}
def __call__(self, *args, **kwargs):
self._callTrace.calledMethods.append(self)
self.args = args
self.arguments = list(args) # For backwards compatibility only
self.kwargs = kwargs
if self._callTrace._verbose:
print('%s.%s -> %s' % (
self._callTrace._name,
self.__repr__(),
self.represent(self._callTrace.returnValues.get(self.name, None))))
if self.name in self._callTrace.exceptions:
raise self._callTrace.exceptions[self.name]
returnValue = None
if self.name in self._callTrace.returnValues:
returnValue = self._callTrace.returnValues.get(self.name)
elif self.name in self._callTrace.methods:
returnValue = self._callTrace.methods.get(self.name)(*args, **kwargs)
elif self.name in self._callTrace.emptyGeneratorMethods:
returnValue = emptyGenerator()
return returnValue
def asDict(self):
return {
'name': self.name,
'args': self.args,
'kwargs': self.kwargs
}
def represent(self, something):
"""
<calltracetest.NonObject instance at 0x2b02ba1075a8>
<calltracetest.IsObject object at 0x2b02ba0f3510>
<class 'calltracetest.IsObject'>
calltracetest.NonObject
"""
objectOnlyRe = r'((?:\w+\.)*\w+)'
instanceRe = r'<%s instance at .*>' % objectOnlyRe
objectRe = r'<%s object at .*>' % objectOnlyRe
classRe = r"<class '%s'>" % objectOnlyRe
objectsRe = compile(r'|'.join([instanceRe, objectRe]))
classesRe = compile(r'|'.join([classRe, objectOnlyRe]))
strSomething = str(something)
if something == None:
return 'None'
elif isinstance(something, str):
return "'%s'" % something
elif isinstance(something, (int, float)):
return strSomething
elif isinstance(something, (bytes, bytearray)):
return strSomething
elif isinstance(something, type): # a Class
return "<class %s>" % getattr(something, ("__qualname__" if self._callTrace._verbose else "__name__"))
elif isinstance(type(something), type) and (" object " in strSomething or
" instance " in strSomething): # object (instance) of some class
return "<%s>" % getattr(type(something), ("__qualname__" if self._callTrace._verbose else "__name__"))
else:
return strSomething
def __repr__(self):
return '%s(%s)' % (self.name, ", ".join(list(map(self.represent, self.args))+['%s=%s' % (key, self.represent(value)) for key, value in list(self.kwargs.items())]))
class CalledMethods(list):
def reset(self):
del self[:]
return self
|
gpl-2.0
| 4,696,377,958,694,295,000 | 37.85 | 171 | 0.624517 | false |
bluerover/6lbr
|
examples/6lbr/test/RouterLlsec.py
|
1
|
1672
|
#!/usr/bin/python2.7
import unittest
import config
from time import sleep
class RouterLlsec(config.scenarios, unittest.TestCase):
def modeSetUp(self):
config.security_layer=1
self.support.backbone.prefix=config.eth_prefix
self.support.wsn.prefix=config.wsn_prefix
self.br = self.support.add_6lbr()
self.support.setUp()
self.br.set_mode('ROUTER', config.channel, iid='100', ra_daemon=True, accept_ra=False, default_router=self.support.backbone.create_address('1'))
def set_up_network(self):
sleep(10)
self.assertTrue(self.support.platform.accept_ra(self.support.backbone.itf), "Could not enable RA configuration support")
if self.support.platform.support_rio():
self.assertTrue(self.support.platform.accept_rio(self.support.backbone.itf), "Could not enable RIO support")
self.assertTrue(self.support.tcpdump.expect_ra(self.support.backbone.itf, 30), "")
self.assertTrue(self.support.platform.check_prefix(self.support.backbone.itf, config.eth_prefix+':'), "Interface %s not configured" % self.support.backbone.itf)
self.support.host.ip=self.support.platform.get_address_with_prefix(self.support.backbone.itf, config.eth_prefix+':')
if not self.support.platform.support_rio():
self.assertTrue(self.support.platform.add_route(config.wsn_prefix+"::", gw=self.br.ip), "Could not add route")
def tear_down_network(self):
if not self.support.platform.support_rio():
self.support.platform.rm_route(config.wsn_prefix+"::", gw=self.br.ip)
if __name__ == '__main__':
unittest.main(exit=False, verbosity=1)
|
bsd-3-clause
| 8,836,917,602,680,402,000 | 51.25 | 168 | 0.691986 | false |
sam-m888/gprime
|
gprime/datehandler/test/dateparser_test.py
|
1
|
4932
|
# -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2013 Vassilii Khachaturov
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Deeper testing of some DateParser internals.
"""
import unittest
from ...utils.locale import Locale
from ...lib.date import Date
class DateParserTest(unittest.TestCase):
def setUp(self):
from .._dateparser import DateParser
self.parser = DateParser()
self.parser_RU = Locale(lang='ru').date_parser
def assert_map_key_val(self, m, k, v):
try:
self.assertEqual(m[k], v)
except KeyError:
self.assertTrue(False, list(m.items()))
def test_month_to_int_jan_is_1(self):
self.assert_map_key_val(self.parser.month_to_int, 'jan', 1)
def test_prefix_table_for_RU_built(self):
self.assertIn('ru_RU', self.parser._langs)
def test_month_to_int_septem_RU_is_9(self):
self.assert_map_key_val(self.parser.month_to_int, 'сентяб', 9)
def test_hebrew_to_int_av_is_12(self):
self.assert_map_key_val(self.parser.hebrew_to_int, 'av', 12)
self.assert_map_key_val(self.parser.hebrew_to_int, 'ав', 12) # RU
def test_french_to_int_thermidor_is_11(self):
self.assert_map_key_val(self.parser.french_to_int, 'thermidor', 11)
self.assert_map_key_val(self.parser.french_to_int, 'термидор', 11) # RU
def test_islamic_to_int_ramadan_is_9(self):
self.assert_map_key_val(self.parser.islamic_to_int, 'ramadan', 9)
self.assert_map_key_val(self.parser.islamic_to_int, 'рамадан', 9) # RU
def test_persian_to_int_tir_is_4(self):
self.assert_map_key_val(self.parser.persian_to_int, 'tir', 4)
self.assert_map_key_val(self.parser.persian_to_int, 'тир', 4) # RU
def test_calendar_to_int_gregorian(self):
self.assert_map_key_val(self.parser.calendar_to_int, 'gregorian', Date.CAL_GREGORIAN)
self.assert_map_key_val(self.parser.calendar_to_int, 'g', Date.CAL_GREGORIAN)
self.assert_map_key_val(self.parser.calendar_to_int, 'григорианский', Date.CAL_GREGORIAN)
self.assert_map_key_val(self.parser.calendar_to_int, 'г', Date.CAL_GREGORIAN)
def test_calendar_to_int_julian(self):
self.assert_map_key_val(self.parser.calendar_to_int, 'julian', Date.CAL_JULIAN)
self.assert_map_key_val(self.parser.calendar_to_int, 'j', Date.CAL_JULIAN)
self.assert_map_key_val(self.parser.calendar_to_int, 'юлианский', Date.CAL_JULIAN)
self.assert_map_key_val(self.parser.calendar_to_int, 'ю', Date.CAL_JULIAN)
class Test_generate_variants(unittest.TestCase):
def setUp(self):
from .. import _datestrings
from .._dateparser import _generate_variants
self.ds = ds = _datestrings.DateStrings(Locale(languages=('ru')))
self.month_variants = list(_generate_variants(
zip(ds.long_months, ds.short_months,
ds.swedish_SV, ds.alt_long_months)))
def testVariantsSameLengthAsLongMonths(self):
self.assertEqual(len(self.ds.long_months),
len(self.month_variants))
def testRussianHasDifferentVariantsForEachMonth(self):
for i in range(1, 13):
mvi = self.month_variants[i]
self.assertTrue(len(mvi) > 1, msg=mvi)
def testNoEmptyStringInVariants(self):
for i in range(1, 13):
mvi = self.month_variants[i]
self.assertNotIn("", mvi)
def testLongMonthsAppearInVariants(self):
for i in range(1, 13):
lmi = self.ds.long_months[i]
mvi = self.month_variants[i]
self.assertIn("{}".format(lmi), mvi)
def testShortMonthsAppearInVariants(self):
for i in range(1, 13):
smi = self.ds.short_months[i]
mvi = self.month_variants[i]
self.assertIn("{}".format(smi), mvi)
def testLongMonthVariantsUnique(self):
for i in range(1, 13):
mvi = self.month_variants[i]
self.assertEqual(len(mvi), len(set(mvi)), msg=mvi)
def testRuMayVariantsContainSvMaj(self):
v = self.month_variants[5]
self.assertIn("Maj", v)
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
| 4,527,299,556,007,412,000 | 38.056 | 97 | 0.653011 | false |
iesugrace/tree
|
view.py
|
1
|
17773
|
"""
Author: Joshua Chen
Date: 2015-12-30
Location: Shenzhen
Desc: Application that makes use of the acl library
to maintain a View database.
"""
from lib import *
from acl import *
import re
import sys
class View:
""" Represents a view entry in the view database.
When we process the view config, only the view name
and the related acl name is significant, othe config
will be simply treated as 'other config' and
remain intact.
"""
def __init__(self, name, aclName, otherConfig):
""" name and aclName are str, otherConfig is bytes
"""
self.name = name
self.aclName = aclName # str
        self.otherConfig = otherConfig # list of bytes
@staticmethod
def parseConfig(lines):
""" extract the ACL info from the config lines which
is a list of bytes, return the ACL name as a str,
and the rest of the view config as a list of bytes.
The line which contains the ACL info shall be the
first line of the 'lines'
For this view config code in the view database:
view "VIEW_NAME" {
match-clients { key key_name;ACL_NAME; };
...
... other view config lines
...
};
The 'config' will be these four lines:
match-clients { key key_name;ACL_NAME; };
...
... other view config lines
...
"""
acl_line = lines[0]
if not re.match(b'\s*match-clients\s', acl_line):
raise InvalidViewConfigException
rest_lines = lines[1:]
aclName = acl_line.split(b';')[-3].decode().strip()
return (aclName, rest_lines)
class ViewGroup:
""" All views in the group are unique in name. The acl of one
view can overlap the one of another view, provided the view
of the LESS acl be placed first.
for instance:
view1 -- acl1 -- {192.168.0.0/16;}
view2 -- acl2 -- {192.168.1.0/24; 10.1.0.0/16;}
view2 MUST be placed in front of view1 in the
database, because acl2 is LESS than acl1.
But the relationship may be more complex when these
three are putting together in the same view database:
view1 -- acl1 -- {192.168.1.0/24; 10.1.0.0/16;}
view2 -- acl2 -- {192.168.0.0/16; 172.16.1.0/24;}
view3 -- acl3 -- {172.16.0.0/16; 10.1.1.0/24}
The relationship is: acl1 < acl2 < acl3 < acl1. It's a loop
here, which can not satisfy the 'LESS first' rule.
To deal with this problem, we separate acl3 and view3.
In the ViewGroup, all views are organized in two categories:
1. Free views
In this category, acl of any view doesn't have common
part with any other view in the whole ViewGroup. One
dictionary or set is well suited for holding all these
kind of views of a single ViewGroup.
2. Ordered views
If a view's acl is LESS or GREATER than another view's,
these two views are put in a list which is ordered,
LESS acl first. A ViewGroup may contain multiple such
ordered lists, order is only matter within a list, not
between lists, so a list can be placed before or after
another.
"""
# control how verbose the program will be
verbose = 0
def __init__(self, acls={}):
"""
self.data holds all unprocessed views.
self.outData holds all ready-for-output views.
self.outData['free'] holds all views that have
no LESS or GREATER relationship with others,
the order of it does not matter.
self.outData['ordered'] holds multiple lists,
each list is a group of views which must be
ordered. The order of the lists does not
matter, but the order of views in each list
does.
self.acls is the acl data the views will use.
"""
self.data = []
self.outData = {}
self.outData['free'] = []
self.outData['ordered'] = []
self.attachAclDb(acls)
def attachAclDb(self, acls):
""" Add the acl database for the ViewGroup to use
The acls is a dictionary, the key is the acl name,
and the value is the acl object. The view database
usually make use of a preset acl named 'ANY' for
default selection, here we ensure that acl exists.
"""
anyName = 'ANY'
if anyName not in acls:
acls[anyName] = Acl(anyName)
self.acls = acls
def load(self, dbFile, resolveParts=True):
""" Load data from a database, the existing
data of the group will be abandoned. The
'ANY' view shall be separated from others.
"""
self.data = []
viewBlocks = self.preproc(dbFile)
for block in viewBlocks:
lines = block.split(b'\n')
view_name = lines[0].split(b'"')[1].decode()
n = self.locateLine(lines, b'^};')
if n is None:
raise InvalidViewConfigException
lines = lines[1:n]
parsed = View.parseConfig(lines)
aclName = parsed[0]
otherConfig = parsed[1]
view = View(view_name, aclName, otherConfig)
self.addView(view)
self.separateDefaultView()
if resolveParts:
self.resolveViewsParts()
def separateDefaultView(self):
""" Separate the 'ANY' view from others.
"""
x = [v for v in self.data if v.name.lower() == 'any']
if len(x):
defaultView = x[0]
self.defaultView = defaultView
self.data.remove(defaultView)
else:
self.defaultView = None
def addView(self, view, validator=None, vpargs=(), vkargs={}):
""" Add the view to the group. Duplicate name of view
will be ignored.
"""
if not validator:
validator = self.defaultValidator
vpargs = (self.data,)
vkargs = {}
try:
validator(view, *vpargs, **vkargs)
except ViewExistsException as e:
view = e.args[0]
print('duplicate view: %s' % view.name, file=sys.stderr)
else:
self.data.append(view)
return True
def defaultValidator(self, view, group):
""" Default validator of the ViewGroup
Ensure unique view name in the group.
"""
        matches = [v for v in group if v.name == view.name]
        if not matches:
            return True
        else:
            raise ViewExistsException(matches[0])
def locateLine(self, lines, pattern):
""" Return the index number of the matching line
None will be returned if none match.
"""
n = None
for idx, line in enumerate(lines):
if re.search(pattern, line):
n = idx
break
return n
def preproc(self, dbFile):
""" Process the dbFile, return a list of bytes,
each bytes contains all config data of a view,
without the leading 'view' keyword.
"""
lines = open(dbFile, 'rb').read()
lines = lines.split(b'\n')
# remove all comment and empty lines above the first view
# and remove the leading keyword 'view' of the first view
n = self.locateLine(lines, b'^view\s')
if n is None:
raise InvalidViewConfigException
lines[n] = re.sub(b'^view\s', b'', lines[n])
lines = lines[n:]
rawData = b'\n'.join(lines)
blocks = re.split(b'\nview\s', rawData)
return blocks
def writeOneView(self, view, ofile):
""" Format a code text for the view,
and write it to the ofile.
"""
linePrefix = ' '
viewName = view.name
aclName = view.aclName
otherConfig = view.otherConfig
header = 'view "%s" {\n' % viewName
keyName = aclName.lower()
aclLine = '%smatch-clients { key %s; %s; };\n' % (
linePrefix,
keyName,
aclName)
tailer = '};\n'
ba = bytearray()
ba.extend(header.encode())
ba.extend(aclLine.encode())
ba.extend(b'\n'.join(otherConfig)) # a list of bytes objects
ba.extend(b'\n')
ba.extend(tailer.encode())
ba.extend(b'\n')
ofile.write(bytes(ba))
def save(self, dbFile):
""" Save the group data to a database file.
Views with LESS acl shall be put in front of
the one which is GREATER. The default view to
the bottom.
"""
ofile = open(dbFile, 'wb')
for viewList in self.outData['ordered']:
for view in viewList:
self.writeOneView(view, ofile)
for view in self.outData['free']:
self.writeOneView(view, ofile)
if self.defaultView is not None:
self.writeOneView(self.defaultView, ofile)
def resolveViewsParts(self):
""" Find out all views whose acl is missing (been
split), and find out all parts of that old acl,
create a new view for each part of it.
"""
views = [x for x in self.data if x.aclName not in self.acls]
for view in views:
newViews = self.resolveOneViewParts(view)
if not newViews:
print("%s's acl %s is missing" % (view.name, view.aclName),
file=sys.stderr)
else:
self.data.remove(view)
self.data.extend(newViews)
def resolveOneViewParts(self, view):
""" A view's acl may be split into parts in a
previous operation. This method finds out all
new acls of the old one, and creates new views
for each of these new acls, returns a list of
the new views.
        If an acl has been broken into parts, the name is
changed like this:
oldacl --> oldacl-0
oldacl-1 --> oldacl-1-0
oldacl-1-1
The new views have different name and aclName
comparing to the old view, but the MySQL
statement is identical, since these different
views all use the same data in the database.
This method shall not be call if the acl which
the 'view' connects already exists in self.acls.
"""
flag = view.aclName + '-'
names = [x for x in self.acls if x.startswith(flag)]
newViews = []
if len(names) > 1:
for aclName in names:
newView = View(aclName, aclName, view.otherConfig)
newViews.append(newView)
return newViews
def order(self):
""" Sort all views in the group, but not including
the 'ANY' view which is the default and shall not
be put together to sort, it shall always be the
last one in the view config database.
"""
views = list(self.data)
self.enforceRules(views)
for view in views:
self.placeView(view)
def placeView(self, begin_view):
""" Place the view to an appropricate location,
according to the order rule. On failure, split
the view and its acl, and start over.
"""
views = {begin_view.name: begin_view}
while views:
viewName = list(views.keys())[0]
viewObj = views.pop(viewName)
if self.verbose >= 1:
print("placing view %s" % viewName)
try:
self.insertView(viewObj)
except ViewOrderException as e: # split and retry
if self.verbose >= 1:
print("splitting view %s" % viewName)
self.orderExceptionHandler(e=e, viewObj=viewObj, views=views)
def orderExceptionHandler(self, *junk, e, viewObj, views):
""" Handler for order exception, to split the acl and the view.
"""
nets = e.args[0]
oldAclName = viewObj.aclName # get name befor split
oldAcl0, oldAcl1 = Acl.splitTree(nets)
self.acls.pop(oldAclName) # remove the old name
self.acls[oldAcl0.name] = oldAcl0
self.acls[oldAcl1.name] = oldAcl1
for suffix, aclName in [('-0', oldAcl0.name), ('-1', oldAcl1.name)]:
name = viewObj.name + suffix
newView = View(name, aclName, viewObj.otherConfig)
views[name] = newView
def insertView(self, newView):
""" Find a good location in the self.outData, and
insert the view into it.
This is the core mechanism that ensures a logically
correct view database. The rule is: IF ANY VIEW'S
ACL OVERLAPS OTHER VIEW'S, THE VIEW OF THE LESS ACL
SHALL BE PLACED FIRST, THEN THE GREATER ONE.
If it's impossible to pick a location that complies
to the order rule, raise an exception. It's possible
that the exception raised halfway at which point
some views may had already been moved from their
original position, thus corrupt the view group, to
prevent it, we make shallow copies of the groups,
and process the copies, after all existing views had
been processed, we update the view group with the
processed copies.
If a list in the orderedGroups has a view LESS or
GREATER than the newView, the same will be deleted,
all views in it will be moved to a new list. If a
view in the freeViews group LESS or GREATER than
the newView, the same will be moved to a new list.
"""
freeViews = list(self.outData['free'])
orderedGroups = [list(l) for l in self.outData['ordered']]
intactGroups = [] # holds the view lists that have
# no relationship with the newView
globalL = [] # holds all views that LESS than the newView
globalR = [] # holds all views that GREATER than the newView
newAcl = self.acls[newView.aclName]
# the free category
lGroup = []
gGroup = []
for existView in freeViews:
existAcl = self.acls[existView.aclName]
rela = existAcl.compare(newAcl)
if rela == Acl.LESS:
lGroup.append(existView)
elif rela == Acl.GREATER:
gGroup.append(existView)
for v in (lGroup + gGroup):
freeViews.remove(v)
globalL.extend(lGroup)
globalR.extend(gGroup)
# the ordered category
for viewList in orderedGroups:
lessLen = 0
lGroup = []
gGroup = []
for existView in viewList:
existAcl = self.acls[existView.aclName]
rela = existAcl.compare(newAcl)
if rela == Acl.LESS:
lessLen += 1
elif rela == Acl.GREATER:
lGroup = viewList[:lessLen]
gGroup = viewList[lessLen:]
break
else:
lessLen += 1
else:
lGroup = viewList[:lessLen]
# at this point, all views in the lGroup are
# LESS than the newView (its acl actually), but
# in the gGroup, only the first of it is GREATER
# than the newView, all subsequent ones are
# undetermined. The next step is to found out if
# there is any view in the gGroup that is LESS
# than the newView, in which case we will raise
# an exception because the rule is violated.
for existView in gGroup:
existAcl = self.acls[existView.aclName]
rela = existAcl.compare(newAcl)
if rela == Acl.LESS:
# attach the greater nets of the newAcl for split
nets = self.getNets(newAcl, existAcl, Network.GREATER)
raise ViewOrderException(nets)
if len(lGroup) == 0 and len(gGroup) == 0:
intactGroups.append(viewList)
else:
globalL.extend(lGroup)
globalR.extend(gGroup)
if len(globalL) == 0 and len(globalR) == 0:
self.outData['free'].append(newView)
else:
self.outData['free'] = freeViews
self.outData['ordered'] = []
self.outData['ordered'].extend(intactGroups)
newList = globalL + [newView] + globalR
self.outData['ordered'].append(newList)
def getNets(self, acl1, acl2, relation):
""" Compare acl1 and acl2, and find all networks
in acl1 that has 'relation' relationship with
networks in acl2.
"""
nets = []
nets1 = acl1.networks()
nets2 = acl2.networks()
for net1 in nets1:
for net2 in nets2:
if relation == net1.compare(net2):
nets.append(net1)
break
return nets
def enforceRules(self, views):
""" Raise an exception if any violation detected
Rules:
- Only top acl can be referenced by a view,
top acl is the one has no parent.
"""
aclNames = [x.aclName for x in views]
aclObjects = [self.acls[x] for x in aclNames]
m = [x for x in aclObjects if x.parent is not None]
# zero length means no violation
        assert (len(m) == 0), "view config does not comply with the rules"
|
gpl-2.0
| 2,059,163,580,106,679,300 | 36.495781 | 77 | 0.559332 | false |
MaYaSeVeN/Reverse-IP-Bing-API
|
reverseIP.py
|
1
|
8382
|
# -*- coding: utf-8 -*-
__author__ = 'Nop Phoomthaisong (aka @MaYaSeVeN)'
__version__ = 'reverseIP version 1.0 ( http://mayaseven.com )'
import urllib
import urllib2
import json
import re
import sys
import optparse
import socket
def main():
print "\n" + __version__
usage = "Usage: python " + sys.argv[
0] + " -k [Bing API Key] [IP_1] [IP_2] [IP_N] [Domain_1] [Domain_2] [Domain_N]\nUsage: python " + \
sys.argv[
0] + " -k [Bing API Key] -l [list file of IP address]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-k", "--key", dest="key", help="Bing API key")
parser.add_option("-l", "--list", dest="file", metavar="FILE", help="list file of IP address")
parser.add_option("-d", "--disable", action="store_false",
help="set this option to disable to recheck is that the domain is in IP address",
default=True)
(options, args) = parser.parse_args()
if not options.key or (not options.file and len(args) == 0):
print parser.format_help()
print """
        you need to:
        1. register or use a Microsoft account to get a Bing API key -> https://datamarket.azure.com/
        2. choose the free plan (5,000 transactions/month) -> https://datamarket.azure.com/dataset/bing/search
"""
exit(1)
key = options.key
recheck = options.disable
file = options.file
    reverseIP = ReverseIP(args, key, recheck, file)
reverseIP.run()
def stdout(log):
print log
class ReverseIP:
def __init__(self, args, key, recheck, file):
self.domains = []
self.domain_numbers = 0
self.final_result = {}
self.ips_or_domains = set(args)
self.api_key = key
self.count = 0
self.log = stdout
self.recheck = recheck
self.file = file
self.http = "http://"
self.https = "https://"
def run(self):
if self.file:
self.ips_or_domains = self.file_opener()
while self.ips_or_domains:
self.domain_numbers = 0
ip_or_domain = self.ips_or_domains.pop()
self.reverse_ip(ip_or_domain)
self.log("[*] You got " + str(self.domain_numbers) + " domains to hack.")
self.domains = []
def file_opener(self):
try:
file = open(self.file, "r").read()
find_ip = re.findall(r'[0-9]+(?:\.[0-9]+){3}', file)
ipl = set(find_ip)
return ipl
except IOError:
self.log("[-] Error: File does not appear to exist.")
exit(1)
def reverse_ip(self, ip_or_domain):
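        # Resolve the target, skip Cloudflare-fronted hosts, then page through
        # Bing results until a page repeats or comes back empty.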
raw_domains_temp = []
name, ip = self.convert_domain_to_ip(ip_or_domain)
if not ip:
return
if self.check_ip_in_cloudflare(ip):
if name is not "":
self.log("\n[-] " + name + " hiding behind Cloudflare.")
return
self.log("[-] It's Cloudflare IP Address.")
return
query = "IP:" + ip
self.log("\n[*] Host: " + ip + " " + name)
self.count = 0
while 1:
raw_domains = self.bing_call_api(query)
if raw_domains == raw_domains_temp:
break
raw_domains_temp = raw_domains
if raw_domains == -1:
break
self.count += 100
if self.recheck:
self.check_domain_name_in_ip(raw_domains, ip)
else:
for l in raw_domains:
if l[1] not in self.domains:
self.log("[+] " + ''.join(l).encode('utf8'))
self.domains.append(l)
self.final_result.update({ip: self.domains})
self.domain_numbers = len(self.domains)
def convert_domain_to_ip(self, ip_or_domain):
name = ""
if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip_or_domain):
try:
name = ip_or_domain
ip = socket.gethostbyname(ip_or_domain)
return name, ip
except socket.gaierror:
self.log("\n[-] unable to get address for " + ip_or_domain)
self.domain_numbers = 0
ip = None
return name, ip
return name, ip_or_domain
def bing_call_api(self, query):
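        # Query the Bing Search API over HTTP Basic auth (empty username, API key
        # as password) and page through results via the $skip offset kept in
        # self.count; returns a de-duplicated list of [protocol, host, port] triples.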
domains = []
query = urllib.quote(query)
user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)'
credentials = (':%s' % self.api_key).encode('base64')[:-1]
auth = 'Basic %s' % credentials
url = 'https://api.datamarket.azure.com/Bing/SearchWeb/v1/Web?Query=%27' + query + '%27&$format=json' + '&$skip=' + str(
self.count)
request = urllib2.Request(url)
request.add_header('Authorization', auth)
request.add_header('User-Agent', user_agent)
request_opener = urllib2.build_opener()
try:
response = request_opener.open(request)
except urllib2.HTTPError, e:
if e.code == 401:
                self.log('[-] Wrong API key, or you have not signed up to use the Bing Search API')
self.log("""
                you need to:
                1. register or use a Microsoft account to get a Bing API key -> https://datamarket.azure.com/
                2. choose the free plan (5,000 transactions/month) -> https://datamarket.azure.com/dataset/bing/search
""")
exit(1)
self.log("[-] Connection problem!!, Cannot connect to Bing API")
exit(1)
response_data = response.read()
json_result = json.loads(response_data)
result_list = json_result
if len(result_list['d']['results']) == 0:
return -1
for i in range(len(result_list['d']['results'])):
protocol_domain_port = []
domain = result_list['d']['results'][i]['DisplayUrl']
if self.https in domain:
protocol_domain_port.append("https://")
port = ':443'
else:
protocol_domain_port.append("http://")
port = ':80'
domain = domain.replace("http://", "").replace("https://", "")
rest = domain.split('/', 1)[0]
if ':' in rest:
port = rest.split(':', 1)[1]
rest = rest.split(':', 1)[0]
port = ':' + port
protocol_domain_port.append(rest)
protocol_domain_port.append(port)
domains.append(protocol_domain_port)
raw_domains = list()
map(lambda x: not x in raw_domains and raw_domains.append(x), domains)
return raw_domains
def check_domain_name_in_ip(self, raw_domains, ip):
for l in raw_domains:
if l[1] in self.domains:
continue
try:
ipc = socket.gethostbyname(l[1].encode("idna"))
except:
self.log("[!] " + ''.join(l).encode(
'utf8') + " Cannot recheck bacause of hostname encoded, Please recheck it by hand.")
continue
if ipc == ip:
self.log("[+] " + ''.join(l).encode('utf8'))
else:
self.log("[!] " + ''.join(l).encode('utf8') + " is on the other IP address, please recheck it by hand.")
continue
self.domains.append(l)
self.final_result.update({ip: self.domains})
self.domain_numbers = len(self.domains)
def check_ip_in_cloudflare(self, ip):
cloudflare_ips = ['199.27.128.0/21', '173.245.48.0/20', '103.21.244.0/22', '103.22.200.0/22', '103.31.4.0/22',
'141.101.64.0/18', '108.162.192.0/18', '190.93.240.0/20', '188.114.96.0/20',
'197.234.240.0/22', '198.41.128.0/17', '162.158.0.0/15', '104.16.0.0/12']
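        # Convert the dotted-quad address to a 32-bit integer and test it against
        # each CIDR block by applying the network mask to both sides.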
ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16)
for net in cloudflare_ips:
netstr, bits = net.split('/')
netaddr = int(''.join(['%02x' % int(x) for x in netstr.split('.')]), 16)
mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
if (ipaddr & mask) == (netaddr & mask):
return True
return False
if __name__ == "__main__":
main()
|
gpl-2.0
| 5,043,467,834,300,942,000 | 36.756757 | 142 | 0.517299 | false |
bokeh/bokeh
|
tests/unit/bokeh/core/test_templates.py
|
1
|
3107
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import hashlib
from os.path import abspath, join, split
# Module under test
import bokeh.core.templates as bct # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
TOP_PATH = abspath(join(split(bct.__file__)[0]))
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _crlf_cr_2_lf_bin(s):
import re
return re.sub(b"\r\n|\r|\n", b"\n", s)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def compute_sha256(data):
sha256 = hashlib.sha256()
sha256.update(data)
return sha256.hexdigest()
pinned_template_sha256 = "5d26be35712286918e36cc469c9354076b3d555eb39799aa63d04473c0566c29"
def test_autoload_template_has_changed() -> None:
"""This is not really a test but a reminder that if you change the
autoload_nb_js.js template then you should make sure that insertion of
plots into notebooks is working as expected. In particular, this test was
created as part of https://github.com/bokeh/bokeh/issues/7125.
"""
with open(join(TOP_PATH, "_templates/autoload_nb_js.js"), mode="rb") as f:
current_template_sha256 = compute_sha256(_crlf_cr_2_lf_bin(f.read()))
assert pinned_template_sha256 == current_template_sha256, """\
It seems that the template autoload_nb_js.js has changed.
If this is voluntary and that proper testing of plots insertion
in notebooks has been completed successfully, update this test
with the new file SHA256 signature."""
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
bsd-3-clause
| 5,802,592,885,021,280,000 | 40.426667 | 91 | 0.380753 | false |
chmanchester/firefox-ui-tests
|
firefox_puppeteer/ui/toolbars.py
|
1
|
13343
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette import Wait, By
from ..api.keys import Keys
from ..api.l10n import L10n
from ..base import BaseLib
from ..decorators import use_class_as_property
class NavBar(BaseLib):
""" The NavBar class provides access to elements contained in the
navigation bar as well as the locationbar.
"""
@property
def back_button(self):
""" Provides access to the back button from the navbar ui.
:returns: The back button element.
"""
return self.marionette.find_element('id', 'back-button')
@property
def forward_button(self):
""" Provides access to the forward button from the navbar ui.
:returns: The forward button element.
"""
return self.marionette.find_element('id', 'forward-button')
@property
def home_button(self):
""" Provides access to the home button from the navbar ui.
:returns: The home button element.
"""
return self.marionette.find_element('id', 'home-button')
@use_class_as_property('ui.toolbars.LocationBar')
def locationbar(self):
"""Provides members for accessing and manipulationg the
locationbar.
See the :class:`~ui.toolbars.LocationBar` reference.
"""
@property
def menu_button(self):
""" Provides access to the menu button from the navbar ui.
:returns: The menu button element.
"""
return self.marionette.find_element('id', 'PanelUI-menu-button')
class LocationBar(BaseLib):
"""Various utilities for interacting with the location bar (the text area
of the ui that typically displays the current url).
"""
dtds = ["chrome://branding/locale/brand.dtd",
"chrome://browser/locale/browser.dtd"]
def __init__(self, *args, **kwargs):
BaseLib.__init__(self, *args, **kwargs)
# TODO: A "utility" module that sets up the client directly would be
# useful here.
self.l10n = L10n(self.get_marionette)
self.keys = Keys(self.get_marionette)
@use_class_as_property('ui.toolbars.AutocompleteResults')
def autocomplete_results(self):
"""Provides utility members for accessing and manipulationg the
locationbar.
See the :class:`~ui.toolbars.AutocompleteResults` reference.
"""
def clear(self):
""" Clears the contents of the url bar (via the DELETE shortcut).
"""
self.focus('shortcut')
self.urlbar.send_keys(Keys.DELETE)
Wait(self.marionette).until(
lambda _: self.urlbar.get_attribute('value') == '')
def close_context_menu(self):
""" Closes the Location Bar context menu by a key event.
"""
# TODO: This method should be implemented via the menu API.
self.contextmenu.send_keys(Keys.ESCAPE)
@property
def contextmenu(self):
""" Provides access to the urlbar context menu.
:returns: The urlbar contextmenu element.
"""
parent = self.urlbar.find_element('anon attribute', {'anonid': 'textbox-input-box'})
return parent.find_element('anon attribute', {'anonid': 'input-box-contextmenu'})
@property
def favicon(self):
""" Provides asccess to the urlbar favicon.
:returns: The favicon element.
"""
return self.marionette.find_element(By.ID, 'page-proxy-favicon')
def focus(self, evt='click'):
"""Focus the location bar according to the provided event.
:param evt: The event to synthesize in order to focus the urlbar
(one of 'click' or 'shortcut').
"""
if evt == 'click':
self.urlbar.click()
elif evt == 'shortcut':
cmd_key = self.l10n.get_localized_entity(LocationBar.dtds,
'openCmd.commandkey')
(self.marionette.find_element(By.ID, 'main-window')
.send_keys(self.keys.ACCEL, cmd_key))
else:
raise ValueError("An unknown event type was passed: %s" % evt)
Wait(self.marionette).until(
lambda _: self.urlbar.get_attribute('focused') == 'true')
def get_contextmenu_entry(self, action):
""" Retirieves the urlbar context menu entry corresponding
to the given action.
:param action: The action correspoding to the retrieved value.
:returns: The urlbar contextmenu entry.
"""
# TODO: This method should be implemented via the menu API.
entries = self.contextmenu.find_elements('css selector', 'menuitem')
filter_on = 'cmd_%s' % action
found = [e for e in entries if e.get_attribute('cmd') == filter_on]
return found[0] if len(found) else None
@property
def history_drop_marker(self):
""" Provides asccess to the history drop marker.
:returns: The history drop marker.
"""
return self.urlbar.find_element('anon attribute', {'anonid': 'historydropmarker'})
@use_class_as_property('ui.toolbars.IdentityPopup')
def identity_popup(self):
"""Provides utility members for accessing and manipulationg the
locationbar.
See the :class:`~ui.toolbars.IdentityPopup` reference.
"""
def load_url(self, url):
"""Load the specified url in the location bar by synthesized
keystrokes.
:param url: The url to load.
"""
self.clear()
self.focus('shortcut')
self.urlbar.send_keys(url + Keys.ENTER)
@property
def notification_popup(self):
""" Provides asccess to the notification popup.
:returns: The notification popup.
"""
return self.marionette.find_element(By.ID, "notification-popup")
@property
def reload_button(self):
""" Provides asccess to the reload button.
:returns: The reload button.
"""
return self.marionette.find_element(By.ID, 'urlbar-reload-button')
def reload_url(self, trigger='button', force=False):
"""Reload the currently open page.
:param trigger: The event type to use to cause the reload. (one of
"shortcut", "shortcut2", or "button").
:param force: Whether to cause a forced reload.
"""
# TODO: The force parameter is ignored for the moment. Use
# mouse event modifiers or actions when they're ready.
# Bug 1097705 tracks this feature in marionette.
if trigger == 'button':
self.reload_button.click()
elif trigger == 'shortcut':
cmd_key = self.l10n.get_localized_entity(LocationBar.dtds,
'reloadCmd.commandkey')
self.urlbar.send_keys(cmd_key)
elif trigger == 'shortcut2':
self.urlbar.send_keys(self.keys.F5)
@property
def stop_button(self):
""" Provides asccess to the stop button.
:returns: The stop button.
"""
return self.marionette.find_element(By.ID, 'urlbar-stop-button')
@property
def urlbar(self):
""" Provides access to the urlbar element.
:returns: The urlbar element.
"""
return self.marionette.find_element(By.ID, 'urlbar')
@property
def urlbar_input(self):
""" Provides access to the urlbar input element.
:returns: The urlbar_input element
"""
return self.urlbar.find_element('anon attribute', {'anonid': 'input'})
@property
def value(self):
""" Provides access to the currently displayed value of the urlbar.
:returns: The urlbar value.
"""
return self.urlbar.get_attribute('value')
class AutocompleteResults(BaseLib):
"""Library for interacting with autocomplete results.
"""
def __init__(self, *args, **kwargs):
BaseLib.__init__(self, *args, **kwargs)
# TODO: A "utility" module that sets up the client directly would be
# useful here.
self.l10n = L10n(self.get_marionette)
self.keys = Keys(self.get_marionette)
def close(self, force=False):
""" Closes the urlbar autocomplete popup.
:param force: If true, the popup is closed by its own hide function,
otherwise a key event is sent to close the popup.
"""
if not self.is_open:
return
if force:
self.marionette.execute_script("""
arguments[0].hidePopup();
""", script_args=[self.popup])
else:
(self.marionette.find_element('id', 'urlbar')
.send_keys(Keys.ESCAPE))
Wait(self.marionette).until(
lambda _: not self.is_open)
def get_matching_text(self, result, match_type):
"""Retuns an array of strings of the matching text within a autocomplete
result in the urlbar.
:param result: The result to inspect for matches.
:param match_type: The type of match to search for (one of "title", "url").
"""
if match_type == 'title':
descnode = self.marionette.execute_script("""
return arguments[0].boxObject.firstChild.childNodes[1].childNodes[0];
""", script_args=[result])
elif match_type == 'url':
descnode = self.marionette.execute_script("""
return arguments[0].boxObject.lastChild.childNodes[2].childNodes[0];
""", script_args=[result])
else:
raise ValueError('match_type provided must be one of'
'"title" or "url", not %s' % match_type)
return self.marionette.execute_script("""
let rv = [];
for (let node of arguments[0].childNodes) {
if (node.nodeName == 'span') {
rv.push(node.innerHTML);
}
}
return rv;
""", script_args=[descnode])
@property
def visible_results(self):
""" Supplies the list of visible autocomplete result nodes.
:returns: The list of visible results.
"""
return self.marionette.execute_script("""
let rv = [];
let node = arguments[0];
for (let i = 0; i < node.itemCount; ++i) {
let item = node.getItemAtIndex(i);
if (!item.hasAttribute("collapsed")) {
rv.push(item);
}
}
return rv;
""", script_args=[self.results])
@property
def is_open(self):
""" Returns whether this popup is currently open.
:returns: True when the popup is open, otherwise false.
"""
return self.popup.get_attribute('state') == 'open'
@property
def popup(self):
""" Provides access to the popup result element.
:returns: The popup result element.
"""
return self.marionette.find_element(By.ID,
'PopupAutoCompleteRichResult')
@property
def results(self):
""" Povides access to the container node for autocomplete results.
:returns: The result container node.
"""
return self.popup.find_element('anon attribute',
{'anonid': 'richlistbox'})
class IdentityPopup(BaseLib):
"""Library wrapping selectors for interacting with the identity popup.
"""
@property
def box(self):
return self.marionette.find_element(By.ID, 'identity-box')
@property
def country_label(self):
return self.marionette.find_element(By.ID, 'identity-icon-country-label')
@property
def encryption_label(self):
return self.marionette.find_element(By.ID, 'identity-popup-encryption-label')
@property
def encryption_icon(self):
return self.marionette.find_element(By.ID, 'identity-popup-encryption-icon')
@property
def host(self):
return self.marionette.find_element(By.ID, 'identity-popup-content-host')
@property
def is_open(self):
""" Returns whether this popup is currently open.
:returns: True when the popup is open, otherwise false.
"""
return self.popup.get_attribute('state') == 'open'
@property
def more_info_button(self):
return self.marionette.find_element(By.ID, 'identity-popup-more-info-button')
@property
def organization_label(self):
return self.marionette.find_element(By.ID, 'identity-icon-label')
@property
def owner(self):
return self.marionette.find_element(By.ID, 'identity-popup-content-owner')
@property
def owner_location(self):
return self.marionette.find_element(By.ID, 'identity-popup-content-supplemental')
@property
def popup(self):
return self.marionette.find_element(By.ID, 'identity-popup')
@property
def permissions(self):
return self.marionette.find_element(By.ID, 'identity-popup-permissions')
@property
def verifier(self):
return self.marionette.find_element(By.ID, 'identity-popup-content-verifier')
|
mpl-2.0
| 4,035,289,342,934,853,000 | 32.191542 | 92 | 0.601589 | false |
wdmchaft/taskcoach
|
taskcoachlib/gui/dialog/preferences.py
|
1
|
19285
|
# -*- coding: UTF-8 -*-
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Copyright (C) 2008 Rob McMullen <rob.mcmullen@gmail.com>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
from taskcoachlib import meta, widgets, notify
from taskcoachlib.domain import date
from taskcoachlib.i18n import _
class SettingsPageBase(widgets.BookPage):
def __init__(self, *args, **kwargs):
super(SettingsPageBase, self).__init__(*args, **kwargs)
self._booleanSettings = []
self._choiceSettings = []
self._multipleChoiceSettings = []
self._integerSettings = []
self._colorSettings = []
self._pathSettings = []
self._textSettings = []
def addBooleanSetting(self, section, setting, text, helpText=''):
checkBox = wx.CheckBox(self, -1)
checkBox.SetValue(self.getboolean(section, setting))
self.addEntry(text, checkBox, helpText)
self._booleanSettings.append((section, setting, checkBox))
def addChoiceSetting(self, section, setting, text, choices, helpText=''):
choice = wx.Choice(self, -1)
for choiceValue, choiceText in choices:
choice.Append(choiceText, choiceValue)
if choiceValue == self.get(section, setting):
choice.SetSelection(choice.GetCount()-1)
if choice.GetSelection() == wx.NOT_FOUND: # force a selection if necessary
choice.SetSelection(0)
self.addEntry(text, choice, helpText)
self._choiceSettings.append((section, setting, choice))
def addMultipleChoiceSettings(self, section, setting, text, choices, helpText=''):
''' choices is a list of (number, text) tuples. '''
multipleChoice = wx.CheckListBox(self, choices=[choice[1] for choice in choices])
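        # The checked numbers are stored as a Python literal (e.g. '[1, 2]'), so eval() turns them back into a list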
checkedNumbers = eval(self.get(section, setting))
for index, choice in enumerate(choices):
multipleChoice.Check(index, choice[0] in checkedNumbers)
self.addEntry(text, multipleChoice, helpText, growable=True)
self._multipleChoiceSettings.append((section, setting, multipleChoice,
[choice[0] for choice in choices]))
def addIntegerSetting(self, section, setting, text, minimum=0, maximum=100,
helpText=''):
spin = widgets.SpinCtrl(self, min=minimum, max=maximum, size=(40, -1),
initial=self.getint(section, setting))
self.addEntry(text, spin, helpText)
self._integerSettings.append((section, setting, spin))
def addColorSetting(self, section, setting, text):
colorButton = widgets.ColorSelect(self, -1, text,
eval(self.get(section, setting)))
self.addEntry(None, colorButton)
self._colorSettings.append((section, setting, colorButton))
def addPathSetting(self, section, setting, text, helpText=''):
pathChooser = widgets.DirectoryChooser(self, wx.ID_ANY)
pathChooser.SetPath(self.get(section, setting))
self.addEntry(text, pathChooser, helpText)
self._pathSettings.append((section, setting, pathChooser))
def addTextSetting(self, section, setting, text, helpText=''):
textChooser = wx.TextCtrl(self, wx.ID_ANY, self.get(section, setting))
self.addEntry(text, textChooser, helpText)
self._textSettings.append((section, setting, textChooser))
def addText(self, label, text):
self.addEntry(label, text)
def ok(self):
for section, setting, checkBox in self._booleanSettings:
self.set(section, setting, str(checkBox.IsChecked()))
for section, setting, choice in self._choiceSettings:
self.set(section, setting,
choice.GetClientData(choice.GetSelection()))
for section, setting, multipleChoice, choices in self._multipleChoiceSettings:
self.set(section, setting,
str([choices[index] for index in range(len(choices)) if multipleChoice.IsChecked(index)]))
for section, setting, spin in self._integerSettings:
self.set(section, setting, str(spin.GetValue()))
for section, setting, colorButton in self._colorSettings:
self.set(section, setting, str(colorButton.GetColour()))
for section, setting, btn in self._pathSettings:
self.set(section, setting, btn.GetPath())
for section, setting, txt in self._textSettings:
self.set(section, setting, txt.GetValue())
def get(self, section, name):
raise NotImplementedError
def getint(self, section, name):
return int(self.get(section, name))
def getboolean(self, section, name):
return self.get(section, name) == 'True'
def set(self, section, name, value):
raise NotImplementedError
class SettingsPage(SettingsPageBase):
def __init__(self, settings=None, *args, **kwargs):
self.settings = settings
super(SettingsPage, self).__init__(*args, **kwargs)
def addEntry(self, text, control, helpText='', **kwargs): # pylint: disable-msg=W0221
if helpText == 'restart':
helpText = _('This setting will take effect\nafter you restart %s')%meta.name
super(SettingsPage, self).addEntry(text, control, helpText, **kwargs)
def get(self, section, name):
return self.settings.get(section, name)
def getint(self, section, name):
return self.settings.getint(section, name)
def getboolean(self, section, name):
return self.settings.getboolean(section, name)
def set(self, section, name, value):
self.settings.set(section, name, value)
class SavePage(SettingsPage):
pageName = 'save'
pageTitle = _('Files')
pageIcon = 'save'
def __init__(self, *args, **kwargs):
super(SavePage, self).__init__(columns=3, *args, **kwargs)
self.addBooleanSetting('file', 'autosave',
_('Auto save after every change'))
self.addBooleanSetting('file', 'backup',
_('Create a backup copy before\noverwriting a %s file')%meta.name)
self.addBooleanSetting('file', 'saveinifileinprogramdir',
_('Save settings (%s.ini) in the same\ndirectory as the program') \
%meta.filename,
_('(For running %s\nfrom a removable medium)')%meta.name)
self.addPathSetting('file', 'attachmentbase', _('Attachment base directory'),
_('When adding an attachment, try to make\nits path relative to this one.'))
self.fit()
class WindowBehaviorPage(SettingsPage):
pageName = 'window'
pageTitle = _('Window behavior')
pageIcon = 'windows'
def __init__(self, *args, **kwargs):
super(WindowBehaviorPage, self).__init__(columns=3, *args, **kwargs)
self.addBooleanSetting('window', 'splash',
_('Show splash screen on startup'))
self.addBooleanSetting('window', 'tips',
_('Show tips window on startup'))
self.addChoiceSetting('window', 'starticonized',
_('Start with the main window iconized'),
[('Never', _('Never')), ('Always', _('Always')),
('WhenClosedIconized',
_('If it was iconized last session'))])
self.addBooleanSetting('version', 'notify',
_('Check for new version of %(name)s on startup')%meta.data.metaDict)
self.addBooleanSetting('window', 'hidewheniconized',
_('Hide main window when iconized'))
self.addBooleanSetting('window', 'hidewhenclosed',
_('Minimize main window when closed'))
self.addBooleanSetting('window', 'blinktaskbariconwhentrackingeffort',
_('Make clock in the task bar tick when tracking effort'))
self.addBooleanSetting('view', 'descriptionpopups',
_('Show a popup with the description of an item\nwhen hovering over it'))
self.fit()
class LanguagePage(SettingsPage):
pageName = 'language'
pageTitle = _('Language')
pageIcon = 'person_talking_icon'
def __init__(self, *args, **kwargs):
super(LanguagePage, self).__init__(columns=3, *args, **kwargs)
choices = \
[('', _('Let the system determine the language')),
('ar', u'الْعَرَبيّة (Arabic)'),
('eu_ES', 'Euskal Herria (Basque)'),
('bs_BA', u'босански (Bosnian)'),
('pt_BR', u'Português brasileiro (Brazilian Portuguese)'),
('br_FR', 'Brezhoneg (Breton)'),
('bg_BG', u'български (Bulgarian)'),
('ca_ES', u'Català (Catalan)'),
('zh_CN', u'简体中文 (Simplified Chinese)'),
('zh_TW', u'正體字 (Traditional Chinese)'),
('cs_CS', u'Čeština (Czech)'),
('da_DA', 'Dansk (Danish)'),
('nl_NL', 'Nederlands (Dutch)'),
('en_AU', 'English (Australia)'),
('en_CA', 'English (Canada)'),
('en_GB', 'English (UK)'),
('en_US', 'English (US)'),
('eo', 'Esperanto'),
('et_EE', 'Eesti keel (Estonian)'),
('fi_FI', 'Suomi (Finnish)'),
('fr_FR', u'Français (French)'),
('gl_ES', 'Galego (Galician)'),
('de_DE', 'Deutsch (German)'),
('nds_DE', 'Niederdeutsche Sprache (Low German)'),
('el_GR', u'ελληνικά (Greek)'),
('he_IL', u'עברית (Hebrew)'),
('hi_IN', u'हिन्दी, हिंदी (Hindi)'),
('hu_HU', 'Magyar (Hungarian)'),
('id_ID', 'Bahasa Indonesia (Indonesian)'),
('it_IT', 'Italiano (Italian)'),
('ja_JP', u'日本語 (Japanese)'),
('ko_KO', u'한국어/조선말 (Korean)'),
('lv_LV', u'Latviešu (Latvian)'),
('lt_LT', u'Lietuvių kalba (Lithuanian)'),
('mr_IN', u'मराठी Marāṭhī (Marathi)'),
('mn_CN', u'Монгол бичиг (Mongolian)'),
('nb_NO', u'Bokmål (Norwegian Bokmal)'),
('nn_NO', u'Nynorsk (Norwegian Nynorsk)'),
('fa_IR', u'فارسی (Persian)'),
('pl_PL', u'Język polski (Polish)'),
('pt_PT', u'Português (Portuguese)'),
('ro_RO', u'Română (Romanian)'),
('ru_RU', u'Русский (Russian)'),
('sk_SK', u'Slovenčina (Slovak)'),
('sl_SI', u'Slovenski jezik (Slovene)'),
('es_ES', u'Español (Spanish)'),
('sv_SE', 'Svenska (Swedish)'),
('te_IN', u'తెలుగు (Telugu)'),
('th_TH', u'ภาษาไทย (Thai)'),
('tr_TR', u'Türkçe (Turkish)'),
('uk_UA', u'украї́нська мо́ва (Ukranian)'),
('vi_VI', u'tiếng Việt (Vietnamese)')]
self.addChoiceSetting('view', 'language_set_by_user', _('Language'),
choices, helpText='restart')
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
text = wx.StaticText(panel,
label=_('''If your language is not available, or the translation needs
improving, please consider helping. See:'''))
sizer.Add(text)
url = meta.url + 'i18n.html'
urlCtrl = wx.HyperlinkCtrl(panel, -1, label=url, url=url)
sizer.Add(urlCtrl)
panel.SetSizerAndFit(sizer)
self.addText(_('Language not found?'), panel)
self.fit()
def ok(self):
super(LanguagePage, self).ok()
self.set('view', 'language', self.get('view', 'language_set_by_user'))
class ColorsPage(SettingsPage):
pageName = 'colors'
pageTitle = _('Colors')
pageIcon = 'palette_icon'
def __init__(self, *args, **kwargs):
super(ColorsPage, self).__init__(columns=1, growableColumn=-1, *args, **kwargs)
for setting, label in \
[('activetasks', _('Click this button to change the color of active tasks')),
('inactivetasks', _('Click this button to change the color of inactive tasks')),
('completedtasks', _('Click this button to change the color of completed tasks')),
('overduetasks', _('Click this button to change the color of over due tasks')),
('duesoontasks', _('Click this button to change the color of tasks due soon'))]:
self.addColorSetting('color', setting, label)
self.fit()
class FeaturesPage(SettingsPage):
pageName = 'features'
pageTitle = _('Features')
pageIcon = 'cogwheel_icon'
def __init__(self, *args, **kwargs):
super(FeaturesPage, self).__init__(columns=3, *args, **kwargs)
self.addBooleanSetting('feature', 'effort',
_('Allow for tracking effort'), helpText='restart')
self.addBooleanSetting('feature', 'notes', _('Allow for taking notes'),
helpText='restart')
names = [] # There's at least one, the universal one
for name in notify.AbstractNotifier.names():
names.append((name, name))
self.addChoiceSetting('feature', 'notifier', _('Notification system'), names,
helpText=_('Notification system to use for reminders (Growl, Snarl, etc)'))
try:
import taskcoachlib.syncml.core
except ImportError:
pass
else:
self.addBooleanSetting('feature', 'syncml', _('Enable SyncML'),
helpText='restart')
self.addBooleanSetting('feature', 'iphone', _('Enable iPhone synchronization'),
helpText='restart')
self.addIntegerSetting('view', 'efforthourstart',
_('Hour of start of work day'), minimum=0, maximum=23)
self.addIntegerSetting('view', 'efforthourend',
_('Hour of end of work day'), minimum=1, maximum=24)
self.addBooleanSetting('calendarviewer', 'gradient',
_('Use gradients in calendar views.\nThis may slow down Task Coach.'),
helpText='restart')
self.addChoiceSetting('view', 'effortminuteinterval',
_('Minutes between task start/end times'),
[('5', '5'), ('10', '10'), ('15', '15'), ('20', '20'), ('30', '30')])
self.fit()
class TaskBehaviorPage(SettingsPage):
pageName = 'task'
pageTitle = _('Task behavior')
pageIcon = 'cogwheel_icon'
def __init__(self, *args, **kwargs):
super(TaskBehaviorPage, self).__init__(columns=3, *args, **kwargs)
self.addBooleanSetting('behavior', 'markparentcompletedwhenallchildrencompleted',
_('Mark parent task completed when all children are completed'))
self.addIntegerSetting('behavior', 'duesoonhours',
_("Number of hours that tasks are considered to be 'due soon'"),
minimum=0, maximum=90)
self.addMultipleChoiceSettings('view', 'snoozetimes',
_('Snooze times to offer in task reminder dialog'),
date.snoozeChoices[1:]) # Don't offer "Don't snooze" as a choice
self.fit()
class IPhonePage(SettingsPage):
pageName = 'iphone'
pageTitle = _('iPhone')
pageIcon = 'computer_handheld_icon'
def __init__(self, *args, **kwargs):
super(IPhonePage, self).__init__(columns=3, *args, **kwargs)
self.addTextSetting('iphone', 'password',
_('Password for synchronization with iPhone'))
self.addTextSetting('iphone', 'service',
_('Bonjour service name'), helpText='restart')
self.addBooleanSetting('iphone', 'synccompleted',
_('Upload completed tasks to device'), helpText=_('Upload completed tasks to device'))
self.addBooleanSetting('iphone', 'showlog',
_('Show sync log'), helpText=_('Show the synchronization log'))
self.fit()
class EditorPage(SettingsPage):
pageName = 'editor'
pageTitle = _('Editor')
pageIcon = 'edit'
def __init__(self, *args, **kwargs):
super(EditorPage, self).__init__(columns=2, *args, **kwargs)
self.addBooleanSetting('editor', 'maccheckspelling',
_('Check spelling in editors'))
self.fit()
def ok(self):
super(EditorPage, self).ok()
widgets.MultiLineTextCtrl.CheckSpelling = \
self.settings.getboolean('editor', 'maccheckspelling')
class Preferences(widgets.NotebookDialog):
allPageNames = ['window', 'task', 'save', 'language', 'colors', 'features',
'iphone', 'editor']
pages = dict(window=WindowBehaviorPage, task=TaskBehaviorPage,
save=SavePage, language=LanguagePage, colors=ColorsPage,
features=FeaturesPage, iphone=IPhonePage, editor=EditorPage)
def __init__(self, settings=None, *args, **kwargs):
self.settings = settings
super(Preferences, self).__init__(bitmap='wrench_icon', *args, **kwargs)
self.TopLevelParent.Bind(wx.EVT_CLOSE, self.onClose)
if '__WXMAC__' in wx.PlatformInfo:
self.CentreOnParent()
def addPages(self):
self.SetMinSize((300, 430))
for pageName in self.allPageNamesInUserOrder():
if self.shouldCreatePage(pageName):
page = self.createPage(pageName)
self._interior.AddPage(page, page.pageTitle, page.pageIcon)
def allPageNamesInUserOrder(self):
''' Return all pages names in the order stored in the settings. The
settings may not contain all pages (e.g. because a feature was
turned off by the user) so we add the missing pages if necessary. '''
pageNamesInUserOrder = self.settings.getlist('editor', 'preferencespages')
remainingPageNames = self.allPageNames[:]
for pageName in pageNamesInUserOrder:
remainingPageNames.remove(pageName)
return pageNamesInUserOrder + remainingPageNames
def shouldCreatePage(self, pageName):
if pageName == 'iphone':
return self.settings.getboolean('feature', 'iphone')
elif pageName == 'editor':
return '__WXMAC__' in wx.PlatformInfo
else:
return True
def createPage(self, pageName):
return self.pages[pageName](parent=self._interior, settings=self.settings)
def onClose(self, event):
event.Skip()
pageNames = [page.pageName for page in self]
self.settings.setlist('editor', 'preferencespages', pageNames)
|
gpl-3.0
| -8,869,947,193,191,827,000 | 43.090069 | 111 | 0.597454 | false |
LettError/filibuster
|
Lib/filibuster/content/horoscope.py
|
1
|
7144
|
# -*- coding: UTF-8 -*-
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ---------
# Contributed by Erik van Blokland and Jonathan Hoefler
#
# FILIBUSTER
#
# MIT License
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ---------
"""
living
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
"""
__version__ = '4.0'
content = {
'hs_horoscope': [
'<#!bold,hs_sign#>: <#^,hs_bullshit#> <#hs_symptoms#> <#hs_symptoms#> <#hs_prediction_px#><#hs_prediction_sx#> <#hs_lovelife#> <#hs_outlook#> <#hs_finalwarning#>',
],
'hs_altsign': [
'<#^,sci_astro_constellations#>',
],
'hs_arenas_pl': [
'personal problems',
'travel plans',
'major commitments',
'emotional difficulties',
'hesitations and misgivings',
'career goals',
'professional aspirations',
'colleagues',
],
'hs_arenas_sing': [
'personal <#hs_qualities#>',
'professional efficiency',
'romantic life',
'marital status',
'home life',
'domestic situation',
'sensitive nature',
'<#hs_qualities#>',
'ascendant <#sci_astro_constellations#>',
'significant other',
'domestic partner',
'two dollars and fifty cents',
],
'hs_astrology_basic': [
'<#^,sci_astro_planets#> rises in <#hs_altsign#>',
'<#^,sci_astro_planets#> in <#hs_altsign#>',
'<#^,sci_astro_planets#> in the house of <#hs_altsign#>',
'<#^,sci_astro_planets#> descends in <#hs_altsign#>',
],
'hs_astrology_intro': [
'By using <#company#> technology we can provide you with a personalized horoscope. Your IP number is cross-referenced and heuristically extrapolated, giving us your exact location under the stars. We hope this unique feature will make us your favorite spot for checking in with the Heavens.',
],
'hs_astrology_more': [
'<#hs_astrology_basic#> and <#^,sci_astro_planets#> aligns with <#^,sci_astro_stars#> in <#hs_altsign#>',
],
'hs_astrology_section': [
'It is written in the stars, by <#name_female#>',
'The heavens',
'Celestial bodies',
'Cynics have fun',
'The Heavenly Bodies and You',
'Your Personal Horoscope, by <#name_female#>',
'The Signs',
u'This is your fate, and there isn’t a damn thing you can do about it!',
],
'hs_bullshit': [
'The spirit of cooperation is the gift of this New Moon.',
'Your ruling planet is <#^,sci_astro_planets#>.',
'The planet that rules <#hs_sign#> is <#^,sci_astro_planets#>.',
'<#hs_astrology_basic#>.',
'<#hs_astrology_more#>.',
],
'hs_events': [
'major changes',
'death',
'upheaval',
'good luck',
'news from afar',
'unexpected news',
'changes in your <#^,hs_arenas_sing#>',
],
'hs_finalwarning': [
'Your compatible sign for today is <#hs_altsign#>.',
'But watch out for <#hs_altsign#>.',
'<#hs_altsign#> brings unexpected news.',
'Expect a call from <#hs_altsign#> or <#hs_altsign#>.',
'Beware, or minor accidents may occur!',
'Relationships are stabilizing...',
'Relationships are in for a ride...',
'Buy some stock in <#startup_company#>.',
],
'hs_imperatives_pl': [
'will',
'are likely to',
'may',
],
'hs_imperatives_sing': [
'will',
'is likely to',
'can easily',
],
'hs_lovelife': [
'Let your mate know your true feelings,',
'Let <#hs_altsign#> know your true feelings,',
'Your significant other is ready to make a bigger commitment to your relationship:',
],
'hs_objects': [
'friends who need help',
'those close to you',
'someone new in your life',
'superiors',
'enemies',
],
'hs_outlook': [
'your <#hs_outlooks_quality#> outlook for today is <#hs_outlooks_quantity#>.',
],
'hs_outlooks_quality': [
'financial',
'romantic',
],
'hs_outlooks_quantity': [
'poor',
'stormy',
'mild',
'good',
'fair',
],
'hs_prediction_px': [
'Abandon all hope',
'Enjoy a bit of luxury tonight',
'Take one thing at a time',
'Try to be honest in your description concerning recent happenings',
u'Don’t take on too much',
'Keeping healthy requires a strong foundation of exercise, diet, and rest',
'You have a tendency to cover up your anger with a pretty facade',
],
'hs_prediction_sx': [
' a good time to finish those chores! and hide from friends and loved ones.',
u' and don’t be too quick to enter into joint financial ventures.',
u', you could easily be blamed for something you didn’t do.',
', or exhaustion and minor health problems will occur.',
' - slipping up in one of these areas invites a chronic ailment to return.',
],
'hs_qualities': [
'generosity',
'passion',
'insight',
'creativity',
'melancholy',
'introspection',
'productivity',
'ambition',
'portfolio',
'libido',
],
'hs_sign': [
'<#^,sci_astro_constellations#>',
],
'hs_symptoms': [
'Your <#hs_arenas_pl#> <#hs_imperatives_pl#> <#hs_verbs#> your <#hs_arenas_sing#>.',
'Your <#hs_arenas_sing#> <#hs_imperatives_sing#> <#hs_verbs#> <#hs_objects#>.',
'Now is a good time to <#hs_verbs_decision#> your <#hs_arenas_sing#>.',
'Changes in your <#hs_arenas_pl#> are apparent.',
'Your <#cat_householdappliances#> will break.',
'Your fortunate astrological number for today is <-randint(0, 1000)->.',
'Look for <#hs_events#> around the <#num_ord_010_019#> of the month.',
'Your <#hs_qualities#> will peak around the <#num_ord_010_019#> of the month.',
'Now is the time to <#hs_verbs_decision#> <#hs_objects#>, who <#hs_imperatives_pl#> bring <#hs_events#>.',
'You are able to institute a saving or budget plan that will meet your future needs.',
        'You will be able to rebuild bridges that have been burned and make amends in situations that had seemed hopeless.',
'At the very least, you can agree to disagree and acknowledge that others will never adopt your agenda.',
'Material resources are available for your wise use.',
'You should sign legal documents that need to be updated.',
],
'hs_verbs': [
'be appreciated by',
'distract you from',
'interfere with',
'challenge',
'make you <#hs_verbs_decision#>',
],
'hs_verbs_decision': [
'reevaluate',
'reconsider',
'reflect on',
'take pride in',
'ignore',
'share',
'stand firm on',
'be suspicious of',
'contemplate',
'pay close attention to',
], }
|
mit
| -2,019,195,695,617,608,200 | 34.502488 | 300 | 0.539518 | false |
jmwright/cadquery-projects
|
generative-design/simple_beam/simple_beam_design.py
|
1
|
1744
|
from random import randint
import cadquery as cq
from Helpers import show
# Generates a random solid, and keeps trying till it finds one
def generate(width=10, height=10, startingPoints=6):
points = []
    # Make sure that we get stop points for the random points that make sense
    xStop = width
    yStop = height / 2.0
    points.append((0.0, height / 2.0))
    for i in range(1, startingPoints - 1):
        # Generate a random point for two edges to connect at
        x = randint(0, xStop)
        y = randint(1, yStop)  # Starting at 1 helps produce a valid solid
        points.append((x, y))
    points.append((width, height / 2.0))
    # Append a point that is back on the base to close the mirror
    points.append((width, 0.0))
for point in points:
print(point)
try:
# Profile of our beam that we will later test
crossSection = cq.Workplane('XZ').polyline(points).mirrorX()
except:
print("Points for outline not valid.")
return None
try:
beam = crossSection.extrude(100)
except:
print("Not a valid cross-section, could not extrude")
return None
# Check to make sure our resulting shape is a valid solid
if not beam.val().wrapped.isValid():
return None
# Check to see that we have the correct number of faces
# if beam.faces().size() != startingPoints * 2 + 2:
# return None
return beam
# Overall allowed dimensions of the beam
w = 10.0 # Width
h = 10.0 # Height
# The number of points that we can manipulate (half the number of starting edges)
startingPoints = 6 # Half of the number of starting edges
# Keep looping until we get a valid solid
beam = None
while beam is None:
beam = generate(w, h, startingPoints)
show(beam)
|
apache-2.0
| 5,019,286,939,047,338,000 | 25.424242 | 77 | 0.643349 | false |
mnizol/ormpy
|
test/TestPopulation.py
|
1
|
26387
|
##############################################################################
# Package: ormpy
# File: TestPopulation.py
# Author: Matthew Nizol
##############################################################################
""" This file contains unit tests for the lib.Population module. """
import os, sys, re
from StringIO import StringIO
from unittest import TestCase
import lib.TestDataLocator as TestDataLocator
from lib.ORMMinusModel import ORMMinusModel
from lib.Population import Population, Relation, lcm
from lib.NormaLoader import NormaLoader
class TestPopulation(TestCase):
""" Unit tests for the Population module. """
def setUp(self):
self.data_dir = TestDataLocator.get_data_dir()
self.maxDiff = None
def test_unsat_model(self):
""" Test population of an unsatisfiable model. """
fname = os.path.join(self.data_dir, "unsat_smarag_2.orm")
model = ORMMinusModel(NormaLoader(fname).model)
with self.assertRaises(ValueError) as ex:
pop = Population(model)
self.assertEquals(ex.exception.message,
"Cannot populate an unsatisfiable model.")
def test_populate_object_types(self):
""" Test population of object types. """
fname = os.path.join(self.data_dir, "fact_type_tests.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=5)
pop = Population(model)
pop1 = pop.object_types["ObjectTypes.A"]
self.assertItemsEqual(pop1, ["A0", "A1", "A2", "A3", "A4"])
pop2 = pop.object_types["ObjectTypes.A_id"]
self.assertItemsEqual(pop2, [0, 1, 2, 3, 4])
pop3 = pop.object_types["ObjectTypes.B"]
self.assertItemsEqual(pop3, ["A", "Dog", "3.567", "12/23/2014"])
def test_populate_roles(self):
""" Test population of roles. """
fname = os.path.join(self.data_dir, "populate_roles.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=10)
pop = Population(model)
# Test role played by an object type that plays no other roles.
pop1 = pop._roles["FactTypes.VT1HasVT2.Roles.VT1"]
self.assertItemsEqual(pop1, [[1],[2],[3],[4],[5]])
# Test population of two roles played by same object type such that
# the role populations are complete and disjoint.
pop2a = pop._roles["FactTypes.VT1HasVT2.Roles.VT2"]
pop2b = pop._roles["FactTypes.VT2HasVT7.Roles.VT2"]
self.assertItemsEqual(pop2a, [[1],[2],[3],[4],[5]])
self.assertItemsEqual(pop2b, [[6],[7]])
# Test population of object type that plays enough roles that its
# objects appear in more than one role population.
pop3a = pop._roles["FactTypes.VT3HasVT4.Roles.VT4"]
pop3b = pop._roles["FactTypes.VT4HasVT5.Roles.VT4"]
pop3c = pop._roles["FactTypes.VT4HasVT6.Roles.VT4"]
self.assertItemsEqual(pop3a, [[1],[2]])
self.assertItemsEqual(pop3b, [[3],[4],[5],[1],[2]])
self.assertItemsEqual(pop3c, [[3],[4],[5]])
# Test population of independent object type whose population is not
# fully exhausted.
pop4 = pop._roles["FactTypes.VT4HasVT5.Roles.VT5"]
self.assertItemsEqual(pop4, [[1],[2],[3],[4],[5]])
def test_fact_type_parts(self):
""" Calling pop on fact_type_parts.orm was crashing. Confirm it
no longer crashes. """
fname = os.path.join(self.data_dir, "fact_type_parts.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=10)
pop = Population(model)
self.assertTrue(True) # Just want to ensure test doesn't crash
def test_populate_role_sequences_and_fact_types(self):
""" Test population of role sequences and fact types. """
fname = os.path.join(self.data_dir, "populate_fact_types.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=6)
pop = Population(model)
pop1 = pop._roles["Constraints.IFC1"]
self.assertItemsEqual(pop1, [[1, 'B1'],[2,'B2'],[3,'B3']])
# Note, pop of B role starts with B4 because previous B role was
# already instantiated with B1,B2,B3
pop2 = pop._roles["Constraints.IUC1"]
self.assertItemsEqual(pop2, [['B4','C1','C1'], ['B5','C2','C2'],
['B1','C3','C3'], ['B2','C4','C4'],
['B3','C1','C1'], ['B4','C2','C2']])
pop3 = pop._roles["FactTypes.AHasBBCCD.Roles.D"]
self.assertItemsEqual(pop3, [[False],[True]])
# Population of whole predicate
pop4 = pop.fact_types["FactTypes.AHasBBCCD"]
self.assertItemsEqual(pop4, [[1, 'B1', 'B4', 'C1', 'C1', False],
[2, 'B2', 'B5', 'C2', 'C2', True],
[3, 'B3', 'B1', 'C3', 'C3', False],
[1, 'B1', 'B2', 'C4', 'C4', True],
[2, 'B2', 'B3', 'C1', 'C1', False],
[3, 'B3', 'B4', 'C2', 'C2', True]])
self.assertEquals(pop4.names, ['A','B','B2','C','C2','D'])
def test_populate_subtypes(self):
""" Test that subtypes are properly populated. """
fname = TestDataLocator.path("value_constraints_on_subtypes.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=6)
pop = Population(model)
pop_a = pop.object_types["ObjectTypes.A"]
pop_b = pop.object_types["ObjectTypes.B"]
pop_c = pop.object_types["ObjectTypes.C"]
self.assertEquals(pop_a, range(50, 56))
self.assertEquals(pop_b, pop_a)
self.assertEquals(pop_c, pop_a)
pop_w = pop.object_types["ObjectTypes.W"]
pop_x = pop.object_types["ObjectTypes.X"]
pop_y = pop.object_types["ObjectTypes.Y"]
pop_z = pop.object_types["ObjectTypes.Z"]
# Use assertEquals to test order, not just contents!
self.assertEquals(pop_w, [20, 21, 22, 1, 2, 3])
self.assertEquals(pop_y, [20, 21, 22, 1, 2])
self.assertEquals(pop_x, pop_w)
self.assertEquals(pop_z, [20, 21, 22])
def test_ignored_overlapping_iuc(self):
""" Test that overlapping IUC is ignored while populating fact type. """
fname = os.path.join(self.data_dir, "populate_fact_types.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=6)
pop = Population(model)
self.assertIsNone(pop._roles.get("Constraints.IUC2", None))
self.assertIsNotNone(pop._roles.get("Constraints.IUC3", None))
def test_population_with_no_fact_types(self):
""" Test population with no fact types. """
fname = os.path.join(self.data_dir, "no_fact_types.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=5)
pop = Population(model)
self.assertItemsEqual(pop.object_types["ObjectTypes.A"], [0,1,2,3,4])
def test_absorption_population(self):
""" Test population involving absorption fact type."""
fname = os.path.join(self.data_dir, "absorption_valid_simple.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=5)
pop = Population(model)
self.assertEquals(len(pop.fact_types), 2)
expected = [['A0',0],['A1',1],['A2',2],['A3',3],['A4',4]]
self.assertItemsEqual(pop.fact_types["FactTypes.AHasB"], expected)
self.assertItemsEqual(pop.fact_types["FactTypes.AHasC"], expected)
def test_absorption_population_non_trivial(self):
""" Test population involving absorption fact type whose population
is actually affected by the EUC."""
fname = os.path.join(self.data_dir, "absorption_sat_nontrivial_pop.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=100)
pop = Population(model)
self.assertEquals(len(pop.fact_types), 2)
expectedB = [[0,False],[1,True],[2,False],[3,True],[4,False],[5,True]]
expectedC = [[0,'C1'], [1,'C2'],[2,'C3'], [3,'C1'],[4,'C2'], [5,'C3']]
self.assertItemsEqual(pop.fact_types["FactTypes.AHasB"], expectedB)
self.assertItemsEqual(pop.fact_types["FactTypes.AHasC"], expectedC)
def test_mandatory_middle_role(self):
""" I'm verifying my assumption that if a mandatory role is populated
between two other roles, the subsequent role's population will pick
up where the first role left off."""
fname = os.path.join(self.data_dir, "middle_mandatory_role.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=100)
        # Forcibly re-order the roles of value type V
v = model.object_types.get("V")
v.roles = sorted(v.roles, key=lambda x: x.fact_type.name)
self.assertEquals(v.roles[1].fact_type.name, "VHasQ")
pop = Population(model)
self.assertItemsEqual(pop.fact_types["FactTypes.VExists"], [[0],[1],[2]])
self.assertItemsEqual(pop.fact_types["FactTypes.VSwims"], [[3],[4],[5],[6]])
def test_euc_strengthen_pop(self):
""" Test population after EUC strengthening. """
fname = os.path.join(self.data_dir, "join_rule_valid_linear_path_euc.orm")
model = NormaLoader(fname).model
model = ORMMinusModel(model, ubound=100, experimental=True)
pop = Population(model)
self.assertItemsEqual(model.ignored, [])
ahasb = [[0,0],
[1,1]]
bhasc = [[0,0],
[1,1]]
chasd = [[2,0],
[0,1],
[1,0]]
self.assertItemsEqual(pop.fact_types["FactTypes.AHasB"], ahasb)
self.assertItemsEqual(pop.fact_types["FactTypes.BHasC"], bhasc)
self.assertItemsEqual(pop.fact_types["FactTypes.CHasD"], chasd)
def test_pop_with_subset(self):
""" Test population with subset constraint. """
fname = os.path.join(self.data_dir, "subset_population_test.orm")
model = NormaLoader(fname).model
model = ORMMinusModel(model, ubound=12, experimental=True)
pop = Population(model)
self.assertItemsEqual(model.ignored, [])
ahasb = [[0,0],
[1,1]]
alikescb = [[0,0,0],
[1,1,1],
[2,2,0],
[0,3,1],
[1,4,0],
[2,5,1],
[0,6,0],
[1,7,1],
[2,8,0],
[0,9,1],
[1,0,0],
[2,1,1]]
self.assertItemsEqual(pop.fact_types["FactTypes.AHasB"], ahasb)
self.assertItemsEqual(pop.fact_types["FactTypes.ALikesCB"], alikescb)
def test_pop_with_subset_2(self):
""" Test population with subset constraint. """
fname = os.path.join(self.data_dir, "subset_population_test2.orm")
model = NormaLoader(fname).model
model = ORMMinusModel(model, ubound=12, experimental=True)
pop = Population(model)
self.assertItemsEqual(model.ignored, [])
ahasid = [['A0',0],
['A1',1],
['A2',2],
['A3',3],
['A4',4],
['A5',5],
['A6',6]]
ahasb = [['A0',10],
['A1',20],
['A2',30],
['A3',40]]
chasa = [[1,'A4'],
[2,'A5'],
[3,'A6']]
chasda = [[1,0,'A4'],
[2,1,'A5'],
[3,2,'A6']]
self.assertItemsEqual(pop.fact_types["FactTypes.AHasAId"], ahasid)
self.assertItemsEqual(pop.fact_types["FactTypes.AHasB"], ahasb)
self.assertItemsEqual(pop.fact_types["FactTypes.CHasA"], chasa)
self.assertItemsEqual(pop.fact_types["FactTypes.CHasDA"], chasda)
def test_how_population_order_affects_subset(self):
""" Subset has subtype that was created before supertype, and a subset
constraint from subtype to supertype role. Because of the order
of the types, the superset role pop won't be available when we
try to populate the subset role pop. """
fname = os.path.join(self.data_dir, "subset_population_ordering.orm")
loader = NormaLoader(fname)
model = loader.model
model = ORMMinusModel(model, ubound=5, experimental=True)
pop = Population(model)
self.assertItemsEqual(pop.fact_types["FactTypes.BExists"], [[3],[4],[5],[6]])
self.assertItemsEqual(pop.fact_types["FactTypes.AExists"], [[3],[4],[5],[6]])
self.assertItemsEqual(pop.fact_types["FactTypes.AExists"].names, ["A"])
def test_pop_with_equality(self):
""" Test population with equality constraint. """
fname = os.path.join(self.data_dir, "equality_population_test.orm")
model = NormaLoader(fname).model
model = ORMMinusModel(model, ubound=1000, experimental=True)
pop = Population(model)
self.assertItemsEqual(model.ignored, [])
ehasa = [['E1','A1'],
['E2','A2']]
quat = [['A1',100,200,300],
['A2',101,201,301]]
tern = [['A1',100,300],
['A2',101,301]]
self.assertItemsEqual(pop.fact_types["FactTypes.EHasA"], ehasa)
self.assertItemsEqual(pop.fact_types["FactTypes.AHasBCD"], quat)
self.assertItemsEqual(pop.fact_types["FactTypes.ALikesBD"], tern)
def test_pop_with_equality2(self):
""" Test population with equality constraint. """
fname = os.path.join(self.data_dir, "equality_population_test2.orm")
model = NormaLoader(fname).model
model = ORMMinusModel(model, ubound=1000, experimental=True)
pop = Population(model)
self.assertItemsEqual(model.ignored, [])
aexists = [[0],[1],[2]]
alikes = [[3]]
aloves = [[3]]
alikesb = [[3,0]]
alovesb = [[3,0]]
asink = [[4],[0],[1],[2],[3]]
self.assertItemsEqual(pop.fact_types["FactTypes.AExists"], aexists)
self.assertItemsEqual(pop.fact_types["FactTypes.ALikes"], alikes)
self.assertItemsEqual(pop.fact_types["FactTypes.ALoves"], aloves)
self.assertItemsEqual(pop.fact_types["FactTypes.ALikesB"], alikesb)
self.assertItemsEqual(pop.fact_types["FactTypes.ALovesB"], alovesb)
self.assertItemsEqual(pop.fact_types["FactTypes.ASink"], asink)
def test_pop_with_join_equality(self):
""" Test population with join equality constraint. """
fname = os.path.join(self.data_dir, "join_equality_sat.orm")
model = NormaLoader(fname).model
model = ORMMinusModel(model, ubound=1000, experimental=True)
pop = Population(model)
self.assertItemsEqual(model.ignored, [])
expected_bin = [[0,0],[1,1],[2,2],[3,3],[4,4]]
expected_join = [[0,0,0],[1,1,1],[2,2,2],[3,3,3],[4,4,4]]
self.assertItemsEqual(pop.fact_types["FactTypes.AHasB"], expected_bin)
self.assertItemsEqual(pop.fact_types["FactTypes.AHasD"], expected_bin)
self.assertItemsEqual(pop.fact_types["FactTypes.BHasC"], expected_bin)
self.assertItemsEqual(pop.fact_types["FactTypes.DHasC"], expected_bin)
self.assertItemsEqual(pop.fact_types["FactTypes.EQ_join_fact"], expected_join)
self.assertItemsEqual(pop.fact_types["FactTypes.EQ_join_fact2"], expected_join)
#####################################################################
# Tests writing populations to stdout or CSV files
#####################################################################
class TestPopulationWrite(TestCase):
""" Unit tests for writing the population out to stdout or CSV files. """
def setUp(self):
""" Set-up unit tests. """
self.data_dir = TestDataLocator.get_data_dir()
self.path = os.path.join(self.data_dir, "..", "output")
fname = os.path.join(self.data_dir, "populate_fact_types.orm")
model = ORMMinusModel(NormaLoader(fname).model, ubound=6)
self.pop = Population(model)
self.expected = ['Population of ObjectTypes.A:\n' + \
'1\n2\n3',
'Population of ObjectTypes.B:\n' + \
'B1\nB2\nB3\nB4\nB5',
'Population of ObjectTypes.C:\n' + \
'C1\nC2\nC3\nC4',
'Population of ObjectTypes.D:\n' + \
'False\nTrue',
'Population of ObjectTypes.E:\n' + \
'1',
'Population of ObjectTypes.F:\n' + \
'2',
'Population of ObjectTypes.G:\n' + \
'3',
'Population of FactTypes.AHasBBCCD:\n' + \
'A,B,B2,C,C2,D\n' + \
'1,B1,B4,C1,C1,False\n' + \
'2,B2,B5,C2,C2,True\n' + \
'3,B3,B1,C3,C3,False\n' + \
'1,B1,B2,C4,C4,True\n' + \
'2,B2,B3,C1,C1,False\n' + \
'3,B3,B4,C2,C2,True',
'Population of FactTypes.EHasFG:\n' + \
'E,G,F\n' + \
'1,3,2']
def test_write_stdout(self):
""" Test writing population to stdout. """
saved = sys.stdout
sys.stdout = StringIO()
self.pop.write_stdout() # Method under test
actual = sys.stdout.getvalue().split('\n\n') # Double newline separates
# different populations
actual.remove('')
self.assertItemsEqual(actual, self.expected)
sys.stdout = saved
def test_write_csv_with_dir_creation(self):
""" Test that write_csv creates a directory if one doesn't exist. """
path = os.path.join(self.path, "delete_me")
self.delete(path)
self.assertFalse(os.path.isdir(path))
self.pop.write_csv(path)
self.assertTrue(os.path.isdir(path))
self.assertItemsEqual(self.get_actual(path), self.expected)
self.delete(path)
self.assertFalse(os.path.isdir(path))
def test_write_csv_without_dir_creation(self):
""" Test that write_csv works for an existing directory. """
path = os.path.join(self.path, "DO_NOT_DELETE")
self.assertTrue(os.path.isdir(path))
prev_mod_time = max(self.get_mtimes(path))
self.pop.write_csv(path)
self.assertItemsEqual(self.get_actual(path), self.expected)
curr_mod_time = min(self.get_mtimes(path))
self.assertTrue(curr_mod_time > prev_mod_time)
""" The next two test cases are commented out, because they rely on
permissions settings that git will not store. However, they only
test that write_csv raises an exception, and the call to write_csv
is wrapped in a try-except in CommandLine.py, so these tests do not
really matter. """
#def test_write_to_inaccessible_dir(self):
# """ Test an attempt to write to an inaccessible directory. """
# path = os.path.join(self.path, "CANNOT_ACCESS")
# self.assertTrue(os.path.isdir(path))
# with self.assertRaises(IOError) as ex:
# self.pop.write_csv(path)
#def test_write_to_readonly_dir(self):
# """ Test an attempt to write to a readonly directory. """
# path = os.path.join(self.path, "CANNOT_WRITE")
# self.assertTrue(os.path.isdir(path))
# with self.assertRaises(IOError) as ex:
# self.pop.write_csv(path)
def test_write_to_file_not_directory(self):
""" Test an attempt to write to a file rather than a directory. """
path = os.path.join(self.path, "NOT_A_DIRECTORY")
self.assertTrue(os.path.isfile(path))
with self.assertRaises(OSError) as ex:
self.pop.write_csv(path)
def get_actual(self, dirname):
""" Get actual results by cobbling together CSV files. """
actual = []
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
popname = re.sub('\.csv$', '', filename)
lines = ["Population of " + popname + ':'] + \
[line.strip() for line in f]
actual.append('\n'.join(lines))
return actual
def get_mtimes(self, dirname):
""" Get modification times for all files in dirname. """
mtimes = []
try:
for filename in os.listdir(dirname):
path = os.path.join(dirname, filename)
stat = os.stat(path)
mtimes.append(stat.st_mtime)
return mtimes
except:
return None
def delete(self, dirname):
""" Delete a directory and all of its contents. """
try:
for filename in os.listdir(dirname):
os.remove(os.path.join(dirname, filename))
os.rmdir(dirname)
except:
pass
#####################################################################
# Tests for Relation Class
#####################################################################
class TestRelation(TestCase):
""" Unit tests for the Relation class. """
def setUp(self):
self.data_dir = TestDataLocator.get_data_dir()
def test_add_to_relation(self):
""" Test adding tuple to relation. """
rel = Relation(["col1", "col2"])
rel.add([4, 5])
rel.add(["a", "b"])
self.assertEquals(rel.arity, 2)
self.assertEquals(len(rel), 2)
self.assertItemsEqual(rel[0], [4,5])
self.assertItemsEqual(rel[1], ["a","b"])
def test_add_wrong_arity_to_relation(self):
""" Test adding a tuple of the wrong arity to a relation. """
rel = Relation(["col1", "col2"])
with self.assertRaises(Exception) as ex:
rel.add([1])
self.assertEquals(ex.exception.message,
"Cannot add tuple of arity 1 to a Relation of arity 2")
def test_combine_with_nullary(self):
""" Test combining a nullary relation with another relation. """
null_rel = Relation([])
bin_rel = Relation(['one','two'])
self.assertIs(null_rel.combine_with(bin_rel, 100), bin_rel)
self.assertIs(bin_rel.combine_with(null_rel, 100), bin_rel)
def test_combine_with_0_tuple(self):
""" Test combining a non-empty relation with an empty relation. """
rel1 = Relation(['col1'])
rel2 = Relation(['col2'])
rel2.add([1])
rel2.add([2])
self.assertEquals(len(rel1), 0)
self.assertEquals(len(rel2), 2)
rel3 = rel1.combine_with(rel2, 100)
self.assertEquals(rel3.arity, 2)
self.assertEquals(rel3.names, ['col1', 'col2'])
self.assertEquals(len(rel3), 0)
rel4 = rel2.combine_with(rel1, 100)
self.assertEquals(rel4.arity, 2)
self.assertEquals(rel4.names, ['col2', 'col1'])
self.assertEquals(len(rel4), 0)
def test_combine_with(self):
""" Test combine_with under various settings for n. """
src = Relation(['col1'])
tgt = Relation(['col2'])
for i in xrange(4): src.add([i])
for i in xrange(6): tgt.add([i])
# n < |src|
result = src.combine_with(tgt, 3)
self.assertEquals(result, [[0,0], [1,1], [2,2]])
# n < |tgt|
result = src.combine_with(tgt, 5)
self.assertEquals(result, [[0,0], [1,1], [2,2], [3,3], [0,4]])
# n <= lcm(src,tgt)
result = src.combine_with(tgt, 12)
self.assertEquals(result, [[0,0], [1,1], [2,2], [3,3], [0,4],
[1,5], [2,0], [3,1], [0,2], [1,3],
[2,4], [3,5]])
# lcm(src,tgt) < n < |s| x |t|
result = src.combine_with(tgt, 14)
self.assertEquals(result, [[0,0], [1,1], [2,2], [3,3], [0,4],
[1,5], [2,0], [3,1], [0,2], [1,3],
[2,4], [3,5], [0,1], [1,2]])
# n == |s| x |t|
result = src.combine_with(tgt, 24)
self.assertEquals(result, [[0,0], [1,1], [2,2], [3,3], [0,4],
[1,5], [2,0], [3,1], [0,2], [1,3],
[2,4], [3,5], [0,1], [1,2], [2,3],
[3,4], [0,5], [1,0], [2,1], [3,2],
[0,3], [1,4], [2,5], [3,0]])
# n > |s| x |t|
self.assertEquals(src.combine_with(tgt, 24),
src.combine_with(tgt, 10000))
# n == 0
result = src.combine_with(tgt, 0)
self.assertEquals(result.arity, 2)
self.assertEquals(len(result), 0)
def test_combine_with_multiple_shifts(self):
""" Test case when cyclic shift has to occur more than once. """
src = Relation(['col1'])
tgt = Relation(['col2'])
for i in xrange(3): src.add([i])
for i in xrange(3): tgt.add([i])
result = src.combine_with(tgt, 9)
self.assertEquals(result, [[0,0], [1,1], [2,2],
[0,1], [1,2], [2,0],
[0,2], [1,0], [2,1]])
def test_first(self):
""" Test first() method. """
orig = Relation(['col1', 'col2'])
for i in xrange(10):
orig.add((i,i+1))
final = orig.first(5)
self.assertItemsEqual(final.names, ['col1','col2'])
self.assertEquals(final.arity, 2)
self.assertItemsEqual(final, [(0,1),(1,2),(2,3),(3,4),(4,5)])
#####################################################################
# Tests for Utility Functions
#####################################################################
class TestUtility(TestCase):
""" Unit tests for utility functions. """
def setUp(self):
pass
def test_lcm(self):
""" Test least common multiple function. """
self.assertEquals(lcm(3,4), 12)
self.assertEquals(lcm(0,1), 0)
self.assertEquals(lcm(1,0), 0)
self.assertEquals(lcm(2,4), 4)
self.assertEquals(lcm(4,6), 12)
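# Illustrative sketch (not part of the original module): lcm() is imported
# from the module under test, which is not shown in this file.  An
# implementation consistent with the assertions in test_lcm() could look like
# the following; the real code may differ.
#
#     def lcm(a, b):
#         """Least common multiple, defined as 0 when either argument is 0."""
#         if a == 0 or b == 0:
#             return 0
#         x, y = a, b
#         while y:                     # Euclid's algorithm for the gcd
#             x, y = y, x % y
#         return a * b // x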
|
gpl-2.0
| -7,042,184,542,609,720,000 | 37.409025 | 92 | 0.542729 | false |
heromod/migrid
|
mig/cgi-bin/showre.py
|
1
|
1100
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# showre - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.showre import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
gpl-2.0
| 4,900,057,541,474,287,000 | 30.428571 | 81 | 0.739091 | false |
theo-l/django
|
django/utils/deprecation.py
|
4
|
5058
|
import asyncio
import inspect
import warnings
from asgiref.sync import sync_to_async
class RemovedInDjango40Warning(DeprecationWarning):
pass
class RemovedInDjango41Warning(PendingDeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInDjango40Warning
class warn_about_renamed_method:
def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
self.class_name = class_name
self.old_method_name = old_method_name
self.new_method_name = new_method_name
self.deprecation_warning = deprecation_warning
def __call__(self, f):
def wrapped(*args, **kwargs):
warnings.warn(
"`%s.%s` is deprecated, use `%s` instead." %
(self.class_name, self.old_method_name, self.new_method_name),
self.deprecation_warning, 2)
return f(*args, **kwargs)
return wrapped
class RenameMethodsBase(type):
"""
Handles the deprecation paths when renaming a method.
It does the following:
1) Define the new method if missing and complain about it.
2) Define the old method if missing.
3) Complain whenever an old method is called.
See #15363 for more details.
"""
renamed_methods = ()
def __new__(cls, name, bases, attrs):
new_class = super().__new__(cls, name, bases, attrs)
for base in inspect.getmro(new_class):
class_name = base.__name__
for renamed_method in cls.renamed_methods:
old_method_name = renamed_method[0]
old_method = base.__dict__.get(old_method_name)
new_method_name = renamed_method[1]
new_method = base.__dict__.get(new_method_name)
deprecation_warning = renamed_method[2]
wrapper = warn_about_renamed_method(class_name, *renamed_method)
# Define the new method if missing and complain about it
if not new_method and old_method:
warnings.warn(
"`%s.%s` method should be renamed `%s`." %
(class_name, old_method_name, new_method_name),
deprecation_warning, 2)
setattr(base, new_method_name, old_method)
setattr(base, old_method_name, wrapper(old_method))
# Define the old method as a wrapped call to the new method.
if not old_method and new_method:
setattr(base, old_method_name, wrapper(new_method))
return new_class
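# Illustrative example (not part of this module): a concrete metaclass built on
# RenameMethodsBase lists its renames as (old_name, new_name, warning_class)
# tuples.  The class and method names below are made up for the example.
#
#     class RenameHelperMethods(RenameMethodsBase):
#         renamed_methods = (
#             ('old_helper', 'new_helper', RemovedInDjango40Warning),
#         )
#
#     class Helper(metaclass=RenameHelperMethods):
#         def new_helper(self):
#             return 'result'
#
#     Helper().old_helper()  # still works, but emits RemovedInDjango40Warning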
class DeprecationInstanceCheck(type):
def __instancecheck__(self, instance):
warnings.warn(
"`%s` is deprecated, use `%s` instead." % (self.__name__, self.alternative),
self.deprecation_warning, 2
)
return super().__instancecheck__(instance)
class MiddlewareMixin:
sync_capable = True
async_capable = True
# RemovedInDjango40Warning: when the deprecation ends, replace with:
# def __init__(self, get_response):
def __init__(self, get_response=None):
self._get_response_none_deprecation(get_response)
self.get_response = get_response
self._async_check()
super().__init__()
def _async_check(self):
"""
If get_response is a coroutine function, turns us into async mode so
a thread is not consumed during a whole request.
"""
if asyncio.iscoroutinefunction(self.get_response):
# Mark the class as async-capable, but do the actual switch
# inside __call__ to avoid swapping out dunder methods
self._is_coroutine = asyncio.coroutines._is_coroutine
def __call__(self, request):
# Exit out to async mode, if needed
if asyncio.iscoroutinefunction(self.get_response):
return self.__acall__(request)
response = None
if hasattr(self, 'process_request'):
response = self.process_request(request)
response = response or self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
return response
async def __acall__(self, request):
"""
Async version of __call__ that is swapped in when an async request
is running.
"""
response = None
if hasattr(self, 'process_request'):
response = await sync_to_async(self.process_request)(request)
response = response or await self.get_response(request)
if hasattr(self, 'process_response'):
response = await sync_to_async(self.process_response)(request, response)
return response
def _get_response_none_deprecation(self, get_response):
if get_response is None:
warnings.warn(
'Passing None for the middleware get_response argument is '
'deprecated.',
RemovedInDjango40Warning, stacklevel=3,
)
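# Illustrative example (not part of this module): middleware built on
# MiddlewareMixin only needs to implement process_request and/or
# process_response; the mixin adapts them to both the sync and the async
# request paths.  The class and header names below are made up.
#
#     class ExampleHeaderMiddleware(MiddlewareMixin):
#         def process_response(self, request, response):
#             response['X-Example'] = 'on'
#             return response
#
# The middleware is then enabled by adding its dotted path to the MIDDLEWARE
# setting.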
|
bsd-3-clause
| -4,542,584,378,597,957,000 | 34.87234 | 90 | 0.602017 | false |
Yukarumya/Yukarum-Redfoxes
|
testing/mozbase/mozprocess/tests/test_mozprocess.py
|
1
|
8479
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import subprocess
import sys
import proctest
import mozunit
from mozprocess import processhandler
here = os.path.dirname(os.path.abspath(__file__))
def make_proclaunch(aDir):
"""
Makes the proclaunch executable.
Params:
aDir - the directory in which to issue the make commands
Returns:
the path to the proclaunch executable that is generated
"""
if sys.platform == "win32":
exepath = os.path.join(aDir, "proclaunch.exe")
else:
exepath = os.path.join(aDir, "proclaunch")
# remove the launcher, if it already exists
# otherwise, if the make fails you may not notice
if os.path.exists(exepath):
os.remove(exepath)
# Ideally make should take care of both calls through recursion, but since it doesn't,
# on windows anyway (to file?), let's just call out both targets explicitly.
for command in [["make", "-C", "iniparser"],
["make"]]:
process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=aDir)
stdout, stderr = process.communicate()
if process.returncode:
# SomethingBadHappen; print all the things
print "%s: exit %d" % (command, process.returncode)
print "stdout:\n%s" % stdout
print "stderr:\n%s" % stderr
raise subprocess.CalledProcessError(process.returncode, command, stdout)
# ensure the launcher now exists
if not os.path.exists(exepath):
raise AssertionError("proclaunch executable '%s' "
"does not exist (sys.platform=%s)" % (exepath, sys.platform))
return exepath
class ProcTest(proctest.ProcTest):
# whether to remove created files on exit
cleanup = os.environ.get('CLEANUP', 'true').lower() in ('1', 'true')
@classmethod
def setUpClass(cls):
cls.proclaunch = make_proclaunch(here)
@classmethod
def tearDownClass(cls):
del cls.proclaunch
if not cls.cleanup:
return
files = [('proclaunch',),
('proclaunch.exe',),
('iniparser', 'dictionary.o'),
('iniparser', 'iniparser.lib'),
('iniparser', 'iniparser.o'),
('iniparser', 'libiniparser.a'),
('iniparser', 'libiniparser.so.0'),
]
files = [os.path.join(here, *path) for path in files]
errors = []
for path in files:
if os.path.exists(path):
try:
os.remove(path)
except OSError as e:
errors.append(str(e))
if errors:
raise OSError("Error(s) encountered tearing down "
"%s.%s:\n%s" % (cls.__module__, cls.__name__, '\n'.join(errors)))
def test_process_normal_finish(self):
"""Process is started, runs to completion while we wait for it"""
p = processhandler.ProcessHandler([self.proclaunch, "process_normal_finish.ini"],
cwd=here)
p.run()
p.wait()
self.determine_status(p)
def test_commandline_no_args(self):
"""Command line is reported correctly when no arguments are specified"""
p = processhandler.ProcessHandler(self.proclaunch, cwd=here)
self.assertEqual(p.commandline, self.proclaunch)
def test_commandline_overspecified(self):
"""Command line raises an exception when the arguments are specified ambiguously"""
err = None
try:
processhandler.ProcessHandler([self.proclaunch, "process_normal_finish.ini"],
args=["1", "2", "3"],
cwd=here)
except TypeError, e:
err = e
self.assertTrue(err)
def test_commandline_from_list(self):
"""Command line is reported correctly when command and arguments are specified in a list"""
p = processhandler.ProcessHandler([self.proclaunch, "process_normal_finish.ini"],
cwd=here)
self.assertEqual(p.commandline, self.proclaunch + ' process_normal_finish.ini')
def test_commandline_over_specified(self):
"""Command line raises an exception when the arguments are specified ambiguously"""
err = None
try:
processhandler.ProcessHandler([self.proclaunch, "process_normal_finish.ini"],
args=["1", "2", "3"],
cwd=here)
except TypeError, e:
err = e
self.assertTrue(err)
def test_commandline_from_args(self):
"""Command line is reported correctly when arguments are specified in a dedicated list"""
p = processhandler.ProcessHandler(self.proclaunch,
args=["1", "2", "3"],
cwd=here)
self.assertEqual(p.commandline, self.proclaunch + ' 1 2 3')
def test_process_wait(self):
"""Process is started runs to completion while we wait indefinitely"""
p = processhandler.ProcessHandler([self.proclaunch,
"process_waittimeout_10s.ini"],
cwd=here)
p.run()
p.wait()
self.determine_status(p)
def test_process_timeout(self):
""" Process is started, runs but we time out waiting on it
to complete
"""
p = processhandler.ProcessHandler([self.proclaunch, "process_waittimeout.ini"],
cwd=here)
p.run(timeout=10)
p.wait()
self.determine_status(p, False, ['returncode', 'didtimeout'])
def test_process_timeout_no_kill(self):
""" Process is started, runs but we time out waiting on it
to complete. Process should not be killed.
"""
p = None
def timeout_handler():
self.assertEqual(p.proc.poll(), None)
p.kill()
p = processhandler.ProcessHandler([self.proclaunch, "process_waittimeout.ini"],
cwd=here,
onTimeout=(timeout_handler,),
kill_on_timeout=False)
p.run(timeout=1)
p.wait()
self.assertTrue(p.didTimeout)
self.determine_status(p, False, ['returncode', 'didtimeout'])
def test_process_waittimeout(self):
"""
Process is started, then wait is called and times out.
Process is still running and didn't timeout
"""
p = processhandler.ProcessHandler([self.proclaunch,
"process_waittimeout_10s.ini"],
cwd=here)
p.run()
p.wait(timeout=5)
self.determine_status(p, True, ())
def test_process_waitnotimeout(self):
""" Process is started, runs to completion before our wait times out
"""
p = processhandler.ProcessHandler([self.proclaunch,
"process_waittimeout_10s.ini"],
cwd=here)
p.run(timeout=30)
p.wait()
self.determine_status(p)
def test_process_kill(self):
"""Process is started, we kill it"""
p = processhandler.ProcessHandler([self.proclaunch, "process_normal_finish.ini"],
cwd=here)
p.run()
p.kill()
self.determine_status(p)
def test_process_output_twice(self):
"""
Process is started, then processOutput is called a second time explicitly
"""
p = processhandler.ProcessHandler([self.proclaunch,
"process_waittimeout_10s.ini"],
cwd=here)
p.run()
p.processOutput(timeout=5)
p.wait()
self.determine_status(p, False, ())
if __name__ == '__main__':
mozunit.main()
|
mpl-2.0
| 6,777,982,416,933,544,000 | 34.776371 | 99 | 0.543342 | false |
prozum/mastermixer
|
web/web.py
|
1
|
3740
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2014 by Armin Ronacher.
:copyright: (c) 2014 by Niclas Moeslund Overby.
:license: BSD, see LICENSE for more details.
"""
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
app = Flask(__name__)
app.config.update(dict(
DATABASE='/tmp/flaskr.db',
DEBUG=True,
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def init_db():
"""Creates the database tables."""
with app.app_context():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
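# schema.sql itself is not part of this module.  Judging from the queries in
# the view functions below, a minimal schema consistent with them would look
# roughly like this (column names are taken from the SQL above and below; the
# real file may define more):
#
#     create table Products (
#         Id integer primary key autoincrement,
#         Title text not null,
#         Text text not null,
#         Stat1 text, Stat2 text, Stat3 text
#     );
#     create table Orders (
#         Id integer primary key autoincrement,
#         Client text not null,
#         P_Id integer not null references Products(Id)
#     );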
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/', methods=['GET', 'POST'])
def root():
return render_template('menu.html')
@app.route('/order', methods=['GET', 'POST'])
def order():
db = get_db()
cur = db.execute('select Id, Title, Text from Products order by Id asc')
products = cur.fetchall()
return render_template('order.html', products=products)
@app.route('/add_order', methods=['GET', 'POST'])
def add_order():
#print(int(request.form["order"]))
db = get_db()
db.execute('insert into Orders (Client,P_Id) values (?, ?)',
[request.form['client'], int(request.form["order"])])
db.commit()
flash('Din bestilling var succesfuld!')
return redirect(url_for('queue'))
@app.route('/product', methods=['GET', 'POST'])
def product():
return render_template('product.html')
@app.route('/add_product', methods=['GET', 'POST'])
def add_product():
db = get_db()
db.execute('insert into Products (Title, Text , Stat1, Stat2, Stat3) values (?, ?, ?, ?, ?)',
[request.form['title'], request.form['text'],request.form['stat1'],request.form['stat2'],request.form['stat3']])
db.commit()
flash(u'Produktet blev tilføjet')
return redirect(url_for('root'))
@app.route('/queue', methods=['GET', 'POST'])
def queue():
db = get_db()
cur = db.execute('select Id, P_Id, Client from Orders order by Id asc')
orders = cur.fetchall()
cur = db.execute('select Id, Title from Products order by Id asc')
products = cur.fetchall()
return render_template('queue.html', orders=orders, products=products)
@app.route('/about', methods=['GET', 'POST'])
def about():
return render_template('about.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
            return redirect(url_for('queue'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
    return redirect(url_for('root'))
if __name__ == '__main__':
init_db()
app.run()
|
mit
| -8,129,914,217,948,837,000 | 28.210938 | 129 | 0.615138 | false |
swannapa/erpnext
|
erpnext/accounts/doctype/subscription/subscription.py
|
1
|
10400
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import calendar
from frappe import _
from frappe.desk.form import assign_to
from frappe.utils.jinja import validate_template
from dateutil.relativedelta import relativedelta
from frappe.utils.user import get_system_managers
from frappe.utils import cstr, getdate, split_emails, add_days, today, get_last_day, get_first_day
from frappe.model.document import Document
month_map = {'Monthly': 1, 'Quarterly': 3, 'Half-yearly': 6, 'Yearly': 12}
class Subscription(Document):
def validate(self):
self.update_status()
self.validate_dates()
self.validate_next_schedule_date()
self.validate_email_id()
validate_template(self.subject or "")
validate_template(self.message or "")
def before_submit(self):
self.set_next_schedule_date()
def on_submit(self):
# self.update_subscription_id()
self.update_subscription_data()
def on_update_after_submit(self):
self.update_subscription_data()
self.validate_dates()
self.set_next_schedule_date()
def before_cancel(self):
self.unlink_subscription_id()
def unlink_subscription_id(self):
doc = frappe.get_doc(self.reference_doctype, self.reference_document)
if doc.meta.get_field('subscription'):
doc.subscription = None
doc.db_update()
def validate_dates(self):
if self.end_date and getdate(self.start_date) > getdate(self.end_date):
frappe.throw(_("End date must be greater than start date"))
def validate_next_schedule_date(self):
if self.repeat_on_day and self.next_schedule_date:
next_date = getdate(self.next_schedule_date)
if next_date.day != self.repeat_on_day:
# if the repeat day is the last day of the month (31)
# and the current month does not have as many days,
# then the last day of the current month is a valid date
lastday = calendar.monthrange(next_date.year, next_date.month)[1]
if self.repeat_on_day < lastday:
# the specified day of the month is not same as the day specified
# or the last day of the month
frappe.throw(_("Next Date's day and Repeat on Day of Month must be equal"))
def validate_email_id(self):
if self.notify_by_email:
if self.recipients:
email_list = split_emails(self.recipients.replace("\n", ""))
from frappe.utils import validate_email_add
for email in email_list:
if not validate_email_add(email):
frappe.throw(_("{0} is an invalid email address in 'Recipients'").format(email))
else:
frappe.throw(_("'Recipients' not specified"))
def set_next_schedule_date(self):
self.next_schedule_date = get_next_schedule_date(self.start_date,
self.frequency, self.repeat_on_day)
def update_subscription_data(self):
update_doc = False
doc = frappe.get_doc(self.reference_doctype, self.reference_document)
if frappe.get_meta(self.reference_doctype).get_field("from_date"):
doc.from_date = self.from_date
doc.to_date = self.to_date
update_doc = True
if not doc.subscription:
doc.subscription = self.name
update_doc = True
if update_doc:
doc.db_update()
def update_subscription_id(self):
doc = frappe.get_doc(self.reference_doctype, self.reference_document)
if not doc.meta.get_field('subscription'):
frappe.throw(_("Add custom field Subscription Id in the doctype {0}").format(self.reference_doctype))
doc.db_set('subscription', self.name)
def update_status(self, status=None):
self.status = {
'0': 'Draft',
'1': 'Submitted',
'2': 'Cancelled'
}[cstr(self.docstatus or 0)]
if status and status != 'Resumed':
self.status = status
def get_next_schedule_date(start_date, frequency, repeat_on_day):
mcount = month_map.get(frequency)
if mcount:
next_date = get_next_date(start_date, mcount, repeat_on_day)
else:
days = 7 if frequency == 'Weekly' else 1
next_date = add_days(start_date, days)
return next_date
def make_subscription_entry(date=None):
date = date or today()
for data in get_subscription_entries(date):
schedule_date = getdate(data.next_schedule_date)
while schedule_date <= getdate(today()):
create_documents(data, schedule_date)
schedule_date = get_next_schedule_date(schedule_date,
data.frequency, data.repeat_on_day)
if schedule_date and not frappe.db.get_value('Subscription', data.name, 'disabled'):
frappe.db.set_value('Subscription', data.name, 'next_schedule_date', schedule_date)
def get_subscription_entries(date):
return frappe.db.sql(""" select * from `tabSubscription`
where docstatus = 1 and next_schedule_date <=%s
and reference_document is not null and reference_document != ''
and next_schedule_date <= ifnull(end_date, '2199-12-31')
and ifnull(disabled, 0) = 0 and status != 'Stopped' """, (date), as_dict=1)
def create_documents(data, schedule_date):
try:
doc = make_new_document(data, schedule_date)
if getattr(doc, "from_date", None):
update_subscription_period(data, doc)
if data.notify_by_email and data.recipients:
print_format = data.print_format or "Standard"
send_notification(doc, data, print_format=print_format)
frappe.db.commit()
except Exception:
frappe.db.rollback()
frappe.db.begin()
frappe.log_error(frappe.get_traceback())
disable_subscription(data)
frappe.db.commit()
if data.reference_document and not frappe.flags.in_test:
notify_error_to_user(data)
def update_subscription_period(data, doc):
from_date = doc.from_date
to_date = doc.to_date
frappe.db.set_value('Subscription', data.name, 'from_date', from_date)
frappe.db.set_value('Subscription', data.name, 'to_date', to_date)
def disable_subscription(data):
subscription = frappe.get_doc('Subscription', data.name)
subscription.db_set('disabled', 1)
def notify_error_to_user(data):
party = ''
party_type = ''
if data.reference_doctype in ['Sales Order', 'Sales Invoice', 'Delivery Note']:
party_type = 'customer'
elif data.reference_doctype in ['Purchase Order', 'Purchase Invoice', 'Purchase Receipt']:
party_type = 'supplier'
if party_type:
party = frappe.db.get_value(data.reference_doctype, data.reference_document, party_type)
notify_errors(data.reference_document, data.reference_doctype, party, data.owner, data.name)
def make_new_document(args, schedule_date):
doc = frappe.get_doc(args.reference_doctype, args.reference_document)
new_doc = frappe.copy_doc(doc, ignore_no_copy=False)
update_doc(new_doc, doc , args, schedule_date)
new_doc.insert(ignore_permissions=True)
if args.submit_on_creation:
new_doc.submit()
return new_doc
def update_doc(new_document, reference_doc, args, schedule_date):
new_document.docstatus = 0
if new_document.meta.get_field('set_posting_time'):
new_document.set('set_posting_time', 1)
mcount = month_map.get(args.frequency)
if new_document.meta.get_field('subscription'):
new_document.set('subscription', args.name)
if args.from_date and args.to_date:
from_date = get_next_date(args.from_date, mcount)
if (cstr(get_first_day(args.from_date)) == cstr(args.from_date)) and \
(cstr(get_last_day(args.to_date)) == cstr(args.to_date)):
to_date = get_last_day(get_next_date(args.to_date, mcount))
else:
to_date = get_next_date(args.to_date, mcount)
if new_document.meta.get_field('from_date'):
new_document.set('from_date', from_date)
new_document.set('to_date', to_date)
new_document.run_method("on_recurring", reference_doc=reference_doc, subscription_doc=args)
for data in new_document.meta.fields:
if data.fieldtype == 'Date' and data.reqd:
new_document.set(data.fieldname, schedule_date)
def get_next_date(dt, mcount, day=None):
dt = getdate(dt)
dt += relativedelta(months=mcount, day=day)
return dt
def send_notification(new_rv, subscription_doc, print_format='Standard'):
"""Notify concerned persons about recurring document generation"""
print_format = print_format
	if not subscription_doc.subject:
		subject = _("New {0}: #{1}").format(new_rv.doctype, new_rv.name)
	elif "{" in subscription_doc.subject:
		subject = frappe.render_template(subscription_doc.subject, {'doc': new_rv})
	else:
		subject = subscription_doc.subject
	if not subscription_doc.message:
		message = _("Please find attached {0} #{1}").format(new_rv.doctype, new_rv.name)
	elif "{" in subscription_doc.message:
		message = frappe.render_template(subscription_doc.message, {'doc': new_rv})
	else:
		message = subscription_doc.message
attachments = [frappe.attach_print(new_rv.doctype, new_rv.name,
file_name=new_rv.name, print_format=print_format)]
frappe.sendmail(subscription_doc.recipients,
subject=subject, message=message, attachments=attachments)
def notify_errors(doc, doctype, party, owner, name):
recipients = get_system_managers(only_name=True)
frappe.sendmail(recipients + [frappe.db.get_value("User", owner, "email")],
subject=_("[Urgent] Error while creating recurring %s for %s" % (doctype, doc)),
message = frappe.get_template("templates/emails/recurring_document_failed.html").render({
"type": _(doctype),
"name": doc,
"party": party or "",
"subscription": name
}))
assign_task_to_owner(name, "Recurring Documents Failed", recipients)
def assign_task_to_owner(name, msg, users):
for d in users:
args = {
'doctype' : 'Subscription',
'assign_to' : d,
'name' : name,
'description' : msg,
'priority' : 'High'
}
assign_to.add(args)
@frappe.whitelist()
def make_subscription(doctype, docname):
doc = frappe.new_doc('Subscription')
doc.reference_doctype = doctype
doc.reference_document = docname
return doc
@frappe.whitelist()
def stop_resume_subscription(subscription, status):
doc = frappe.get_doc('Subscription', subscription)
frappe.msgprint(_("Subscription has been {0}").format(status))
if status == 'Resumed':
doc.next_schedule_date = get_next_schedule_date(today(),
doc.frequency, doc.repeat_on_day)
doc.update_status(status)
doc.save()
return doc.status
def subscription_doctype_query(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select parent from `tabDocField`
where fieldname = 'subscription'
and parent like %(txt)s
order by
if(locate(%(_txt)s, parent), locate(%(_txt)s, parent), 99999),
parent
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
|
gpl-3.0
| -6,931,336,271,616,514,000 | 32.551613 | 104 | 0.710288 | false |
googleapis/googleapis-gen
|
google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/currency_constant_service/transports/grpc.py
|
1
|
10221
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v6.resources.types import currency_constant
from google.ads.googleads.v6.services.types import currency_constant_service
from .base import CurrencyConstantServiceTransport, DEFAULT_CLIENT_INFO
class CurrencyConstantServiceGrpcTransport(CurrencyConstantServiceTransport):
"""gRPC backend transport for CurrencyConstantService.
Service to fetch currency constants.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_currency_constant(self) -> Callable[
[currency_constant_service.GetCurrencyConstantRequest],
currency_constant.CurrencyConstant]:
r"""Return a callable for the get currency constant method over gRPC.
Returns the requested currency constant.
Returns:
Callable[[~.GetCurrencyConstantRequest],
~.CurrencyConstant]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_currency_constant' not in self._stubs:
self._stubs['get_currency_constant'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v6.services.CurrencyConstantService/GetCurrencyConstant',
request_serializer=currency_constant_service.GetCurrencyConstantRequest.serialize,
response_deserializer=currency_constant.CurrencyConstant.deserialize,
)
return self._stubs['get_currency_constant']
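# Illustrative usage sketch (not part of the generated code): the transport is
# normally constructed indirectly by the service client, but it can also be
# called directly.  The resource name format below is an assumption made for
# the example.
#
#     credentials, _ = google.auth.default()
#     transport = CurrencyConstantServiceGrpcTransport(credentials=credentials)
#     request = currency_constant_service.GetCurrencyConstantRequest(
#         resource_name='currencyConstants/USD',
#     )
#     constant = transport.get_currency_constant(request)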
__all__ = (
'CurrencyConstantServiceGrpcTransport',
)
|
apache-2.0
| -9,060,951,187,651,923,000 | 43.633188 | 112 | 0.611878 | false |
JuhaniImberg/cses.py
|
cses/commands/actions.py
|
1
|
6758
|
import click
import sys
import time
import os
from cses.cli import cli
from cses.api import API
from cses.db import DB
from cses.commands.tasks import pass_course
from cses.commands.tasks import pass_task
from cses.tasks import detect_type, languages
from cses.ui import clr, color_prompt
@cli.command()
@click.pass_context
@pass_course
@pass_task
@click.argument("filename",
type=click.Path(exists=True,
dir_okay=False,
resolve_path=True))
def associate(ctx, course, task, filename):
"Associates a file with the current task"
db = ctx.ensure_object(DB)
api = ctx.ensure_object(API)
if not db.files:
db.files = {}
if course not in db.files:
db.files[course] = {}
db.files[course][task] = filename
@cli.command()
@click.pass_context
@pass_course
@pass_task
def submit(ctx, course, task):
"Submits the current tasks code to the server"
db = ctx.ensure_object(DB)
api = ctx.ensure_object(API)
if not db.files or course not in db.files or task not in db.files[course]:
ctx.fail(clr("No file associated with that task"))
file = db.files[course][task]
lang = detect_type(file)
    if lang is None:
        ctx.fail(clr("Could not detect the language"))
lang = lang.name
code = ""
with open(file, "r") as fp:
code = fp.read()
sendreq = api.send(db.username, db.password, task, course, lang, code)
if sendreq["result"] == "error":
ctx.fail(sendreq["message"])
ticket = sendreq["ticket"]
click.echo(clr("Submission ID: {}, waiting for results...").format(ticket))
old_status = ""
lineclear = "\033[2K\033[80D"
while True:
resreq = api.result(db.username, db.password, ticket)
if resreq["result"] == "error":
sys.stdout.write(lineclear)
sys.stdout.flush()
ctx.fail(sendreq["message"])
status = resreq["status"]
if status == "COMPILE ERROR":
sys.stdout.write("\n")
ctx.fail("\033[31mCompile merror\033[0m")
elif status == "READY":
click.echo("{}{}Test results".format(lineclear, color_prompt))
for ind, test in enumerate(resreq["test"]):
click.echo("#{}: {}".format(ind+1, test["status"]))
click.echo("Score: {}".format(resreq["score"]))
break
if old_status != status:
sys.stdout.write("{}{}Status: {}".format(lineclear, color_prompt,
status))
sys.stdout.flush()
old_status = status
time.sleep(1)
@cli.command()
@click.pass_context
@pass_course
@pass_task
def show(ctx, course, task):
"Shows the tasks description in a browser"
db = ctx.ensure_object(DB)
api = ctx.ensure_object(API)
courses = api.courses()
name = ""
for i in courses:
if i["id"] == course:
name = i["nick"]
break
else:
ctx.fail(clr("Could not field the course"))
click.launch("http://cses.fi/{}/task/{}/".format(name, task))
@cli.command()
@click.pass_context
@pass_course
@pass_task
def edit(ctx, course, task):
"Edits the current tasks file"
db = ctx.ensure_object(DB)
if not db.files or course not in db.files or task not in db.files[course]:
ctx.fail(clr("No file associated with that task"))
file = db.files[course][task]
db.close()
click.edit(filename=file)
@cli.command()
@click.pass_context
@pass_course
@pass_task
@click.option("--keep-going", "-k", is_flag=True)
@click.option("--full", is_flag=True)
@click.option("--diff", "-d", is_flag=True)
def test(ctx, course, task, keep_going, full, diff):
"Test the task locally"
db = ctx.ensure_object(DB)
api = ctx.ensure_object(API)
if not db.files or course not in db.files or task not in db.files[course]:
ctx.fail(clr("No file associated with that task"))
fname = db.files[course][task]
type = detect_type(fname)
    if type is None:
ctx.fail(clr("Could not detect the type"))
type.test(fname, api.tests(db.username, db.password, task, course),
keep_going, full, diff)
@cli.command()
@click.pass_context
@pass_course
@pass_task
def run(ctx, course, task):
"Run the file"
db = ctx.ensure_object(DB)
api = ctx.ensure_object(API)
if not db.files or course not in db.files or task not in db.files[course]:
ctx.fail(clr("No file associated with that task"))
fname = db.files[course][task]
type = detect_type(fname)
    if type is None:
ctx.fail(clr("Could not detect the type"))
type.user_run(fname)
@cli.command()
@click.pass_context
@pass_course
@pass_task
def create(ctx, course, task):
"Creates a file for the current task"
db = ctx.ensure_object(DB)
api = ctx.ensure_object(API)
if task in db.files[course]:
if not click.confirm(clr("There is already a file, change the file")):
return
fname = ""
fcontent = ""
click.echo(clr("Available languages:"))
click.echo(", ".join([x.name for x in languages]))
while True:
sel = click.prompt(clr("The language"), default="C++")
sel = sel.lower()
for lang in languages:
if lang.name.lower().startswith(sel):
fname = lang.file_extensions[0]
fcontent = lang.template
break
else:
if not click.confirm(clr("Can't understand you, try again"),
default=True):
ctx.fail(clr("Could not select language"))
else:
continue
break
tasks = api.tasks(course, db.username, db.password)
for i in tasks:
if i["id"] == task:
fcontent = fcontent.format(i["name"])
fname = i["name"].replace(" ", "_") + "." + fname
break
else:
ctx.fail(clr("Could not find the task"))
if course not in db.paths:
ctx.fail(clr("The course doesn't have a default path"))
path = os.path.join(db.paths[course], fname)
try:
os.makedirs(os.path.split(path)[0])
except OSError as e:
if e.errno != 17:
raise
pass
if os.path.isfile(path):
if click.confirm(clr(("There is already a file"
"with the name {} "
"associate it instead?")).format(path),
default=True):
return ctx.invoke(associate, filename=path)
with open(path, "w+") as fp:
fp.write(fcontent)
click.echo(clr("Wrote {}".format(path)))
ctx.invoke(associate, filename=path)
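# Illustrative sketch (not part of this module): detect_type() and `languages`
# are imported from cses.tasks.  Based on how they are used above, each
# language entry exposes at least `name`, `file_extensions` and `template`,
# and detect_type() resolves a filename to one of those entries, e.g.
#
#     def detect_type(filename):
#         ext = filename.rsplit('.', 1)[-1]
#         for lang in languages:
#             if ext in lang.file_extensions:
#                 return lang
#         return None
#
# The real implementation may differ.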
|
mit
| -2,871,418,818,631,801,300 | 28.382609 | 79 | 0.583309 | false |
festicket/django-notification
|
notification/migrations/0002_version_2.py
|
1
|
9845
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Renaming field 'Notice.user'
db.rename_column('notification_notice', 'user_id', 'recipient_id')
# Adding field 'Notice.sender'
db.add_column('notification_notice', 'sender',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='sent_notices', null=True, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# Renaming field 'Notice.user'
db.rename_column('notification_notice', 'recipient_id', 'user_id')
# Deleting field 'Notice.sender'
db.delete_column('notification_notice', 'sender_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'relationships': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_to'", 'symmetrical': 'False', 'through': "orm['relationships.Relationship']", 'to': "orm['auth.User']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'notification.notice': {
'Meta': {'ordering': "['-added']", 'object_name': 'Notice'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'on_site': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recieved_notices'", 'to': "orm['auth.User']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_notices'", 'null': 'True', 'to': "orm['auth.User']"}),
'unseen': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'notification.noticequeuebatch': {
'Meta': {'object_name': 'NoticeQueueBatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickled_data': ('django.db.models.fields.TextField', [], {})
},
'notification.noticesetting': {
'Meta': {'unique_together': "(('user', 'notice_type', 'medium'),)", 'object_name': 'NoticeSetting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medium': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'send': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'notification.noticetype': {
'Meta': {'object_name': 'NoticeType'},
'default': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'notification.observeditem': {
'Meta': {'ordering': "['-added']", 'object_name': 'ObservedItem'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'signal': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'relationships.relationship': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('from_user', 'to_user', 'status', 'site'),)", 'object_name': 'Relationship'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_users'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'related_name': "'relationships'", 'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['relationships.RelationshipStatus']"}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_users'", 'to': "orm['auth.User']"}),
'weight': ('django.db.models.fields.FloatField', [], {'default': '1.0', 'null': 'True', 'blank': 'True'})
},
'relationships.relationshipstatus': {
'Meta': {'ordering': "('name',)", 'object_name': 'RelationshipStatus'},
'from_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'symmetrical_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'to_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['notification']
|
mit
| -3,072,889,652,861,836,300 | 70.347826 | 217 | 0.555104 | false |
tmshv/k2
|
Tools/qgis_export_street_features.py
|
1
|
1910
|
# -*- coding: utf-8 -*-
import re
import os
import json
out_filename = os.path.expanduser('~/osm_features.geojson')
layer = iface.activeLayer()
# features = layer.getFeatures()
features = layer.selectedFeatures()
properties = [('OSM_ID', int), 'NAME', 'REF', 'HIGHWAY', 'ONEWAY', 'BRIGDE', 'TUNNEL', 'MAXSPEED', 'LANES', 'WIDTH', 'SURFACE']
properties = []
def listify(i):
try:
iterator = list(i)
out = []
for item in iterator:
out.append(listify(item))
return out
except:
p = transform(i)
return list(p)
def get_field(feature, key, cast=None):
try:
value = feature[key]
value = value if cast is None else cast(value)
value = value if type(value) is unicode else unicode(value)
value = None if value == u'NULL' else value
return value
except KeyError:
return None
def transform(point):
crs4326 = QgsCoordinateReferenceSystem(4326)
crs3857 = QgsCoordinateReferenceSystem(3857)
t = QgsCoordinateTransform(crs3857, crs4326)
p = QgsPoint(point[0], point[1])
return t.transform(p)
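# For illustration (not part of the original script): transform() converts a
# Web Mercator (EPSG:3857) coordinate pair, in metres, into a WGS84
# (EPSG:4326) point in degrees, e.g. approximately
#
#     transform((0, 0))          -> QgsPoint(0.0, 0.0)
#     transform((1113194.9, 0))  -> QgsPoint(10.0, 0.0)   # longitude 10 deg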
def geometry(feature, type):
m = 'as{0}'.format(type)
g = getattr(feature.geometry(), m);
return {
'type': type,
'coordinates': listify(g())
}
def to_feature(feature):
props = {}
for i in properties:
if type(i) is str:
props[i] = get_field(feature, i)
elif type(i) is tuple:
props[i[0]] = get_field(feature, i[0], i[1])
return {
'type': 'Feature',
'properties': props,
'geometry': geometry(feature, 'Polyline')
}
geojson = {
'type': 'FeatureCollection',
'features': list(map(to_feature, features))
}
with open(out_filename, 'w') as out:
j = json.dumps(geojson, indent=4, ensure_ascii=False)
out.write(j.encode('utf-8'))
out.close()
|
mit
| 8,231,745,914,096,852,000 | 22.8875 | 127 | 0.592147 | false |
epam/DLab
|
infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
|
1
|
3644
|
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import uuid
import argparse
import sys
def stop_notebook(resource_group_name, notebook_name):
print("Stopping data engine cluster")
cluster_list = []
try:
for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
try:
if notebook_name == vm.tags['notebook_name']:
if 'master' == vm.tags["Type"]:
cluster_list.append(vm.tags["Name"])
AzureActions().stop_instance(resource_group_name, vm.name)
print("Instance {} has been stopped".format(vm.name))
except:
pass
except:
sys.exit(1)
print("Stopping notebook")
try:
for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
if notebook_name == vm.tags["Name"]:
AzureActions().stop_instance(resource_group_name, vm.name)
print("Instance {} has been stopped".format(vm.name))
except:
sys.exit(1)
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
print('Generating infrastructure names and tags')
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
except:
notebook_config['exploratory_name'] = ''
try:
notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
except:
notebook_config['computational_name'] = ''
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
logging.info('[STOP NOTEBOOK]')
print('[STOP NOTEBOOK]')
try:
stop_notebook(notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
append_result("Failed to stop notebook.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Action": "Stop notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
|
apache-2.0
| 2,829,598,500,970,698,000 | 35.808081 | 105 | 0.591109 | false |
NeCTAR-RC/nova
|
nova/availability_zones.py
|
1
|
9190
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Availability zone helper functions."""
import collections
import nova.conf
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova import cache_utils
from nova.cells import opts as cell_opts
from nova import db
from nova import objects
from nova import utils
# NOTE(vish): azs don't change that often, so cache them for an hour to
# avoid hitting the db multiple times on every request.
AZ_CACHE_SECONDS = 60 * 60
MC = None
CONF = nova.conf.CONF
CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
def _get_cache():
global MC
if MC is None:
MC = cache_utils.get_client(expiration_time=AZ_CACHE_SECONDS)
return MC
def reset_cache():
"""Reset the cache, mainly for testing purposes and update
availability_zone for host aggregate
"""
global MC
MC = None
def _make_cache_key(host, cell_name=None):
return "azcache-%s-%s" % (cell_name or 'none', host.encode('utf-8'))
def _build_metadata_by_host(aggregates, hosts=None):
if hosts and not isinstance(hosts, set):
hosts = set(hosts)
metadata = collections.defaultdict(set)
for aggregate in aggregates:
for host in aggregate.hosts:
if hosts and host not in hosts:
continue
metadata[host].add(list(aggregate.metadata.values())[0])
return metadata
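# For illustration (not part of the original module): given aggregates with
# metadata {'availability_zone': 'az1'} and {'availability_zone': 'az2'} whose
# host lists are ['node1', 'node2'] and ['node2'], the helper above returns
#
#     {'node1': set(['az1']), 'node2': set(['az1', 'az2'])}
#
# i.e. each host is mapped to the set of availability zone names of the
# aggregates it belongs to.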
def set_availability_zones(context, services):
# Makes sure services isn't a sqlalchemy object
services = [dict(service) for service in services]
hosts = set([service['host'] for service in services])
aggregates = objects.AggregateList.get_by_metadata_key(context,
'availability_zone', hosts=hosts)
metadata = _build_metadata_by_host(aggregates, hosts=hosts)
# gather all of the availability zones associated with a service host
for service in services:
az = CONF.internal_service_availability_zone
if service['topic'] == "compute":
if metadata.get(service['host']):
az = u','.join(list(metadata[service['host']]))
else:
az = CONF.default_availability_zone
# update the cache
update_host_availability_zone_cache(context,
service['host'], az)
service['availability_zone'] = az
return services
def get_host_availability_zone(context, host):
aggregates = objects.AggregateList.get_by_host(context, host,
key='availability_zone')
if aggregates:
az = aggregates[0].metadata['availability_zone']
else:
az = CONF.default_availability_zone
return az
def update_host_availability_zone_cache(context, host, availability_zone=None):
if not availability_zone:
availability_zone = get_host_availability_zone(context, host)
cache = _get_cache()
cache_key = _make_cache_key(host)
cache.delete(cache_key)
cache.set(cache_key, availability_zone)
def get_availability_zones(context, get_only_available=False,
with_hosts=False):
"""Return available and unavailable zones on demand.
:param get_only_available: flag to determine whether to return
available zones only, default False indicates return both
available zones and not available zones, True indicates return
available zones only
:param with_hosts: whether to return hosts part of the AZs
:type with_hosts: bool
"""
# Override for cells
cell_type = cell_opts.get_cell_type()
if cell_type == 'api':
ctxt = context.elevated()
global_azs = []
mute_azs = []
secs = CONF.cells.mute_child_interval
for cell in db.cell_get_all(ctxt):
last_seen = cell.updated_at
capabilities = jsonutils.loads(cell.capabilities)
if 'availability_zones' not in capabilities:
continue
if last_seen and timeutils.is_older_than(last_seen, secs):
mute_azs.extend(capabilities['availability_zones'])
else:
global_azs.extend(capabilities['availability_zones'])
available_zones = list(set(global_azs))
unavailable_zones = list(set(mute_azs))
if get_only_available:
return available_zones
return (available_zones, unavailable_zones)
enabled_services = objects.ServiceList.get_all(context, disabled=False,
set_zones=False)
enabled_services = set_availability_zones(context, enabled_services)
available_zones = []
for (zone, host) in [(service['availability_zone'], service['host'])
for service in enabled_services]:
if not with_hosts and zone not in available_zones:
available_zones.append(zone)
elif with_hosts:
_available_zones = dict(available_zones)
zone_hosts = _available_zones.setdefault(zone, set())
zone_hosts.add(host)
# .items() returns a view in Py3, casting it to list for Py2 compat
available_zones = list(_available_zones.items())
if not get_only_available:
disabled_services = objects.ServiceList.get_all(context, disabled=True,
set_zones=True)
not_available_zones = []
azs = available_zones if not with_hosts else dict(available_zones)
zones = [(service['availability_zone'], service['host'])
for service in disabled_services
if service['availability_zone'] not in azs]
for (zone, host) in zones:
if not with_hosts and zone not in not_available_zones:
not_available_zones.append(zone)
elif with_hosts:
_not_available_zones = dict(not_available_zones)
zone_hosts = _not_available_zones.setdefault(zone, set())
zone_hosts.add(host)
# .items() returns a view in Py3, casting it to list for Py2
# compat
not_available_zones = list(_not_available_zones.items())
return (available_zones, not_available_zones)
else:
return available_zones
def get_instance_availability_zone(context, instance):
"""Return availability zone of specified instance."""
host = instance.get('host')
cell_type = cell_opts.get_cell_type()
if not host:
# Likely hasn't reached a viable compute node yet so give back the
# desired availability_zone in the instance record if the boot request
# specified one.
az = None
if cell_type == 'api':
sys_metadata = utils.instance_sys_meta(instance)
az = sys_metadata.get('availability_zone')
if not az:
az = instance.get('availability_zone')
return az
cache_string = host
if cell_type == 'api':
cell_name = str(instance.get('cell_name'))
if not cell_name:
return None
cache_string += cell_name
cache_key = _make_cache_key(cache_string)
cache = _get_cache()
az = cache.get(cache_key)
az_inst = instance.get('availability_zone')
if az_inst is not None and az != az_inst:
# NOTE(sbauza): Cache is wrong, we need to invalidate it by fetching
# again the right AZ related to the aggregate the host belongs to.
# As the API is also calling this method for setting the instance
# AZ field, we don't need to update the instance.az field.
# This case can happen because the cache is populated before the
# instance has been assigned to the host so that it would keep the
# former reference which was incorrect. Instead of just taking the
# instance AZ information for refilling the cache, we prefer to
# invalidate the cache and fetch it again because there could be some
# corner cases where this method could be called before the instance
# has been assigned to the host also.
az = None
if not az:
elevated = context.elevated()
if cell_type == 'api':
sys_metadata = utils.instance_sys_meta(instance)
az = sys_metadata.get('availability_zone')
if not az:
return None
else:
az = get_host_availability_zone(elevated, host)
cache.set(cache_key, az)
return az
|
apache-2.0
| 473,693,852,520,149,500 | 37.613445 | 79 | 0.626224 | false |
ahmadassaf/zulip
|
zerver/views/messages.py
|
1
|
43747
|
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import Q
from django.http import HttpRequest, HttpResponse
from six import text_type
from typing import Any, AnyStr, Callable, Iterable, Optional, Tuple, Union
from zerver.lib.str_utils import force_bytes, force_text
from zerver.decorator import authenticated_api_view, authenticated_json_post_view, \
has_request_variables, REQ, JsonableError, \
to_non_negative_int
from django.utils.html import escape as escape_html
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients, truncate_body
from zerver.lib.cache import generic_bulk_cached_fetch
from zerver.lib.response import json_success, json_error
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
Realm, Recipient, UserMessage, bulk_get_recipients, get_recipient, \
get_user_profile_by_email, get_stream, \
parse_usermessage_flags, to_dict_cache_key_id, extract_message_dict, \
stringify_message_dict, \
resolve_email_to_domain, get_realm, get_active_streams, \
bulk_get_streams
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias, Selectable, Select, ColumnElement
import re
import ujson
import datetime
from six.moves import map
import six
class BadNarrowOperator(JsonableError):
def __init__(self, desc, status_code=400):
# type: (str, int) -> None
self.desc = desc
self.status_code = status_code
def to_json_error_msg(self):
# type: () -> str
return _('Invalid narrow operator: {}').format(self.desc)
Query = Any # TODO: Should be Select, but sqlalchemy stubs are busted
ConditionTransform = Any # TODO: should be Callable[[ColumnElement], ColumnElement], but sqlalchemy stubs are busted
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
def __init__(self, user_profile, msg_id_column):
# type: (UserProfile, str) -> None
self.user_profile = user_profile
self.msg_id_column = msg_id_column
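        # Note: msg_id_column is either zerver_message.id or
        # zerver_usermessage.message_id, depending on how the caller built
        # the base query (see get_old_messages_backend below).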
def add_term(self, query, term):
# type: (Query, Dict[str, Any]) -> Query
# We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.replace('-', '_')
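        # e.g. an operator of 'pm-with' dispatches to by_pm_with() below
        # (illustrative example).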
method = getattr(self, method_name, None)
if method is None:
raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
def by_has(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
def by_is(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'private':
query = query.select_from(join(query.froms[0], "zerver_recipient",
column("recipient_id") ==
literal_column("zerver_recipient.id")))
cond = or_(column("type") == Recipient.PERSONAL,
column("type") == Recipient.HUDDLE)
return query.where(maybe_negate(cond))
elif operand == 'starred':
cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'mentioned' or operand == 'alerted':
cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
return query.where(maybe_negate(cond))
raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern):
# type: (text_type) -> text_type
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, u'\u03bb' to u'\\\u03bb'. This function will correctly escape
them for postgres, u'\u03bb' to u'\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
if c not in self._alphanum:
if c == '\000':
                    s[i] = '\\000'
elif ord(c) >= 128:
# convert the character to hex postgres regex will take
# \uXXXX
s[i] = '\\u{:0>4x}'.format(ord(c))
else:
s[i] = '\\' + c
return ''.join(s)
def by_stream(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
stream = get_stream(operand, self.user_profile.realm)
if stream is None:
raise BadNarrowOperator('unknown stream ' + operand)
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
# (unsocial, ununsocial, social.d, etc)
m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
if m:
base_stream_name = m.group(1)
else:
base_stream_name = stream.name
matching_streams = get_active_streams(self.user_profile.realm).filter(
name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
return query.where(maybe_negate(cond))
recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
def by_topic(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
# (foo, foo.d, foo.d.d, etc)
m = re.search(r'^(.*?)(?:\.d)*$', operand, re.IGNORECASE)
if m:
base_topic = m.group(1)
else:
base_topic = operand
# Additionally, MIT users expect the empty instance and
# instance "personal" to be the same.
if base_topic in ('', 'personal', '(instance "")'):
regex = r'^(|personal|\(instance ""\))(\.d)*$'
else:
regex = r'^%s(\.d)*$' % (self._pg_re_escape(base_topic),)
cond = column("subject").op("~*")(regex)
return query.where(maybe_negate(cond))
cond = func.upper(column("subject")) == func.upper(literal(operand))
return query.where(maybe_negate(cond))
def by_sender(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
try:
sender = get_user_profile_by_email(operand)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
cond = column("sender_id") == literal(sender.id)
return query.where(maybe_negate(cond))
def by_near(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
return query
def by_id(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
cond = self.msg_id_column == literal(operand)
return query.where(maybe_negate(cond))
def by_pm_with(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if ',' in operand:
# Huddle
try:
emails = [e.strip() for e in operand.split(',')]
recipient = recipient_for_emails(emails, False,
self.user_profile, self.user_profile)
except ValidationError:
raise BadNarrowOperator('unknown recipient ' + operand)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
else:
# Personal message
self_recipient = get_recipient(Recipient.PERSONAL, type_id=self.user_profile.id)
if operand == self.user_profile.email:
# Personals with self
cond = and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == self_recipient.id)
return query.where(maybe_negate(cond))
# Personals with other user; include both directions.
try:
narrow_profile = get_user_profile_by_email(operand)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
narrow_recipient = get_recipient(Recipient.PERSONAL, narrow_profile.id)
cond = or_(and_(column("sender_id") == narrow_profile.id,
column("recipient_id") == self_recipient.id),
and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == narrow_recipient.id))
return query.where(maybe_negate(cond))
def by_search(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if settings.USING_PGROONGA:
return self._by_search_pgroonga(query, operand, maybe_negate)
else:
return self._by_search_tsearch(query, operand, maybe_negate)
def _by_search_pgroonga(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
match_positions_byte = func.pgroonga.match_positions_byte
query_extract_keywords = func.pgroonga.query_extract_keywords
keywords = query_extract_keywords(operand)
query = query.column(match_positions_byte(column("rendered_content"),
keywords).label("content_matches"))
query = query.column(match_positions_byte(column("subject"),
keywords).label("subject_matches"))
condition = column("search_pgroonga").op("@@")(operand)
return query.where(maybe_negate(condition))
def _by_search_tsearch(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
ts_locs_array = func.ts_match_locs_array
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
column("rendered_content"),
tsquery).label("content_matches"))
# We HTML-escape the subject in Postgres to avoid doing a server round-trip
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
func.escape_html(column("subject")),
tsquery).label("subject_matches"))
# Do quoted string matching. We really want phrase
# search here so we can ignore punctuation and do
# stemming, but there isn't a standard phrase search
# mechanism in Postgres
for term in re.findall('"[^"]+"|\S+', operand):
if term[0] == '"' and term[-1] == '"':
term = term[1:-1]
term = '%' + connection.ops.prep_for_like_query(term) + '%'
cond = or_(column("content").ilike(term),
column("subject").ilike(term))
query = query.where(maybe_negate(cond))
cond = column("search_tsvector").op("@@")(tsquery)
return query.where(maybe_negate(cond))
# Apparently, the offsets we get from tsearch_extras are counted in
# unicode characters, not in bytes, so we do our processing with text,
# not bytes.
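# Illustrative example (not part of the original file): with locs [(4, 3)],
# u'foo bar' becomes u'foo <span class="highlight">bar</span>'.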
def highlight_string_text_offsets(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
string = force_text(text)
highlight_start = u'<span class="highlight">'
highlight_stop = u'</span>'
pos = 0
result = u''
for loc in locs:
(offset, length) = loc
result += string[pos:offset]
result += highlight_start
result += string[offset:offset + length]
result += highlight_stop
pos = offset + length
result += string[pos:]
return result
def highlight_string_bytes_offsets(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
string = force_bytes(text)
highlight_start = b'<span class="highlight">'
highlight_stop = b'</span>'
pos = 0
result = b''
for loc in locs:
(offset, length) = loc
result += string[pos:offset]
result += highlight_start
result += string[offset:offset + length]
result += highlight_stop
pos = offset + length
result += string[pos:]
return force_text(result)
def highlight_string(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
if settings.USING_PGROONGA:
return highlight_string_bytes_offsets(text, locs)
else:
return highlight_string_text_offsets(text, locs)
def get_search_fields(rendered_content, subject, content_matches, subject_matches):
# type: (text_type, text_type, Iterable[Tuple[int, int]], Iterable[Tuple[int, int]]) -> Dict[str, text_type]
return dict(match_content=highlight_string(rendered_content, content_matches),
match_subject=highlight_string(escape_html(subject), subject_matches))
def narrow_parameter(json):
# type: (str) -> List[Dict[str, Any]]
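    # Accepts either the current list-of-dicts form, e.g.
    #   '[{"operator": "stream", "operand": "design"}]'
    # or the legacy list-of-pairs form, e.g. '[["stream", "design"]]'
    # (operands here are illustrative; see convert_term below).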
# FIXME: A hack to support old mobile clients
if json == '{}':
return None
data = ujson.loads(json)
if not isinstance(data, list):
raise ValueError("argument is not a list")
def convert_term(elem):
# type: (Union[Dict, List]) -> Dict[str, Any]
# We have to support a legacy tuple format.
if isinstance(elem, list):
if (len(elem) != 2
or any(not isinstance(x, str) and not isinstance(x, six.text_type)
for x in elem)):
raise ValueError("element is not a string pair")
return dict(operator=elem[0], operand=elem[1])
if isinstance(elem, dict):
validator = check_dict([
('operator', check_string),
('operand', check_string),
])
error = validator('elem', elem)
if error:
raise JsonableError(error)
# whitelist the fields we care about for now
return dict(
operator=elem['operator'],
operand=elem['operand'],
negated=elem.get('negated', False),
)
raise ValueError("element is not a dictionary")
return list(map(convert_term, data))
def is_public_stream(stream_name, realm):
# type: (text_type, Realm) -> bool
"""
Determine whether a stream is public, so that
our caller can decide whether we can get
historical messages for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
"""
stream = get_stream(stream_name, realm)
if stream is None:
return False
return stream.is_public()
def ok_to_include_history(narrow, realm):
# type: (Iterable[Dict[str, Any]], Realm) -> bool
# There are occasions where we need to find Message rows that
# have no corresponding UserMessage row, because the user is
# reading a public stream that might include messages that
# were sent while the user was not subscribed, but which they are
# allowed to see. We have to be very careful about constructing
# queries in those situations, so this function should return True
# only if we are 100% sure that we're gonna add a clause to the
# query that narrows to a particular public stream on the user's realm.
# If we screw this up, then we can get into a nasty situation of
# polluting our narrow results with messages from other realms.
include_history = False
if narrow is not None:
for term in narrow:
if term['operator'] == "stream" and not term.get('negated', False):
if is_public_stream(term['operand'], realm):
include_history = True
# Disable historical messages if the user is narrowing on anything
# that's a property on the UserMessage table. There cannot be
# historical messages in these cases anyway.
for term in narrow:
if term['operator'] == "is":
include_history = False
return include_history
def get_stream_name_from_narrow(narrow):
# type: (Iterable[Dict[str, Any]]) -> Optional[text_type]
for term in narrow:
if term['operator'] == 'stream':
return term['operand'].lower()
return None
def exclude_muting_conditions(user_profile, narrow):
# type: (UserProfile, Iterable[Dict[str, Any]]) -> List[Selectable]
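    # Builds SQLAlchemy conditions that filter out streams the user has muted
    # (only when the narrow does not already pin a stream) and the user's
    # muted topics.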
conditions = []
stream_name = get_stream_name_from_narrow(narrow)
if stream_name is None:
rows = Subscription.objects.filter(
user_profile=user_profile,
active=True,
in_home_view=False,
recipient__type=Recipient.STREAM
).values('recipient_id')
muted_recipient_ids = [row['recipient_id'] for row in rows]
condition = not_(column("recipient_id").in_(muted_recipient_ids))
conditions.append(condition)
muted_topics = ujson.loads(user_profile.muted_topics)
if muted_topics:
if stream_name is not None:
muted_topics = [m for m in muted_topics if m[0].lower() == stream_name]
if not muted_topics:
return conditions
muted_streams = bulk_get_streams(user_profile.realm,
[muted[0] for muted in muted_topics])
muted_recipients = bulk_get_recipients(Recipient.STREAM,
[stream.id for stream in six.itervalues(muted_streams)])
recipient_map = dict((s.name.lower(), muted_recipients[s.id].id)
for s in six.itervalues(muted_streams))
muted_topics = [m for m in muted_topics if m[0].lower() in recipient_map]
if muted_topics:
def mute_cond(muted):
# type: (Tuple[str, str]) -> Selectable
stream_cond = column("recipient_id") == recipient_map[muted[0].lower()]
topic_cond = func.upper(column("subject")) == func.upper(muted[1])
return and_(stream_cond, topic_cond)
condition = not_(or_(*list(map(mute_cond, muted_topics))))
return conditions + [condition]
return conditions
@has_request_variables
def get_old_messages_backend(request, user_profile,
anchor = REQ(converter=int),
num_before = REQ(converter=to_non_negative_int),
num_after = REQ(converter=to_non_negative_int),
narrow = REQ('narrow', converter=narrow_parameter, default=None),
use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
apply_markdown=REQ(default=True,
converter=ujson.loads)):
# type: (HttpRequest, UserProfile, int, int, int, Optional[List[Dict[str, Any]]], bool, bool) -> HttpResponse
include_history = ok_to_include_history(narrow, user_profile.realm)
if include_history and not use_first_unread_anchor:
query = select([column("id").label("message_id")], None, "zerver_message")
inner_msg_id_col = literal_column("zerver_message.id")
elif narrow is None:
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
"zerver_usermessage")
inner_msg_id_col = column("message_id")
else:
# TODO: Don't do this join if we're not doing a search
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
join("zerver_usermessage", "zerver_message",
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
inner_msg_id_col = column("message_id")
num_extra_messages = 1
is_search = False
if narrow is not None:
# Add some metadata to our logging data for narrows
verbose_operators = []
for term in narrow:
if term['operator'] == "is":
verbose_operators.append("is:" + term['operand'])
else:
verbose_operators.append(term['operator'])
request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)
# Build the query for the narrow
num_extra_messages = 0
builder = NarrowBuilder(user_profile, inner_msg_id_col)
for term in narrow:
if term['operator'] == 'search' and not is_search:
query = query.column("subject").column("rendered_content")
is_search = True
query = builder.add_term(query, term)
# We add 1 to the number of messages requested if no narrow was
# specified to ensure that the resulting list always contains the
# anchor message. If a narrow was specified, the anchor message
# might not match the narrow anyway.
if num_after != 0:
num_after += num_extra_messages
else:
num_before += num_extra_messages
sa_conn = get_sqlalchemy_connection()
if use_first_unread_anchor:
condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0
# We exclude messages on muted topics when finding the first unread
# message in this narrow
muting_conditions = exclude_muting_conditions(user_profile, narrow)
if muting_conditions:
condition = and_(condition, *muting_conditions)
first_unread_query = query.where(condition)
first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
if len(first_unread_result) > 0:
anchor = first_unread_result[0][0]
else:
anchor = 10000000000000000
before_query = None
after_query = None
if num_before != 0:
before_anchor = anchor
if num_after != 0:
# Don't include the anchor in both the before query and the after query
before_anchor = anchor - 1
before_query = query.where(inner_msg_id_col <= before_anchor) \
.order_by(inner_msg_id_col.desc()).limit(num_before)
if num_after != 0:
after_query = query.where(inner_msg_id_col >= anchor) \
.order_by(inner_msg_id_col.asc()).limit(num_after)
if num_before == 0 and num_after == 0:
# This can happen when a narrow is specified.
after_query = query.where(inner_msg_id_col == anchor)
if before_query is not None:
if after_query is not None:
query = union_all(before_query.self_group(), after_query.self_group())
else:
query = before_query
else:
query = after_query
main_query = alias(query)
query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
# This is a hack to tag the query we use for testing
query = query.prefix_with("/* get_old_messages */")
query_result = list(sa_conn.execute(query).fetchall())
# The following is a little messy, but ensures that the code paths
# are similar regardless of the value of include_history. The
# 'user_messages' dictionary maps each message to the user's
# UserMessage object for that message, which we will attach to the
# rendered message dict before returning it. We attempt to
# bulk-fetch rendered message dicts from remote cache using the
# 'messages' list.
search_fields = dict() # type: Dict[int, Dict[str, text_type]]
message_ids = [] # type: List[int]
user_message_flags = {} # type: Dict[int, List[str]]
if include_history:
message_ids = [row[0] for row in query_result]
# TODO: This could be done with an outer join instead of two queries
user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
UserMessage.objects.filter(user_profile=user_profile,
message__id__in=message_ids))
for row in query_result:
message_id = row[0]
if user_message_flags.get(message_id) is None:
user_message_flags[message_id] = ["read", "historical"]
if is_search:
(_, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
else:
for row in query_result:
message_id = row[0]
flags = row[1]
user_message_flags[message_id] = parse_usermessage_flags(flags)
message_ids.append(message_id)
if is_search:
(_, _, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
cache_transformer = lambda row: Message.build_dict_from_raw_db_row(row, apply_markdown)
id_fetcher = lambda row: row['id']
message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
Message.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict)
message_list = []
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update({"flags": user_message_flags[message_id]})
msg_dict.update(search_fields.get(message_id, {}))
message_list.append(msg_dict)
statsd.incr('loaded_old_messages', len(message_list))
ret = {'messages': message_list,
"result": "success",
"msg": ""}
return json_success(ret)
@has_request_variables
def update_message_flags(request, user_profile,
messages=REQ(validator=check_list(check_int)),
operation=REQ('op'), flag=REQ(),
all=REQ(validator=check_bool, default=False),
stream_name=REQ(default=None),
topic_name=REQ(default=None)):
# type: (HttpRequest, UserProfile, List[int], text_type, text_type, bool, Optional[text_type], Optional[text_type]) -> HttpResponse
if all:
target_count_str = "all"
else:
target_count_str = str(len(messages))
log_data_str = "[%s %s/%s]" % (operation, flag, target_count_str)
request._log_data["extra"] = log_data_str
stream = None
if stream_name is not None:
stream = get_stream(stream_name, user_profile.realm)
if not stream:
raise JsonableError(_('No such stream \'%s\'') % (stream_name,))
if topic_name:
topic_exists = UserMessage.objects.filter(user_profile=user_profile,
message__recipient__type_id=stream.id,
message__recipient__type=Recipient.STREAM,
message__subject__iexact=topic_name).exists()
if not topic_exists:
raise JsonableError(_('No such topic \'%s\'') % (topic_name,))
count = do_update_message_flags(user_profile, operation, flag, messages,
all, stream, topic_name)
# If we succeed, update log data str with the actual count for how
# many messages were updated.
if count != len(messages):
log_data_str = "[%s %s/%s] actually %s" % (operation, flag, target_count_str, count)
request._log_data["extra"] = log_data_str
return json_success({'result': 'success',
'messages': messages,
'msg': ''})
def create_mirrored_message_users(request, user_profile, recipients):
# type: (HttpResponse, UserProfile, Iterable[text_type]) -> Tuple[bool, UserProfile]
if "sender" not in request.POST:
return (False, None)
sender_email = request.POST["sender"].strip().lower()
referenced_users = set([sender_email])
if request.POST['type'] == 'private':
for email in recipients:
referenced_users.add(email.lower())
if request.client.name == "zephyr_mirror":
user_check = same_realm_zephyr_user
fullname_function = compute_mit_user_fullname
elif request.client.name == "irc_mirror":
user_check = same_realm_irc_user
fullname_function = compute_irc_user_fullname
elif request.client.name in ("jabber_mirror", "JabberMirror"):
user_check = same_realm_jabber_user
fullname_function = compute_jabber_user_fullname
else:
# Unrecognized mirroring client
return (False, None)
for email in referenced_users:
# Check that all referenced users are in our realm:
if not user_check(user_profile, email):
return (False, None)
# Create users for the referenced users, if needed.
for email in referenced_users:
create_mirror_user_if_needed(user_profile.realm, email, fullname_function)
sender = get_user_profile_by_email(sender_email)
return (True, sender)
def same_realm_zephyr_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
#
# Are the sender and recipient both addresses in the same Zephyr
# mirroring realm? We have to handle this specially, inferring
# the domain from the e-mail address, because the recipient may
    # not exist in Zulip and we may need to make a stub Zephyr
# mirroring user on the fly.
try:
validators.validate_email(email)
except ValidationError:
return False
domain = resolve_email_to_domain(email)
return user_profile.realm.domain == domain and user_profile.realm.is_zephyr_mirror_realm
def same_realm_irc_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
# Check whether the target email address is an IRC user in the
# same realm as user_profile, i.e. if the domain were example.com,
# the IRC user would need to be username@irc.example.com
try:
validators.validate_email(email)
except ValidationError:
return False
domain = resolve_email_to_domain(email)
return user_profile.realm.domain == domain.replace("irc.", "")
def same_realm_jabber_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
try:
validators.validate_email(email)
except ValidationError:
return False
domain = resolve_email_to_domain(email)
# The ist.mit.edu realm uses mit.edu email addresses so that their accounts
# can receive mail.
if user_profile.realm.domain == 'ist.mit.edu' and domain == 'mit.edu':
return True
return user_profile.realm.domain == domain
@authenticated_api_view(is_webhook=False)
def api_send_message(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return send_message_backend(request, user_profile)
# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service. Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request, user_profile,
message_type_name = REQ('type'),
message_to = REQ('to', converter=extract_recipients, default=[]),
forged = REQ(default=False),
subject_name = REQ('subject', lambda x: x.strip(), None),
message_content = REQ('content'),
domain = REQ('domain', default=None),
local_id = REQ(default=None),
queue_id = REQ(default=None)):
# type: (HttpRequest, UserProfile, text_type, List[text_type], bool, Optional[text_type], text_type, Optional[text_type], Optional[text_type], Optional[text_type]) -> HttpResponse
client = request.client
is_super_user = request.user.is_api_super_user
if forged and not is_super_user:
return json_error(_("User not authorized for this query"))
realm = None
if domain and domain != user_profile.realm.domain:
if not is_super_user:
# The email gateway bot needs to be able to send messages in
# any realm.
return json_error(_("User not authorized for this query"))
realm = get_realm(domain)
if not realm:
return json_error(_("Unknown domain %s") % (domain,))
if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
# Here's how security works for mirroring:
#
# For private messages, the message must be (1) both sent and
# received exclusively by users in your realm, and (2)
# received by the forwarding user.
#
# For stream messages, the message must be (1) being forwarded
# by an API superuser for your realm and (2) being sent to a
# mirrored stream (any stream for the Zephyr and Jabber
# mirrors, but only streams with names starting with a "#" for
# IRC mirrors)
#
# The security checks are split between the below code
# (especially create_mirrored_message_users which checks the
# same-realm constraint) and recipient_for_emails (which
# checks that PMs are received by the forwarding user)
if "sender" not in request.POST:
return json_error(_("Missing sender"))
if message_type_name != "private" and not is_super_user:
return json_error(_("User not authorized for this query"))
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user_profile, message_to)
if not valid_input:
return json_error(_("Invalid mirrored message"))
if client.name == "zephyr_mirror" and not user_profile.realm.is_zephyr_mirror_realm:
return json_error(_("Invalid mirrored realm"))
if (client.name == "irc_mirror" and message_type_name != "private" and
not message_to[0].startswith("#")):
return json_error(_("IRC stream names must start with #"))
sender = mirror_sender
else:
sender = user_profile
ret = check_send_message(sender, client, message_type_name, message_to,
subject_name, message_content, forged=forged,
forged_timestamp = request.POST.get('time'),
forwarder_user_profile=user_profile, realm=realm,
local_id=local_id, sender_queue_id=queue_id)
return json_success({"id": ret})
@authenticated_json_post_view
def json_update_message(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return update_message_backend(request, user_profile)
@has_request_variables
def update_message_backend(request, user_profile,
message_id=REQ(converter=to_non_negative_int),
subject=REQ(default=None),
propagate_mode=REQ(default="change_one"),
content=REQ(default=None)):
# type: (HttpRequest, UserProfile, int, Optional[text_type], Optional[str], Optional[text_type]) -> HttpResponse
if not user_profile.realm.allow_message_editing:
return json_error(_("Your organization has turned off message editing."))
try:
message = Message.objects.select_related().get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Unknown message id"))
# You only have permission to edit a message if:
# 1. You sent it, OR:
# 2. This is a topic-only edit for a (no topic) message, OR:
# 3. This is a topic-only edit and you are an admin.
if message.sender == user_profile:
pass
elif (content is None) and ((message.topic_name() == "(no topic)") or
user_profile.is_realm_admin):
pass
else:
raise JsonableError(_("You don't have permission to edit this message"))
# If there is a change to the content, check that it hasn't been too long
# Allow an extra 20 seconds since we potentially allow editing 15 seconds
# past the limit, and in case there are network issues, etc. The 15 comes
# from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
# you change this value also change those two parameters in message_edit.js.
edit_limit_buffer = 20
if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
if (now() - message.pub_date) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has past"))
if subject is None and content is None:
return json_error(_("Nothing to change"))
if subject is not None:
subject = subject.strip()
if subject == "":
raise JsonableError(_("Topic can't be empty"))
rendered_content = None
if content is not None:
content = content.strip()
if content == "":
raise JsonableError(_("Content can't be empty"))
content = truncate_body(content)
rendered_content = message.render_markdown(content)
if not rendered_content:
raise JsonableError(_("We were unable to render your updated message"))
do_update_message(user_profile, message, subject, propagate_mode, content, rendered_content)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_fetch_raw_message(request, user_profile,
message_id=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
try:
message = Message.objects.get(id=message_id)
except Message.DoesNotExist:
return json_error(_("No such message"))
if message.sender != user_profile:
return json_error(_("Message was not sent by you"))
return json_success({"raw_content": message.content})
@has_request_variables
def render_message_backend(request, user_profile, content=REQ()):
# type: (HttpRequest, UserProfile, text_type) -> HttpResponse
rendered_content = bugdown.convert(content, user_profile.realm.domain)
return json_success({"rendered": rendered_content})
@authenticated_json_post_view
def json_messages_in_narrow(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return messages_in_narrow_backend(request, user_profile)
@has_request_variables
def messages_in_narrow_backend(request, user_profile,
msg_ids = REQ(validator=check_list(check_int)),
narrow = REQ(converter=narrow_parameter)):
# type: (HttpRequest, UserProfile, List[int], List[Dict[str, Any]]) -> HttpResponse
# Note that this function will only work on messages the user
# actually received
# TODO: We assume that the narrow is a search. For now this works because
# the browser only ever calls this function for searches, since it can't
# apply that narrow operator itself.
query = select([column("message_id"), column("subject"), column("rendered_content")],
and_(column("user_profile_id") == literal(user_profile.id),
column("message_id").in_(msg_ids)),
join("zerver_usermessage", "zerver_message",
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
builder = NarrowBuilder(user_profile, column("message_id"))
for term in narrow:
query = builder.add_term(query, term)
sa_conn = get_sqlalchemy_connection()
query_result = list(sa_conn.execute(query).fetchall())
search_fields = dict()
for row in query_result:
(message_id, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
return json_success({"messages": search_fields})
|
apache-2.0
| -1,849,675,274,020,769,500 | 43.685393 | 183 | 0.605207 | false |
emirot/codefights
|
interviewPractice/isListPalindrome.py
|
1
|
1577
|
# Definition for singly-linked list:
# class ListNode(object):
# def __init__(self, x):
# self.value = x
# self.next = None
#
def isListPalindrome(l):
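    # Approach used below: count the nodes, reverse the first half of the
    # list in place, then walk the reversed half and the remaining second
    # half together, comparing values (the middle node is skipped when the
    # length is odd).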
tmp = l
list_len = 0
left = 0
right = 0
while tmp:
list_len += 1
tmp = tmp.next
tmp = l
if list_len == 0 or list_len == 1:
return True
head = l
i = 0
if list_len % 2 == 0:
current = head
prev = None
nn_next = None
new_list = None
while current and i < list_len//2:
current_next = current.next
nn_next = current.next
current.next = prev
prev = current
current = current_next
i += 1
head = prev
tmp = head
while tmp and nn_next:
if tmp.value != nn_next.value:
return False
tmp = tmp.next
nn_next = nn_next.next
return True
else:
current = head
prev = None
nn_next = None
new_list = None
while current and i < list_len//2:
current_next = current.next
nn_next = current.next
current.next = prev
prev = current
current = current_next
i += 1
head = prev
nn_next = nn_next.next
tmp = head
while tmp and nn_next:
if tmp.value != nn_next.value:
return False
tmp = tmp.next
nn_next = nn_next.next
return True
return False
|
apache-2.0
| -476,341,232,066,421,950 | 22.893939 | 42 | 0.464172 | false |
lmazuel/azure-sdk-for-python
|
azure-batch/azure/batch/models/pool_evaluate_auto_scale_options.py
|
1
|
1773
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolEvaluateAutoScaleOptions(Model):
"""Additional parameters for evaluate_auto_scale operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None):
super(PoolEvaluateAutoScaleOptions, self).__init__()
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
|
mit
| -4,791,265,605,864,592,000 | 44.461538 | 106 | 0.663847 | false |
Robobench/rapman-subuser
|
logic/subuserCommands/subuser.py
|
1
|
5001
|
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
import pathConfig
#external imports
import sys,optparse
#internal imports
import subuserlib.classes.user,subuserlib.commandLineArguments,subuserlib.subuser
def parseCliArgs(sysargs):
usage = "usage: subuser %prog [add|remove|create-shortcut] NAME IMAGESOURCE"
description = """
Add and remove subusers. Create shorcuts for launching subusers.
EXAMPLES:
Add a new subuser named foo based on the image foo@default.
$ subuser subuser add foo foo@default
Remove the subuser named foo.
$ subuser subuser remove foo
Create a launcher for the subuser named foo.
$ subuser subuser create-shortcut foo
Remove the launcher (if one exists) for the subuser named foo.
$ subuser subuser remove-shortcut foo
"""
parser=optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
return parser.parse_args(args=sysargs)
def subuser(sysargs):
"""
Manage subusers
Tests
-----
**Setup:**
>>> import subuser #import self
>>> import subuserlib.classes.user
At the start of our tests, the test environment has one subuser named ``foo``.
>>> user = subuserlib.classes.user.User()
>>> set(user.getRegistry().getSubusers().keys()) == set([u'foo'])
True
We add another subuser named ``bar``.
>>> subuser.subuser(["add","bar","bar@file:///home/travis/remote-test-repo"])
Adding subuser bar bar@file:///home/travis/remote-test-repo
Adding new temporary repository file:///home/travis/remote-test-repo
Verifying subuser configuration.
Verifying registry consistency...
Unregistering any non-existant installed images.
Checking if images need to be updated or installed...
Installing bar ...
Installed new image for subuser bar
Running garbage collector on temporary repositories...
Now we have two subusers.
>>> user = subuserlib.classes.user.User()
>>> set(user.getRegistry().getSubusers().keys()) == set([u'foo', 'bar'])
True
We remove ``bar``.
>>> subuser.subuser(["remove","bar"])
Removing subuser bar
If you wish to remove the subusers image, issue the command $ subuser remove-old-images
Verifying subuser configuration.
Verifying registry consistency...
Unregistering any non-existant installed images.
Checking if images need to be updated or installed...
Running garbage collector on temporary repositories...
Now we only have one subuser.
>>> user = subuserlib.classes.user.User()
>>> set(user.getRegistry().getSubusers().keys()) == set([u'foo'])
True
  If we try adding a subuser which fails to install due to a bad ``SubuserImagefile``, an error is displayed, a cleanup process occurs, and nothing terribly bad happens.
This works for syntax errors.
>>> try:
... subuser.subuser(["add","broken-syntax","broken-syntax@file:///home/travis/remote-test-repo"])
... except SystemExit:
... pass
Adding subuser broken-syntax broken-syntax@file:///home/travis/remote-test-repo
Verifying subuser configuration.
Verifying registry consistency...
Unregistering any non-existant installed images.
Checking if images need to be updated or installed...
Error while building image: Error in SubuserImagefile one line 0
Subuser image does not exist: ""
Cleaning up.
>>> try:
... subuser.subuser(["add","broken-non-existant-dependency","broken-non-existant-dependency@file:///home/travis/remote-test-repo"])
... except SystemExit:
... pass
Adding subuser broken-non-existant-dependency broken-non-existant-dependency@file:///home/travis/remote-test-repo
Verifying subuser configuration.
Verifying registry consistency...
Unregistering any non-existant installed images.
Checking if images need to be updated or installed...
Error while building image: Error in SubuserImagefile one line 0
Subuser image does not exist: "non-existant-I-do-not-exist!!!!!"
Cleaning up.
"""
options,args = parseCliArgs(sysargs)
try:
action = args[0]
except IndexError:
parseCliArgs(["--help"])
user = subuserlib.classes.user.User()
if action == "add":
if not len(args) == 3:
sys.exit("Wrong number of arguments to add. See `subuser subuser -h`.")
name = args[1]
imageSourceId = args[2]
subuserlib.subuser.add(user,name,imageSourceId)
elif action == "remove":
name = args[1]
subuserlib.subuser.remove(user,name)
elif action == "create-shortcut":
name = args[1]
subuserlib.subuser.setExecutableShortcutInstalled(user,name,True)
elif action == "remove-shortcut":
name = args[1]
subuserlib.subuser.setExecutableShortcutInstalled(user,name,False)
else:
sys.exit("Action "+args[0]+" does not exist. Try:\n subuser subuser --help")
#################################################################################################
if __name__ == "__main__":
subuser(sys.argv[1:])
|
lgpl-3.0
| -6,174,975,571,733,158,000 | 32.790541 | 168 | 0.707259 | false |
jean/sentry
|
src/sentry/features/__init__.py
|
1
|
1610
|
from __future__ import absolute_import
from .base import * # NOQA
from .handler import * # NOQA
from .manager import * # NOQA
default_manager = FeatureManager() # NOQA
default_manager.add('auth:register')
default_manager.add('organizations:api-keys', OrganizationFeature) # NOQA
default_manager.add('organizations:create')
default_manager.add('organizations:sso', OrganizationFeature) # NOQA
default_manager.add('organizations:onboarding', OrganizationFeature) # NOQA
default_manager.add('organizations:callsigns', OrganizationFeature) # NOQA
default_manager.add('organizations:repos', OrganizationFeature) # NOQA
default_manager.add('organizations:release-commits', OrganizationFeature) # NOQA
default_manager.add('organizations:group-unmerge', OrganizationFeature) # NOQA
default_manager.add('organizations:bitbucket-repos', OrganizationFeature) # NOQA
default_manager.add('projects:global-events', ProjectFeature) # NOQA
default_manager.add('projects:plugins', ProjectPluginFeature) # NOQA
default_manager.add('projects:data-forwarding', ProjectFeature) # NOQA
default_manager.add('projects:rate-limits', ProjectFeature) # NOQA
default_manager.add('workflow:release-emails', ProjectFeature) # NOQA
default_manager.add('projects:sample-events', ProjectFeature) # NOQA
default_manager.add('projects:similarity-indexing', ProjectFeature) # NOQA
default_manager.add('projects:custom-filters', ProjectFeature) # NOQA
default_manager.add('projects:stream-hit-counts', ProjectFeature) # NOQA
# expose public api
add = default_manager.add
get = default_manager.get
has = default_manager.has
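# Illustrative usage of the public api above (the keys are the feature names
# registered in this module; exact call signature lives in .manager):
#   from sentry import features
#   features.has('organizations:sso', organization, actor=request.user)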
|
bsd-3-clause
| 6,863,698,881,766,799,000 | 50.935484 | 81 | 0.786957 | false |
saulshanabrook/pushgp.py
|
setup.py
|
1
|
1109
|
from setuptools import setup, find_packages
setup(
name='pushgp',
version='0.1.0',
description='Use genetic programming, implemented with Push, for machine learning.',
long_description=open('README.rst').read(),
author='Saul Shanabrook',
author_email='s.shanabrook@gmail.com',
url='https://github.com/saulshanabrook/pushgp.py',
packages=find_packages(exclude=['tests.*', 'tests']),
install_requires=[
'numpy',
'scipy',
#'Matplotlib',
#'jinja2',
#'pyzmq',
'scikit-learn==0.15-git',
],
dependency_links=[
        'https://github.com/scikit-learn/scikit-learn/tarball/master#egg=scikit-learn-0.15-git'
],
license="BSD",
zip_safe=False,
keywords='pushgp.py',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
)
|
bsd-3-clause
| -5,720,976,648,348,623,000 | 30.685714 | 90 | 0.597836 | false |
gtamazian/bioformats
|
bioformats/eggnog.py
|
1
|
4319
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
# Gaik Tamazian, 2019
# mail (at) gtamazian (dot) com
"""Routines for reading, writing and processing eggNOG output files."""
import sys
from . import gff3
from . import snpeff
from . import vcf
assert sys.version_info >= (3, 5), "Python 3.5 or higher required"
def load_annotations(ann_fname):
"""Load eggNOG annotations from a file.
    Each eggNOG record contains the following fields:
1) query
2) seed ortholog
3) e-value
4) score
5) predicted name
6) GO terms
7) KEGG GO
8) BiGG reactions
9) taxonomy scope
10) best OG
11) COG category (optional)
12) eggNOG HMM description (optional).
:param ann_fname: eggNOG annotation file name
:returns: the list of tuples; each tuple corresponds to an eggNOG record
"""
r = []
with open(ann_fname) as ann_file:
r = map(lambda x: (x[0], (int(x[1][0]), x[1][1])) + x[2:],
map(lambda x: (x[0], x[1].split('.'),
float(x[2]), float(x[3]),
x[4] if x[4] else None,
x[5].split(',') if x[5] else None,
x[6] if x[6] else None,
x[7] if x[7] else None,
x[8] if x[8] else None,
x[9].split(',') if x[9] else None,
tuple(map(
lambda x: None if x == "NA" else x,
x[10].split('|', 2))),
x[11] if len(x) > 11 else None,
x[12] if len(x) > 12 else None),
map(lambda x: x.rstrip().split('\t', 12),
filter(lambda s: not s.startswith('#'),
ann_file.readlines()))))
r = list(r)
return r
def iterate_annotated_genes(emapper_fname, gff3_fname):
"""
Parse records from a GFF3 file in a gene-wise manner and add to the
gene records eggNOG-mapper annotation.
The function iterates genes in the same way as gff3.iterate_genes
and adds gene name and description from the specified eggNOG-mapper
(emapper) file. A gene is assigned a name and a description if the
name and the description are the same for all transcripts of the
gene.
:param emapper_fname: a name of an eggNOG-mapper annotation file
:param gff3_fname: a name of a GFF3 file
:returns: an iterator of gene-related records
"""
ann = {k[0]: (k[4], k[-1])
for k in load_annotations(emapper_fname) if k[4]}
for g, tr in gff3.iterate_genes(gff3_fname):
tr_ann = {ann[gff3.get_protein_id(k[1])] for k in tr
if gff3.get_protein_id(k[1]) in ann}
if len(tr_ann) == 1:
tr_ann = tr_ann.pop()
if tr_ann[0]:
g[8]["gene"] = tr_ann[0]
if tr_ann[1]:
g[8]["Description"] = tr_ann[1]
yield (g, tr)
def iterate_annotated_snpeff_vcf_records(emapper_fname, vcf_fname):
"""
Add gene names to snpEff records from a VCF file.
:param emapper_fname: a name of an eggNOG-mapper annotation file
:param vcf_fname: a name of an snpEff-annotated VCF file
:returns: an iterator of VCF file records with eggNOG gene names
added to their snpEff annotation records
"""
gene_names = {k[0]: k[4]
for k in load_annotations(emapper_fname) if k[4]}
with open(vcf_fname) as vcf_file:
for line in vcf_file:
if line.startswith('#'):
continue
rec = vcf.parse_line(line)
if rec[7]:
info = dict(rec[7])
if "ANN" in info:
eff = snpeff.parse_ann(info["ANN"])
for j, k in enumerate(eff):
if k[6] in gene_names:
eff[j] = k[:3] + (gene_names[k[6]], ) + \
k[4:]
info["ANN"] = snpeff.print_ann(eff)
yield rec[:7] + [[(k, v) for k, v in info.items()]] + \
rec[8:]
else:
yield rec
|
mit
| 1,900,790,480,074,695,400 | 35.294118 | 76 | 0.500347 | false |
stormi/tsunami
|
src/secondaires/exportaide/config.py
|
1
|
2088
|
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la configuration par défaut du module 'exportaide'."""
CFG_FORMAT = r"""
# This file contains the global configuration of the help export.
## Format
# Use one of the available formats by simply giving its name.
# If you do not wish to use a format, leave the format name set to
# None. If you do use a format, you will probably need to configure it
# in more detail in one of the subdirectories of the 'exportaide'
# module configuration.
nom_format = None
"""
|
bsd-3-clause
| 5,959,338,843,308,739,000 | 46.272727 | 79 | 0.775962 | false |
amdouglas/OpenPNM
|
test/integration/test_source1.py
|
1
|
4317
|
import OpenPNM
import scipy as sp
print('-----> Using OpenPNM version: '+OpenPNM.__version__)
pn = OpenPNM.Network.Cubic(shape=[10,10,40],spacing=0.0001)
pn.add_boundaries()
Ps = pn.pores('boundary',mode='not')
Ts = pn.find_neighbor_throats(pores=Ps,mode='intersection',flatten=True)
geom = OpenPNM.Geometry.Toray090(network=pn,pores=Ps,throats=Ts)
Ps = pn.pores('boundary')
Ts = pn.find_neighbor_throats(pores=Ps,mode='not_intersection')
boun = OpenPNM.Geometry.Boundary(network=pn,pores=Ps,throats=Ts)
air = OpenPNM.Phases.Air(network=pn,name='air')
#---------------------------------------------------------------------------------------------
phys_air = OpenPNM.Physics.Standard(network=pn,phase=air,pores=sp.r_[600:pn.Np],throats=pn.Ts)
#Add some source terms to phys_air1
phys_air['pore.item1'] = 0.5e-13
phys_air['pore.item2'] = 1.5
phys_air['pore.item3'] = 2.5e-14
phys_air.add_model(model=OpenPNM.Physics.models.generic_source_term.power_law,
propname='pore.blah1',
A1='pore.item1',
A2='pore.item2',
A3='pore.item3',
x='mole_fraction',
regen_mode='deferred')
#phys_air.models['pore.blah1']['regen_mode'] = 'normal'
phys_air['pore.item4'] = 0.3e-11
phys_air['pore.item5'] = 0.5
phys_air['pore.item6'] = 2
phys_air['pore.item7'] = -0.34
phys_air['pore.item8'] = 2e-14
phys_air.add_model(model=OpenPNM.Physics.models.generic_source_term.natural_exponential,
propname='pore.blah2',
A1='item4',
A2='item5',
A3='pore.item6',
A4='pore.item7',
A5='pore.item8',
x='mole_fraction',
regen_mode='deferred')
#-----------------------------------------------------------------------------------------------
phys_air2 = OpenPNM.Physics.Standard(network=pn,phase=air,pores=sp.r_[0:600])
#Add some source terms to phys_air2
phys_air2['pore.item9'] = 1.5e-13
phys_air2['pore.item10'] = 1.7
phys_air2['pore.item11'] = 1.5e-14
phys_air2.add_model(model=OpenPNM.Physics.models.generic_source_term.power_law,
propname='pore.blah1',
A1='item9',
A2='item10',
A3='item11',
x='mole_fraction',
regen_mode='deferred')
#-----------------------------------------------------------------------------------------------
alg = OpenPNM.Algorithms.FickianDiffusion(network=pn,phase=air)
BC1_pores = pn.pores('right_boundary')
alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.6, pores=BC1_pores)
BC2_pores = pn.pores('left_boundary')
alg.set_boundary_conditions(bctype='Neumann_group', bcvalue=0.2*1e-11, pores=BC2_pores)
#-----------------------------------------------------------------------------------------------
alg.set_source_term(source_name='pore.blah1',pores=sp.r_[500:700])
alg.set_source_term(source_name='pore.blah2',pores=sp.r_[800:900])
alg.setup()
alg.solve()
alg.return_results()
#-----------------------------------------------------------------------------------------------
# This part is not necessary for validation, just for returning the rate values back to the physics
phys_air.regenerate()
phys_air2.regenerate()
#-----------------------------------------------------------------------------------------------
print('--------------------------------------------------------------')
print('steps: ',alg._steps)
print('tol_reached: ',alg._tol_reached)
print('--------------------------------------------------------------')
print('reaction from the physics for pores [500:700]:',\
sp.sum(1.5e-13*air['pore.mole_fraction'][sp.r_[500:600]]**1.7+1.5e-14)\
+sp.sum(0.5e-13*air['pore.mole_fraction'][sp.r_[600:700]]**1.5+2.5e-14))
print('rate from the algorithm for pores [500:700]:',alg.rate(sp.r_[500:700])[0])
print('--------------------------------------------------------------')
print('reaction from the physics for pores [800:900]:',sp.sum(0.3e-11*sp.exp(0.5*air['pore.mole_fraction'][sp.r_[800:900]]**2-0.34)+2e-14))
print('rate from the algorithm for pores [800:900]:',alg.rate(sp.r_[800:900])[0])
|
mit
| 9,075,524,792,746,184,000 | 48.62069 | 139 | 0.506139 | false |
luwei0917/awsemmd_script
|
script/BuildAllAtomsFromLammps_seq.py
|
1
|
12949
|
#!/usr/bin/python
# ----------------------------------------------------------------------
# Copyright (2010) Aram Davtyan and Garegin Papoian
# Papoian's Group, University of Maryland at Collage Park
# http://papoian.chem.umd.edu/
# Last Update: 03/04/2011
# ----------------------------------------------------------------------
import sys
#from Bio.PDB.PDBParser import PDBParser
atom_type = {'1' : 'C', '2' : 'N', '3' : 'O', '4' : 'C', '5' : 'H', '6' : 'C'}
atom_desc = {'1' : 'C-Alpha', '2' : 'N', '3' : 'O', '4' : 'C-Beta', '5' : 'H-Beta', '6' : 'C-Prime'}
PDB_type = {'1' : 'CA', '2' : 'N', '3' : 'O', '4' : 'CB', '5' : 'HB', '6' : 'C' }
def one2three(one_letter_code):
""" translate a protein sequence from 3 to 1 letter code"""
code = {
"R": "ARG", "K": "LYS", "N": "ASN", "Q": "GLN", "E": "GLU",
"D": "ASP", "H": "HIS", "Y": "TYR", "W": "TRP", "S":"SER",
"T":"THR", "G":"GLY", "P":"PRO", "A":"ALA", "M":"MET",
"C":"CYS", "F":"PHE", "L":"LEU", "V":"VAL", "I":"ILE" }
index = code[one_letter_code]
return index
class PDB_Atom:
no = 0
ty = ''
res = 'UNK'
res_no = 0
x = 0.0
y = 0.0
z = 0.0
atm = 'C'
def __init__(self, no, ty, res, res_no, x, y, z, atm):
self.no = no
self.ty = ty
self.res = res
self.res_no = res_no
self.x = x
self.y = y
self.z = z
self.atm = atm
def write_(self, f):
f.write('ATOM')
f.write((' '+str(self.no))[-7:])
f.write(' ')
f.write((self.ty+' ')[:4])
f.write(self.res)
f.write(' ')
f.write('T')
f.write((' '+str(self.res_no))[-4:])
f.write((' '+str(round(self.x,3)))[-12:])
f.write((' '+str(round(self.y,3)))[-8:])
f.write((' '+str(round(self.z,3)))[-8:])
f.write(' 1.00')
f.write(' 0.00')
f.write((' '+self.atm)[-12:]+' ')
f.write('\n')
class Atom:
No = 0
ty = ''
x = 0.0
y = 0.0
z = 0.0
desc = ''
def __init__(self, No, ty, No_m, x, y, z, desc=''):
self.No = No
self.ty = ty
self.No_m = No_m
self.x = x
self.y = y
self.z = z
self.desc = desc
def write_(self, f):
f.write(str(self.No))
f.write(' ')
f.write(PDB_type[self.No_m])
f.write(' ')
f.write(str(round(self.x,8)))
f.write(' ')
f.write(str(round(self.y,8)))
f.write(' ')
f.write(str(round(self.z,8)))
f.write(' ')
f.write(self.desc)
f.write('\n')
#if len(sys.argv)!=3 and len(sys.argv)!=4:
if len(sys.argv)!=4 and len(sys.argv)!=5 and len(sys.argv)!=6:
print "\n" + sys.argv[0] + " lammps_Input pdb_Output pdbID.seq [snapshot] [chnlen_1,chnlen_2,...]\n"
sys.exit()
lammps_file = sys.argv[1]
output_file = ""
if len(sys.argv)>2: output_file = sys.argv[2]
seq_file = sys.argv[3]
fh = open(seq_file, 'r')
total_seq=''
for line in fh.readlines():
seq = line.strip()
total_seq=total_seq+seq
fh.close()
print total_seq, len(total_seq)
psf_file = output_file
if output_file[-4:]!=".pdb": output_file = output_file + ".pdb"
if psf_file[-4:]==".pdb": psf_file = psf_file[:-3] + "psf"
if psf_file[-4:]!=".psf": psf_file = psf_file + ".psf"
snapshot = -1
Chain_sizes = []
#if len(sys.argv)>4: snapshot = int(sys.argv[4])
if len(sys.argv)>4:
if sys.argv[4].find(',') == -1 :
snapshot = int(sys.argv[4])
if len(sys.argv) > 5 : #parse
Chain_sizes = sys.argv[5].split(',')
else : #parse
Chain_sizes = sys.argv[4].split(',')
print Chain_sizes
Cterminal_Cp_indices = []
Total_Chain_size = 0
if len(Chain_sizes) != 0:
for Chain_size in Chain_sizes:
Total_Chain_size += int(Chain_size)
Cterminal_Cp_indices.append((int(Total_Chain_size)-1)*5+2)
an = 0.4831806
bn = 0.7032820
cn = -0.1864262
ap = 0.4436538
bp = 0.2352006
cp = 0.3211455
n_atoms = 0
i_atom = 0
item = ''
step = 0
atoms = []
atoms2 = []
atoms3 = []
bonds = []
box = []
A = []
out = open(output_file, 'w')
def convertToPDB():
ires = 1
for ia in atoms2:
if ia.desc == 'N': ires = ires + 1
resname = one2three(total_seq[ires-1])
atom = PDB_Atom(ia.No, PDB_type[ia.No_m], resname, ires, ia.x, ia.y, ia.z, ia.ty)
atoms3.append(atom)
def buildAllAtoms():
index = 0
last_Ca_index = -1
last_O_index = -1
Cp_index = -1
NullVal = Atom(0, '', '6', 0.0, 0.0, 0.0, '')
# atoms2 = []
for i in range(0, len(atoms)):
ia = atoms[i]
index = index + 1
if ia.desc == 'O':
last_O_index = i
if ia.desc == 'C-Alpha':
if last_Ca_index != -1:
Cai = atoms[last_Ca_index]
Cai1 = ia
Oi = atoms[last_O_index]
nx = an*Cai.x + bn*Cai1.x + cn*Oi.x
ny = an*Cai.y + bn*Cai1.y + cn*Oi.y
nz = an*Cai.z + bn*Cai1.z + cn*Oi.z
px = ap*Cai.x + bp*Cai1.x + cp*Oi.x
py = ap*Cai.y + bp*Cai1.y + cp*Oi.y
pz = ap*Cai.z + bp*Cai1.z + cp*Oi.z
N = Atom(index, 'N', '2', nx, ny, nz, 'N')
index = index + 1
Cp = Atom(int(Cai.No) + 1, 'C', '6', px, py, pz, 'C-Prime')
# Cp = Atom(index, 'C', '6', px, py, pz, 'C-Prime')
# index = index + 1
atoms2.append(N)
atoms2.pop(Cp_index)
atoms2.insert(Cp_index, Cp)
# atoms2.append(Cp)
last_Ca_index = i
ia.No = index
atoms2.append(ia)
if ia.desc == 'C-Alpha':
atoms2.append(NullVal)
Cp_index = index
index = index + 1
if atoms2[Cp_index].No==0: atoms2.pop(Cp_index)
for i in range(Cp_index, len(atoms2)):
atoms2[i].No = atoms2[i].No - 1
def buildBonds():
N_index = -1
Ca_index = -1
Cp_index = -1
O_index = -1
Cb_index = -1
Hb_index = -1
for i in range(0, len(atoms2)):
ia = atoms2[i]
if ia.desc == 'N':
if N_index!=-1 and Ca_index!=-1:
bonds.append([N_index, Ca_index])
if Ca_index!=-1 and Cp_index!=-1:
bonds.append([Ca_index, Cp_index])
if Cp_index!=-1 and O_index!=-1:
bonds.append([Cp_index, O_index])
if Ca_index!=-1 and Cb_index!=-1:
bonds.append([Ca_index, Cb_index])
if Ca_index!=-1 and Hb_index!=-1:
bonds.append([Ca_index, Hb_index])
N_index = i+1
if Cp_index!=-1 and Cp_index not in Cterminal_Cp_indices :
bonds.append([Cp_index, N_index])
Ca_index = -1
Cp_index = -1
O_index = -1
Cb_index = -1
Hb_index = -1
if ia.desc == 'C-Alpha': Ca_index = i+1
if ia.desc == 'C-Beta': Cb_index = i+1
if ia.desc == 'H-Beta': Hb_index = i+1
if ia.desc == 'C-Prime': Cp_index = i+1
if ia.desc == 'O': O_index = i+1
if N_index!=-1 and Ca_index!=-1:
bonds.append([N_index, Ca_index])
if Ca_index!=-1 and Cb_index!=-1:
bonds.append([Ca_index, Cb_index])
if Ca_index!=-1 and Hb_index!=-1:
bonds.append([Ca_index, Hb_index])
def print_atom_array():
out.write("ITEM: TIMESTEP\n")
out.write(str(step))
out.write("\n")
out.write("ITEM: NUMBER OF ATOMS\n")
out.write(str(n_atoms))
out.write("\n")
out.write("ITEM: BOX BOUNDS\n")
for ib in box:
out.write(ib)
out.write("\n")
out.write("ITEM: ATOMS\n")
for ia in atoms2:
ia.write_(out)
def print_pdb():
for ia in atoms3:
ia.write_(out)
out.write("END\n");
def print_psf():
space8 = " "
psfout = open(psf_file,'w')
psfout.write("PDF\n\n\t2 !NTITLE\n\n")
psfout.write((space8+str(len(atoms3)))[-8:]+" !NATOM\n")
for ia in atoms2:
psfout.write((space8+str(ia.No))[-8:]+" PROT 1")
psfout.write(" R00")
psfout.write(" "+ia.ty)
psfout.write(" 1")
psfout.write(" 0 1 0\n")
psfout.write("\n")
psfout.write((space8+str(len(bonds)))[-8:]+" !NBOND")
for i in range(0, len(bonds)):
ib = bonds[i]
if i%4==0: psfout.write("\n")
psfout.write((space8+str(ib[0]))[-8:])
psfout.write((space8+str(ib[1]))[-8:])
psfout.close()
nFrame = 0
found = False
lfile = open(lammps_file)
if snapshot<0:
for l in lfile:
l = l.strip()
if l[:5]=="ITEM:":
item = l[6:]
else:
if item == "TIMESTEP":
if len(atoms)>0:
buildAllAtoms()
convertToPDB()
n_atoms = len(atoms2)
print_pdb()
step = int(l)
atoms = []
atoms2 = []
atoms3 = []
box = []
A = []
nFrame = nFrame + 1
elif item == "NUMBER OF ATOMS":
n_atoms = int(l)
xyz_count = 0
elif item[:10] == "BOX BOUNDS":
if xyz_count <= 1:
xyz_count += 1
box.append(l)
l = l.split()
# A.append([float(l[0]), float(l[1])])
l_left = (float(l[0]) - float(l[1]))/2.0
l_right = (float(l[1]) - float(l[0]))/2.0
# l_left = 0.0
# l_right = float(l[1]) - float(l[0])
A.append([l_left, l_right])
# A.append([float(l[0]), float(l[1])])
# print l_right - l_left
else:
xyz_count = 0
box.append(l)
l = l.split()
A.append([float(l[0]), float(l[1])])
# l_left = (float(l[0]) - float(l[1]))/2.0
# l_right = (float(l[1]) - float(l[0]))/2.0
# A.append([l_left, l_right])
# print l_right - l_left
elif item[:5] == "ATOMS":
l = l.split()
i_atom = l[0]
x = float(l[2])
y = float(l[3])
z = float(l[4])
x = (A[0][1] - A[0][0])*x + A[0][0]
y = (A[1][1] - A[1][0])*y + A[1][0]
z = (A[2][1] - A[2][0])*z + A[2][0]
desc = atom_desc[l[1]]
atom = Atom(i_atom, atom_type[l[1]], l[1], x, y, z, desc)
atoms.append(atom)
if len(atoms)>0:
buildAllAtoms()
convertToPDB()
n_atoms = len(atoms2)
print_pdb()
buildBonds()
print_psf()
else:
for l in lfile:
l = l.strip()
if l[:5]=="ITEM:":
item = l[6:]
if item == "TIMESTEP":
if found: break
elif nFrame==snapshot: found = True
nFrame = nFrame + 1
elif found:
if item == "TIMESTEP":
step = int(l)
elif item == "NUMBER OF ATOMS":
n_atoms = int(l)
xyz_count = 0
elif item[:10] == "BOX BOUNDS":
if xyz_count <= 1:
xyz_count += 1
box.append(l)
l = l.split()
# A.append([float(l[0]), float(l[1])])
l_left = (float(l[0]) - float(l[1]))/2.0
l_right = (float(l[1]) - float(l[0]))/2.0
A.append([l_left, l_right])
# print l_right - l_left
else:
xyz_count = 0
box.append(l)
l = l.split()
A.append([float(l[0]), float(l[1])])
# l_left = (float(l[0]) - float(l[1]))/2.0
# l_right = (float(l[1]) - float(l[0]))/2.0
# A.append([l_left, l_right])
# print l_right - l_left
elif item[:5] == "ATOMS":
l = l.split()
i_atom = l[0]
x = float(l[2])
y = float(l[3])
z = float(l[4])
x = (A[0][1] - A[0][0])*x + A[0][0]
y = (A[1][1] - A[1][0])*y + A[1][0]
z = (A[2][1] - A[2][0])*z + A[2][0]
desc = atom_desc[l[1]]
atom = Atom(i_atom, atom_type[l[1]], l[1], x, y, z, desc)
atoms.append(atom)
if len(atoms)>0:
buildAllAtoms()
convertToPDB()
n_atoms = len(atoms2)
print_pdb()
buildBonds()
print_psf()
lfile.close()
out.close()
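

# --- Editor's hedged usage example (not part of the original script) ---
# Mirrors the usage string printed above; the file names are placeholders:
#
#   python BuildAllAtomsFromLammps_seq.py dump.lammpstrj out.pdb protein.seq 10 57,43
#
# i.e. convert snapshot 10 of the LAMMPS dump into out.pdb (and out.psf) for
# two chains of 57 and 43 residues; omit the last two arguments to convert
# every snapshot as a single chain.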
|
mit
| -3,018,403,266,057,203,700 | 29.75772 | 104 | 0.426211 | false |
monkeysecurity/botor
|
setup.py
|
1
|
1204
|
"""
Botor
=====
A thin wrapper around boto3
:copyright: (c) 2016 by Netflix, see AUTHORS for more
:license: Apache, see LICENSE for more details.
"""
import sys
import os.path
from setuptools import setup, find_packages
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
# When executing the setup.py, we need to be able to import ourselves. This
# means that we need to add the src/ directory to the sys.path
sys.path.insert(0, ROOT)
about = {}
with open(os.path.join(ROOT, "botor", "__about__.py")) as f:
exec(f.read(), about)
install_requires = [
'boto3>=1.3.1',
'boto>=2.41.0',
'joblib>=0.9.4'
]
tests_require = []
docs_require = []
dev_require = []
setup(
name=about["__title__"],
version=about["__version__"],
author=about["__author__"],
author_email=about["__email__"],
url=about["__uri__"],
description=about["__summary__"],
long_description=open(os.path.join(ROOT, 'README.md')).read(),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'tests': tests_require,
'docs': docs_require,
'dev': dev_require
}
)
|
apache-2.0
| -2,690,839,259,569,215,500 | 20.909091 | 76 | 0.628738 | false |
aitjcize/QCamber
|
src/codegen/codegen.py
|
1
|
3393
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# codegen.py
#
# Copyright (C) 2013 - Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import re
import sys
import os.path
from xml.etree import ElementTree
from optparse import OptionParser, make_option
class TemplateEngine(object):
def __init__(self, template_file, target_dir=None):
with open(template_file, 'r') as f:
self.template = f.read()
self.target_dir = target_dir
self.outfile_suffix = template_file.split('.')[-2]
def interpolate(self, matchobj):
expr = matchobj.group(1)
ops = expr.split('|')
target = ops[0]
attrib = None
if '#' in target:
target, attrib = target.split('#')
target_node = self.root.find(target)
if target_node is None:
return ''
result = target_node.text if not attrib else target_node.attrib[attrib]
for op in ops[1:]:
result = eval('result.%s()' % op)
result = result.strip('\n ').rstrip(' ')
if matchobj.group(0).endswith('\n'):
result += '\n'
return result
def render(self, dataxml):
tree = ElementTree.parse(dataxml)
self.root = tree.getroot()
filename = '%s.%s' % (self.root.find('.').attrib['name'].lower(),
self.outfile_suffix)
if self.target_dir:
filename = os.path.join(self.target_dir, filename)
print 'GEN %s' % filename
result = re.sub(r'{{([^{}]+?)}}\n?', self.interpolate, self.template,
flags=re.S).strip() + '\n'
# remove whitespace only line
result = re.sub(r'\n +\n', r'\n\n', result, flags=re.S)
with open(filename, 'w') as f:
f.write(result)
def main():
option_list = [
make_option('-C', '--change-target-dir', action='store', type='string',
dest='target_dir', default=None,
help='change output directory'),
make_option('-t', '--template', action='store', type='string',
dest='template', default=None,
help='name of template'),
]
    parser = OptionParser(usage='Usage: %s [OPTION...] xml1 xml2 ...' % sys.argv[0],
option_list=option_list)
options, args = parser.parse_args()
if not options.template:
raise RuntimeError("no template specified.")
e = TemplateEngine(options.template, options.target_dir)
for xml in args:
e.render(xml)
if __name__ == '__main__':
try:
main()
except Exception as e:
print 'error:', str(e)
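
# --- Editor's hedged usage sketch (not part of the original script) ---
# The file names, template and XML below are invented, only to illustrate the
# {{target#attrib|op}} placeholders handled by TemplateEngine.interpolate():
#
#   symbol.h.tpl : class {{.#name|capitalize}} { {{members}} };
#   symbol.xml   : <symbol name="track"><members>int id;</members></symbol>
#
#   $ python codegen.py -t symbol.h.tpl -C src symbol.xml
#   GEN src/track.h
#
# which writes "class Track { int id; };" to src/track.h (the output name is
# the root element's lowercased 'name' plus the suffix embedded in the
# template file name).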
|
gpl-3.0
| 3,293,000,234,375,598,600 | 29.845455 | 80 | 0.591217 | false |
coll-gate/collgate
|
server/accession/migrations/0007_auto_20180328_1749.py
|
1
|
2013
|
# Generated by Django 2.0.2 on 2018-03-28 15:49
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accession', '0006_action_name'),
]
operations = [
migrations.CreateModel(
name='StockLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=255, unique=True)),
('label', django.contrib.postgres.fields.jsonb.JSONField(default={})),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='children', to='accession.StockLocation')),
],
options={
'permissions': (('get_stocklocation', 'Can get a stock location'), ('list_stocklocation', 'Can list stock locations')),
'verbose_name': 'stock location',
},
),
migrations.AlterModelOptions(
name='accessionpanel',
options={'permissions': (('get_accessionpanel', 'Can get a accession panel'), ('list_accessionpanel', 'Can list accession panels')), 'verbose_name': 'accession panel'},
),
migrations.AlterField(
model_name='accessionpanel',
name='accessions',
field=models.ManyToManyField(related_name='panels', to='accession.Accession'),
),
migrations.AlterField(
model_name='batchpanel',
name='batches',
field=models.ManyToManyField(related_name='panels', to='accession.Batch'),
),
migrations.AddField(
model_name='batch',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='batches', to='accession.StockLocation'),
),
]
|
mit
| -8,494,079,501,967,267,000 | 41.829787 | 180 | 0.604074 | false |
jcjl013/ylplines
|
setup.py
|
1
|
1461
|
#!/usr/bin/env python
"""
ylplines - Clarity for Yelp
Copyright (C) 2016 Jeff Lee
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""Set up prerequisite packages and dependencies."""
from setuptools import setup
setup(
name='ylplines',
version='0.1.0',
description='Yelp trends',
author='Jeff Lee',
author_email='jeffcjlee@gmail.com',
url='jeffcjlee.com',
install_requires=[
'Django==1.8.4',
'psycopg2>=2.6.1',
'yelp',
'lxml',
'cssselect',
'requests-futures>=0.9.7',
'coverage>=4.1b2',
'coveralls',
'django-widget-tweaks',
'Sphinx==1.4.9',
'celery[redis]<4',
'redis',
'kombu<4',
'amqp<2.0,>=1.4.9',
'anyjson',
'billiard<3.4',
'newrelic',
],
dependency_links=[
'https://pypi.python.org/simple/django/'
],
)
|
gpl-3.0
| 9,151,149,374,232,757,000 | 27.647059 | 69 | 0.638604 | false |
Balandat/cont_no_regret
|
ContNoRegret/NoRegretAlgos.py
|
1
|
23992
|
'''
Basic Algorithms for the Continuous No-Regret Problem.
@author: Maximilian Balandat
@date May 25, 2015
'''
import numpy as np
from .LossFunctions import ZeroLossFunction, ctypes_integrate
from .DualAveraging import compute_nustar
from .Domains import nBox, UnionOfDisjointnBoxes, DifferenceOfnBoxes
from .Potentials import ExponentialPotential, pExpPotential
from scipy.stats import linregress
class ContNoRegretProblem(object):
""" Basic class describing a Continuous No-Regret problem. """
def __init__(self, domain, lossfuncs, L, M, desc='nodesc'):
""" Constructor for the basic problem class. Here lossfuncs
is a list of loss LossFunction objects. """
self.domain, self.L, self.M = domain, L, M
self.lossfuncs = lossfuncs
self.T = len(lossfuncs)
self.optaction, self.optval = None, None
self.desc = desc
if domain.n == 2:
if isinstance(domain, nBox):
self.pltpoints = [domain.grid(2000)]
elif isinstance(domain, UnionOfDisjointnBoxes):
weights = np.array([nbox.volume for nbox in domain.nboxes])
self.pltpoints = [nbox.grid(2000*weight/sum(weights)) for nbox,weight in zip(domain.nboxes,weights)]
def cumulative_loss(self, points):
""" Computes the cumulative loss at the given points """
loss = np.zeros((points.shape[0], 1))
for lossfunc in self.lossfuncs:
loss = loss + lossfunc.val(points)
return loss
def create_pltpoints(self, Nplot):
""" Create a number of points used for plotting the evolution of
the density function for the DA algorithm """
if self.domain.n != 2:
return None
if isinstance(self.domain, nBox):
return [self.domain.grid(Nplot)]
elif isinstance(self.domain, UnionOfDisjointnBoxes):
weights = np.array([nbox.volume for nbox in self.domain.nboxes])/self.domain.volume
return [nbox.grid(np.ceil(weight*Nplot)) for nbox,weight in zip(self.domain.nboxes, weights)]
elif isinstance(self.domain, DifferenceOfnBoxes):
if len(self.domain.inner) > 1:
raise Exception('Can only create pltpoints for DifferenceOfnBoxes with single box missing!')
bnds_inner, bnds_outer = self.domain.inner[0].bounds, self.domain.outer.bounds
nboxes = [nBox([bnds_outer[0], [bnds_inner[1][1], bnds_outer[1][1]]]),
nBox([bnds_outer[0], [bnds_outer[1][0], bnds_inner[1][0]]]),
nBox([[bnds_outer[0][0], bnds_inner[0][0]], bnds_inner[1]]),
nBox([[bnds_inner[0][1], bnds_outer[0][1]], bnds_inner[1]])]
weights = np.array([nbox.volume for nbox in nboxes])/self.domain.volume
return [nbox.grid(np.ceil(weight*Nplot)) for nbox,weight in zip(nboxes, weights)]
def run_simulation(self, N, algo, Ngrid=100000, label='nolabel', **kwargs):
""" Runs the no-regret algorithm for different parameters and returns the
            results as a 'Results' object. Accepts optimal constant rates in the
dictionary 'etaopts', constant rates in the array-like 'etas', and
time-varying rates with parameters in the array-like 'alphas', 'thetas' """
result_args = {}
if algo == 'Greedy':
regs_Greedy = {'savg':[], 'tsavg':[], 'tsavgbnd':[], 'perc_10':[],
'perc_90':[], 'tavg_perc_10':[], 'tavg_perc_90':[]}
print('Simulating Greedy')
regrets = self.simulate(N, algo=algo, Ngrid=Ngrid, **kwargs)[2]
self.parse_regrets(regs_Greedy, regrets)
result_args['regs_{}'.format(algo)] = regs_Greedy
elif algo == 'GP':
regs_GP = {'savg':[], 'tsavg':[], 'tsavgbnd':[], 'perc_10':[],
'perc_90':[], 'tavg_perc_10':[], 'tavg_perc_90':[]}
print('Simulating GP, rate eta_t=t^(-0.5)')
regrets = self.simulate(N, etas=(1+np.arange(self.T))**(-0.5), algo=algo, Ngrid=Ngrid, **kwargs)[2]
self.parse_regrets(regs_GP, regrets)
self.regret_bound(regs_GP, algo, alpha=0.5)
result_args['regs_{}'.format(algo)] = regs_GP
elif algo == 'OGD':
regs_OGD = {'savg':[], 'tsavg':[], 'tsavgbnd':[], 'perc_10':[],
'perc_90':[], 'tavg_perc_10':[], 'tavg_perc_90':[]}
theta = 1/kwargs['H']
print('Simulating OGD, rate eta_t={0:.2f}t^(-1)'.format(theta))
regrets = self.simulate(N, etas=theta/(1+np.arange(self.T)), algo=algo, Ngrid=Ngrid, **kwargs)[2]
self.parse_regrets(regs_OGD, regrets)
self.regret_bound(regs_OGD, algo, H=kwargs['H'])
result_args['regs_{}'.format(algo)] = regs_OGD
elif algo == 'DA':
pot = kwargs['potential']
reg_info = {'savg':[], 'tsavg':[], 'tsavgbnd':[], 'perc_10':[],
'perc_90':[], 'tavg_perc_10':[], 'tavg_perc_90':[]}
if kwargs.get('opt_rate') == True:
if isinstance(pot, ExponentialPotential):
theta = np.sqrt((pot.c_omega*(self.domain.n-np.log(self.domain.v))
+ pot.d_omega*self.domain.v)/2/self.M**2)
alpha = None
print('Simulating {0}, {1}, opt. rate '.format(algo, pot.desc) +
'eta_t={0:.3f} sqrt(log t/t)'.format(theta))
etas = theta*np.sqrt(np.log(1+np.arange(self.T)+1)/(1+np.arange(self.T)))
elif isinstance(pot, pExpPotential):
try:
M = pot.M
except AttributeError:
M = self.M
theta = np.sqrt((pot.c_omega*(self.domain.n-np.log(self.domain.v))
+ pot.d_omega*self.domain.v)/2/M**2)
alpha = None
print('Simulating {0}, {1}, opt. rate '.format(algo, pot.desc) +
'eta_t={0:.3f} sqrt(log t/t)'.format(theta))
etas = theta*np.sqrt(np.log(1+np.arange(self.T)+1)/(1+np.arange(self.T)))
else:
try:
M = pot.M
except AttributeError:
M = self.M
alpha, theta = pot.alpha_opt(self.domain.n), pot.theta_opt(self.domain, M)
print('Simulating {0}, {1}, opt. rate '.format(algo, pot.desc) +
                          'eta_t={0:.3f}t^(-{1:.3f})'.format(theta, alpha))
etas = theta*(1+np.arange(self.T))**(-alpha)
regrets = self.simulate(N, etas=etas, algo=algo, Ngrid=Ngrid, **kwargs)[2]
self.parse_regrets(reg_info, regrets)
self.regret_bound(reg_info, algo, alpha=alpha, theta=theta, potential=pot)
result_args['regs_DAopt'] = reg_info
if 'etas' in kwargs:
print('Simulating {0}, {1}, custom rate'.format(algo, pot.desc))
regrets = self.simulate(N, algo=algo, Ngrid=Ngrid, **kwargs)[2]
self.parse_regrets(reg_info, regrets)
# self.regret_bound(reg_info, algo, alpha=alpha, theta=theta, potential=pot)
result_args['regs_DAetas'] = reg_info
if kwargs.get('animate') is not None:
result_args['pltdata'] = kwargs.get('animate')
if kwargs.get('KL') is not None:
result_args['KL'] = kwargs.get('KL')
# if 'etaopts' in kwargs:
# regs_etaopts = {'savg':[], 'tsavg':[], 'tsavgbnd':[], 'perc_10':[],
# 'perc_90':[], 'tavg_perc_10':[], 'tavg_perc_90':[]}
# for T,eta in kwargs['etaopts'].items():
# if algo == 'DA':
# print('Simulating {0}, {1}, opt. constant rate eta_t={2:.3f}'.format(algo, pot.desc, eta))
# else:
# print('Simulating {0}, opt. constant rate eta_t={1:.3f}'.format(algo, eta))
# regrets = self.simulate(N, etas=eta*np.ones(self.T), algo=algo, Ngrid=Ngrid, **kwargs)[2]
# self.parse_regrets(regs_etaopts, regrets)
# result_args['regs_etaopts'] = regs_etaopts
# if 'etas' in kwargs:
# regs_etas = {'savg':[], 'tsavg':[], 'tsavgbnd':[], 'perc_10':[],
# 'perc_90':[], 'tavg_perc_10':[], 'tavg_perc_90':[]}
# for eta in kwargs['etas']:
# if algo == 'DA':
# print('Simulating {0}, {1}, constant rate eta={2:.3f}'.format(algo, kwargs['potential'].desc, eta))
# else:
# print('Simulating {0}, constant rate eta={1:.3f}'.format(algo, eta))
# regrets = self.simulate(N, etas=eta*np.ones(self.T), algo=algo, Ngrid=Ngrid, **kwargs)[2]
# self.parse_regrets(regs_etas, regrets)
# result_args['etas'] = kwargs['etas']
# result_args['regs_etas'] = regs_etas
# if 'alphas' in kwargs:
# regs_alphas = {'savg':[], 'tsavg':[], 'tsavgbnd':[], 'perc_10':[],
# 'perc_90':[], 'tavg_perc_10':[], 'tavg_perc_90':[]}
# for alpha,theta in zip(kwargs['alphas'], kwargs['thetas']): # run for Nloss different sequences of loss functions
# if algo == 'DA':
# print('Simulating {0}, {1}, decaying rate with alpha={2:.3f}, theta={3}'.format(algo, kwargs['potential'].desc, alpha, theta))
# else:
# print('Simulating {0}, decaying rate with alpha={1:.3f}, theta={2}'.format(algo, alpha, theta))
# regrets = self.simulate(N, etas=theta*(1+np.arange(self.T))**(-alpha), algo=algo, Ngrid=Ngrid, **kwargs)[2]
# self.parse_regrets(regs_alphas, regrets)
# self.regret_bound(regs_alphas, algo, alpha=alpha, theta=theta, potential=kwargs['potential'])
# result_args['alphas'] = kwargs['alphas']
# result_args['thetas'] = kwargs['thetas']
# result_args['regs_alphas'] = regs_alphas
else:
regs_norate = {'savg':[], 'tsavg':[], 'tsavgbnd':[], 'perc_10':[],
'perc_90':[], 'tavg_perc_10':[], 'tavg_perc_90':[]}
print('Simulating {0}, exp-concavity parameter alpha={1:.3f}'.format(algo, kwargs['alpha']))
regrets = self.simulate(N, algo=algo, Ngrid=Ngrid, **kwargs)[2]
self.parse_regrets(regs_norate, regrets)
self.regret_bound(regs_norate, algo, **kwargs)
result_args['regs_{}'.format(algo)] = regs_norate
# write the results to file (save memory) and return the file handler
results = Results(self, label=label, algo=algo, **result_args)
return results
def simulate(self, N, algo='DA', Ngrid=200000, **kwargs):
""" Simulates the result of running the No-Regret algorithm (N times).
Returns a list of sequences of decisions and associated losses, one for each run.
The grid is used for computing both the regret and the actions! """
if algo in ['DA', 'GP', 'OGD']:
etas = kwargs.get('etas')
if algo == 'DA':
pot = kwargs['potential']
if algo in ['ONS', 'FTAL', 'EWOO']:
alpha = kwargs['alpha']
beta = 0.5*np.minimum(1/4/self.L/self.domain.diameter, alpha)
epsilon = 1/beta**2/self.domain.diameter**2
# set up some data structures for keeping record
actions, losses, cumloss, regrets = [], [], [], []
gridpoints = self.domain.grid(Ngrid)
approxL = np.zeros(gridpoints.shape[0])
cumLossFunc = ZeroLossFunction(self.domain)
# now run the iterations
for t, lossfunc in enumerate(self.lossfuncs):
if t == 0:
print('pid {}: Starting...'.format(kwargs['pid']))
elif t % 25 == 0:
print('pid {}: t={}'.format(kwargs['pid'], t))
if algo == 'Greedy':
if t == 0:
action = self.domain.sample_uniform(N)
else:
action = np.array([cumLossFunc.min(argmin=True)[1],]*N)
if algo in ['GP', 'OGD']: # GP and OGD are the same except for the rates
if t == 0:
action = self.domain.sample_uniform(N) # pick arbitrary action in the first step, may as well sample
else:
action = self.lossfuncs[t-1].proj_gradient(actions[-1], etas[t]) # do the projected gradient step
elif algo == 'DA': # Our very own Dual Averaging algorithm
if t == 0:
# compute nustar for warm-starting the intervals of root-finder
nustar = -1/etas[t]*pot.phi_inv(1/self.domain.volume)
action = self.domain.sample_uniform(N)
if kwargs.get('KL') is not None:
kwargs.get('KL').append(0)
else:
nustar = compute_nustar(self.domain, pot, etas[t], cumLossFunc, self.M, nustar,
etas[t-1], t, pid=kwargs['pid'], tmpfolder=kwargs['tmpfolder'], KL=kwargs.get('KL'))
weights = np.maximum(pot.phi(-etas[t]*(approxL + nustar)), 0)
np.random.seed()
action = gridpoints[np.random.choice(weights.shape[0], size=N, p=weights/np.sum(weights))]
del weights
if kwargs.get('animate') is not None:
kwargs.get('animate').append([np.maximum(pot.phi(-etas[t]*(cumLossFunc.val(pltpoints) + nustar)), 0)
for pltpoints in self.pltpoints])
elif algo == 'ONS': # Hazan's Online Newton Step
if t == 0:
action = self.domain.sample_uniform(N) # pick arbitrary action in the first step, may as well sample
grad = lossfunc.grad(action)
A = np.einsum('ij...,i...->ij...', grad, grad) + epsilon*np.array([np.eye(self.domain.n),]*N)
Ainv = np.array([np.linalg.inv(mat) for mat in A])
else:
points = actions[-1] - np.einsum('ijk...,ik...->ij...', Ainv, grad)/beta
action = self.domain.gen_project(points, A)
grad = lossfunc.grad(action)
A = A + np.einsum('ij...,i...->ij...', grad, grad)
z = np.einsum('ijk...,ik...->ij...', Ainv, grad)
Ainv = Ainv - np.einsum('ij...,i...->ij...', z, z)/(1 + np.einsum('ij,ij->i',grad,z))[:,np.newaxis,np.newaxis]
elif algo == 'FTAL':
if t == 0:
action = self.domain.sample_uniform(N) # pick arbitrary action in the first step, may as well sample
grad = lossfunc.grad(action)
A = np.einsum('ij...,i...->ij...', grad, grad)
b = grad*(np.einsum('ij,ij->i', grad, action) - 1/beta)[:,np.newaxis]
Ainv = np.array([np.linalg.pinv(mat) for mat in A]) # so these matrices are singular... what's the issue?
else:
points = np.einsum('ijk...,ik...->ij...', Ainv, b)
action = self.domain.gen_project(points, A)
grad = lossfunc.grad(action)
A = A + np.einsum('ij...,i...->ij...', grad, grad)
b = b + grad*(np.einsum('ij,ij->i', grad, action) - 1/beta)[:,np.newaxis]
# the following uses the matrix inversion lemma for
# efficient computation the update of Ainv
z = np.einsum('ijk...,ik...->ij...', Ainv, grad)
Ainv = Ainv - np.einsum('ij...,i...->ij...', z, z)/(1 + np.einsum('ij,ij->i',grad,z))[:,np.newaxis,np.newaxis]
elif algo == 'EWOO':
if t == 0:
if not self.domain.isconvex():
raise Exception('EWOO algorithm only makes sense if the domain is convex!')
action = self.domain.sample_uniform(N)
else:
if isinstance(self.domain, nBox):
ranges = [self.domain.bounds]
elif isinstance(self.domain, UnionOfDisjointnBoxes):
ranges = [nbox.bounds for nbox in self.domain.nboxes]
else:
raise Exception('For now, domain must be an nBox or a UnionOfDisjointnBoxes!')
action_ewoo = action_EWOO(cumLossFunc, alpha, ranges, tmpfolder=kwargs['tmpfolder'])
action = np.array([action_ewoo,]*N)
# now store the actions, losses, etc.
actions.append(action)
loss = lossfunc.val(action)
losses.append(loss)
if t == 0:
cumloss.append(loss)
cumLossFunc = lossfunc
else:
cumloss.append(cumloss[-1] + loss)
cumLossFunc = cumLossFunc + lossfunc
# compute and append regret
approxL += lossfunc.val(gridpoints)
optval = cumLossFunc.min()
regrets.append(cumloss[-1] - optval)
return np.transpose(np.array(actions), (1,0,2)), np.transpose(np.array(losses)), np.transpose(np.array(regrets))
def parse_regrets(self, reg_results, regrets):
""" Function that computes some aggregate information from the
raw regret samples in the list 'regrets' """
reg_results['savg'].append(np.average(regrets, axis=0))
reg_results['perc_10'].append(np.percentile(regrets, 10, axis=0))
reg_results['perc_90'].append(np.percentile(regrets, 90, axis=0))
reg_results['tsavg'].append(reg_results['savg'][-1]/(1+np.arange(self.T)))
reg_results['tavg_perc_10'].append(reg_results['perc_10'][-1]/(1+np.arange(self.T)))
reg_results['tavg_perc_90'].append(reg_results['perc_90'][-1]/(1+np.arange(self.T)))
return reg_results
def regret_bound(self, reg_results, algo, **kwargs):
""" Computes the regret bound for the ContNoRegret Problem. """
t = 1 + np.arange(self.T)
n, D, L = self.domain.n, self.domain.diameter, self.L
if algo == 'DA':
pot, v = kwargs['potential'], self.domain.v
if 'etas' in kwargs:
etas = kwargs['etas']
raise NotImplementedError('Need to implement general bound in terms of etas')
else:
if (isinstance(pot, ExponentialPotential) or isinstance(pot, pExpPotential)):
reg_bnd = self.M*np.sqrt(8*(pot.c_omega*(n-np.log(v)) + pot.d_omega*v))*np.sqrt(np.log(t+1)/t) + L*D/t
else:
alpha, theta = kwargs['alpha'], kwargs['theta']
lpsi = pot.l_psi()[0]
C, epsilon = pot.bounds_asymp()
try:
M = pot.M
except AttributeError:
M = self.M
reg_bnd = (M**2*theta/lpsi/(1-alpha)*t**(-alpha)
+ (L*D + C/theta*v**(-epsilon))*t**(-(1-alpha)/(1+n*epsilon)))
elif algo == 'GP':
# for now assume eta_t = t**(-0.5)
reg_bnd = (D**2/2 + L**2)*t**(-0.5) - L**2/2/t
elif algo == 'OGD':
reg_bnd = L**2/2/kwargs['H']*(1+np.log(t))/t
elif algo == 'ONS':
reg_bnd = 5*(1/kwargs['alpha'] + L*D)*n*np.log(t+1)/t
elif algo == 'FTAL':
reg_bnd = 64*(1/kwargs['alpha'] + L*D)*n*(1+np.log(t))/t
elif algo == 'EWOO':
reg_bnd = 1/kwargs['alpha']*n*(1+np.log(t+1))/t
else:
raise NotImplementedError
reg_results['tsavgbnd'].append(reg_bnd)
class Results(object):
""" Class for 'result' objects that contain simulation results
generated by ContNoRegretProblems """
def __init__(self, problem, **kwargs):
self.problem = problem #, self.regs = problem, regs
self.label = kwargs.get('label')
self.algo = kwargs.get('algo')
if kwargs.get('pltdata') is not None:
self.pltdata = kwargs.get('pltdata')
if kwargs.get('KL') is not None:
self.KL = kwargs.get('KL')
if 'etas' in kwargs:
self.etas = kwargs['etas']
if self.algo == 'DA':
try: self.regs_norate = kwargs['regs_DAopt']
            except KeyError: pass
            try: self.regs_norate = kwargs['regs_DAetas']
            except KeyError: pass
try: self.etaopts, self.regs_etaopts = kwargs['etaopts'], kwargs['regs_etaopts']
except KeyError: pass
try: self.etas, self.regs_etas = kwargs['etas'], kwargs['regs_etas']
except KeyError: pass
try: self.alphas, self.thetas, self.regs_alphas = kwargs['alphas'], kwargs['thetas'], kwargs['regs_alphas']
except KeyError: pass
else:
self.regs_norate = kwargs['regs_{}'.format(self.algo)]
Nslopes = np.minimum(1000, np.floor(self.problem.T/3))
self.slopes, self.slopes_bnd = self.estimate_loglog_slopes(Nslopes)
def estimate_loglog_slopes(self, N=500):
""" Estimates slopes of the asymptotic log-log plot
            for each element of tsavg_regret, using the last N data points """
slopes, slopes_bnd = {}, {}
try:
slopes['etaopts'] = self.loglog_slopes(self.regs_etaopts['tsavg'], N)
slopes_bnd['etaopts'] = self.loglog_slopes(self.regs_etaopts['tsavgbnd'], N)
except AttributeError: pass
try:
slopes['etas'] = self.loglog_slopes(self.regs_etas['tsavg'], N)
slopes_bnd['etas'] = self.loglog_slopes(self.regs_etas['tsavgbnd'], N)
except AttributeError: pass
try:
slopes['alphas'] = self.loglog_slopes(self.regs_alphas['tsavg'], N)
slopes_bnd['alphas'] = self.loglog_slopes(self.regs_alphas['tsavgbnd'], N)
except AttributeError: pass
try:
slopes['{}'.format(self.algo)] = self.loglog_slopes(self.regs_norate['tsavg'], N)
slopes_bnd['{}'.format(self.algo)] = self.loglog_slopes(self.regs_norate['tsavgbnd'], N)
except AttributeError: pass
return slopes, slopes_bnd
def loglog_slopes(self, regrets, N):
slopes = []
for regret in regrets:
T = np.arange(len(regret)-N, len(regret))
Y = regret[len(regret)-N:]
slope = linregress(np.log(T), np.log(Y))[0]
slopes.append(slope)
return slopes
def action_EWOO(cumLossFunc, alpha, ranges, tmpfolder='libs/'):
""" Function for computing the (single) action of the EWOO algorithm """
header = ['#include <math.h>\n\n',
'double alpha = {};\n'.format(alpha)]
func = cumLossFunc.gen_ccode()
ccode = header + func + [' return exp(-alpha*loss);\n',
' }']
integr = ctypes_integrate(ccode, ranges, tmpfolder)
actions = []
for i in range(cumLossFunc.domain.n):
footer = [' return args[{}]*exp(-alpha*loss);\n'.format(i),
' }']
ccode = header + func + footer
actions.append(ctypes_integrate(ccode, ranges, tmpfolder)/integr)
return np.array(actions)
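
# --- Editor's hedged usage sketch (not part of the original module) ---
# Rough call pattern only: `dom` stands for one of the Domains (e.g. an nBox)
# and `lossfuncs` for a list of LossFunction objects from this package whose
# Lipschitz constant L and uniform bound M match the values passed in.
#
#   problem = ContNoRegretProblem(dom, lossfuncs, L, M, desc='demo')
#   results = problem.run_simulation(N=50, algo='GP', pid=0, tmpfolder='libs/')
#   print(results.slopes)   # estimated log-log slopes of the time-avg regret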
|
mit
| -6,658,337,883,254,105,000 | 55.056075 | 152 | 0.520007 | false |
Jan-zou/LeetCode
|
python/HashTable/205_isomorphic_strings.py
|
1
|
1289
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character while preserving the order of characters.
No two characters may map to the same character but a character may map to itself.
For example,
Given "egg", "add", return true.
Given "foo", "bar", return false.
Given "paper", "title", return true.
Note: You may assume both s and t have the same length.
Tags: Hash Table
Time: O(n); Space: O(1)
'''
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t):
return False
return self.halfIsom(s, t) and self.halfIsom(t, s)
def halfIsom(self, s, t):
lookup = {}
for i in xrange(len(s)):
if s[i] not in lookup:
lookup[s[i]] = t[i]
elif lookup[s[i]] != t[i]:
return False
return True
if __name__ == '__main__':
print Solution().isIsomorphic('ab', 'aa')
print Solution().isIsomorphic('foo', 'bar')
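    # Editor's note: one more check, taken from the docstring example above --
    # "paper" and "title" are isomorphic (p->t, a->i, e->l, r->e), so this
    # should print True.
    print Solution().isIsomorphic('paper', 'title')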
|
mit
| -6,590,881,442,122,380,000 | 29.690476 | 116 | 0.576416 | false |
gaasedelen/lighthouse
|
coverage/frida/frida-drcov.py
|
1
|
10181
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import os
import signal
import sys
import frida
"""
Frida BB tracer that outputs in DRcov format.
Frida script is responsible for:
- Getting and sending the process module map initially
- Getting the code execution events
- Parsing the raw event into a GumCompileEvent
- Converting from GumCompileEvent to DRcov block
- Sending a list of DRcov blocks to python
Python side is responsible for:
- Attaching and detaching from the target process
- Removing duplicate DRcov blocks
- Formatting module map and blocks
- Writing the output file
"""
# Our frida script, takes two string arguments to embed
# 1. whitelist of modules, in the form "['module_a', 'module_b']" or "['all']"
# 2. threads to trace, in the form "[345, 765]" or "['all']"
js = """
"use strict";
var whitelist = %s;
var threadlist = %s;
// Get the module map
function make_maps() {
var maps = Process.enumerateModulesSync();
var i = 0;
// We need to add the module id
maps.map(function(o) { o.id = i++; });
// .. and the module end point
maps.map(function(o) { o.end = o.base.add(o.size); });
return maps;
}
var maps = make_maps()
send({'map': maps});
// We want to use frida's ModuleMap to create DRcov events, however frida's
// Module object doesn't have the 'id' we added above. To get around this,
// we'll create a mapping from path -> id, and have the ModuleMap look up the
// path. While the ModuleMap does contain the base address, if we cache it
// here, we can simply look up the path rather than the entire Module object.
var module_ids = {};
maps.map(function (e) {
module_ids[e.path] = {id: e.id, start: e.base};
});
var filtered_maps = new ModuleMap(function (m) {
if (whitelist.indexOf('all') >= 0) { return true; }
return whitelist.indexOf(m.name) >= 0;
});
// This function takes a list of GumCompileEvents and converts it into a DRcov
// entry. Note that we'll get duplicated events when two traced threads
// execute the same code, but this will be handled by the python side.
function drcov_bbs(bbs, fmaps, path_ids) {
// We're going to use send(..., data) so we need an array buffer to send
// our results back with. Let's go ahead and alloc the max possible
// reply size
/*
// Data structure for the coverage info itself
typedef struct _bb_entry_t {
uint start; // offset of bb start from the image base
ushort size;
ushort mod_id;
} bb_entry_t;
*/
var entry_sz = 8;
var bb = new ArrayBuffer(entry_sz * bbs.length);
var num_entries = 0;
for (var i = 0; i < bbs.length; ++i) {
var e = bbs[i];
var start = e[0];
var end = e[1];
var path = fmaps.findPath(start);
if (path == null) { continue; }
var mod_info = path_ids[path];
var offset = start.sub(mod_info.start).toInt32();
var size = end.sub(start).toInt32();
var mod_id = mod_info.id;
// We're going to create two memory views into the array we alloc'd at
// the start.
// we want one u32 after all the other entries we've created
var x = new Uint32Array(bb, num_entries * entry_sz, 1);
x[0] = offset;
// we want two u16's offset after the 4 byte u32 above
var y = new Uint16Array(bb, num_entries * entry_sz + 4, 2);
y[0] = size;
y[1] = mod_id;
++num_entries;
}
// We can save some space here, rather than sending the entire array back,
// we can create a new view into the already allocated memory, and just
// send back that linear chunk.
return new Uint8Array(bb, 0, num_entries * entry_sz);
}
// Punt on self modifying code -- should improve speed and lighthouse will
// barf on it anyways
Stalker.trustThreshold = 0;
console.log('Starting to stalk threads...');
// Note, we will miss any bbs hit by threads that are created after we've
// attached
Process.enumerateThreads({
onMatch: function (thread) {
if (threadlist.indexOf(thread.id) < 0 &&
threadlist.indexOf('all') < 0) {
// This is not the thread you're look for
return;
}
console.log('Stalking thread ' + thread.id + '.');
Stalker.follow(thread.id, {
events: {
compile: true
},
onReceive: function (event) {
var bb_events = Stalker.parse(event,
{stringify: false, annotate: false});
var bbs = drcov_bbs(bb_events, filtered_maps, module_ids);
// We're going to send a dummy message, the actual bb is in the
// data field. We're sending a dict to keep it consistent with
// the map. We're also creating the drcov event in javascript,
// so on the py recv side we can just blindly add it to a set.
send({bbs: 1}, bbs);
}
});
},
onComplete: function () { console.log('Done stalking threads.'); }
});
"""
# These are global so we can easily access them from the frida callbacks or
# signal handlers. It's important that bbs is a set, as we're going to depend
# on its uniquing behavior for deduplication
modules = []
bbs = set([])
outfile = 'frida-cov.log'
# This converts the object frida sends which has string addresses into
# a python dict
def populate_modules(image_list):
global modules
for image in image_list:
idx = image['id']
path = image['path']
base = int(image['base'], 0)
end = int(image['end'], 0)
size = image['size']
m = {
'id': idx,
'path': path,
'base': base,
'end': end,
'size': size}
modules.append(m)
print('[+] Got module info.')
# called when we get coverage data from frida
def populate_bbs(data):
global bbs
# we know every drcov block is 8 bytes, so lets just blindly slice and
# insert. This will dedup for us.
block_sz = 8
for i in range(0, len(data), block_sz):
bbs.add(data[i:i+block_sz])
# take the module dict and format it as a drcov logfile header
def create_header(mods):
header = ''
header += 'DRCOV VERSION: 2\n'
header += 'DRCOV FLAVOR: frida\n'
header += 'Module Table: version 2, count %d\n' % len(mods)
header += 'Columns: id, base, end, entry, checksum, timestamp, path\n'
entries = []
for m in mods:
# drcov: id, base, end, entry, checksum, timestamp, path
# frida doesnt give us entry, checksum, or timestamp
# luckily, I don't think we need them.
entry = '%3d, %#016x, %#016x, %#016x, %#08x, %#08x, %s' % (
m['id'], m['base'], m['end'], 0, 0, 0, m['path'])
entries.append(entry)
header_modules = '\n'.join(entries)
return ("%s%s\n" % (header, header_modules)).encode("utf-8")
# take the recv'd basic blocks, finish the header, and append the coverage
def create_coverage(data):
bb_header = b'BB Table: %d bbs\n' % len(data)
return bb_header + b''.join(data)
def on_message(msg, data):
#print(msg)
pay = msg['payload']
if 'map' in pay:
maps = pay['map']
populate_modules(maps)
else:
populate_bbs(data)
def sigint(signo, frame):
print('[!] SIGINT, saving %d blocks to \'%s\'' % (len(bbs), outfile))
save_coverage()
print('[!] Done')
os._exit(1)
def save_coverage():
header = create_header(modules)
body = create_coverage(bbs)
with open(outfile, 'wb') as h:
h.write(header)
h.write(body)
def main():
global outfile
parser = argparse.ArgumentParser()
parser.add_argument('target',
help='target process name or pid',
default='-1')
parser.add_argument('-o', '--outfile',
help='coverage file',
default='frida-cov.log')
parser.add_argument('-w', '--whitelist-modules',
help='module to trace, may be specified multiple times [all]',
action='append', default=[])
parser.add_argument('-t', '--thread-id',
help='threads to trace, may be specified multiple times [all]',
action='append', type=int, default=[])
parser.add_argument('-D', '--device',
help='select a device by id [local]',
default='local')
args = parser.parse_args()
outfile = args.outfile
device = frida.get_device(args.device)
target = -1
for p in device.enumerate_processes():
if args.target in [str(p.pid), p.name]:
if target == -1:
target = p.pid
else:
print('[-] Warning: multiple processes on device match '
'\'%s\', using pid: %d' % (args.target, target))
if target == -1:
print('[-] Error: could not find process matching '
'\'%s\' on device \'%s\'' % (args.target, device.id))
sys.exit(1)
signal.signal(signal.SIGINT, sigint)
whitelist_modules = ['all']
if len(args.whitelist_modules):
whitelist_modules = args.whitelist_modules
threadlist = ['all']
if len(args.thread_id):
threadlist = args.thread_id
json_whitelist_modules = json.dumps(whitelist_modules)
json_threadlist = json.dumps(threadlist)
print('[*] Attaching to pid \'%d\' on device \'%s\'...' %
(target, device.id))
session = device.attach(target)
print('[+] Attached. Loading script...')
script = session.create_script(js % (json_whitelist_modules, json_threadlist))
script.on('message', on_message)
script.load()
print('[*] Now collecting info, control-C or control-D to terminate....')
sys.stdin.read()
print('[*] Detaching, this might take a second...')
session.detach()
print('[+] Detached. Got %d basic blocks.' % len(bbs))
print('[*] Formatting coverage and saving...')
save_coverage()
print('[!] Done')
sys.exit(0)
if __name__ == '__main__':
main()
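
# --- Editor's hedged usage example (not part of the original script) ---
# A typical invocation, matching the argparse options defined in main(); the
# process name, module name, thread id and output path are placeholders:
#
#   python frida-drcov.py -D local -o frida-cov.log -w target_module -t 1234 target_process
#
# Interrupt with Ctrl-C (or close stdin) to have the collected DRcov blocks
# written to the chosen outfile via save_coverage().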
|
mit
| -5,940,395,265,699,300,000 | 28.682216 | 82 | 0.595718 | false |
nextgis-extra/tests
|
lib_gdal/pyscripts/test_gdal_ls_py.py
|
1
|
9097
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: test_gdal_ls_py.py 33811 2016-03-29 23:12:41Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: gdal_ls.py testing
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2011-2012, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from osgeo import gdal
import sys
sys.path.append( '../pymod' )
import gdaltest
import test_py_scripts
###############################################################################
def run_gdal_ls(argv):
script_path = test_py_scripts.get_py_script('gdal_ls')
if script_path is None:
return ('skip', None)
saved_syspath = sys.path
sys.path.append(script_path)
try:
import gdal_ls
except:
sys.path = saved_syspath
return ('fail', None)
sys.path = saved_syspath
from sys import version_info
if version_info >= (3,0,0):
import io
outstr = io.StringIO()
else:
import StringIO
outstr = StringIO.StringIO()
ret = gdal_ls.gdal_ls(argv, outstr)
retstr = outstr.getvalue()
outstr.close()
if ret != 0:
gdaltest.post_reason('got error code : %d' % ret)
return ('fail', 'None')
return ('success', retstr)
###############################################################################
# List one file
def test_gdal_ls_py_1():
# TODO: Why the '' as the first element of the list here and below?
ret, ret_str = run_gdal_ls(['', '-l', '../ogr/data/poly.shp'])
if ret != 'success':
return ret
if ret_str.find('poly.shp') == -1:
print(ret_str)
return 'fail'
return 'success'
###############################################################################
# List one dir
def test_gdal_ls_py_2():
ret, ret_str = run_gdal_ls(['', '-l', '../ogr/data'])
if ret != 'success':
return ret
if ret_str.find('poly.shp') == -1:
print(ret_str)
return 'fail'
return 'success'
###############################################################################
# List recursively
def test_gdal_ls_py_3():
ret, ret_str = run_gdal_ls(['', '-R', '../ogr/data'])
if ret != 'success':
return ret
if ret_str.find('PROJ_UNITS') == -1:
print(ret_str)
return 'fail'
return 'success'
###############################################################################
# List in a .zip
def test_gdal_ls_py_4():
ret, ret_str = run_gdal_ls(['', '-l', '/vsizip/../ogr/data/poly.zip'])
if ret != 'success':
return ret
if ret_str.find('-r--r--r-- 1 unknown unknown 415 2008-02-11 21:35 /vsizip/../ogr/data/poly.zip/poly.PRJ') == -1:
print(ret_str)
if gdaltest.skip_on_travis():
# FIXME
# Fails on Travis with dates at 1970-01-01 00:00
# Looks like a 32/64bit issue with Python bindings of VSIStatL()
return 'skip'
return 'fail'
return 'success'
###############################################################################
# List dir in /vsicurl/
def test_gdal_ls_py_5():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
gdaltest.post_reason('would stall for a long time')
return 'skip'
f = gdal.VSIFOpenL('/vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/poly.zip', 'rb')
if f is None:
return 'skip'
d = gdal.VSIFReadL(1,1,f)
gdal.VSIFCloseL(f)
if len(d) == 0:
return 'skip'
ret, ret_str = run_gdal_ls(['', '-R', 'http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/'])
if ret != 'success':
return ret
if ret_str.find('/vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/wkb_wkt/3d_broken_line.wkb') == -1:
print(ret_str)
return 'fail'
return 'success'
###############################################################################
# List in a .zip in /vsicurl/
def test_gdal_ls_py_6():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
f = gdal.VSIFOpenL('/vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/poly.zip', 'rb')
if f is None:
return 'skip'
d = gdal.VSIFReadL(1,1,f)
gdal.VSIFCloseL(f)
if len(d) == 0:
return 'skip'
ret, ret_str = run_gdal_ls(['', '-l', '/vsizip/vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/poly.zip'])
if ret != 'success':
return ret
if ret_str.find('-r--r--r-- 1 unknown unknown 415 2008-02-11 21:35 /vsizip/vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/poly.zip/poly.PRJ') == -1:
print(ret_str)
if gdaltest.skip_on_travis():
# FIXME
# Fails on Travis with dates at 1970-01-01 00:00
# Looks like a 32/64bit issue with Python bindings of VSIStatL()
return 'skip'
return 'fail'
return 'success'
###############################################################################
# List dir in /vsicurl/ and recurse in zip
def test_gdal_ls_py_7():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
gdaltest.post_reason('would stall for a long time')
return 'skip'
f = gdal.VSIFOpenL('/vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/poly.zip', 'rb')
if f is None:
return 'skip'
d = gdal.VSIFReadL(1,1,f)
gdal.VSIFCloseL(f)
if len(d) == 0:
return 'skip'
ret, ret_str = run_gdal_ls(['', '-R', '-Rzip', 'http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/'])
if ret != 'success':
return ret
if ret_str.find('/vsizip//vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/poly.zip/poly.PRJ') == -1:
print(ret_str)
return 'fail'
return 'success'
###############################################################################
# List FTP dir in /vsicurl/
def test_gdal_ls_py_8():
if not gdaltest.run_slow_tests():
return 'skip'
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
gdaltest.post_reason('would stall for a long time')
return 'skip'
f = gdal.VSIFOpenL('/vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/poly.zip', 'rb')
if f is None:
return 'skip'
d = gdal.VSIFReadL(1,1,f)
gdal.VSIFCloseL(f)
if len(d) == 0:
return 'skip'
ret, ret_str = run_gdal_ls(['', '-l', '-R', '-Rzip', 'ftp://ftp.remotesensing.org/gdal/data/aig'])
if ret != 'success':
return ret
if ret_str.find('-r--r--r-- 1 unknown unknown 24576 2007-03-29 00:00 /vsicurl/ftp://ftp.remotesensing.org/gdal/data/aig/nzdem/info/arc0002r.001') == -1:
print(ret_str)
return 'fail'
if ret_str.find('-r--r--r-- 1 unknown unknown 24576 2007-03-29 12:20 /vsizip//vsicurl/ftp://ftp.remotesensing.org/gdal/data/aig/nzdem.zip/nzdem/info/arc0002r.001') == -1:
print(ret_str)
return 'fail'
return 'success'
gdaltest_list = [
test_gdal_ls_py_1,
test_gdal_ls_py_2,
test_gdal_ls_py_3,
test_gdal_ls_py_4,
test_gdal_ls_py_5,
test_gdal_ls_py_6,
test_gdal_ls_py_7,
test_gdal_ls_py_8,
]
if __name__ == '__main__':
gdal.SetConfigOption('GDAL_RUN_SLOW_TESTS', 'YES')
gdaltest.setup_run( 'test_gdal_ls_py' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
gpl-2.0
| 6,877,321,798,984,083,000 | 28.440129 | 182 | 0.540288 | false |
anandpdoshi/frappe
|
frappe/commands/site.py
|
1
|
14212
|
from __future__ import unicode_literals, absolute_import
import click
import hashlib, os, sys
import frappe
from frappe.commands import pass_context, get_site
from frappe.commands.scheduler import _is_scheduler_enabled
from frappe.limits import update_limits, get_limits
from frappe.installer import update_site_config
from frappe.utils import touch_file, get_site_path
@click.command('new-site')
@click.argument('site')
@click.option('--db-name', help='Database name')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--admin-password', help='Administrator password for new site', default=None)
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False)
@click.option('--source_sql', help='Initiate database with a SQL file')
@click.option('--install-app', multiple=True, help='Install app after installation')
def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None, force=None, install_app=None, db_name=None):
"Create a new site"
if not db_name:
db_name = hashlib.sha1(site).hexdigest()[:10]
frappe.init(site=site, new_site=True)
_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password,
verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force)
if len(frappe.utils.get_sites()) == 1:
use(site)
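# Illustrative invocation (not part of the original file); these click commands
# are normally exposed through the bench CLI, for example:
#   bench new-site mysite --admin-password secret --install-app erpnext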
def _new_site(db_name, site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None,
verbose=False, install_apps=None, source_sql=None,force=False, reinstall=False):
"""Install a new Frappe site"""
from frappe.installer import install_db, make_site_dirs
from frappe.installer import install_app as _install_app
import frappe.utils.scheduler
frappe.init(site=site)
try:
# enable scheduler post install?
enable_scheduler = _is_scheduler_enabled()
except:
enable_scheduler = False
make_site_dirs()
try:
installing = touch_file(get_site_path('locks', 'installing.lock'))
install_db(root_login=mariadb_root_username, root_password=mariadb_root_password, db_name=db_name,
admin_password=admin_password, verbose=verbose, source_sql=source_sql,force=force, reinstall=reinstall)
apps_to_install = ['frappe'] + (frappe.conf.get("install_apps") or []) + (install_apps or [])
for app in apps_to_install:
_install_app(app, verbose=verbose, set_as_patched=not source_sql)
frappe.utils.scheduler.toggle_scheduler(enable_scheduler)
frappe.db.commit()
scheduler_status = "disabled" if frappe.utils.scheduler.is_scheduler_disabled() else "enabled"
print "*** Scheduler is", scheduler_status, "***"
finally:
os.remove(installing)
frappe.destroy()
@click.command('restore')
@click.argument('sql-file-path')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--db-name', help='Database name for site in case it is a new one')
@click.option('--admin-password', help='Administrator password for new site')
@click.option('--install-app', multiple=True, help='Install app after installation')
@click.option('--with-public-files', help='Restores the public files of the site, given path to its tar file')
@click.option('--with-private-files', help='Restores the private files of the site, given path to its tar file')
@pass_context
def restore(context, sql_file_path, mariadb_root_username=None, mariadb_root_password=None, db_name=None, verbose=None, install_app=None, admin_password=None, force=None, with_public_files=None, with_private_files=None):
"Restore site database from an sql file"
from frappe.installer import extract_sql_gzip, extract_tar_files
# Extract the gzip file if user has passed *.sql.gz file instead of *.sql file
if not os.path.exists(sql_file_path):
sql_file_path = '../' + sql_file_path
if not os.path.exists(sql_file_path):
		print 'Invalid path {0}'.format(sql_file_path[3:])
sys.exit(1)
if sql_file_path.endswith('sql.gz'):
sql_file_path = extract_sql_gzip(os.path.abspath(sql_file_path))
site = get_site(context)
frappe.init(site=site)
db_name = db_name or frappe.conf.db_name or hashlib.sha1(site).hexdigest()[:10]
_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=context.verbose, install_apps=install_app, source_sql=sql_file_path, force=context.force)
# Extract public and/or private files to the restored site, if user has given the path
if with_public_files:
extract_tar_files(site, with_public_files, 'public')
if with_private_files:
extract_tar_files(site, with_private_files, 'private')
@click.command('reinstall')
@pass_context
def reinstall(context):
"Reinstall site ie. wipe all data and start over"
site = get_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.clear_cache()
installed = frappe.get_installed_apps()
frappe.clear_cache()
except Exception:
installed = []
finally:
if frappe.db:
frappe.db.close()
frappe.destroy()
frappe.init(site=site)
_new_site(frappe.conf.db_name, site, verbose=context.verbose, force=True, reinstall=True, install_apps=installed)
@click.command('install-app')
@click.argument('app')
@pass_context
def install_app(context, app):
"Install a new app to site"
from frappe.installer import install_app as _install_app
for site in context.sites:
frappe.init(site=site)
frappe.connect()
try:
_install_app(app, verbose=context.verbose)
finally:
frappe.destroy()
@click.command('list-apps')
@pass_context
def list_apps(context):
"List apps in site"
site = get_site(context)
frappe.init(site=site)
frappe.connect()
print "\n".join(frappe.get_installed_apps())
frappe.destroy()
@click.command('add-system-manager')
@click.argument('email')
@click.option('--first-name')
@click.option('--last-name')
@click.option('--send-welcome-email', default=False, is_flag=True)
@pass_context
def add_system_manager(context, email, first_name, last_name, send_welcome_email):
"Add a new system manager to a site"
import frappe.utils.user
for site in context.sites:
frappe.connect(site=site)
try:
frappe.utils.user.add_system_manager(email, first_name, last_name, send_welcome_email)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('migrate')
@click.option('--rebuild-website', help="Rebuild webpages after migration")
@pass_context
def migrate(context, rebuild_website=False):
"Run patches, sync schema and rebuild files/translations"
from frappe.migrate import migrate
for site in context.sites:
print 'Migrating', site
frappe.init(site=site)
frappe.connect()
try:
migrate(context.verbose, rebuild_website=rebuild_website)
finally:
frappe.destroy()
@click.command('run-patch')
@click.argument('module')
@pass_context
def run_patch(context, module):
"Run a particular patch"
import frappe.modules.patch_handler
for site in context.sites:
frappe.init(site=site)
try:
frappe.connect()
frappe.modules.patch_handler.run_single(module, force=context.force)
finally:
frappe.destroy()
@click.command('reload-doc')
@click.argument('module')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def reload_doc(context, module, doctype, docname):
"Reload schema for a DocType"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.reload_doc(module, doctype, docname, force=context.force)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('use')
@click.argument('site')
def _use(site, sites_path='.'):
"Set a default site"
use(site, sites_path=sites_path)
def use(site, sites_path='.'):
with open(os.path.join(sites_path, "currentsite.txt"), "w") as sitefile:
sitefile.write(site)
@click.command('backup')
@click.option('--with-files', default=False, is_flag=True, help="Take backup with files")
@pass_context
def backup(context, with_files=False, backup_path_db=None, backup_path_files=None,
backup_path_private_files=None, quiet=False):
"Backup"
from frappe.utils.backups import scheduled_backup
verbose = context.verbose
for site in context.sites:
frappe.init(site=site)
frappe.connect()
odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files, backup_path_private_files=backup_path_private_files, force=True)
if verbose:
from frappe.utils import now
print "database backup taken -", odb.backup_path_db, "- on", now()
if with_files:
print "files backup taken -", odb.backup_path_files, "- on", now()
print "private files backup taken -", odb.backup_path_private_files, "- on", now()
frappe.destroy()
@click.command('remove-from-installed-apps')
@click.argument('app')
@pass_context
def remove_from_installed_apps(context, app):
"Remove app from site's installed-apps list"
from frappe.installer import remove_from_installed_apps
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
remove_from_installed_apps(app)
finally:
frappe.destroy()
@click.command('uninstall-app')
@click.argument('app')
@click.option('--dry-run', help='List all doctypes that will be deleted', is_flag=True, default=False)
@pass_context
def uninstall(context, app, dry_run=False):
"Remove app and linked modules from site"
from frappe.installer import remove_app
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
remove_app(app, dry_run)
finally:
frappe.destroy()
@click.command('drop-site')
@click.argument('site')
@click.option('--root-login', default='root')
@click.option('--root-password')
@click.option('--archived-sites-path')
def drop_site(site, root_login='root', root_password=None, archived_sites_path=None):
"Remove site from database and filesystem"
from frappe.installer import get_current_host, make_connection
from frappe.model.db_schema import DbManager
from frappe.utils.backups import scheduled_backup
frappe.init(site=site)
frappe.connect()
scheduled_backup(ignore_files=False, force=True)
db_name = frappe.local.conf.db_name
frappe.local.db = make_connection(root_login, root_password)
dbman = DbManager(frappe.local.db)
dbman.delete_user(db_name, get_current_host())
dbman.drop_database(db_name)
if not archived_sites_path:
archived_sites_path = os.path.join(frappe.get_app_path('frappe'), '..', '..', '..', 'archived_sites')
if not os.path.exists(archived_sites_path):
os.mkdir(archived_sites_path)
move(archived_sites_path, site)
def move(dest_dir, site):
import os
if not os.path.isdir(dest_dir):
raise Exception, "destination is not a directory or does not exist"
frappe.init(site)
old_path = frappe.utils.get_site_path()
new_path = os.path.join(dest_dir, site)
# check if site dump of same name already exists
site_dump_exists = True
count = 0
while site_dump_exists:
final_new_path = new_path + (count and str(count) or "")
site_dump_exists = os.path.exists(final_new_path)
count = int(count or 0) + 1
os.rename(old_path, final_new_path)
frappe.destroy()
return final_new_path
@click.command('set-admin-password')
@click.argument('admin-password')
@pass_context
def set_admin_password(context, admin_password):
"Set Administrator password for a site"
import getpass
from frappe.utils.password import update_password
for site in context.sites:
try:
frappe.init(site=site)
while not admin_password:
admin_password = getpass.getpass("Administrator's password for {0}: ".format(site))
frappe.connect()
update_password('Administrator', admin_password)
frappe.db.commit()
admin_password = None
finally:
frappe.destroy()
@click.command('set-limit')
@click.option('--site', help='site name')
@click.argument('limit')
@click.argument('value')
@pass_context
def set_limit(context, site, limit, value):
"""Sets user / space / email limit for a site"""
_set_limits(context, site, ((limit, value),))
@click.command('set-limits')
@click.option('--site', help='site name')
@click.option('--limit', 'limits', type=(unicode, unicode), multiple=True)
@pass_context
def set_limits(context, site, limits):
_set_limits(context, site, limits)
def _set_limits(context, site, limits):
import datetime
if not limits:
return
if not site:
site = get_site(context)
with frappe.init_site(site):
new_limits = {}
for limit, value in limits:
if limit not in ('emails', 'space', 'users', 'expiry',
'support_email', 'support_chat', 'upgrade_url'):
frappe.throw('Invalid limit {0}'.format(limit))
if limit=='expiry':
try:
datetime.datetime.strptime(value, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
elif limit=='space':
value = float(value)
elif limit in ('users', 'emails'):
value = int(value)
new_limits[limit] = value
update_limits(new_limits)
@click.command('clear-limits')
@click.option('--site', help='site name')
@click.argument('limits', nargs=-1, type=click.Choice(['emails', 'space', 'users', 'expiry',
'support_email', 'support_chat', 'upgrade_url']))
@pass_context
def clear_limits(context, site, limits):
"""Clears given limit from the site config, and removes limit from site config if its empty"""
from frappe.limits import clear_limit as _clear_limit
if not limits:
return
if not site:
site = get_site(context)
with frappe.init_site(site):
_clear_limit(limits)
# Remove limits from the site_config, if it's empty
limits = get_limits()
if not limits:
update_site_config('limits', 'None', validate=False)
commands = [
add_system_manager,
backup,
drop_site,
install_app,
list_apps,
migrate,
new_site,
reinstall,
reload_doc,
remove_from_installed_apps,
restore,
run_patch,
set_admin_password,
uninstall,
set_limit,
set_limits,
clear_limits,
_use,
]
|
mit
| -2,448,101,645,080,719,400 | 31.226757 | 244 | 0.726428 | false |
berth64/modded_modded_1257ad
|
source/header_common.py
|
1
|
23877
|
###################################################
# header_common.py
# This file contains common declarations.
# DO NOT EDIT THIS FILE!
###################################################
#client events
multiplayer_event_set_item_selection = 0
multiplayer_event_set_bot_selection = 1
multiplayer_event_admin_start_map = 2
multiplayer_event_admin_set_max_num_players = 3
multiplayer_event_admin_set_num_bots_in_team = 4
multiplayer_event_admin_set_friendly_fire = 5
multiplayer_event_admin_set_ghost_mode = 6
multiplayer_event_admin_set_control_block_dir = 7
multiplayer_event_admin_set_add_to_servers_list = 8
multiplayer_event_admin_set_respawn_period = 9
multiplayer_event_admin_set_game_max_minutes = 10
multiplayer_event_admin_set_round_max_seconds = 11
multiplayer_event_admin_set_game_max_points = 12
multiplayer_event_admin_set_point_gained_from_flags = 13
multiplayer_event_admin_set_point_gained_from_capturing_flag = 14
multiplayer_event_admin_set_server_name = 15
multiplayer_event_admin_set_game_password = 16
multiplayer_event_admin_set_team_faction = 17
multiplayer_event_open_admin_panel = 18
multiplayer_event_change_team_no = 19
multiplayer_event_change_troop_id = 20
multiplayer_event_start_new_poll = 21
multiplayer_event_answer_to_poll = 22
multiplayer_event_admin_kick_player = 23
multiplayer_event_admin_ban_player = 24
multiplayer_event_admin_set_num_bots_voteable = 25
multiplayer_event_admin_set_factions_voteable = 26
multiplayer_event_admin_set_maps_voteable = 27
multiplayer_event_admin_set_player_respawn_as_bot = 28
multiplayer_event_admin_set_combat_speed = 29
multiplayer_event_admin_set_respawn_count = 30
multiplayer_event_admin_set_kick_voteable = 31
multiplayer_event_admin_set_ban_voteable = 32
multiplayer_event_admin_set_valid_vote_ratio = 33
multiplayer_event_admin_set_auto_team_balance_limit = 34
multiplayer_event_admin_set_welcome_message = 35
multiplayer_event_admin_set_initial_gold_multiplier = 36
multiplayer_event_admin_set_battle_earnings_multiplier = 37
multiplayer_event_admin_set_round_earnings_multiplier = 38
multiplayer_event_admin_set_melee_friendly_fire = 39
multiplayer_event_admin_set_friendly_fire_damage_self_ratio = 40
multiplayer_event_admin_set_friendly_fire_damage_friend_ratio = 41
multiplayer_event_admin_set_allow_player_banners = 42
multiplayer_event_admin_set_force_default_armor = 43
multiplayer_event_admin_set_anti_cheat = 44
multiplayer_event_open_game_rules = 45
multiplayer_event_offer_duel = 46
multiplayer_event_admin_set_disallow_ranged_weapons = 47
#server events
multiplayer_event_return_max_num_players = 50
multiplayer_event_return_num_bots_in_team = 51
multiplayer_event_return_friendly_fire = 52
multiplayer_event_return_ghost_mode = 53
multiplayer_event_return_control_block_dir = 54
multiplayer_event_return_combat_speed = 55
multiplayer_event_return_add_to_servers_list = 56
multiplayer_event_return_respawn_period = 57
multiplayer_event_return_game_max_minutes = 58
multiplayer_event_return_round_max_seconds = 59
multiplayer_event_return_game_max_points = 60
multiplayer_event_return_point_gained_from_flags = 61
multiplayer_event_return_point_gained_from_capturing_flag = 62
multiplayer_event_return_server_name = 63
multiplayer_event_return_game_password = 64
multiplayer_event_return_game_type = 65
multiplayer_event_return_confirmation = 66
multiplayer_event_return_rejection = 67
multiplayer_event_show_multiplayer_message = 68
multiplayer_event_draw_this_round = 69
multiplayer_event_set_attached_scene_prop = 70
multiplayer_event_set_team_flag_situation = 71
multiplayer_event_set_team_score = 72
multiplayer_event_set_num_agents_around_flag = 73
multiplayer_event_ask_for_poll = 74
multiplayer_event_change_flag_owner = 75
multiplayer_event_use_item = 76
multiplayer_event_set_scene_prop_open_or_close = 77
multiplayer_event_set_round_start_time = 78
multiplayer_event_force_start_team_selection = 79
multiplayer_event_start_death_mode = 80
multiplayer_event_return_num_bots_voteable = 81
multiplayer_event_return_factions_voteable = 82
multiplayer_event_return_maps_voteable = 83
multiplayer_event_return_next_team_faction = 84
multiplayer_event_return_player_respawn_as_bot = 85
multiplayer_event_set_player_score_kill_death = 86
multiplayer_event_set_day_time = 87
multiplayer_event_return_respawn_count = 88
multiplayer_event_return_player_respawn_spent = 89
multiplayer_event_return_kick_voteable = 90
multiplayer_event_return_ban_voteable = 91
multiplayer_event_return_valid_vote_ratio = 92
multiplayer_event_return_auto_team_balance_limit = 93
multiplayer_event_return_initial_gold_multiplier = 94
multiplayer_event_return_battle_earnings_multiplier = 95
multiplayer_event_return_round_earnings_multiplier = 96
multiplayer_event_return_renaming_server_allowed = 97
multiplayer_event_return_changing_game_type_allowed = 98
multiplayer_event_return_melee_friendly_fire = 99
multiplayer_event_return_friendly_fire_damage_self_ratio = 100
multiplayer_event_return_friendly_fire_damage_friend_ratio = 101
multiplayer_event_return_allow_player_banners = 102
multiplayer_event_return_force_default_armor = 103
multiplayer_event_return_anti_cheat = 104
multiplayer_event_return_open_game_rules = 105
multiplayer_event_return_max_num_bots = 106
multiplayer_event_return_server_mission_timer_while_player_joined = 107
multiplayer_event_show_duel_request = 108
multiplayer_event_start_duel = 109
multiplayer_event_cancel_duel = 110
multiplayer_event_show_server_message = 111
multiplayer_event_return_disallow_ranged_weapons = 112
#multiplayer message types
multiplayer_message_type_auto_team_balance_done = 2
multiplayer_message_type_auto_team_balance_next = 3
multiplayer_message_type_capture_the_flag_score = 4
multiplayer_message_type_flag_returned_home = 5
multiplayer_message_type_capture_the_flag_stole = 6
multiplayer_message_type_poll_result = 7
multiplayer_message_type_flag_neutralized = 8
multiplayer_message_type_flag_captured = 9
multiplayer_message_type_flag_is_pulling = 10
multiplayer_message_type_auto_team_balance_no_need = 11
multiplayer_message_type_round_result_in_battle_mode = 12
multiplayer_message_type_round_result_in_siege_mode = 13
multiplayer_message_type_round_draw = 14
multiplayer_message_type_target_destroyed = 15
multiplayer_message_type_defenders_saved_n_targets = 16
multiplayer_message_type_attackers_won_the_round = 17
multiplayer_message_type_start_death_mode = 18
#multiplayer game type indices
multiplayer_game_type_deathmatch = 0
multiplayer_game_type_team_deathmatch = 1
multiplayer_game_type_battle = 2
multiplayer_game_type_destroy = 3
multiplayer_game_type_capture_the_flag = 4
multiplayer_game_type_headquarters = 5
multiplayer_game_type_siege = 6
multiplayer_game_type_duel = 7
multiplayer_num_game_types = 8
#admin panel value ranges
multiplayer_round_max_seconds_min = 60
multiplayer_round_max_seconds_max = 901
multiplayer_respawn_period_min = 3
multiplayer_respawn_period_max = 31 #can only be 30 seconds max (due to agent deletion after that period)
multiplayer_siege_mod_defender_team_extra_respawn_time = 27 #It was 20 in 1.113 but was increased to 25 in 1.121 because defenders were mostly defeating the enemy.
multiplayer_new_agents_finish_spawning_time = 30
multiplayer_max_possible_player_id = 1000
#team 1 and team 2 are 0 and 1 respectively
multi_team_spectator = 2
multi_team_unassigned = multi_team_spectator + 1
multi_data_maps_for_game_type_begin = 0
multi_data_maps_for_game_type_end = multi_data_maps_for_game_type_begin + 30
multi_data_troop_button_indices_begin = multi_data_maps_for_game_type_end
multi_data_troop_button_indices_end = multi_data_troop_button_indices_begin + 16 #maximum 16 troops per faction
multi_data_item_button_indices_begin = multi_data_troop_button_indices_end
multi_data_item_button_indices_end = multi_data_item_button_indices_begin + 100 #maximum 100 items per troop
multi_data_flag_owner_begin = multi_data_item_button_indices_end
multi_data_flag_owner_end = multi_data_flag_owner_begin + 10 #maximum of 10 flags per scene
multi_data_flag_players_around_begin = multi_data_flag_owner_end
multi_data_flag_players_around_end = multi_data_flag_players_around_begin + 10 #maximum of 10 flags per scene
multi_data_flag_owned_seconds_begin = multi_data_flag_players_around_end
multi_data_flag_owned_seconds_end = multi_data_flag_owned_seconds_begin + 10 #maximum of 10 flags per scene
multi_data_flag_pull_code_begin = multi_data_flag_owned_seconds_end
multi_data_flag_pull_code_end = multi_data_flag_pull_code_begin + 10 #maximum of 10 flags per scene
multi_data_player_index_list_begin = multi_data_flag_pull_code_end
#Entry points 100..109 is used for showing initial points for moveable and usable scene props like siege ladder.
multi_entry_points_for_usable_items_start = 100
multi_entry_points_for_usable_items_end = multi_entry_points_for_usable_items_start + 10
#multi_item_class_type_other = 0
multi_item_class_type_sword = 1
multi_item_class_type_axe = 2
multi_item_class_type_blunt = 3
multi_item_class_type_war_picks = 4
multi_item_class_type_cleavers = 5
multi_item_class_type_two_handed_sword = 6
multi_item_class_type_two_handed_axe = 7
multi_item_class_type_spear = 8
multi_item_class_type_lance = 9
multi_item_class_type_small_shield = 10
multi_item_class_type_large_shield = 11
multi_item_class_type_bow = 12
multi_item_class_type_crossbow = 13
multi_item_class_type_arrow = 14
multi_item_class_type_bolt = 15
multi_item_class_type_throwing = 16
multi_item_class_type_throwing_axe = 17
multi_item_class_type_horse = 18
multi_item_class_type_light_armor = 19
multi_item_class_type_medium_armor = 20
multi_item_class_type_heavy_armor = 21
multi_item_class_type_light_helm = 22
multi_item_class_type_heavy_helm = 23
multi_item_class_type_light_foot = 24
multi_item_class_type_heavy_foot = 25
multi_item_class_type_glove = 26
multi_item_class_type_melee_weapons_begin = multi_item_class_type_sword
multi_item_class_type_melee_weapons_end = multi_item_class_type_small_shield
multi_item_class_type_ranged_weapons_begin = multi_item_class_type_bow
multi_item_class_type_ranged_weapons_end = multi_item_class_type_horse
multi_item_class_type_shields_begin = multi_item_class_type_melee_weapons_end
multi_item_class_type_shields_end = multi_item_class_type_bow
multi_item_class_type_weapons_begin = multi_item_class_type_sword
multi_item_class_type_weapons_end = multi_item_class_type_horse
multi_item_class_type_horses_begin = multi_item_class_type_weapons_end
multi_item_class_type_horses_end = multi_item_class_type_light_armor
multi_item_class_type_bodies_begin = multi_item_class_type_horses_end
multi_item_class_type_bodies_end = multi_item_class_type_light_helm
multi_item_class_type_heads_begin = multi_item_class_type_bodies_end
multi_item_class_type_heads_end = multi_item_class_type_light_foot
multi_item_class_type_feet_begin = multi_item_class_type_heads_end
multi_item_class_type_feet_end = multi_item_class_type_glove
multi_item_class_type_gloves_begin = multi_item_class_type_feet_end
multi_item_class_type_gloves_end = multi_item_class_type_glove + 1
multi_troop_class_other = 0
multi_troop_class_infantry = 1
multi_troop_class_spearman = 2
multi_troop_class_cavalry = 3
multi_troop_class_archer = 4
multi_troop_class_crossbowman = 5
multi_troop_class_mounted_archer = 6
multi_troop_class_mounted_crossbowman = 7
multi_num_valid_entry_points = 64
multi_num_valid_entry_points_div_2 = 32
#normal money management system
multi_battle_round_team_money_add = 500
multi_destroy_save_or_destroy_target_money_add = 100
multi_destroy_target_money_add = 1500
multi_initial_gold_value = 1000
multi_max_gold_that_can_be_stored = 15000
multi_killer_agent_standard_money_add = 150 #(2/3 = 100 for battle & destroy, 3/3 = 150 for siege, 4/3 = 200 for deathmatch/team deathmatch/capture the flag/headquarters)
multi_killer_agent_loot_percentage_share = 12 #(2/3 = 8% for battle & destroy, 3/3 = 12% for siege, 4/3 = 16% for deathmatch/team deathmatch/capture the flag/headquarters)
multi_dead_agent_loot_percentage_share = 48 #(2/3 = 32% for battle & destroy, 3/3 = 48% for siege, 4/3 = 64% for deathmatch/team deathmatch/capture the flag/headquarters)
multi_minimum_gold = 1000 #(same in all modes)
multi_minimum_target_health = 1200
multi_max_seconds_flag_can_stay_in_ground = 60
multi_capture_the_flag_score_flag_returning = 1
multi_initial_spawn_point_team_1 = 0
multi_initial_spawn_point_team_2 = 32
multi_base_point_team_1 = 64
multi_base_point_team_2 = 65
multi_siege_flag_point = 66
multi_death_mode_point = 67
multi_headquarters_pole_height = 900
multi_headquarters_flag_height_to_win = 800 #used in sd death mode
multi_headquarters_flag_initial_height = 100 #used in sd death mode
multi_headquarters_max_distance_sq_to_raise_flags = 1600 #4m * 4m * 100 = 1600
multi_headquarters_distance_sq_to_set_flag = 8100 #9m * 9m * 100 = 8100
multi_headquarters_distance_sq_to_change_flag = 400 #2m * 2m * 100 = 400
multi_headquarters_distance_to_change_flag = 200 #2m * 100 = 200
multi_distance_sq_to_use_belfry = 36 #6m * 6m = 36 (there is no * 100 for this one because it uses get_sq_distance_between_positions_in_meters instead of get_sq_distance_between_positions)
multi_max_sq_dist_between_agents_to_longer_mof_time = 49 #7m * 7m = 49m
min_allowed_flag_height_difference_to_make_score = 50
#these two values are about when master of field will be kicked
multiplayer_battle_formula_value_a = 15
multiplayer_battle_formula_value_b = 18000 #consider 18000-20000 if death mode happens very often.
multiplayer_spawn_above_opt_enemy_dist_point = 32 #while finding most suitable spawn point if nearest enemy is further than 32 meters give negative points to that spawn point
multiplayer_spawn_min_enemy_dist_limit = 45 #while finding most suitable spawn point if nearest enemy is closer than 45 meters give negative points to that spawn point, (squared increase)
multiplayer_poll_disable_period = 900 #15 minutes
#menu variables
escape_menu_item_height = 40
bignum = 0x40000000000000000000000000000000
op_num_value_bits = 24 + 32
tag_register = 1
tag_variable = 2
tag_string = 3
tag_item = 4
tag_troop = 5
tag_faction = 6
tag_quest = 7
tag_party_tpl = 8
tag_party = 9
tag_scene = 10
tag_mission_tpl = 11
tag_menu = 12
tag_script = 13
tag_particle_sys = 14
tag_scene_prop = 15
tag_sound = 16
tag_local_variable = 17
tag_map_icon = 18
tag_skill = 19
tag_mesh = 20
tag_presentation = 21
tag_quick_string = 22
tag_track = 23
tag_tableau = 24
tag_animation = 25
tags_end = 26
opmask_register = tag_register << op_num_value_bits
opmask_variable = tag_variable << op_num_value_bits
##opmask_string = tag_string << op_num_value_bits
##opmask_item_index = tag_item << op_num_value_bits
##opmask_troop_index = tag_troop << op_num_value_bits
##opmask_faction_index = tag_faction << op_num_value_bits
opmask_quest_index = tag_quest << op_num_value_bits
##opmask_p_template_index = tag_party_tpl << op_num_value_bits
##opmask_party_index = tag_party << op_num_value_bits
##opmask_scene_index = tag_scene << op_num_value_bits
##opmask_mission_tpl_index = tag_mission_tpl << op_num_value_bits
##opmask_menu_index = tag_menu << op_num_value_bits
##opmask_script = tag_script << op_num_value_bits
##opmask_particle_sys = tag_particle_sys << op_num_value_bits
##opmask_scene_prop = tag_scene_prop << op_num_value_bits
##opmask_sound = tag_sound << op_num_value_bits
##opmask_map_icon = tag_map_icon << op_num_value_bits
opmask_local_variable = tag_local_variable << op_num_value_bits
opmask_quick_string = tag_quick_string << op_num_value_bits
def reg(reg_no):
if (reg_no < 0):
print ("Error register_no negative")
cause_error()
return opmask_register | reg_no
def find_object(objects,object_id):
result = -1
num_objects = len(objects)
i_object = 0
object_id_lowercase = object_id.lower()
while (i_object < num_objects) and (result == -1):
object = objects[i_object]
if (object[0].lower() == object_id_lowercase):
result = i_object
i_object += 1
return result
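# Illustrative examples (not part of the original file):
#   reg(3) evaluates to opmask_register | 3, i.e. the same value as reg3 defined below
#   find_object(scene_props, "spr_gate") returns the index of the entry whose first
#   element is "spr_gate" (compared case-insensitively), or -1 when it is missing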
# LAV TWEAKS BEGIN
for index in xrange(128):
globals()['s%d' % index] = index
globals()['pos%d' % index] = index
globals()['reg%d' % index] = opmask_register | index
#s0 = 0
#s1 = 1
#s2 = 2
#s3 = 3
#s4 = 4
#s5 = 5
#s6 = 6
#s7 = 7
#s8 = 8
#s9 = 9
#s10 = 10
#s11 = 11
#s12 = 12
#s13 = 13
#s14 = 14
#s15 = 15
#s16 = 16
#s17 = 17
#s18 = 18
#s19 = 19
#s20 = 20
#s21 = 21
#s22 = 22
#s23 = 23
#s24 = 24
#s25 = 25
#s26 = 26
#s27 = 27
#s28 = 28
#s29 = 29
#s30 = 30
#s31 = 31
#s32 = 32
#s33 = 33
#s34 = 34
#s35 = 35
#s36 = 36
#s37 = 37
#s38 = 38
#s39 = 39
#s40 = 40
#s41 = 41
#s42 = 42
#s43 = 43
#s44 = 44
#s45 = 45
#s46 = 46
#s47 = 47
#s48 = 48
#s49 = 49
#s50 = 50
#s51 = 51
#s52 = 52
#s53 = 53
#s54 = 54
#s55 = 55
#s56 = 56
#s57 = 57
#s58 = 58
#s59 = 59
#s60 = 60
#s61 = 61
#s62 = 62
#s63 = 63
#
#s64 = 64
#s65 = 65
#s66 = 66
#s67 = 67
#
#
#pos0 = 0
#pos1 = 1
#pos2 = 2
#pos3 = 3
#pos4 = 4
#pos5 = 5
#pos6 = 6
#pos7 = 7
#pos8 = 8
#pos9 = 9
#pos10 = 10
#pos11 = 11
#pos12 = 12
#pos13 = 13
#pos14 = 14
#pos15 = 15
#pos16 = 16
#pos17 = 17
#pos18 = 18
#pos19 = 19
#pos20 = 20
#pos21 = 21
#pos22 = 22
#pos23 = 23
#pos24 = 24
#pos25 = 25
#pos26 = 26
#pos27 = 27
#pos28 = 28
#pos29 = 29
#pos30 = 30
#pos31 = 31
#pos32 = 32
#pos33 = 33
#pos34 = 34
#pos35 = 35
#pos36 = 36
#pos37 = 37
#pos38 = 38
#pos39 = 39
#pos40 = 40
#pos41 = 41
#pos42 = 42
#pos43 = 43
#pos44 = 44
#pos45 = 45
#pos46 = 46
#pos47 = 47
#pos48 = 48
#pos49 = 49
#pos50 = 50
#pos51 = 51
#pos52 = 52
#pos53 = 53
#pos54 = 54
#pos55 = 55
#pos56 = 56
#pos57 = 57
#pos58 = 58
#pos59 = 59
#pos60 = 60
#pos61 = 61
#pos62 = 62
#pos63 = 63
pos_belfry_begin = 64
#
#reg0 = opmask_register| 0
#reg1 = opmask_register| 1
#reg2 = opmask_register| 2
#reg3 = opmask_register| 3
#reg4 = opmask_register| 4
#reg5 = opmask_register| 5
#reg6 = opmask_register| 6
#reg7 = opmask_register| 7
#reg8 = opmask_register| 8
#reg9 = opmask_register| 9
#reg10 = opmask_register|10
#reg11 = opmask_register|11
#reg12 = opmask_register|12
#reg13 = opmask_register|13
#reg14 = opmask_register|14
#reg15 = opmask_register|15
#reg16 = opmask_register|16
#reg17 = opmask_register|17
#reg18 = opmask_register|18
#reg19 = opmask_register|19
#reg20 = opmask_register|20
#reg21 = opmask_register|21
#reg22 = opmask_register|22
#reg23 = opmask_register|23
#reg24 = opmask_register|24
#reg25 = opmask_register|25
#reg26 = opmask_register|26
#reg27 = opmask_register|27
#reg28 = opmask_register|28
#reg29 = opmask_register|29
#reg30 = opmask_register|30
#reg31 = opmask_register|31
#reg32 = opmask_register|32
#reg33 = opmask_register|33
#reg34 = opmask_register|34
#reg35 = opmask_register|35
#reg36 = opmask_register|36
#reg37 = opmask_register|37
#reg38 = opmask_register|38
#reg39 = opmask_register|39
#reg40 = opmask_register|40
#reg41 = opmask_register|41
#reg42 = opmask_register|42
#reg43 = opmask_register|43
#reg44 = opmask_register|44
#reg45 = opmask_register|45
#reg46 = opmask_register|46
#reg47 = opmask_register|47
#reg48 = opmask_register|48
#reg49 = opmask_register|49
#reg50 = opmask_register|50
#reg51 = opmask_register|51
#reg52 = opmask_register|52
#reg53 = opmask_register|53
#reg54 = opmask_register|54
#reg55 = opmask_register|55
#reg56 = opmask_register|56
#reg57 = opmask_register|57
#reg58 = opmask_register|58
#reg59 = opmask_register|59
#reg60 = opmask_register|60
#reg61 = opmask_register|61
#reg62 = opmask_register|62
#reg63 = opmask_register|63
#
#reg65 = opmask_register|65
# LAV TWEAKS END
spf_all_teams_are_enemy                      = 0x00000001
spf_is_horseman                              = 0x00000002
spf_examine_all_spawn_points                 = 0x00000004
spf_team_0_spawn_far_from_entry_32           = 0x00000008
spf_team_1_spawn_far_from_entry_0            = 0x00000010
spf_team_1_spawn_far_from_entry_66           = 0x00000020
spf_team_0_spawn_near_entry_0                = 0x00000040
spf_team_0_spawn_near_entry_66               = 0x00000080
spf_team_1_spawn_near_entry_32               = 0x00000100
spf_team_0_walkers_spawn_at_high_points      = 0x00000200
spf_team_1_walkers_spawn_at_high_points      = 0x00000400
spf_try_to_spawn_close_to_at_least_one_enemy = 0x00000800
spf_care_agent_to_agent_distances_less       = 0x00001000
|
agpl-3.0
| -3,918,707,714,485,447,000 | 38.19697 | 188 | 0.631863 | false |
telnik/jCnC
|
db_manip.py
|
1
|
4702
|
#!/usr/bin/python2
import sys, sqlite3, argparse
class dbmanager:
"""
DataStore Class for Juniper Devices
    Stores hosts, their usernames and passwords, and groups.
    Groups are lists of hosts with names.
    Methods:
    create_host -- insert a new host
    modify_host_name -- modify a host's hostname
    modify_host_user -- modify a host's username
    modify_host_pass -- modify a host's password
    delete_host -- delete a host
    show_host -- retrieve all the info about the host
    show_all -- retrieve all info about all hosts
    create_group -- create a new group by name
    modify_gname -- change the name of an existing group
    delete_group -- delete a group
    show_group_members -- retrieve all info about all hosts in the group
    show_group_names -- list the names of all groups
    create_db -- initialize a new database
    is_host -- returns true if string matches a hostname
    is_group -- returns true if string matches a groupname
    is_member -- returns true if host is already in group
    attach -- add a host to a group
    detach -- remove a host from a group
"""
def __init__(self):
self.db_connection = sqlite3.connect("/var/local/pyezd/store.db", isolation_level=None)
self.db_cursor = self.db_connection.cursor()
def __exit__(self):
self.db_connection.close()
def create_db(self):
self.db_cursor.execute("CREATE TABLE devices (hostname text PRIMARY KEY ON CONFLICT ABORT, username text, password text) ")
self.db_cursor.execute("CREATE TABLE relations (groupname text, hostname text)")
self.db_cursor.execute("CREATE TABLE groups (groupname text PRIMARY KEY ON CONFLICT ABORT)")
def is_host(self, host):
self.db_cursor.execute("SELECT hostname FROM devices where hostname = ?", (host,))
if not self.db_cursor.fetchall():
return False
return True
def is_group(self, gname):
self.db_cursor.execute("Select groupname FROM groups WHERE groupname = ?", (gname,))
if not self.db_cursor.fetchall():
return False
return True
def is_member(self, hostname, groupname):
self.db_cursor.execute("Select hostname FROM relations WHERE hostname = ? AND groupname = ?", (hostname, groupname))
if not self.db_cursor.fetchall():
return False
return True
def create_host(self, host, user, passw):
if self.is_host(host):
return False
self.db_cursor.execute("INSERT INTO devices VALUES (?, ?, ?)", ( host, user, passw ) )
return True
def modify_host_name(self, host, newname):
if self.is_host(newname):
return False
self.db_cursor.execute("UPDATE devices SET hostname = ? where hostname = ?", (newname, host) )
return True
def modify_host_user(self, host, newuser):
self.db_cursor.execute("UPDATE devices SET username = ? where hostname = ?", (newuser, host) )
return True
def modify_host_pass(self, host, newpass):
self.db_cursor.execute("UPDATE devices SET password = ? where hostname = ?", (newpass, host) )
return True
def delete_host(self, host):
self.db_cursor.execute("DELETE FROM devices where hostname = ?", (host,) )
self.db_cursor.execute("DELETE FROM relations where hostname = ?", (host,) )
return True
def show_host(self, host):
self.db_cursor.execute("SELECT * FROM devices WHERE hostname = ?", (host,) )
return self.db_cursor.fetchall()
def show_all(self):
self.db_cursor.execute("SELECT * FROM devices ORDER BY hostname")
return self.db_cursor.fetchall()
def create_group(self, gname):
if self.is_host(gname):
return False
if self.is_group(gname):
return False
self.db_cursor.execute("INSERT INTO groups VALUES (?)", (gname,))
return True
def delete_group(self, gname):
self.db_cursor.execute("DELETE FROM groups WHERE groupname = ?", (gname,))
self.db_cursor.execute("DELETE FROM relations WHERE groupname = ?", (gname,))
def modify_gname(self, oldname, newname):
        if self.is_host(newname):
            return False
        if self.is_group(newname):
            return False
self.db_cursor.execute("UPDATE groups SET groupname = ? where groupname = ?", (newname, oldname))
return True
def show_group_members(self, gname):
self.db_cursor.execute("SELECT devices.hostname, devices.username, devices.password FROM devices, relations, groups WHERE devices.hostname = relations.hostname AND relations.groupname = groups.groupname AND groups.groupname = ?", (gname,))
return self.db_cursor.fetchall()
def show_group_names(self):
self.db_cursor.execute("SELECT groupname FROM groups")
return self.db_cursor.fetchall()
def attach(self, hostname, groupname):
if self.is_member(hostname, groupname):
return False
self.db_cursor.execute("INSERT INTO relations VALUES (?, ?)", (groupname, hostname))
return True
def detach(self, hostname, groupname):
self.db_cursor.execute("DELETE FROM relations WHERE hostname = ? AND groupname = ?", (hostname, groupname))
|
mit
| -5,491,197,333,589,773,000 | 34.89313 | 241 | 0.711399 | false |
pydo/vlc-gif-creator
|
gif_creator.py
|
1
|
4266
|
#!/usr/bin/env python
import configparser
import os
import sys
import xml.etree.ElementTree as ET
from pathlib import Path
from urllib.parse import unquote
import requests
from moviepy.editor import VideoFileClip
from requests.exceptions import ConnectionError
def generate_gif(media_path: str, time: int, size: float, gif_len: int, gif_name: str, counter: int,
fps, output_path='.'):
"""
Args:
media_path: full path to media
time: in seconds to create the gif
size: to resize the gif
gif_len: duration of gif in seconds
gif_name: base name of all gifs
        counter: number appended to filename
        fps: frames per second for the gif (optional; ffmpeg default when empty)
        output_path: path to write out file
"""
file_path = os.path.join(output_path, gif_name + str(counter) + '.gif')
clip = VideoFileClip(media_path, audio=False).subclip(
t_start=time,
t_end=time + gif_len
).resize(size)
if fps:
clip.write_gif(file_path, program='ffmpeg', fps=float(fps))
else:
clip.write_gif(file_path, program='ffmpeg')
return file_path
def get_media_path(sess: requests.Session, url: str, filename: str) -> str:
"""
Args:
sess: requests session object. required for http auth
url: url pointing to vlc's playlist.xml
filename: name of media content
Returns: full path to media
"""
resp = sess.post(url)
tree = ET.fromstring(resp.text)
media_path = tree.find('.//leaf[@name="{filename}"]'.format(filename=filename)).get('uri')
media_path = unquote(media_path)
return media_path
def get_media_time(sess: requests.Session, url: str) -> tuple:
"""
Args:
sess: requests session object. required for http auth
url: url pointing to vlc's status.xml
Returns: time in seconds
"""
try:
resp = sess.post(url)
except ConnectionError as e:
print(e, "\nConnection failed. Did you remember to enable VLC's lua http server?\n"
"Please refer to this guide https://github.com/pydo/vlc-gif-creator/blob/master/README.md#setup")
sys.exit(-1)
tree = ET.fromstring(resp.text)
time = tree.find('.//time')
filename = tree.find('.//info[@name="filename"]')
return int(time.text), filename.text
def main(opts, counter, sess):
sess.auth = (opts['user'], opts['password'])
time, filename = get_media_time(sess, opts['status'])
path = get_media_path(sess, opts['playlist'], filename)
file_path = generate_gif(path, time, opts['resize'], opts['gif_len'], opts['gif_name'], counter,
opts['gif_fps'], opts['output_path'])
append_credits(file_path)
def get_config(config_file: str) -> dict:
"""
Returns: dict containing config.ini options
"""
config = configparser.ConfigParser()
config.read(config_file)
user = config['VLC CREDENTIALS']['user']
password = config['VLC CREDENTIALS']['password']
resize = float(config['CROPPING']['resize'])
playlist = config['VLC SERVER']['playlist']
status = config['VLC SERVER']['status']
gif_len = int(config['GIF']['length'])
gif_name = config['GIF']['name']
output_path = config['GIF']['output_path']
gif_fps = config['GIF']['fps']
return dict(
user=user, password=password, resize=resize,
playlist=playlist, status=status, gif_len=gif_len,
gif_name=gif_name, output_path=output_path, gif_fps=gif_fps
)
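# Example config.ini layout implied by get_config(); all values shown are
# illustrative placeholders:
#
#   [VLC CREDENTIALS]
#   user =
#   password = vlcpass
#
#   [CROPPING]
#   resize = 0.5
#
#   [VLC SERVER]
#   playlist = http://localhost:8080/requests/playlist.xml
#   status = http://localhost:8080/requests/status.xml
#
#   [GIF]
#   length = 3
#   name = clip
#   output_path = gifs
#   fps =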
def create_output_dir(path: str):
"""Create gif output dir if it doesn't exists"""
if not Path(path).exists():
os.makedirs(path)
def append_credits(file_path: str):
with open(file_path, 'ab') as f:
f.write('Made with ♥ using github.com/pydo/vlc-gif-creator'.encode('utf8'))
def run(config_file='config.ini'):
counter = 0
sess = requests.session()
config = get_config(config_file)
create_output_dir(config['output_path'])
while True:
make_gif = input('Create a gif? y/Y:yes, q/Q:quit\n').lower()
if make_gif == 'y':
print('making gif...')
main(config, counter, sess)
counter += 1
if make_gif == 'q':
print('Bye.')
sys.exit()
if __name__ == '__main__':
run()
|
mit
| 294,241,484,578,071,300 | 30.585185 | 114 | 0.618902 | false |
dialogflow/fulfillment-translate-python
|
translate_response.py
|
1
|
4345
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a sample for a translation fulfillment webhook for an API.AI agent
This is meant to be used with the sample translate agent for API.AI, it uses
the Google Cloud Translation API and requires an API key from an API project
with the Google Cloud Translation API enabled.
"""
_TRANSLATE_W = [
u'Sure. What do you want to translate and into which language?',
u'Okay. Just tell me what you need to translate and into which language.',
u'No problem. What are we translating, and into which language?',
u'All right. What do you need translated, and into which language?'
]
_TRANSLATE_INTO_W = [
u'Which language would you like to translate this into?',
u'Okay. What language are you trying to translate into?',
u'Which language did you want this translated into?',
u'Just tell me what language you want this translated into.'
]
_TRANSLATE_W_FROM = [
u'Sure thing. Just tell me what you want to translate from {lang}.',
u'Absolutely. I can translate from {lang}.'
u' What would you like to translate?',
u'I am familiar with {lang}.'
u' Let me know what you need to translate from it.',
u'Easy enough. What do you want to translate from {lang}?'
]
_TRANSLATE_W_TO = [
u'Sure thing. Just tell me what you want to translate into {lang}.',
u'Absolutely. I can translate into {lang}.'
u' What would you like to translate?',
u'I am familiar with {lang}. Let me know what you need to translate.',
u'Easy enough. What do you want to translate into {lang}?'
]
_TRANSLATE_W_FROM_TO = [
u'Sure thing. Just tell me what you want to translate from'
u' {lang_from} into {lang_to}.',
u'Absolutely. I can translate from {lang_from} into {lang_to}.'
u' What would you like to translate?',
u'Of course! I can translate from {lang_from} into {lang_to}.'
u' Just let me know what you need to translate.',
u'Easy enough.'
u' What do you want to translate from {lang_from} into {lang_to}?',
]
_TRANSLATE_UNKNOWN_LANGUAGE = [
u'Sorry, I couldn\'t find a translation into this language.',
u'Unfortunately this language is unfamiliar to me.'
u' I wasn\'t able to get a translation.',
u'I\'m not too familiar with this language.'
u' I wasn\'t able to translate that.',
u'Sorry. I haven\'t learned this language yet.'
]
_TRANSLATE_RESULT = [
u'Here is how that translates from {fromLang} into {toLang}: {text}.',
u'Here is the translation from {fromLang} into {toLang}: {text}.',
u'Okay, I translated that from {fromLang} into {toLang} for you: {text}.',
u'That translates from {fromLang} to {toLang} like so: {text}.',
]
_TRANSLATE_NETWORK_ERROR = [
u'Sorry, the translation service is not responding.'
u' Let\'s try again in a bit.',
u'I can\'t connect to the translation service now.'
u' Sorry about this. Let\'s retry in a minute.',
u'Seems like there\'s a connection problem with the translation service.'
u' Let\'s give a moment and try again.',
u'Looks like the translation service isn\'t responding right now.'
u' We can try again in a moment if you like.'
]
_TRANSLATE_ERROR = [
u'I\'m not quite sure what happened,'
u' but I was unable to get translation at this time.',
u'Sorry, I ran into an unexpected problem while trying'
u' to get a translation. Let\'s try that again.',
u'I\'m sorry. I wasn\'t able to complete that translation for some reason.'
u' Let\'s try again in a moment.',
u'Looks like something went wrong in the middle of that translation.'
u' Better try that again.',
u'I\'m not sure what happened,'
u' but I wasn\'t able to finish translating that.'
u' We may need to try that again.'
]
|
apache-2.0
| -1,530,639,517,313,129,200 | 40.778846 | 79 | 0.690679 | false |
schaabs/sandbox
|
py/keyvault/crytpography/algorithms/rsa_oaep.py
|
1
|
1049
|
from ..algorithm import EncryptionAlgorithm
from ..transform import CryptoTransform
from abc import ABCMeta
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
def _default_encryption_padding():
return padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(), label=None)
class _RsaCryptoTransform(CryptoTransform):
def __init__(self, key):
self._key = key
def dispose(self):
self._key = None
class _RsaOaepDecryptor(_RsaCryptoTransform):
def transform(self, data, **kwargs):
        return self._key.decrypt(data, _default_encryption_padding())
class _RsaOaepEncryptor(_RsaCryptoTransform):
def transform(self, data, **kwargs):
        return self._key.encrypt(data, _default_encryption_padding())
class RsaOaep(EncryptionAlgorithm):
_name = 'RSA-OAEP'
def create_encryptor(self, key):
        return _RsaOaepEncryptor(key)
def create_decryptor(self, key):
return _RsaOaepDecryptor(key)
RsaOaep.register()
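# Illustrative round-trip sketch (not part of the original module). It assumes
# the `cryptography` package is available and that the transforms above return
# the encrypted/decrypted bytes.
def _example_roundtrip(data=b'secret'):
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.asymmetric import rsa
    # generate a throwaway RSA key pair for the demonstration
    private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                           backend=default_backend())
    # encrypt with the public key, decrypt with the private key
    ciphertext = _RsaOaepEncryptor(private_key.public_key()).transform(data)
    return _RsaOaepDecryptor(private_key).transform(ciphertext)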
|
mit
| 3,135,554,473,970,366,500 | 25.225 | 103 | 0.720686 | false |
UncleRus/MultiConf
|
src/ui/dialogs.py
|
1
|
3974
|
# -*- coding: utf-8 -*-
from PySide.QtCore import *
from PySide.QtGui import *
class AsyncProcess (QThread):
progressUpdated = Signal (int)
changed = Signal (str)
errorOccured = Signal (str)
bannerUpdated = Signal (str)
def __init__ (self, target, parent, changeCursor = False):
super (AsyncProcess, self).__init__ (parent)
self.target = target
self.changeCursor = changeCursor
self.finished.connect (self.deleteLater)
def run (self):
try:
if self.changeCursor:
QApplication.setOverrideCursor (Qt.WaitCursor)
self.target ()
except Exception as e:
            self.errorOccured.emit (str (e).decode ('utf-8'))
finally:
if self.changeCursor:
QApplication.restoreOverrideCursor ()
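# Illustrative wiring (not part of the original file); the callable and slot
# names below are placeholders:
#   worker = AsyncProcess (run_upload, main_window, changeCursor = True)
#   worker.errorOccured.connect (main_window.on_process_error)
#   worker.start ()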
class ProcessDialog (QDialog):
def __init__ (self, parent):
super (ProcessDialog, self).__init__ (parent, Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowMinMaxButtonsHint)
self.setupUi ()
def setupUi (self):
l = QVBoxLayout (self)
self.lBanner = QLabel (self)
self.lBanner.setStyleSheet ('font-size: 12pt; padding-top: 16px; padding-bottom: 16px;')
l.addWidget (self.lBanner)
self.pbProgress = QProgressBar (self)
l.addWidget (self.pbProgress)
self.lStatus = QLabel (self)
l.addWidget (self.lStatus)
bl = QHBoxLayout ()
bl.addStretch ()
self.bContinue = QPushButton (self)
self.bContinue.clicked.connect (self.onContinueClicked)
bl.addWidget (self.bContinue)
self.bCancel = QPushButton (self)
self.bCancel.clicked.connect (self.onCancelClicked)
bl.addWidget (self.bCancel)
l.addLayout (bl)
self.resize (400, 200)
self.reset ()
def keyPressEvent (self, event):
if not self.bCancel.isEnabled () and event.key () in (Qt.Key_Escape, Qt.Key_Enter):
event.ignore ()
return
super (ProcessDialog, self).keyPressEvent (event)
def setupProcess (self):
self.process.bannerUpdated.connect (self.lBanner.setText, Qt.QueuedConnection)
self.process.started.connect (self.lockInterface, Qt.QueuedConnection)
self.process.finished.connect (self.unlockInterface, Qt.QueuedConnection)
self.process.terminated.connect (self.reset, Qt.QueuedConnection)
self.process.errorOccured.connect (self.showError, Qt.QueuedConnection)
self.process.changed.connect (self.setStatus, Qt.QueuedConnection)
self.process.progressUpdated.connect (self.updateProgress, Qt.QueuedConnection)
def reset (self):
self.bContinue.setText (_('Continue'))
self.bContinue.setEnabled (True)
self.bCancel.setText (_('Cancel'))
self.bCancel.setEnabled (True)
self.bCancel.show ()
self.finished = False
def lockInterface (self):
# process started
self.bContinue.setEnabled (False)
self.bCancel.setEnabled (False)
def unlockInterface (self):
# process finished
self.bContinue.hide ()
self.bCancel.setText (_('Close'))
self.bCancel.setEnabled (True)
QApplication.restoreOverrideCursor ()
self.finished = True
def showError (self, error):
QMessageBox.critical (self, _('Firmware upload error'), error)
self.lStatus.setText ('Error: %s' % error)
self.reset ()
def setStatus (self, message):
self.lStatus.setText (message)
def updateProgress (self, value):
self.pbProgress.setValue (value)
def onContinueClicked (self):
self.start ()
def onCancelClicked (self):
if self.finished:
self.accept ()
else:
self.reject ()
|
gpl-3.0
| 6,590,716,775,031,166,000 | 31.394958 | 127 | 0.60921 | false |
taohungyang/cloud-custodian
|
tools/c7n_gcp/tests/test_mu_gcp.py
|
1
|
4369
|
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from c7n.testing import functional
from c7n_gcp import mu
from gcp_common import BaseTest
HELLO_WORLD = """\
def handler(event, context):
print("gcf handler invoke %s" % event)
"""
class FunctionTest(BaseTest):
def get_function(self, events=(), factory=None, **kw):
if not events:
assert factory
events = [mu.HTTPEvent(factory)]
config = dict(
name="custodian-dev",
labels=[],
runtime='python37',
events=events)
config.update(kw)
archive = mu.custodian_archive()
archive.close()
return mu.CloudFunction(config, archive)
def test_deploy_function(self):
factory = self.replay_flight_data('mu-deploy')
manager = mu.CloudFunctionManager(factory)
func = self.get_function(factory=factory)
manager.publish(func)
func_info = manager.get(func.name)
self.assertTrue(func_info['httpsTrigger'])
self.assertEqual(func_info['status'], 'DEPLOY_IN_PROGRESS')
self.assertEqual(
func_info['name'],
'projects/custodian-1291/locations/us-central1/functions/custodian-dev')
@functional
def test_api_subscriber(self):
# integration styled..
factory = self.replay_flight_data('mu-api-subscriber')
p = self.load_policy(
{'name': 'topic-created',
'resource': 'gcp.pubsub-topic',
'mode': {
'type': 'gcp-audit',
'methods': ['google.pubsub.v1.Publisher.CreateTopic']}},
session_factory=factory)
# Create all policy resources.
p.provision()
session = factory()
project_id = session.get_default_project()
region = 'us-central1'
func_client = session.client('cloudfunctions', 'v1', 'projects.locations.functions')
pubsub_client = session.client('pubsub', 'v1', 'projects.topics')
sink_client = session.client('logging', 'v2', 'projects.sinks')
# Check on the resources for the api subscription
# check function exists
func_info = func_client.execute_command(
'get', {'name': 'projects/{}/locations/{}/functions/topic-created'.format(
project_id, region)})
self.assertEqual(
func_info['eventTrigger']['eventType'],
'providers/cloud.pubsub/eventTypes/topic.publish')
self.assertEqual(
func_info['eventTrigger']['resource'],
'projects/{}/topics/custodian-auto-audit-topic-created'.format(
project_id))
# check sink exists
sink = sink_client.execute_command(
'get', {'sinkName': 'projects/{}/sinks/custodian-auto-audit-topic-created'.format(
project_id)})
self.assertEqual(
sink['destination'],
'pubsub.googleapis.com/projects/{}/topics/custodian-auto-audit-topic-created'.format(
project_id))
# check the topic iam policy
topic_policy = pubsub_client.execute_command(
'getIamPolicy', {
'resource': 'projects/{}/topics/custodian-auto-audit-topic-created'.format(
project_id)})
self.assertEqual(
topic_policy['bindings'],
[{u'role': u'roles/pubsub.publisher', u'members': [sink['writerIdentity']]}])
# todo set this up as test cleanups, dependent on ordering at the moment, fifo atm
# it appears, we want lifo.
if self.recording:
# we sleep to allow time for in progress operations on creation to complete
# function requirements building primarily.
time.sleep(42)
p.get_execution_mode().deprovision()
|
apache-2.0
| -5,203,804,360,819,266,000 | 36.34188 | 97 | 0.615702 | false |
jbking/python-stdnet
|
stdnet/backends/redisb/client/extensions.py
|
1
|
16403
|
import os
from hashlib import sha1
from collections import namedtuple
from datetime import datetime
from copy import copy
from stdnet.utils.structures import OrderedDict
from stdnet.utils import iteritems, format_int
from stdnet import odm
try:
import redis
except ImportError: # pragma nocover
from stdnet import ImproperlyConfigured
raise ImproperlyConfigured('Redis backend requires redis python client')
from redis.client import BasePipeline
RedisError = redis.RedisError
p = os.path
DEFAULT_LUA_PATH = p.join(p.dirname(p.dirname(p.abspath(__file__))), 'lua')
redis_connection = namedtuple('redis_connection', 'address db')
###########################################################
# GLOBAL REGISTERED SCRIPT DICTIONARY
all_loaded_scripts = {}
_scripts = {}
def registered_scripts():
return tuple(_scripts)
def get_script(script):
return _scripts.get(script)
###########################################################
def script_callback(response, script=None, **options):
if script:
return script.callback(response, **options)
else:
return response
def read_lua_file(dotted_module, path=None, context=None):
'''Load lua script from the stdnet/lib/lua directory'''
path = path or DEFAULT_LUA_PATH
bits = dotted_module.split('.')
bits[-1] += '.lua'
name = os.path.join(path, *bits)
with open(name) as f:
data = f.read()
if context:
data = data.format(context)
return data
def parse_info(response):
'''Parse the response of Redis's INFO command into a Python dict.
In doing so, convert byte data into unicode.'''
info = {}
response = response.decode('utf-8')
def get_value(value):
        if ',' not in value or '=' not in value:
return value
sub_dict = {}
for item in value.split(','):
k, v = item.split('=')
try:
sub_dict[k] = int(v)
except ValueError:
sub_dict[k] = v
return sub_dict
data = info
for line in response.splitlines():
keyvalue = line.split(':')
if len(keyvalue) == 2:
key, value = keyvalue
try:
data[key] = int(value)
except ValueError:
data[key] = get_value(value)
else:
data = {}
info[line[2:]] = data
return info
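def _parse_info_example():   # pragma nocover
    '''Illustrative only: a minimal sketch of how ``parse_info`` groups the
    INFO payload into sections. The byte string below is a made-up, trimmed
    down reply, not real server output.'''
    raw = b'# Memory\r\nused_memory:1024\r\n# Keyspace\r\ndb0:keys=3,expires=0\r\n'
    info = parse_info(raw)
    assert info['Memory']['used_memory'] == 1024
    assert info['Keyspace']['db0'] == {'keys': 3, 'expires': 0}
    return info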
def dict_update(original, data):
target = original.copy()
target.update(data)
return target
class RedisExtensionsMixin(object):
'''Extension for Redis clients.
'''
prefix = ''
RESPONSE_CALLBACKS = dict_update(
redis.StrictRedis.RESPONSE_CALLBACKS,
{'EVALSHA': script_callback,
'INFO': parse_info}
)
@property
def is_async(self):
return False
@property
def is_pipeline(self):
return False
def address(self):
'''Address of redis server.
'''
raise NotImplementedError
def execute_script(self, name, keys, *args, **options):
'''Execute a registered lua script at ``name``.
The script must be implemented via subclassing :class:`RedisScript`.
:param name: the name of the registered script.
        :param keys: tuple/list of keys passed to the script.
:param args: argument passed to the script.
:param options: key-value parameters passed to the
:meth:`RedisScript.callback` method once the script has finished
execution.
'''
script = get_script(name)
if not script:
raise RedisError('No such script "%s"' % name)
address = self.address()
if address not in all_loaded_scripts:
all_loaded_scripts[address] = set()
loaded = all_loaded_scripts[address]
toload = script.required_scripts.difference(loaded)
        for script_name in toload:
            s = get_script(script_name)
            self.script_load(s.script)
loaded.update(toload)
return script(self, keys, args, options)
def countpattern(self, pattern):
        '''Count all keys matching *pattern*.
'''
return self.execute_script('countpattern', (), pattern)
def delpattern(self, pattern):
'''delete all keys matching *pattern*.
'''
return self.execute_script('delpattern', (), pattern)
def zdiffstore(self, dest, keys, withscores=False):
        '''Compute the difference of multiple sorted sets.
        Store the difference of the sorted sets specified by ``keys`` in a
        new sorted set at ``dest``.
'''
keys = (dest,) + tuple(keys)
wscores = 'withscores' if withscores else ''
return self.execute_script('zdiffstore', keys, wscores,
withscores=withscores)
def zpopbyrank(self, name, start, stop=None, withscores=False, desc=False):
'''Pop a range by rank.
'''
stop = stop if stop is not None else start
return self.execute_script('zpop', (name,), 'rank', start,
stop, int(desc), int(withscores),
withscores=withscores)
def zpopbyscore(self, name, start, stop=None, withscores=False,
desc=False):
'''Pop a range by score.
'''
stop = stop if stop is not None else start
return self.execute_script('zpop', (name,), 'score', start,
stop, int(desc), int(withscores),
withscores=withscores)
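def _extensions_usage_sketch(client):   # pragma nocover
    '''Illustrative only: a minimal sketch of the helpers defined above.
    ``client`` is assumed to be any connected redis client built with this
    mixin (for example the stdnet backend client); the key names are made
    up for the example.'''
    # Count, then delete, every key matching a pattern via the lua scripts.
    total = client.countpattern('example:*')
    client.delpattern('example:*')
    # Pop the two lowest ranked members of a sorted set, keeping the scores.
    popped = client.zpopbyrank('example:zset', 0, 1, withscores=True)
    return total, popped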
class RedisScriptMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super(RedisScriptMeta, cls).__new__
abstract = attrs.pop('abstract', False)
new_class = super_new(cls, name, bases, attrs)
if not abstract:
self = new_class(new_class.script, new_class.__name__)
_scripts[self.name] = self
return new_class
class RedisScript(RedisScriptMeta('_RS', (object,), {'abstract': True})):
    '''Class which helps sending and receiving lua scripts.
It uses the ``evalsha`` command.
.. attribute:: script
The lua script to run
.. attribute:: required_scripts
A list/tuple of other :class:`RedisScript` names required by this
script to properly execute.
.. attribute:: sha1
The SHA-1_ hexadecimal representation of :attr:`script` required by the
``EVALSHA`` redis command. This attribute is evaluated by the library,
it is not set by the user.
.. _SHA-1: http://en.wikipedia.org/wiki/SHA-1
'''
abstract = True
script = None
required_scripts = ()
def __init__(self, script, name):
if isinstance(script, (list, tuple)):
script = '\n'.join(script)
self.__name = name
self.script = script
rs = set((name,))
rs.update(self.required_scripts)
self.required_scripts = rs
@property
def name(self):
return self.__name
@property
def sha1(self):
if not hasattr(self, '_sha1'):
self._sha1 = sha1(self.script.encode('utf-8')).hexdigest()
return self._sha1
def __repr__(self):
return self.name if self.name else self.__class__.__name__
__str__ = __repr__
def preprocess_args(self, client, args):
return args
def callback(self, response, **options):
'''Called back after script execution.
This is the only method user should override when writing a new
:class:`RedisScript`. By default it returns ``response``.
:parameter response: the response obtained from the script execution.
:parameter options: Additional options for the callback.
'''
return response
def __call__(self, client, keys, args, options):
args = self.preprocess_args(client, args)
numkeys = len(keys)
keys_args = tuple(keys) + args
options.update({'script': self, 'redis_client': client})
return client.execute_command('EVALSHA', self.sha1, numkeys,
*keys_args, **options)
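# Illustrative only: a minimal sketch of how a new script could be added.
# The one line lua body is made up and is not part of stdnet; defining the
# subclass is enough, the metaclass above registers it under its class name
# so a client could run it with
# ``client.execute_script('echo_first_example', (), 'hello')``.
class echo_first_example(RedisScript):
    '''Return the first argument passed to the script (example only).'''
    script = '''\
return ARGV[1]
'''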
############################################################################
## BATTERY INCLUDED REDIS SCRIPTS
############################################################################
class countpattern(RedisScript):
script = '''\
return # redis.call('keys', ARGV[1])
'''
def preprocess_args(self, client, args):
if args and client.prefix:
args = tuple(('%s%s' % (client.prefix, a) for a in args))
return args
class delpattern(countpattern):
script = '''\
local n = 0
for i,key in ipairs(redis.call('keys', ARGV[1])) do
n = n + redis.call('del', key)
end
return n
'''
class zpop(RedisScript):
script = read_lua_file('commands.zpop')
def callback(self, response, withscores=False, **options):
if not response or not withscores:
return response
return zip(response[::2], map(float, response[1::2]))
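def _zpop_callback_example():   # pragma nocover
    '''Illustrative only: show how the ``withscores`` callback pairs a flat
    ``member, score, member, score`` reply. The values below are made up and
    assume responses already decoded to strings.'''
    pairs = get_script('zpop').callback(['a', '1.5', 'b', '2.0'],
                                        withscores=True)
    assert list(pairs) == [('a', 1.5), ('b', 2.0)]
    return pairs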
class zdiffstore(RedisScript):
script = read_lua_file('commands.zdiffstore')
class move2set(RedisScript):
script = (read_lua_file('commands.utils'),
read_lua_file('commands.move2set'))
class keyinfo(RedisScript):
script = read_lua_file('commands.keyinfo')
def preprocess_args(self, client, args):
if args and client.prefix:
a = ['%s%s' % (client.prefix, args[0])]
a.extend(args[1:])
args = tuple(a)
return args
def callback(self, response, redis_client=None, **options):
client = redis_client
if client.is_pipeline:
client = client.client
encoding = 'utf-8'
all_keys = []
for key, typ, length, ttl, enc, idle in response:
key = key.decode(encoding)[len(client.prefix):]
key = RedisKey(key=key, client=client,
type=typ.decode(encoding),
length=length,
ttl=ttl if ttl != -1 else False,
encoding=enc.decode(encoding),
idle=idle)
all_keys.append(key)
return all_keys
###############################################################################
## key info models
class RedisDbQuery(odm.QueryBase):
@property
def client(self):
return self.session.router[self.model].backend.client
def items(self):
client = self.client
info = yield client.info()
rd = []
for n, data in self.keyspace(info):
rd.append(self.instance(n, data))
yield rd
def get(self, db=None):
if db is not None:
info = yield self.client.info()
data = info.get('db%s' % db)
if data:
yield self.instance(db, data)
def keyspace(self, info):
n = 0
keyspace = info['Keyspace']
while keyspace:
info = keyspace.pop('db%s' % n, None)
if info:
yield n, info
n += 1
def instance(self, db, data):
rdb = self.model(db=int(db), keys=data['keys'],
expires=data['expires'])
rdb.session = self.session
return rdb
class RedisDbManager(odm.Manager):
'''Handler for gathering information from redis.'''
names = ('Server', 'Memory', 'Persistence',
'Replication', 'Clients', 'Stats', 'CPU')
converters = {'last_save_time': ('date', None),
'uptime_in_seconds': ('timedelta', 'uptime'),
'uptime_in_days': None}
query_class = RedisDbQuery
def __init__(self, *args, **kwargs):
self.formatter = kwargs.pop('formatter', RedisDataFormatter())
self._panels = OrderedDict()
super(RedisDbManager, self).__init__(*args, **kwargs)
@property
def client(self):
return self.backend.client
def panels(self):
info = yield self.client.info()
panels = {}
for name in self.names:
val = self.makepanel(name, info)
if val:
panels[name] = val
yield panels
def makepanel(self, name, info):
if name not in info:
return
pa = []
nicename = self.formatter.format_name
nicebool = self.formatter.format_bool
boolval = (0, 1)
for k, v in iteritems(info[name]):
add = True
if k in self.converters or isinstance(v, int):
fdata = self.converters.get(k, ('int', None))
if fdata:
formatter = getattr(self.formatter,
'format_{0}'.format(fdata[0]))
k = fdata[1] or k
v = formatter(v)
else:
add = False
elif v in boolval:
v = nicebool(v)
if add:
pa.append({'name': nicename(k),
'value': v})
return pa
    def delete(self, instance):
        '''Delete an instance by flushing the whole database.'''
        # ``flushdb`` may be supplied as an optional module-level hook (for
        # example an asynchronous implementation); fall back to the client
        # method when it is not available.
        flush = globals().get('flushdb')
        return flush(self.client) if flush else self.client.flushdb()
class KeyQuery(odm.QueryBase):
    '''A lazy query for keys in a redis database.'''
    db = None
    slice = None
def count(self):
return self.db.client.countpattern(self.pattern)
def filter(self, db=None):
self.db = db
return self
def all(self):
return list(self)
def delete(self):
return self.db.client.delpattern(self.pattern)
def __len__(self):
return self.count()
def __getitem__(self, slic):
o = copy(self)
if isinstance(slic, slice):
o.slice = slic
return o.all()
else:
return self[slic:slic + 1][0]
def __iter__(self):
db = self.db
c = db.client
if self.slice:
start, num = self.get_start_num(self.slice)
qs = c.execute_script('keyinfo', (), self.pattern, start, num)
else:
qs = c.execute_script('keyinfo', (), self.pattern)
for q in qs:
q.database = db
yield q
def get_start_num(self, slic):
start, step, stop = slic.start, slic.step, slic.stop
N = None
if stop is None or stop < 0:
N = self.count()
stop = stop or 0
stop += N
start = start or 0
if start < 0:
if N is None:
N = self.count()
start += N
return start + 1, stop - start
class RedisKeyManager(odm.Manager):
query_class = KeyQuery
def delete(self, instances):
if instances:
keys = tuple((instance.id for instance in instances))
return instances[0].client.delete(*keys)
class RedisDb(odm.StdModel):
db = odm.IntegerField(primary_key=True)
manager_class = RedisDbManager
def __unicode__(self):
return '%s' % self.db
class Meta:
attributes = ('keys', 'expires')
class RedisKey(odm.StdModel):
key = odm.SymbolField(primary_key=True)
db = odm.ForeignKey(RedisDb, related_name='all_keys')
manager_class = RedisKeyManager
def __unicode__(self):
return self.key
class Meta:
attributes = 'type', 'length', 'ttl', 'encoding', 'idle', 'client'
class RedisDataFormatter(object):
def format_bool(self, val):
return 'yes' if val else 'no'
def format_name(self, name):
return name
def format_int(self, val):
return format_int(val)
def format_date(self, dte):
try:
d = datetime.fromtimestamp(dte)
return d.isoformat().split('.')[0]
        except Exception:
return ''
def format_timedelta(self, td):
return td
|
bsd-3-clause
| -7,290,421,867,818,127,000 | 27.87796 | 79 | 0.533805 | false |
graziano-giuliani/pythoncode
|
pyuwphysret/common/pyfiles/atmos/mr2rho.py
|
1
|
1485
|
#!/usr/bin/env python
# mr2rho.py
import numpy as num
def mr2rho(w,t,p):
"""rho = mr2rho(w,t,p)
w in g/kg
t in K
p in mb
  This was provided by Holger Linne' from the Max Planck Institute.
Dave Turner
Pacific Northwest National Laboratory
Currently at the University of Wisconsin-Madison
dave.turner@pnl.gov
"""
rho = w * (p * 0.3477) / t
return rho
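def mr2rho_example():
  """Illustrative only: one made-up sounding level, w=10 g/kg at t=290 K and
  p=900 mb gives 10 * (900 * 0.3477) / 290, roughly 10.8."""
  return mr2rho(10.0, 290.0, 900.0)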
if __name__ == '__main__':
print(mr2rho.__doc__)
t = num.array(
( 24.54, 23.16, 21.67, 20.23, 18.86, 17.49, 16.10, 14.69, 13.22, 11.52,
9.53, 7.24, 4.80, 2.34, 0.04, -2.29, -4.84, -7.64,-10.66,-13.95,
-17.54,-21.45,-25.58,-29.90,-34.33,-38.94,-43.78,-48.80,-53.94,-58.79,
-63.27,-67.32,-70.74,-73.62,-75.74,-77.07,-77.43,-76.63,-75.06,-73.14,
-71.43 ))
t = t + 273.15
r = num.array(
( 17.78, 16.92, 15.93, 14.87, 13.78, 12.70, 11.84, 10.96, 10.15, 9.31,
8.46, 7.73, 7.05, 6.32, 5.62, 4.91, 4.10, 3.30, 2.67, 2.15,
1.66, 1.26, 0.95, 0.68, 0.45, 0.28, 0.17, 0.10, 0.06, 0.04,
0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01,
0.02 ))
p = num.array(
( 1012.0, 991.3, 969.1, 945.5, 920.4, 893.8, 865.7, 836.1, 805.1, 772.8,
739.5, 705.2, 670.3, 635.0, 599.7, 564.5, 529.8, 495.7, 462.6, 430.7,
400.0, 370.8, 343.0, 316.7, 292.0, 266.8, 247.2, 227.0, 208.2, 190.8,
174.7, 159.9, 146.2, 133.6, 121.9, 111.3, 101.5, 92.6, 84.4, 76.9,
70.0 ))
rho = mr2rho(r,t,p)
print(rho)
|
mit
| -3,557,386,664,818,324,000 | 33.534884 | 77 | 0.521212 | false |
felixwidmaier/bull
|
setup.py
|
1
|
1095
|
"""
Copyright 2017 Felix Widmaier
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.core import setup
setup(
name='bull',
version='2017.1',
packages=['bull.models', 'bull.optimizers', 'bull.layers',
'bull.layers.nn', 'bull.layers.rnn', 'bull.layers.conv', 'bull.core.util',
'bull.cost'],
url='https://github.com/felixwidmaier/bull',
license='Apache License 2.0',
author='Felix Widmaier',
author_email='dev.felixwidmaier@gmx.de',
    description='Up and coming machine learning library',
    requires=['numpy', 'h5py']
)
|
apache-2.0
| -2,376,815,182,173,399,600 | 35.5 | 88 | 0.697717 | false |
dannysellers/django_orders
|
load_accts.py
|
1
|
1434
|
import os
import csv
def fix_date(datestr):
"""
Dates in the account_list are MM/DD/YYYY, but Django's DateField
requires YYYY-MM-DD format
"""
_createdate = datestr.split('/')
if len(_createdate[2]) == 2:
_createdate[2] = '20' + str(_createdate[2])
_createdate = [_createdate[2], _createdate[0], _createdate[1]]
_createdate = '-'.join(_createdate)
return _createdate
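# Illustrative only: a small self-check for fix_date(); the dates below are
# made up and cover both two-digit and four-digit years.
def _fix_date_example():
    assert fix_date('3/15/14') == '2014-3-15'
    assert fix_date('03/15/2014') == '2014-03-15'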
def load_db(filename):
with open(filename, 'rU') as f:
_reader = csv.reader(f)
_fieldnames = _reader.next()
if _fieldnames:
_dictreader = csv.DictReader(f, fieldnames = _fieldnames)
_dictreader.next() # don't parse the first row again
for row in _dictreader:
name = row['Names']
acct = row['Acct']
createdate = fix_date(row['Date Created'])
add_customer(name=name, acct=acct, createdate=createdate)
# print("{} accounts loaded.".format(len(Customer.objects.all())))
def add_customer(name, acct, createdate, email='address@domain.com'):
c = Customer.objects.get_or_create(name = name, acct = acct, email = email,
status = 1, createdate = createdate)
return c
if __name__ == '__main__':
filename = raw_input('Account list to load? (default account_list.csv):\t')
if not filename:
filename = 'account_list.csv'
print("Loading accounts from {}".format(filename))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'order_tracker.settings')
from tracker.models import Customer
load_db(filename)
|
gpl-2.0
| -726,519,760,493,132,400 | 29.531915 | 76 | 0.677824 | false |
houssine78/addons
|
partner_wharehouse/__openerp__.py
|
1
|
1444
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Business Open Source Solution
# Copyright (C) 2018 Coop IT Easy SCRLfs.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Partner Warehouse",
"version": "9.0.1.0",
"depends": [
'base',
'sale',
'sale_stock',
],
"author": "Rémy TAYMANS <remy@coopiteasy.be>",
"category": "",
"website": "www.coopiteasy.be",
"license": "AGPL-3",
"description": """
    Let the warehouse of the sale order be set according to the default
    warehouse configured on the partner.
""",
'data': [
'views/res_partner.xml',
],
'installable': True,
}
|
agpl-3.0
| -7,691,341,484,493,901,000 | 34.195122 | 78 | 0.573112 | false |
googleapis/python-dialogflow-cx
|
tests/unit/gapic/dialogflowcx_v3beta1/test_sessions.py
|
1
|
65364
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3beta1.services.sessions import SessionsAsyncClient
from google.cloud.dialogflowcx_v3beta1.services.sessions import SessionsClient
from google.cloud.dialogflowcx_v3beta1.services.sessions import transports
from google.cloud.dialogflowcx_v3beta1.services.sessions.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.dialogflowcx_v3beta1.types import audio_config
from google.cloud.dialogflowcx_v3beta1.types import entity_type
from google.cloud.dialogflowcx_v3beta1.types import intent
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import session
from google.cloud.dialogflowcx_v3beta1.types import session_entity_type
from google.oauth2 import service_account
from google.protobuf import struct_pb2 # type: ignore
from google.type import latlng_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SessionsClient._get_default_mtls_endpoint(None) is None
assert SessionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
SessionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
SessionsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
SessionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert SessionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [SessionsClient, SessionsAsyncClient,])
def test_sessions_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize("client_class", [SessionsClient, SessionsAsyncClient,])
def test_sessions_client_service_account_always_use_jwt(client_class):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
client = client_class(credentials=creds)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.SessionsGrpcTransport, "grpc"),
(transports.SessionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_sessions_client_service_account_always_use_jwt_true(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
@pytest.mark.parametrize("client_class", [SessionsClient, SessionsAsyncClient,])
def test_sessions_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_sessions_client_get_transport_class():
transport = SessionsClient.get_transport_class()
available_transports = [
transports.SessionsGrpcTransport,
]
assert transport in available_transports
transport = SessionsClient.get_transport_class("grpc")
assert transport == transports.SessionsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SessionsClient, transports.SessionsGrpcTransport, "grpc"),
(SessionsAsyncClient, transports.SessionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
SessionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SessionsClient)
)
@mock.patch.object(
SessionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SessionsAsyncClient),
)
def test_sessions_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SessionsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SessionsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(SessionsClient, transports.SessionsGrpcTransport, "grpc", "true"),
(
SessionsAsyncClient,
transports.SessionsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(SessionsClient, transports.SessionsGrpcTransport, "grpc", "false"),
(
SessionsAsyncClient,
transports.SessionsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
SessionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SessionsClient)
)
@mock.patch.object(
SessionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SessionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_sessions_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SessionsClient, transports.SessionsGrpcTransport, "grpc"),
(SessionsAsyncClient, transports.SessionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_sessions_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SessionsClient, transports.SessionsGrpcTransport, "grpc"),
(SessionsAsyncClient, transports.SessionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_sessions_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_sessions_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.sessions.transports.SessionsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = SessionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_detect_intent(
transport: str = "grpc", request_type=session.DetectIntentRequest
):
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = session.DetectIntentResponse(
response_id="response_id_value",
output_audio=b"output_audio_blob",
response_type=session.DetectIntentResponse.ResponseType.PARTIAL,
allow_cancellation=True,
)
response = client.detect_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == session.DetectIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, session.DetectIntentResponse)
assert response.response_id == "response_id_value"
assert response.output_audio == b"output_audio_blob"
assert response.response_type == session.DetectIntentResponse.ResponseType.PARTIAL
assert response.allow_cancellation is True
def test_detect_intent_from_dict():
test_detect_intent(request_type=dict)
def test_detect_intent_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
client.detect_intent()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == session.DetectIntentRequest()
@pytest.mark.asyncio
async def test_detect_intent_async(
transport: str = "grpc_asyncio", request_type=session.DetectIntentRequest
):
client = SessionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
session.DetectIntentResponse(
response_id="response_id_value",
output_audio=b"output_audio_blob",
response_type=session.DetectIntentResponse.ResponseType.PARTIAL,
allow_cancellation=True,
)
)
response = await client.detect_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == session.DetectIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, session.DetectIntentResponse)
assert response.response_id == "response_id_value"
assert response.output_audio == b"output_audio_blob"
assert response.response_type == session.DetectIntentResponse.ResponseType.PARTIAL
assert response.allow_cancellation is True
@pytest.mark.asyncio
async def test_detect_intent_async_from_dict():
await test_detect_intent_async(request_type=dict)
def test_detect_intent_field_headers():
client = SessionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session.DetectIntentRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
call.return_value = session.DetectIntentResponse()
client.detect_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_detect_intent_field_headers_async():
client = SessionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session.DetectIntentRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.detect_intent), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
session.DetectIntentResponse()
)
await client.detect_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
def test_streaming_detect_intent(
transport: str = "grpc", request_type=session.StreamingDetectIntentRequest
):
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
requests = [request]
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_detect_intent), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iter([session.StreamingDetectIntentResponse()])
response = client.streaming_detect_intent(iter(requests))
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert next(args[0]) == request
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(message, session.StreamingDetectIntentResponse)
def test_streaming_detect_intent_from_dict():
test_streaming_detect_intent(request_type=dict)
@pytest.mark.asyncio
async def test_streaming_detect_intent_async(
transport: str = "grpc_asyncio", request_type=session.StreamingDetectIntentRequest
):
client = SessionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
requests = [request]
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_detect_intent), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[session.StreamingDetectIntentResponse()]
)
response = await client.streaming_detect_intent(iter(requests))
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert next(args[0]) == request
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, session.StreamingDetectIntentResponse)
@pytest.mark.asyncio
async def test_streaming_detect_intent_async_from_dict():
await test_streaming_detect_intent_async(request_type=dict)
def test_match_intent(transport: str = "grpc", request_type=session.MatchIntentRequest):
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.match_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = session.MatchIntentResponse(text="text_value",)
response = client.match_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == session.MatchIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, session.MatchIntentResponse)
def test_match_intent_from_dict():
test_match_intent(request_type=dict)
def test_match_intent_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.match_intent), "__call__") as call:
client.match_intent()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == session.MatchIntentRequest()
@pytest.mark.asyncio
async def test_match_intent_async(
transport: str = "grpc_asyncio", request_type=session.MatchIntentRequest
):
client = SessionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.match_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
session.MatchIntentResponse()
)
response = await client.match_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == session.MatchIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, session.MatchIntentResponse)
@pytest.mark.asyncio
async def test_match_intent_async_from_dict():
await test_match_intent_async(request_type=dict)
def test_match_intent_field_headers():
client = SessionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session.MatchIntentRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.match_intent), "__call__") as call:
call.return_value = session.MatchIntentResponse()
client.match_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_match_intent_field_headers_async():
client = SessionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session.MatchIntentRequest()
request.session = "session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.match_intent), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
session.MatchIntentResponse()
)
await client.match_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "session=session/value",) in kw["metadata"]
def test_fulfill_intent(
transport: str = "grpc", request_type=session.FulfillIntentRequest
):
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fulfill_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = session.FulfillIntentResponse(
response_id="response_id_value", output_audio=b"output_audio_blob",
)
response = client.fulfill_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == session.FulfillIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, session.FulfillIntentResponse)
assert response.response_id == "response_id_value"
assert response.output_audio == b"output_audio_blob"
def test_fulfill_intent_from_dict():
test_fulfill_intent(request_type=dict)
def test_fulfill_intent_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fulfill_intent), "__call__") as call:
client.fulfill_intent()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == session.FulfillIntentRequest()
@pytest.mark.asyncio
async def test_fulfill_intent_async(
transport: str = "grpc_asyncio", request_type=session.FulfillIntentRequest
):
client = SessionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fulfill_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
session.FulfillIntentResponse(
response_id="response_id_value", output_audio=b"output_audio_blob",
)
)
response = await client.fulfill_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == session.FulfillIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, session.FulfillIntentResponse)
assert response.response_id == "response_id_value"
assert response.output_audio == b"output_audio_blob"
@pytest.mark.asyncio
async def test_fulfill_intent_async_from_dict():
await test_fulfill_intent_async(request_type=dict)
def test_fulfill_intent_field_headers():
client = SessionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session.FulfillIntentRequest()
request.match_intent_request.session = "match_intent_request.session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fulfill_intent), "__call__") as call:
call.return_value = session.FulfillIntentResponse()
client.fulfill_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"match_intent_request.session=match_intent_request.session/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_fulfill_intent_field_headers_async():
client = SessionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session.FulfillIntentRequest()
request.match_intent_request.session = "match_intent_request.session/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fulfill_intent), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
session.FulfillIntentResponse()
)
await client.fulfill_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"match_intent_request.session=match_intent_request.session/value",
) in kw["metadata"]
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SessionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SessionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.SessionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SessionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = SessionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.SessionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.SessionsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SessionsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.SessionsGrpcTransport,)
def test_sessions_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.SessionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_sessions_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.sessions.transports.SessionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.SessionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"detect_intent",
"streaming_detect_intent",
"match_intent",
"fulfill_intent",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_sessions_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.sessions.transports.SessionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SessionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_sessions_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.sessions.transports.SessionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SessionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_sessions_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.sessions.transports.SessionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SessionsTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_sessions_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SessionsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_sessions_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SessionsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport,],
)
@requires_google_auth_gte_1_25_0
def test_sessions_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport,],
)
@requires_google_auth_lt_1_25_0
def test_sessions_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SessionsGrpcTransport, grpc_helpers),
(transports.SessionsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_sessions_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport],
)
def test_sessions_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
    # Check that if ssl_channel_credentials is not provided, then
    # client_cert_source_for_mtls is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_sessions_host_no_port():
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_sessions_host_with_port():
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_sessions_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SessionsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_sessions_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SessionsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport],
)
def test_sessions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.SessionsGrpcTransport, transports.SessionsGrpcAsyncIOTransport],
)
def test_sessions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_entity_type_path():
project = "squid"
location = "clam"
agent = "whelk"
entity_type = "octopus"
expected = "projects/{project}/locations/{location}/agents/{agent}/entityTypes/{entity_type}".format(
project=project, location=location, agent=agent, entity_type=entity_type,
)
actual = SessionsClient.entity_type_path(project, location, agent, entity_type)
assert expected == actual
def test_parse_entity_type_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"agent": "cuttlefish",
"entity_type": "mussel",
}
path = SessionsClient.entity_type_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_entity_type_path(path)
assert expected == actual
def test_flow_path():
project = "winkle"
location = "nautilus"
agent = "scallop"
flow = "abalone"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format(
project=project, location=location, agent=agent, flow=flow,
)
actual = SessionsClient.flow_path(project, location, agent, flow)
assert expected == actual
def test_parse_flow_path():
expected = {
"project": "squid",
"location": "clam",
"agent": "whelk",
"flow": "octopus",
}
path = SessionsClient.flow_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_flow_path(path)
assert expected == actual
def test_intent_path():
project = "oyster"
location = "nudibranch"
agent = "cuttlefish"
intent = "mussel"
expected = "projects/{project}/locations/{location}/agents/{agent}/intents/{intent}".format(
project=project, location=location, agent=agent, intent=intent,
)
actual = SessionsClient.intent_path(project, location, agent, intent)
assert expected == actual
def test_parse_intent_path():
expected = {
"project": "winkle",
"location": "nautilus",
"agent": "scallop",
"intent": "abalone",
}
path = SessionsClient.intent_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_intent_path(path)
assert expected == actual
def test_page_path():
project = "squid"
location = "clam"
agent = "whelk"
flow = "octopus"
page = "oyster"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/pages/{page}".format(
project=project, location=location, agent=agent, flow=flow, page=page,
)
actual = SessionsClient.page_path(project, location, agent, flow, page)
assert expected == actual
def test_parse_page_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"agent": "mussel",
"flow": "winkle",
"page": "nautilus",
}
path = SessionsClient.page_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_page_path(path)
assert expected == actual
def test_session_path():
project = "scallop"
location = "abalone"
agent = "squid"
session = "clam"
expected = "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}".format(
project=project, location=location, agent=agent, session=session,
)
actual = SessionsClient.session_path(project, location, agent, session)
assert expected == actual
def test_parse_session_path():
expected = {
"project": "whelk",
"location": "octopus",
"agent": "oyster",
"session": "nudibranch",
}
path = SessionsClient.session_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_session_path(path)
assert expected == actual
def test_session_entity_type_path():
project = "cuttlefish"
location = "mussel"
agent = "winkle"
session = "nautilus"
entity_type = "scallop"
expected = "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}/entityTypes/{entity_type}".format(
project=project,
location=location,
agent=agent,
session=session,
entity_type=entity_type,
)
actual = SessionsClient.session_entity_type_path(
project, location, agent, session, entity_type
)
assert expected == actual
def test_parse_session_entity_type_path():
expected = {
"project": "abalone",
"location": "squid",
"agent": "clam",
"session": "whelk",
"entity_type": "octopus",
}
path = SessionsClient.session_entity_type_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_session_entity_type_path(path)
assert expected == actual
def test_transition_route_group_path():
project = "oyster"
location = "nudibranch"
agent = "cuttlefish"
flow = "mussel"
transition_route_group = "winkle"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/transitionRouteGroups/{transition_route_group}".format(
project=project,
location=location,
agent=agent,
flow=flow,
transition_route_group=transition_route_group,
)
actual = SessionsClient.transition_route_group_path(
project, location, agent, flow, transition_route_group
)
assert expected == actual
def test_parse_transition_route_group_path():
expected = {
"project": "nautilus",
"location": "scallop",
"agent": "abalone",
"flow": "squid",
"transition_route_group": "clam",
}
path = SessionsClient.transition_route_group_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_transition_route_group_path(path)
assert expected == actual
def test_webhook_path():
project = "whelk"
location = "octopus"
agent = "oyster"
webhook = "nudibranch"
expected = "projects/{project}/locations/{location}/agents/{agent}/webhooks/{webhook}".format(
project=project, location=location, agent=agent, webhook=webhook,
)
actual = SessionsClient.webhook_path(project, location, agent, webhook)
assert expected == actual
def test_parse_webhook_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
"agent": "winkle",
"webhook": "nautilus",
}
path = SessionsClient.webhook_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_webhook_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = SessionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "abalone",
}
path = SessionsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "squid"
expected = "folders/{folder}".format(folder=folder,)
actual = SessionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "clam",
}
path = SessionsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "whelk"
expected = "organizations/{organization}".format(organization=organization,)
actual = SessionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "octopus",
}
path = SessionsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "oyster"
expected = "projects/{project}".format(project=project,)
actual = SessionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nudibranch",
}
path = SessionsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "cuttlefish"
location = "mussel"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = SessionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "winkle",
"location": "nautilus",
}
path = SessionsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SessionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.SessionsTransport, "_prep_wrapped_messages"
) as prep:
client = SessionsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.SessionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = SessionsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
|
apache-2.0
| 7,197,941,075,215,062,000 | 36.350857 | 139 | 0.661802 | false |
zjj/trac_hack
|
trac/db_default.py
|
1
|
13648
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <daniel@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
from trac.db import Table, Column, Index
# Database version identifier. Used for automatic upgrades.
db_version = 26
def __mkreports(reports):
"""Utility function used to create report data in same syntax as the
default data. This extra step is done to simplify editing the default
reports."""
result = []
for report in reports:
result.append((None, report[0], report[2], report[1]))
return result
##
## Database schema
##
schema = [
# Common
Table('system', key='name')[
Column('name'),
Column('value')],
Table('permission', key=('username', 'action'))[
Column('username'),
Column('action')],
Table('auth_cookie', key=('cookie', 'ipnr', 'name'))[
Column('cookie'),
Column('name'),
Column('ipnr'),
Column('time', type='int')],
Table('session', key=('sid', 'authenticated'))[
Column('sid'),
Column('authenticated', type='int'),
Column('last_visit', type='int'),
Index(['last_visit']),
Index(['authenticated'])],
Table('session_attribute', key=('sid', 'authenticated', 'name'))[
Column('sid'),
Column('authenticated', type='int'),
Column('name'),
Column('value')],
Table('cache', key='id')[
Column('id'),
Column('generation', type='int')],
# Attachments
Table('attachment', key=('type', 'id', 'filename'))[
Column('type'),
Column('id'),
Column('filename'),
Column('size', type='int'),
Column('time', type='int64'),
Column('description'),
Column('author'),
Column('ipnr')],
# Wiki system
Table('wiki', key=('name', 'version'))[
Column('name'),
Column('version', type='int'),
Column('time', type='int64'),
Column('author'),
Column('ipnr'),
Column('text'),
Column('comment'),
Column('readonly', type='int'),
Index(['time'])],
# Wiki permission
    Table('wiki_permission', key=('username', 'pagename'))[
        Column('username'),
        Column('pagename'),
        Column('perm_v', type='int'),
        Column('perm_w', type='int')],
# Version control cache
Table('repository', key=('id', 'name'))[
Column('id', type='int'),
Column('name'),
Column('value')],
Table('revision', key=('repos', 'rev'))[
Column('repos', type='int'),
Column('rev', key_size=20),
Column('time', type='int64'),
Column('author'),
Column('message'),
Index(['repos', 'time'])],
Table('node_change', key=('repos', 'rev', 'path', 'change_type'))[
Column('repos', type='int'),
Column('rev', key_size=20),
Column('path', key_size=255),
Column('node_type', size=1),
Column('change_type', size=1, key_size=2),
Column('base_path'),
Column('base_rev'),
Index(['repos', 'rev'])],
# Ticket system
Table('ticket', key='id')[
Column('id', auto_increment=True),
Column('type'),
Column('time', type='int64'),
Column('changetime', type='int64'),
Column('component'),
Column('severity'),
Column('priority'),
Column('owner'),
Column('reporter'),
Column('cc'),
Column('version'),
Column('milestone'),
Column('status'),
Column('resolution'),
Column('summary'),
Column('description'),
Column('keywords'),
Index(['time']),
Index(['status'])],
Table('ticket_change', key=('ticket', 'time', 'field'))[
Column('ticket', type='int'),
Column('time', type='int64'),
Column('author'),
Column('field'),
Column('oldvalue'),
Column('newvalue'),
Index(['ticket']),
Index(['time'])],
Table('ticket_custom', key=('ticket', 'name'))[
Column('ticket', type='int'),
Column('name'),
Column('value')],
Table('enum', key=('type', 'name'))[
Column('type'),
Column('name'),
Column('value')],
Table('component', key='name')[
Column('name'),
Column('owner'),
Column('description')],
Table('milestone', key='name')[
Column('name'),
Column('due', type='int64'),
Column('completed', type='int64'),
Column('description')],
Table('version', key='name')[
Column('name'),
Column('time', type='int64'),
Column('description')],
# Report system
Table('report', key='id')[
Column('id', auto_increment=True),
Column('author'),
Column('title'),
Column('query'),
Column('description')],
]
##
## Default Reports
##
def get_reports(db):
return (
('Active Tickets',
"""
* List all active tickets by priority.
* Color each row based on priority.
""",
"""
SELECT p.value AS __color__,
id AS ticket, summary, component, version, milestone, t.type AS type,
owner, status,
time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status <> 'closed'
ORDER BY """ + db.cast('p.value', 'int') + """, milestone, t.type, time
"""),
#----------------------------------------------------------------------------
('Active Tickets by Version',
"""
This report shows how to color results by priority,
while grouping results by version.
Last modification time, description and reporter are included as hidden fields
for useful RSS export.
""",
"""
SELECT p.value AS __color__,
version AS __group__,
id AS ticket, summary, component, version, t.type AS type,
owner, status,
time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status <> 'closed'
ORDER BY (version IS NULL),version, """ + db.cast('p.value', 'int') +
""", t.type, time
"""),
#----------------------------------------------------------------------------
('Active Tickets by Milestone',
"""
This report shows how to color results by priority,
while grouping results by milestone.
Last modification time, description and reporter are included as hidden fields
for useful RSS export.
""",
"""
SELECT p.value AS __color__,
%s AS __group__,
id AS ticket, summary, component, version, t.type AS type,
owner, status,
time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status <> 'closed'
ORDER BY (milestone IS NULL),milestone, %s, t.type, time
""" % (db.concat("'Milestone '", 'milestone'), db.cast('p.value', 'int'))),
#----------------------------------------------------------------------------
('Accepted, Active Tickets by Owner',
"""
List accepted tickets, group by ticket owner, sorted by priority.
""",
"""
SELECT p.value AS __color__,
owner AS __group__,
id AS ticket, summary, component, milestone, t.type AS type, time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status = 'accepted'
ORDER BY owner, """ + db.cast('p.value', 'int') + """, t.type, time
"""),
#----------------------------------------------------------------------------
('Accepted, Active Tickets by Owner (Full Description)',
"""
List tickets accepted, group by ticket owner.
This report demonstrates the use of full-row display.
""",
"""
SELECT p.value AS __color__,
owner AS __group__,
id AS ticket, summary, component, milestone, t.type AS type, time AS created,
description AS _description_,
changetime AS _changetime, reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status = 'accepted'
ORDER BY owner, """ + db.cast('p.value', 'int') + """, t.type, time
"""),
#----------------------------------------------------------------------------
('All Tickets By Milestone (Including closed)',
"""
A more complex example to show how to make advanced reports.
""",
"""
SELECT p.value AS __color__,
t.milestone AS __group__,
(CASE status
WHEN 'closed' THEN 'color: #777; background: #ddd; border-color: #ccc;'
ELSE
(CASE owner WHEN $USER THEN 'font-weight: bold' END)
END) AS __style__,
id AS ticket, summary, component, status,
resolution,version, t.type AS type, priority, owner,
changetime AS modified,
time AS _time,reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
ORDER BY (milestone IS NULL), milestone DESC, (status = 'closed'),
(CASE status WHEN 'closed' THEN changetime ELSE (-1) * %s END) DESC
""" % db.cast('p.value', 'int')),
#----------------------------------------------------------------------------
('My Tickets',
"""
This report demonstrates the use of the automatically set
USER dynamic variable, replaced with the username of the
logged in user when executed.
""",
"""
SELECT p.value AS __color__,
(CASE status WHEN 'accepted' THEN 'Accepted' ELSE 'Owned' END) AS __group__,
id AS ticket, summary, component, version, milestone,
t.type AS type, priority, time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE t.status <> 'closed' AND owner = $USER
ORDER BY (status = 'accepted') DESC, """ + db.cast('p.value', 'int') +
""", milestone, t.type, time
"""),
#----------------------------------------------------------------------------
('Active Tickets, Mine first',
"""
* List all active tickets by priority.
* Show all tickets owned by the logged in user in a group first.
""",
"""
SELECT p.value AS __color__,
(CASE owner
WHEN $USER THEN 'My Tickets'
ELSE 'Active Tickets'
END) AS __group__,
id AS ticket, summary, component, version, milestone, t.type AS type,
owner, status,
time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status <> 'closed'
ORDER BY (COALESCE(owner, '') = $USER) DESC, """
+ db.cast('p.value', 'int') + """, milestone, t.type, time
"""))
##
## Default database values
##
# (table, (column1, column2), ((row1col1, row1col2), (row2col1, row2col2)))
def get_data(db):
return (('component',
('name', 'owner'),
(('component1', 'somebody'),
('component2', 'somebody'))),
('milestone',
('name', 'due', 'completed'),
(('milestone1', 0, 0),
('milestone2', 0, 0),
('milestone3', 0, 0),
('milestone4', 0, 0))),
('version',
('name', 'time'),
(('1.0', 0),
('2.0', 0))),
('enum',
('type', 'name', 'value'),
(('resolution', 'fixed', 1),
('resolution', 'invalid', 2),
('resolution', 'wontfix', 3),
('resolution', 'duplicate', 4),
('resolution', 'worksforme', 5),
('priority', 'blocker', 1),
('priority', 'critical', 2),
('priority', 'major', 3),
('priority', 'minor', 4),
('priority', 'trivial', 5),
('ticket_type', 'defect', 1),
('ticket_type', 'enhancement', 2),
('ticket_type', 'task', 3))),
('permission',
('username', 'action'),
(('anonymous', 'LOG_VIEW'),
('anonymous', 'FILE_VIEW'),
('anonymous', 'WIKI_VIEW'),
('authenticated', 'WIKI_CREATE'),
('authenticated', 'WIKI_MODIFY'),
('anonymous', 'SEARCH_VIEW'),
('anonymous', 'REPORT_VIEW'),
('anonymous', 'REPORT_SQL_VIEW'),
('anonymous', 'TICKET_VIEW'),
('authenticated', 'TICKET_CREATE'),
('authenticated', 'TICKET_MODIFY'),
('anonymous', 'BROWSER_VIEW'),
('anonymous', 'TIMELINE_VIEW'),
('anonymous', 'CHANGESET_VIEW'),
('anonymous', 'ROADMAP_VIEW'),
('anonymous', 'MILESTONE_VIEW'))),
('system',
('name', 'value'),
(('database_version', str(db_version)),
('initial_database_version', str(db_version)))),
('report',
('author', 'title', 'query', 'description'),
__mkreports(get_reports(db))))
|
bsd-3-clause
| -2,227,831,291,867,234,300 | 32.698765 | 80 | 0.543083 | false |
sredmond/menlo-mathsci-help
|
db_repository/versions/006_migration.py
|
1
|
1180
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
user = Table('user', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('email', String(length=120)),
Column('hashed_password', String(length=60)),
Column('first_name', String(length=20)),
Column('last_name', String(length=20)),
Column('grade', SmallInteger),
Column('created', DateTime),
Column('last_logged_in', DateTime),
Column('role', SmallInteger, default=ColumnDefault(0)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
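    # Add the new user-tracking columns ('created' and 'last_logged_in') to the user table.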
post_meta.tables['user'].columns['created'].create()
post_meta.tables['user'].columns['last_logged_in'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['user'].columns['created'].drop()
post_meta.tables['user'].columns['last_logged_in'].drop()
|
mit
| -7,481,781,320,161,326,000 | 32.714286 | 68 | 0.689831 | false |
abalakh/robottelo
|
docs/conf.py
|
1
|
3707
|
"""Sphinx documentation generator configuration file.
The full set of configuration options is listed on the Sphinx website:
http://sphinx-doc.org/config.html
"""
import sys
import os
# pylint:disable=invalid-name
# Add the Robottelo root directory to the system path. This allows references
# such as :mod:`robottelo` to be processed correctly.
sys.path.insert(
0,
os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.path.pardir
))
)
# Project Information ---------------------------------------------------------
project = u'Robottelo'
copyright = u'2012, Og Maciel <omaciel@redhat.com>' # pylint:disable=W0622
# `version` should be a short X.Y version, and `release` should be a full
# version string. Robottelo has thus far had little use for versions, which is
# why it is still at 0.0.1.
version = '0.0.1'
release = version
# General configuration -------------------------------------------------------
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['_build']
nitpicky = True
nitpick_ignore = [
('py:obj', 'bool'),
('py:obj', 'dict'),
('py:obj', 'int'),
('py:obj', 'sequence'),
('py:obj', 'str'),
('py:obj', 'tuple'),
]
autodoc_default_flags = ['members', 'undoc-members']
# Format-Specific Options -----------------------------------------------------
htmlhelp_basename = 'Robottelodoc'
latex_documents = [(
'index',
'Robottelo.tex',
u'Robottelo Documentation',
u'Og Maciel \\textless{}omaciel@redhat.com\\textgreater{}',
'manual'
)]
man_pages = [(
'index',
'robottelo',
u'Robottelo Documentation',
[u'Og Maciel <omaciel@redhat.com>'],
1
)]
texinfo_documents = [(
'index',
'Robottelo',
u'Robottelo Documentation',
u'Og Maciel <omaciel@redhat.com>',
'Robottelo',
'One line description of project.',
'Miscellaneous'
)]
epub_title = u'Robottelo'
epub_author = u'Og Maciel <omaciel@redhat.com>'
epub_publisher = u'Og Maciel <omaciel@redhat.com>'
epub_copyright = u'2012, Og Maciel <omaciel@redhat.com>'
# -- Monkey patch ddt.data ----------------------------------------------------
# Monkey patch ddt.data and robottelo's ddt.data wrapper, preventing the
# decorators from generating new test methods. Without this monkey patch,
# Sphinx might document the following methods:
#
# * test_something_1_some_value
# * test_something_2_another_value
# * test_something_3_yet_another_value
#
# But with this monkey patch, Sphinx will document only one test method:
#
# * test_something
#
# As a result, the API documentation is much more concise.
import ddt # noqa
import robottelo.decorators # noqa pylint:disable=import-error
# robottelo.common.decorators can only be imported if the `sys.path.insert` at
# the top of this document is executed. pylint tries to be a static code
# analyzer, so that does not happen, and it therefore cannot find this module.
def monkey_data(*values):
"""Monkey patch function for ddt.data
    This function bypasses ddt.data functionality and allows Sphinx to generate
    cleaner docs.
"""
    # It's OK that the ``values`` argument is unused. This function just needs to
# match the signature of ``ddt.data``.
# pylint:disable=unused-argument
return lambda func: func
# Cache the robottelo wrapper docstring
robottelo_data_docstring = robottelo.decorators.data.__doc__
# Do the monkey patch on ddt.data and robottelo wrapper
ddt.data = robottelo.decorators.data = monkey_data
# Copy back the docstring to allow Sphinx generate the documentation for the
# robottelo wrapper
robottelo.decorators.data.__doc__ = robottelo_data_docstring
|
gpl-3.0
| 2,605,090,931,222,616,600 | 29.138211 | 79 | 0.661991 | false |
bigswitch/nova
|
nova/tests/functional/api_sample_tests/test_preserve_ephemeral_rebuild.py
|
1
|
3070
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import api as compute_api
import nova.conf
from nova.tests.functional.api_sample_tests import test_servers
from nova.tests.unit.image import fake
CONF = nova.conf.CONF
class PreserveEphemeralOnRebuildJsonTest(test_servers.ServersSampleBase):
extension_name = 'os-preserve-ephemeral-rebuild'
def _get_flags(self):
f = super(PreserveEphemeralOnRebuildJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.preserve_ephemeral_rebuild.'
'Preserve_ephemeral_rebuild')
return f
def _test_server_rebuild_preserve_ephemeral(self, value, resp_tpl=None):
uuid = self._post_server()
image = fake.get_valid_image_id()
subs = {'host': self._get_host(),
'uuid': image,
'name': 'foobar',
'pass': 'seekr3t',
'hostid': '[a-f0-9]+',
'preserve_ephemeral': str(value).lower(),
'action': 'rebuild',
'glance_host': self._get_glance_host(),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': '80fe::'
}
old_rebuild = compute_api.API.rebuild
def fake_rebuild(self_, context, instance, image_href, admin_password,
files_to_inject=None, **kwargs):
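            # Verify the preserve_ephemeral flag is passed through to the compute API.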
self.assertEqual(kwargs['preserve_ephemeral'], value)
if resp_tpl:
return old_rebuild(self_, context, instance, image_href,
admin_password, files_to_inject=None,
**kwargs)
self.stub_out('nova.compute.api.API.rebuild', fake_rebuild)
response = self._do_post('servers/%s/action' % uuid,
'server-action-rebuild-preserve-ephemeral',
subs)
if resp_tpl:
del subs['uuid']
self._verify_response(resp_tpl, subs, response, 202)
else:
self.assertEqual(202, response.status_code)
def test_server_rebuild_preserve_ephemeral_true(self):
self._test_server_rebuild_preserve_ephemeral(True)
def test_server_rebuild_preserve_ephemeral_false(self):
self._test_server_rebuild_preserve_ephemeral(False,
resp_tpl='server-action-rebuild-preserve-ephemeral-resp')
|
apache-2.0
| 4,872,734,969,193,002,000 | 41.054795 | 78 | 0.606515 | false |