id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value) |
---|---|---|
1895060
|
<reponame>deepkick/PyRockSim<filename>py4science-master/src/chapter4/module_1.py
# File name: module_1.py
print("module_1 is imported.")
wgt = 60.5  # initial weight [kg]
def teacher(x):
    if x > 60:
        print("You are overweight")
    else:
        print("Your weight is appropriate")
|
StarcoderdataPython
|
278042
|
<filename>chapter_8/D/resolve.py
def resolve():
    '''Read N coordinate pairs and sort them by their first value.'''
    N = int(input())
    A_list = [[int(item) for item in input().split()] for _ in range(N)]
    A_list.sort(key=lambda x: x[0])


if __name__ == "__main__":
    resolve()
|
StarcoderdataPython
|
1827770
|
<reponame>optimor/reverse-proxy
from django import forms

from .models import ProxySite


class SelectSiteForm(forms.Form):
    proxy_site = forms.ModelChoiceField(queryset=ProxySite.objects.all())

    class Meta:
        fields = ["proxy_site"]


class ProxySiteForm(forms.ModelForm):
    class Meta:
        model = ProxySite
        exclude = []

    def clean(self):
        # Require that the subdomain name and full URL are either both set or both empty.
        cleaned_data = super(ProxySiteForm, self).clean()
        subdomain_name = cleaned_data.get("subdomain_name")
        subdomain_full_url = cleaned_data.get("subdomain_full_url")
        if any([subdomain_name, subdomain_full_url]) and not all(
            [subdomain_name, subdomain_full_url]
        ):
            self.add_error("subdomain_name", "Both fields must be filled in.")
            self.add_error("subdomain_full_url", "Both fields must be filled in.")
|
StarcoderdataPython
|
119325
|
<filename>trainer/__init__.py<gh_stars>1-10
from .train import train
from .preview import preview
from logging import NullHandler, getLogger
getLogger(__name__).addHandler(NullHandler())
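# Background note: attaching a NullHandler to the package logger is the standard
# library-logging convention: the package stays silent until the application
# configures logging itself, roughly:
#
#     import logging
#     import trainer
#
#     logging.basicConfig(level=logging.INFO)  # records logged under "trainer" now reach the root handler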
|
StarcoderdataPython
|
6595946
|
import os, time, shutil, datetime
from stat import S_ISDIR, S_ISREG
from sphinx.application import Sphinx
from sphinx.util.docutils import docutils_namespace, patch_docutils
from . import config
# class FileSnapshot:
# '''An object that represents a snapshot in time of a File'''
# def __init__(self, lastModified:int, path:str):
# '''
# Parameters
# ------------
# lastModified
# A int value that represents the time the File object was created
# path
# A string value that represents the path of the file for the file object
# '''
# self.last_modified = lastModified
# self.path = path
# def _is_valid_operand(self, other):
# return hasattr(other, "last_modified")
# def __ne__(self, other) -> bool:
# if self._is_valid_operand(other):
# return self.last_modified != other.last_modified
# else:
# return NotImplemented
class DirectorySnapshot:
'''An object that represents a snapshot in time of files in a directory'''
def __init__(self, path:str) -> None:
'''
Parameters
-----------
path : str
The directory path to take a snapshot of
'''
self._snapshot = {}
self.path = path
self.walk(self.path)
self.modified_path = None
def walk(self, path):
'''
Recursively walks the snapshot directory saving the times a file has been modified last and the path of the file
Parameters
-----------
path : str
The directory path to walk
'''
for f in os.listdir(path):
pathname = os.path.join(path,f)
mode = os.stat(pathname).st_mode
if S_ISDIR(mode):
self.walk(pathname)
elif S_ISREG(mode):
file = (os.stat(pathname).st_mtime, pathname)
# file = FileSnapshot(os.stat(pathname).st_mtime, pathname)
self._snapshot[os.stat(pathname).st_ino] = file
def _is_valid_operand(self, other):
if hasattr(other, "_snapshot"):
if len(other._snapshot) > 0:
return True
return False
def __ne__(self, other) -> bool:
if self._is_valid_operand(other):
for i in self._snapshot.keys():
if other._snapshot[i][0] != self._snapshot[i][0]:
self.modified_path = self._snapshot[i][1]
return True
return False
else:
return NotImplemented
class Watcher:
def __init__(self, source_path, build_path, interval = .5) -> None:
self.source_path = source_path
self.build_path = build_path
self.interval = interval
def run(self):
print("\nstarting watcher")
global lastBuild
try:
while True:
app = None
snapshot_one:DirectorySnapshot = DirectorySnapshot(self.source_path)
time.sleep(self.interval)
snapshot_two:DirectorySnapshot = DirectorySnapshot(self.source_path)
if snapshot_one != snapshot_two:
print(u"\u001b[38;5;201mchange detected at source directory \u279D rebuilding project ({})\u001b[0m".format(datetime.datetime.now().time().strftime("%H:%m:%S")))
print(u" \u001b[38;5;208m\u21B3 {}\n\u001b[0m".format(snapshot_one.modified_path))
shutil.rmtree(self.build_path, ignore_errors=True)
with patch_docutils(self.source_path), docutils_namespace():
app = Sphinx(srcdir = self.source_path, confdir = self.source_path, doctreedir = self.build_path, outdir = self.build_path, buildername="html", status=None)
app.add_js_file(None, body="""const myInterval=setInterval(reloader,1000);function reloader(){fetch("/__reloader__").then((res)=>{res.ok?205===res.status&&window.location.reload():clearInterval(myInterval)}).catch(e=>{clearInterval(myInterval)})}""")
app.build()
if app:
if app.statuscode == 0:
config.lastBuild = time.time()
print(u"\u001b[38;5;28mbuild created, refreshing webpage on next check\n\u001b[0m")
except KeyboardInterrupt:
raise KeyboardInterrupt
|
StarcoderdataPython
|
3239758
|
from typing import Any, Dict
from boto3 import Session
from app import Remediation
from app.remediation_base import RemediationBase
@Remediation
class AwsDdbEncryptTable(RemediationBase):
    """Remediation that enables KMS server-side encryption on a DynamoDB table"""

    @classmethod
    def _id(cls) -> str:
        return 'DDB.EncryptTable'

    @classmethod
    def _parameters(cls) -> Dict[str, str]:
        return {}

    @classmethod
    def _fix(cls, session: Session, resource: Dict[str, Any], parameters: Dict[str, str]) -> None:
        # Switch the table to KMS-managed server-side encryption.
        session.client('dynamodb').update_table(
            TableName=resource['Name'],
            SSESpecification={
                'Enabled': True,
                'SSEType': 'KMS'
            }
        )
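# A minimal invocation sketch, assuming the framework passes a boto3 Session and a
# resource dict that carries the table name under "Name" (the region and table name
# below are hypothetical):
#
#     from boto3 import Session
#     session = Session(region_name="us-east-1")
#     AwsDdbEncryptTable._fix(session, {"Name": "my-table"}, AwsDdbEncryptTable._parameters())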
|
StarcoderdataPython
|
80898
|
<reponame>noaa-ocs-hydrography/brute
from nbs.bruty.world_raster_database import *
def slow_process():
# slow_tif = r"C:\Data\\H13222_MB_50cm_MLLW_1of1_interp_1.csar.tif"
# slow_tif = r"\\nos.noaa\ocs\HSD\Projects\NBS\NBS_Data\PBG_Gulf_UTM14N_MLLW\NOAA_NCEI_OCS\BAGs\Manual\H13222_MB_50cm_MLLW_1of1_interp_1.csar.tif"
# slow_bag = r"\\nos.noaa\ocs\HSD\Projects\NBS\NBS_Data\PBG_Gulf_UTM14N_MLLW\NOAA_NCEI_OCS\BAGs\Original\H13222\H13222_MB_50cm_MLLW_1of1.bag"
# slow_points = r"\\nos.noaa\OCS\HSD\Projects\NBS\NBS_Data_Pre-Review\PBG_Gulf_UTM16N_MLLW\GMRT\TOPO-MASK\Manual\PBG_Gulf_UTM16N_MLLW_16m_0_clip.bruty.npy"
npy = r"C:\Data\nbs\utm14\2016_NCMP_TX_14RPP7984_BareEarth_1mGrid_transformed.bruty.npy"
gpkg = r"C:\Data\nbs\utm14\2016_NCMP_TX_14RPP7992_BareEarth_1mGrid_transformed.bruty.gpkg"
bag_name = r"C:\Data\nbs\utm14\F00734_4m_MLLW_Xof4.bag"
tif = r"C:\Data\nbs\utm14\VT_06_MTT_20150305_CS_B_12_MLT_PARTIAL_4m_interp_3.csar.tif"
db_path = r"C:\Data\nbs\array_speed_testing"
if os.path.exists(db_path):
shutil.rmtree(db_path, onerror=onerr)
db = WorldDatabase(
UTMTileBackendExactRes(4, 4, 26914, RasterHistory, DiskHistory, TiffStorage, db_path))
trans_id = db.add_transaction_group("INSERT", datetime.now())
time.sleep(3)
trans_id2 = db.add_transaction_group("INSERT", datetime.now())
time.sleep(4)
trans_id3 = db.add_transaction_group("NOT_REMOVE", datetime.now())
db.insert_survey(gpkg, override_epsg=26914, contrib_id=1, survey_score=1, transaction_id=trans_id)
db.insert_survey(tif, override_epsg=26914, contrib_id=2, survey_score=1, transaction_id=trans_id)
db.insert_survey(npy, override_epsg=26914, contrib_id=3, survey_score=1, transaction_id=trans_id2)
db.insert_survey(bag_name, override_epsg=26914, contrib_id=4, survey_score=1, transaction_id=trans_id3)
def try_sqlite():
# update a metadata.pickle to sqlite
for pickle_file in (pathlib.Path(r"E:\bruty_databases\pbg_gulf_utm14n_mllw\wdb_metadata.pickle"),
pathlib.Path(r"E:\bruty_databases\pbg_gulf_utm16n_mllw\wdb_metadata.pickle"),
):
# "C:\data\nbs\pbg14_metadata_not_json.pickle")
meta = pickle.load(open(pickle_file, 'rb'))
pickle.dump({'class': meta['class'], 'module': meta['module']}, open(pickle_file.with_suffix(".class"), "wb"))
metadb = IncludedIds(pickle_file.with_suffix(".sqlite"))
for pth, rec in list(meta['survey_paths'].items()):
nbs_id = rec[0]
record = list(rec[1:])
metadb[nbs_id] = [pth] + record
# print(list(metadb.keys()))
# print(metadb[564364])
# metasurv = IncludedSurveys(r"C:\data\nbs\pbg14_metadata_not_json.db")
# print(metasurv[metadb[564364][0]])
# del metadb[564364]
metadb = StartedIds(pickle_file.with_suffix(".sqlite"))
for pth, rec in list(meta['started_paths'].items()):
nbs_id = rec[0]
record = list(rec[1:])
metadb[nbs_id] = [pth] + record[:2]
# print(list(metadb.keys()))
# print(metadb[564364])
# metasurv = StartedSurveys(r"C:\data\nbs\pbg14_metadata_not_json.db")
# print(metasurv[metadb[564364][0]])
del metadb
def csar_conversions():
# test reading the new geopackage, npy and tifs from fuse
paths = [
'\\\\nos.noaa\\OCS\\HSD\\Projects\\NBS\\NBS_Data\\PBG_Gulf_UTM14N_MLLW\\USACE\\eHydro_Galveston_CESWG\\Manual\\GI_24_BIL_20210514_CS_4m_interp.csar.tif',
# r"C:\Data\nbs\geopackage_samples\H12425_MB_50cm_MLLW_6of17.tif",
# r"C:\Data\nbs\geopackage_samples\H12425_MB_50cm_MLLW_7of17.tif",
# r"C:\Data\nbs\geopackage_samples\H12425_MB_50cm_MLLW_12of17.tif",
# r"C:\Data\nbs\geopackage_samples\2020_NCMP_PostSally_AL_16RDU3447_BareEarth_1mGrid_transformed.gpkg",
# r"C:\Data\nbs\geopackage_samples\2020_NCMP_PostSally_AL_16RDU3447_BareEarth_1mGrid_transformed.npy",
# r"C:\Data\nbs\geopackage_samples\2020_NCMP_PostSally_AL_16RDU3546_BareEarth_1mGrid_transformed.gpkg",
# r"C:\Data\nbs\geopackage_samples\2020_NCMP_PostSally_AL_16RDU3546_BareEarth_1mGrid_transformed.npy",
# r"C:\Data\nbs\geopackage_samples\H11835_MB_50cm_MLLW_1of5.tif",
# r"C:\Data\nbs\geopackage_samples\H11835_VB_2m_MLLW_5of5.tif",
# r"C:\Data\nbs\geopackage_samples\H13133_MB_1m_MLLW_3of3.tif",
]
resx = resy = 32
# NAD83 zone 19 = 26919. WGS84 would be 32619
epsg = 26914
# use this to align the database to something else (like caris for testing)
offset_x = 0
offset_y = 0
db_path = r"C:\Data\nbs\test_remove_reinsert\utm16_new_csar_exports"
db = WorldDatabase(
UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y,
zoom_level=10))
for contrib_id, survey_path in enumerate(paths):
db.insert_survey(survey_path, override_epsg=epsg, contrib_id=contrib_id, survey_score=contrib_id)
db_path = r"C:\Data\nbs\test_remove_reinsert\utm16_npy_exports"
db = WorldDatabase(
UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y,
zoom_level=10))
for contrib_id, survey_path in enumerate(paths):
if ".npy" in survey_path.lower():
db.insert_survey(survey_path, override_epsg=epsg, contrib_id=contrib_id, survey_score=contrib_id)
db_path = r"C:\Data\nbs\test_remove_reinsert\utm16_gpkg_exports"
db = WorldDatabase(
UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y,
zoom_level=10))
for contrib_id, survey_path in enumerate(paths):
if ".gpkg" in survey_path.lower():
db.insert_survey(survey_path, override_epsg=epsg, contrib_id=contrib_id, survey_score=contrib_id)
db_path = r"C:\Data\nbs\test_remove_reinsert\utm16_tif_exports"
db = WorldDatabase(
UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y,
zoom_level=10))
for contrib_id, survey_path in enumerate(paths):
if ".tif" in survey_path.lower():
db.insert_survey(survey_path, override_epsg=epsg, contrib_id=contrib_id, survey_score=contrib_id)
def try_removal():
# test removing real data
paths = [
r"\\nos.noaa\OCS\HSD\Projects\NBS\NBS_Data\PBC_Northeast_UTM19N_MLLW\NOAA_NCEI_OCS\BPS\Processed\H09170.csar",
r"\\nos.noaa\OCS\HSD\Projects\NBS\NBS_Data\PBC_Northeast_UTM19N_MLLW\NOAA_NCEI_OCS\BPS\Processed\H10350.csar",
r"\\nos.noaa\OCS\HSD\Projects\NBS\NBS_Data\PBC_Northeast_UTM19N_MLLW\NOAA_NCEI_OCS\BPS\Processed\H06443.csar",
r"\\nos.noaa\OCS\HSD\Projects\NBS\NBS_Data\PBC_Northeast_UTM19N_MLLW\NOAA_NCEI_OCS\BAGs\Processed\H12137_MB_VR_MLLW.csar",
r"\\nos.noaa\OCS\HSD\Projects\NBS\NBS_Data\PBC_Northeast_UTM19N_MLLW\NOAA_NCEI_OCS\BPS\Manual\H10795.csar",
]
out_paths = [
(r"C:\Data\nbs\test_remove_reinsert\H09170.csar.csv", 1),
(r"C:\Data\nbs\test_remove_reinsert\H10350.csar.csv", 2),
(r"C:\Data\nbs\test_remove_reinsert\H06443.csar.csv", 3),
(r"C:\Data\nbs\test_remove_reinsert\H12137_MB_VR_MLLW.csar.csv.npy", 4),
(r"C:\Data\nbs\test_remove_reinsert\H10795.csar.csv", 5),
]
resx = resy = 64
# NAD83 zone 19 = 26919. WGS84 would be 32619
epsg = 26919
# use this to align the database to something else (like caris for testing)
offset_x = 0
offset_y = 0
# db_path = r"C:\Data\nbs\test_remove_reinsert\utm19_removals_64m"
# db = WorldDatabase(
# UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y, zoom_level=10))
# for survey_path, contrib_id in out_paths:
# db.insert_survey(survey_path, override_epsg=epsg, contrib_id=contrib_id, survey_score=contrib_id)
db_path = r"C:\Data\nbs\test_remove_reinsert\utm19_removals_64m_remove_4"
if os.path.exists(db_path):
shutil.rmtree(db_path, onerror=onerr)
try:
shutil.copytree(db_path + " - Copy", db_path)
db = WorldDatabase(
UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y,
zoom_level=10))
except FileNotFoundError:
db = WorldDatabase(
UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y,
zoom_level=10))
for survey_path, contrib_id in out_paths:
db.insert_survey(survey_path, override_epsg=epsg, contrib_id=contrib_id, survey_score=contrib_id)
db.remove_and_recompute(4)
out_paths.reverse()
# db_path = r"C:\Data\nbs\test_remove_reinsert\utm19_removals_64m_reverse"
# db = WorldDatabase(
# UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y, zoom_level=10))
# for survey_path, contrib_id in out_paths:
# db.insert_survey(survey_path, override_epsg=epsg, contrib_id=contrib_id, survey_score=contrib_id)
db_path = r"C:\Data\nbs\test_remove_reinsert\utm19_removals_64m_reverse_remove_4"
db = WorldDatabase(
UTMTileBackendExactRes(resx, resy, epsg, RasterHistory, DiskHistory, TiffStorage, db_path, offset_x=offset_x, offset_y=offset_y,
zoom_level=10))
for survey_path, contrib_id in out_paths:
db.insert_survey(survey_path, override_epsg=epsg, contrib_id=contrib_id, survey_score=contrib_id)
db.remove_and_recompute(4)
def try_customarea():
data_dir = pathlib.Path(r"G:\Data\NBS\H11305_for_Bruty")
# orig_db = CustomArea(26916, 395813.2, 3350563.98, 406818.2, 3343878.98, 4, 4, data_dir.joinpath('bruty'))
new_db = CustomArea(None, 395813.20000000007, 3350563.9800000004, 406818.20000000007, 3343878.9800000004, 4, 4,
data_dir.joinpath('bruty_debug_center'))
# use depth band for uncertainty since it's not in upsample data
new_db.insert_survey_gdal(r"G:\Data\NBS\H11305_for_Bruty\1of3.tif", 0, uncert_band=1, override_epsg=None)
# new_db.insert_survey_gdal(r"G:\Data\NBS\H11305_for_Bruty\2of3.tif", 0, uncert_band=1, override_epsg=None)
# new_db.insert_survey_gdal(r"G:\Data\NBS\H11305_for_Bruty\3of3.tif", 0, uncert_band=1, override_epsg=None)
new_db.insert_survey_gdal(r"G:\Data\NBS\H11305_for_Bruty\H11305_VB_5m_MLLW_1of3.bag", 1, override_epsg=None)
# new_db.insert_survey_gdal(r"G:\Data\NBS\H11305_for_Bruty\H11305_VB_5m_MLLW_2of3.bag", 1, override_epsg=None)
# new_db.insert_survey_gdal(r"G:\Data\NBS\H11305_for_Bruty\H11305_VB_5m_MLLW_3of3.bag", 1, override_epsg=None)
new_db.export(r"G:\Data\NBS\H11305_for_Bruty\combine_new_centers.tif")
def mississippi():
fname = r"G:\Data\NBS\Speed_test\H11045_VB_4m_MLLW_2of2.bag"
ds = gdal.Open(fname)
x1, resx, dxy, y1, dyx, resy = ds.GetGeoTransform()
numx = ds.RasterXSize
numy = ds.RasterYSize
epsg = rasterio.crs.CRS.from_string(ds.GetProjection()).to_epsg()
epsg = 26918
ds = None
# db = WorldDatabase(UTMTileBackendExactRes(4, 4, epsg, RasterHistory, DiskHistory, TiffStorage,
# r"G:\Data\NBS\Speed_test\test_db_world"))
db = CustomArea(epsg, x1, y1, x1 + (numx + 1) * resx, y1 + (numy + 1) * resy, 4, 4, r"G:\Data\NBS\Speed_test\test_cust4")
db.insert_survey_gdal(fname, override_epsg=epsg)
db.export(r"G:\Data\NBS\Speed_test\test_cust4\export.tif")
raise Exception("Done")
# from nbs.bruty.history import MemoryHistory
# from nbs.bruty.raster_data import MemoryStorage, RasterDelta, RasterData, LayersEnum, arrays_match
from nbs.bruty.utils import save_soundings_from_image
# from tests.test_data import master_data, data_dir
# use_dir = data_dir.joinpath('tile4_vr_utm_db')
# db = WorldDatabase(UTMTileBackend(26919, RasterHistory, DiskHistory, TiffStorage, use_dir)) # NAD823 zone 19. WGS84 would be 32619
# db.export_area_old(use_dir.joinpath("export_tile_old.tif"), 255153.28, 4515411.86, 325721.04, 4591064.20, 8)
# db.export_area(use_dir.joinpath("export_tile_new.tif"), 255153.28, 4515411.86, 325721.04, 4591064.20, 8)
build_mississippi = True
export_mississippi = False
process_utm_15 = True
output_res = (4, 4) # desired output size in meters
data_dir = pathlib.Path(r'G:\Data\NBS\Mississipi')
if process_utm_15:
export_dir = data_dir.joinpath("UTM15")
epsg = 26915
max_lon = -90
min_lon = -96
max_lat = 35
min_lat = 0
use_dir = data_dir.joinpath('vrbag_utm15_debug_db')
data_files = [(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13194_MB_VR_LWRP.bag", 92),
(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13193_MB_VR_LWRP.bag", 100),
(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13330_MB_VR_LWRP.bag", 94),
(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13188_MB_VR_LWRP.bag", 95),
(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13189_MB_VR_LWRP.bag", 96),
(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13190_MB_VR_LWRP.bag", 97),
(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13191_MB_VR_LWRP.bag", 98),
(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13192_MB_VR_LWRP.bag", 99),
# (r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13190_MB_VR_LWRP.bag.resampled_4m.uncert.tif", 77),
# (r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13192_MB_VR_LWRP.bag.resampled_4m.uncert.tif", 79),
]
resamples = []
# [r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13190_MB_VR_LWRP.bag",
# r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13192_MB_VR_LWRP.bag",]
for vr_path in resamples:
resampled_path = vr_path + ".resampled_4m.tif"
bag.VRBag_to_TIF(vr_path, resampled_path, 4, use_blocks=False)
resampled_with_uncertainty = resampled_path = resampled_path[:-4] + ".uncert.tif"
add_uncertainty_layer(resampled_path, resampled_with_uncertainty)
data_files.append(resampled_with_uncertainty)
else:
export_dir = data_dir.joinpath("UTM16")
epsg = 26916
max_lon = -84
min_lon = -90
max_lat = 35
min_lat = 0
use_dir = data_dir.joinpath('vrbag_utm16_debug_db')
data_files = [(r"G:\Data\NBS\Mississipi\UTM16\NCEI\H13195_MB_VR_LWRP.bag", 93),
(r"G:\Data\NBS\Mississipi\UTM16\NCEI\H13196_MB_VR_LWRP.bag", 91),
(r"G:\Data\NBS\Mississipi\UTM16\NCEI\H13193_MB_VR_LWRP.bag", 100),
(r"G:\Data\NBS\Mississipi\UTM16\NCEI\H13194_MB_VR_LWRP.bag", 92),
]
if build_mississippi:
if os.path.exists(use_dir):
shutil.rmtree(use_dir, onerror=onerr)
db = WorldDatabase(UTMTileBackendExactRes(*output_res, epsg, RasterHistory, DiskHistory, TiffStorage,
use_dir)) # NAD83 zone 19. WGS84 would be 32619
if 0: # find a specific point in the tiling database
y, x = 30.120484, -91.030685
px, py = crs_transform.transform(x, y)
tile_index_x, tile_index_y = db.db.tile_scheme.xy_to_tile_index(px, py)
if build_mississippi:
for data_file, score in data_files:
# bag_file = directory.joinpath(directory.name + "_MB_VR_LWRP.bag")
if _debug:
if 'H13190' not in data_file:
print("Skipped for debugging", data_file)
continue
if 'H13194' in data_file: # this file is encoded in UTM16 even in the UTM15 area
override_epsg = 26916
elif 'H13193' in data_file: # this file is encoded in UTM15 even in the UTM16 area
override_epsg = 26915
else:
override_epsg = epsg
# db.insert_survey_gdal(bag_file, override_epsg=epsg) # single res
if str(data_file)[-4:] in (".bag",):
db.insert_survey_vr(data_file, survey_score=score, override_epsg=override_epsg)
elif str(data_file)[-4:] in ("tiff", ".tif"):
db.insert_survey_gdal(data_file, survey_score=score)
if export_mississippi:
area_shape_fname = r"G:\Data\NBS\Support_Files\MCD_Bands\Band5\Band5_V6.shp"
ds = gdal.OpenEx(area_shape_fname)
# ds.GetLayerCount()
lyr = ds.GetLayer(0)
srs = lyr.GetSpatialRef()
export_epsg = rasterio.crs.CRS.from_string(srs.ExportToWkt()).to_epsg()
lyr.GetFeatureCount()
lyrdef = lyr.GetLayerDefn()
for i in range(lyrdef.GetFieldCount()):
flddef = lyrdef.GetFieldDefn(i)
if flddef.name == "CellName":
cell_field = i
break
crs_transform = get_crs_transformer(export_epsg, db.db.tile_scheme.epsg)
inv_crs_transform = get_crs_transformer(db.db.tile_scheme.epsg, export_epsg)
for feat in lyr:
geom = feat.GetGeometryRef()
# geom.GetGeometryCount()
minx, maxx, miny, maxy = geom.GetEnvelope() # (-164.7, -164.39999999999998, 67.725, 67.8)
# output in WGS84
cx = (minx + maxx) / 2.0
cy = (miny + maxy) / 2.0
# crop to the area around Mississippi
if cx > min_lon and cx < max_lon and cy > min_lat and cy < max_lat:
cell_name = feat.GetField(cell_field)
if _debug:
##
## vertical stripes in lat/lon
## "US5MSYAF" for example
# if cell_name not in ("US5MSYAF",): # , 'US5MSYAD'
# continue
## @fixme There is a resolution issue at ,
## where the raw VR is at 4.2m which leaves stripes at 4m export so need to add
## an upsampled dataset to fill the area (with lower score so it doesn't overwrite the VR itself)
if cell_name not in ('US5BPGBD',): # 'US5BPGCD'):
continue
# @fixme missing some data in US5PLQII, US5PLQMB US5MSYAE -- more upsampling needed?
print(cell_name)
# convert user res (4m in testing) size at center of cell for resolution purposes
dx, dy = compute_delta_coord(cx, cy, *output_res, crs_transform, inv_crs_transform)
bag_options_dict = {'VAR_INDIVIDUAL_NAME': 'Chief, Hydrographic Surveys Division',
'VAR_ORGANISATION_NAME': 'NOAA, NOS, Office of Coast Survey',
'VAR_POSITION_NAME': 'Chief, Hydrographic Surveys Division',
'VAR_DATE': datetime.now().strftime('%Y-%m-%d'),
'VAR_VERT_WKT': 'VERT_CS["unknown", VERT_DATUM["unknown", 2000]]',
'VAR_ABSTRACT': "This multi-layered file is part of NOAA Office of Coast Survey’s National Bathymetry. The National Bathymetric Source is created to serve chart production and support navigation. The bathymetry is compiled from multiple sources with varying quality and includes forms of interpolation. Soundings should not be extracted from this file as source data is not explicitly identified. The bathymetric vertical uncertainty is communicated through the associated layer. More generic quality and source metrics will be added with 2.0 version of the BAG format.",
'VAR_PROCESS_STEP_DESCRIPTION': f'Generated By GDAL {gdal.__version__} and NBS',
'VAR_DATETIME': datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
'VAR_VERTICAL_UNCERT_CODE': 'productUncert',
# 'VAR_RESTRICTION_CODE=' + restriction_code,
# 'VAR_OTHER_CONSTRAINTS=' + other_constraints,
# 'VAR_CLASSIFICATION=' + classification,
# 'VAR_SECURITY_USER_NOTE=' + security_user_note
}
tif_tags = {'EMAIL_ADDRESS': '<EMAIL>',
'ONLINE_RESOURCE': 'https://www.ngdc.noaa.gov',
'LICENSE': 'License cc0-1.0',
}
export_path = export_dir.joinpath(cell_name + ".tif")
cnt, exported_dataset = db.export_area(export_path, minx, miny, maxx, maxy, (dx + dx * .1, dy + dy * .1), target_epsg=export_epsg)
# export_path = export_dir.joinpath(cell_name + ".bag")
# bag_options = [key + "=" + val for key, val in bag_options_dict.items()]
# cnt2, ex_ds = db.export_area(export_path, minx, miny, maxx, maxy, (dx+dx*.1, dy+dy*.1), target_epsg=export_epsg,
# driver='BAG', gdal_options=bag_options)
if cnt > 0:
# output in native UTM -- Since the coordinates "twist" we need to check all four corners,
# not just lower left and upper right
x1, y1, x2, y2 = transform_rect(minx, miny, maxx, maxy, crs_transform.transform)
cnt, utm_dataset = db.export_area(export_dir.joinpath(cell_name + "_utm.tif"), x1, y1, x2, y2, output_res)
else:
exported_dataset = None # close the gdal file
os.remove(export_path)
os.remove(export_path.with_suffix(".score.tif"))
test_soundings = False
if test_soundings:
soundings_files = [pathlib.Path(r"C:\Data\nbs\PBC19_Tile4_surveys\soundings\Tile4_4m_20210219_source.tiff"),
pathlib.Path(r"C:\Data\nbs\PBC19_Tile4_surveys\soundings\Tile4_4m_20201118_source.tiff"),
]
for soundings_file in soundings_files:
ds = gdal.Open(str(soundings_file))
# epsg = rasterio.crs.CRS.from_string(ds.GetProjection()).to_epsg()
xform = ds.GetGeoTransform() # x0, dxx, dyx, y0, dxy, dyy
d_val = ds.GetRasterBand(1)
col_size = d_val.XSize
row_size = d_val.YSize
del d_val, ds
x1, y1 = affine(0, 0, *xform)
x2, y2 = affine(row_size, col_size, *xform)
res = 50
res_x = res
res_y = res
# move the minimum to an origin based on the resolution so future exports would match
if x1 < x2:
x1 -= x1 % res_x
else:
x2 -= x2 % res_x
if y1 < y2:
y1 -= y1 % res_y
else:
y2 -= y2 % res_y
# note: there is an issue where the database image and export image are written in reverse Y direction
# because of this the first position for one is top left and bottom left for the other.
# when converting the coordinate of the cell it basically ends up shifting by one
# image = (273250.0, 50.0, 0.0, 4586700.0, 0.0, -50.0) db = (273250.0, 50, 0, 4552600.0, 0, 50)
# fixed by using cell centers rather than corners.
# Same problem could happen of course if the centers are the edges of the export tiff
# db = CustomArea(26919, x1, y1, x2, y2, res_x, res_y, soundings_file.parent.joinpath('debug')) # NAD823 zone 19. WGS84 would be 32619
# db.insert_survey_gdal(str(soundings_file))
# db.export_area_new(str(soundings_file.parent.joinpath("output_soundings_debug5.tiff")), x1, y1, x2, y2, (res_x, res_y), )
save_soundings_from_image(soundings_file, str(soundings_file) + "_3.gpkg", 50)
# test positions -- H13190, US5GPGBD, Mississipi\vrbag_utm15_full_db\4615\3227\_000001_.tif, Mississipi\UTM15\NCEI\H13190_MB_VR_LWRP_resampled.tif
# same approx position
# 690134.03 (m), 3333177.81 (m) is 41.7 in the H13190
# 690134.03 (m), 3333177.81 (m) is 42.4 in the resampled
# 690133.98 (m), 3333178.01 (m) is 42.3 in the \4615\3227\000001.tif
# 690133.60 (m), 3333177.74 (m) is 42.3 in the US5GPGBD
# seems to be the same Z value of 41.7
# 690134.03 (m), 3333177.81 (m) H13190
# 690138.14 (m), 3333177.79 (m) resample (right (east) one column)
# 690129.99 (m), 3333173.99 (m) \4615\3227\000001.tif (down+left (south west) one row+col)
# 690129.62 (m), 3333173.76 (m) US5GPGBD (down+left (south west) one row+col)
# from importlib import reload
# import HSTB.shared.gridded_coords
# bag.VRBag_to_TIF(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13190_MB_VR_LWRP.bag", r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13190_resample.tif", 4.105774879455566, bag.MEAN, nodata=1000000.)
# index2d = numpy.array([(655, 265)], dtype=numpy.int32)
# >>> print('x', refinement_llx + 9 * resolution_x + resolution_x / 2.0, 'y',refinement_lly + 8 * resolution_y + resolution_y / 2.0)
# x 690134.0489868548 y 3333177.797961975
# >>> print("x (cols)",xstarts[9],":", xends[9], "y (rows)",ystarts[8],":", yends[8])
# x (cols) 690131.9960994151 : 690136.1018732946 y (rows) 3333175.7450745353 : 3333179.850848415
# >>> print("rows",row_start_indices[8],":",row_end_indices[8], "cols",col_start_indices[9],":", col_end_indices[9])
# rows 4052 : 4053 cols 2926 : 2927
# >>> print('starts',HSTB.shared.gridded_coords.affine(row_start_indices[8], col_start_indices[9], *ds_val.GetGeoTransform()), ', ends',HSTB.shared.gridded_coords.affine(row_end_indices[8], col_end_indices[9], *ds_val.GetGeoTransform()))
# starts (690131.995748028, 3333183.9557557716) , ends (690136.1015229075, 3333179.849980892)
# >>> ds_val.GetGeoTransform(), sr_grid.geotransform
# ((678118.498450741, 4.105774879455566, 0.0, 3349820.5555673256, 0.0, -4.105774879455566),
# (678118.498450741, 4.105774879455566, 0, 3303552.578450741, 0, 4.105774879455566))
# ds = gdal.Open(r"G:\Data\NBS\Mississipi\UTM15\NCEI\H13190_resample4.tif")
# b = ds.GetRasterBand(1)
# dep = b.ReadAsArray()
# b.GetNoDataValue()
# (dep!=0.0).any()
r""" # script to add transaction records to original bruty databases
bruty_paths = [
r"E:\bruty_databases\pbg_gulf_utm14n_mllw",
r"E:\bruty_databases\pbg_gulf_utm14n_mllw_not_for_navigation",
r"E:\bruty_databases\pbg_gulf_utm14n_mllw_prereview",
r"E:\bruty_databases\pbg_gulf_utm14n_mllw_prereview_not_for_navigation",
r"E:\bruty_databases\pbg_gulf_utm14n_mllw_sensitive",
r"E:\bruty_databases\pbg_gulf_utm14n_mllw_sensitive_not_for_navigation",
r"E:\bruty_databases\pbg_gulf_utm15n_mllw",
r"E:\bruty_databases\pbg_gulf_utm15n_mllw_not_for_navigation",
r"E:\bruty_databases\pbg_gulf_utm15n_mllw_prereview",
r"E:\bruty_databases\pbg_gulf_utm15n_mllw_prereview_not_for_navigation",
r"E:\bruty_databases\pbg_gulf_utm15n_mllw_sensitive",
r"E:\bruty_databases\pbg_gulf_utm15n_mllw_sensitive_not_for_navigation",
r"E:\bruty_databases\pbg_gulf_utm16n_mllw",
r"E:\bruty_databases\pbg_gulf_utm16n_mllw_not_for_navigation",
r"E:\bruty_databases\pbg_gulf_utm16n_mllw_prereview",
r"E:\bruty_databases\pbg_gulf_utm16n_mllw_prereview_not_for_navigation",
r"E:\bruty_databases\pbg_gulf_utm16n_mllw_sensitive",
r"E:\bruty_databases\pbg_gulf_utm16n_mllw_sensitive_not_for_navigation"]
for pth in bruty_paths:
db = world_raster_database.WorldDatabase.open(pth)
db.transaction_groups.add_oid_record(("INSERT", datetime.datetime(2021, 9, 25, 0, 0, 0)))
inc = db.included_ids.cur.execute("UPDATE included set (transaction_id)=(1)").rowcount
start = db.included_ids.cur.execute("UPDATE started set (transaction_id)=(1)").rowcount
db.included_ids.conn.commit()
print(pth, inc, start)
"""
if __name__ == "__main__":
slow_process()
|
StarcoderdataPython
|
4967846
|
import sys
import fandango as fd
assert sys.argv[1:], 'Start date required (e.g. 2017-08-01)'
tables = {
'att_array_devdouble_ro':'adr',
'att_array_devlong_ro':'alr',
'att_array_devshort_ro':'ahr',
'att_array_devstring_ro':'asr',
'att_array_devstate_ro':'atr',
'att_scalar_devdouble_ro':'sdr',
'att_scalar_devdouble_rw':'sdw',
'att_scalar_devfloat_ro':'sfr',
'att_scalar_devlong_ro':'slr',
'att_scalar_devlong_rw':'slw',
'att_scalar_devstate_ro':'str',
'att_scalar_devstring_ro':'ssr',
'att_scalar_devshort_ro':'shr',
'att_scalar_devshort_rw':'shw',
}
start_date = sys.argv[1] # '2017-08-01'
npartitions = 20
counter = 0
def inc_months(date, count):
    y, m, d = map(int, date.split('-'))
    m = m + count
    r = m % 12
    if r:
        y += int(m / 12)
        m = m % 12
    else:
        y += int(m / 12) - 1
        m = 12
    return '%04d-%02d-%02d' % (y, m, d)
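# Quick sanity check of the month arithmetic above (values computed by hand):
#     inc_months('2017-08-01', 1)    # -> '2017-09-01'
#     inc_months('2017-08-01', 5)    # -> '2018-01-01'
#     inc_months('2017-08-01', 17)   # -> '2019-01-01'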
head = "ALTER TABLE %s PARTITION BY RANGE(TO_DAYS(data_time)) ("
line = "PARTITION %s%s VALUES LESS THAN (TO_DAYS('%s'))"
lines = []
for t, p in tables.items():
    lines.append(head % t)
    for i in range(0, npartitions):
        date = inc_months(start_date, i)
        end = inc_months(date, 1)
        l = line % (p, date.replace('-', ''), end)
        if i < (npartitions - 1):
            l += ','
        lines.append(l)
    lines.append(');\n\n')
print('\n'.join(lines))
|
StarcoderdataPython
|
3329443
|
<filename>auto_schema/host.py
import re
import sys
import time
from .bash import run
# NOTE: Hosts here are the same sense of instance.
# Meaning an actual host can have multiple of them
# Do not shutdown or general actions using this class on a
# multiinstance host
class Host(object):
def __init__(self, host, section):
self.host = host
self.section = section
if re.findall(r'\w1\d{3}', host):
self.dc = 'eqiad'
else:
self.dc = 'codfw'
self.fqn = '{}.{}.wmnet'.format(host.split(':')[0], self.dc)
self.dbs = []
def run_sql(self, sql):
args = '-h{} -P{}'.format(self.host.split(':')[0], self.host.split(':')[
1]) if ':' in self.host else '-h' + self.host
if '"' in sql:
sql = sql.replace('"', '\\"')
if '`' in sql:
sql = sql.replace('`', '\\`')
if '\n' in sql:
sql = sql.replace('\n', ' ')
if not sql.strip().endswith(';'):
sql += ';'
return run('mysql.py {} -e "{}"'.format(args, sql))
def run_on_host(self, command):
if '"' in command:
command = command.replace('"', '\\"')
return run('cumin {} "{}"'.format(self.fqn, command))
def depool(self, ticket):
# TODO: check if it's depoolable
run('dbctl instance {} depool'.format(self.host))
run('dbctl config commit -b -m "Depooling {} ({})"'.format(self.host, ticket))
while True:
if self.has_traffic() and '--run' in sys.argv:
print('Sleeping for the traffic to drain')
time.sleep(60)
else:
break
def has_traffic(self):
# TODO: Make the users check more strict and include root
result = self.run_sql(
'SELECT * FROM information_schema.processlist WHERE User like \'%wiki%\';')
return bool(result)
def get_replag(self):
query_res = self.run_sql(
"SELECT greatest(0, TIMESTAMPDIFF(MICROSECOND, max(ts), UTC_TIMESTAMP(6)) - 500000)/1000000 AS lag FROM heartbeat.heartbeat WHERE datacenter='{}' GROUP BY shard HAVING shard = '{}';".format(
self.dc,
self.section))
replag = None
if not query_res:
return 1000 if '--run' in sys.argv else 0
for line in query_res.split('\n'):
if not line.strip():
continue
count = line.strip()
if count == 'lag':
continue
try:
count = float(count)
except BaseException:
continue
replag = count
return replag
def repool(self, ticket):
replag = 1000
while replag > 1:
replag = self.get_replag()
if ((replag is None) or (replag > 1)) and '--run' in sys.argv:
print('Waiting for replag to catch up')
time.sleep(60)
for percent in [10, 25, 75, 100]:
run('dbctl instance {} pool -p {}'.format(self.host, percent))
run('dbctl config commit -b -m "After maintenance {} ({})"'.format(self.host, ticket))
if '--run' in sys.argv and percent != 100:
print('Waiting for the next round')
time.sleep(900)
def downtime(self, ticket, hours, more_to_downtime=[]):
more_to_downtime.append(self)
hosts = ','.join([i.fqn for i in more_to_downtime])
run('cookbook sre.hosts.downtime --hours {} -r "Maintenance {}" {}'.format(hours, ticket, hosts))
def has_replicas(self):
return self.run_sql('show slave hosts;').strip() != ''
def get_replicas(self, recursive=False):
res = self.run_sql('show slave hosts;')
hosts = [
Host('{}:{}'.format(i[0], i[1]), self.section)
for i in re.findall(r'(\S+)\.(?:eqiad|codfw)\.wmnet\s*(\d+)', res)
]
if not recursive:
return hosts
replicas_to_check = hosts.copy()
while replicas_to_check:
replica_replicas = replicas_to_check.pop().get_replicas(False)
hosts += replica_replicas.copy()
replicas_to_check += replica_replicas.copy()
return hosts
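# A minimal usage sketch; the instance name and section below are hypothetical:
#
#     host = Host('db1111:3316', 's4')              # "db1111" matches \w1\d{3}, so dc becomes 'eqiad'
#     host.run_sql('SELECT 1')                      # shells out via mysql.py
#     replicas = host.get_replicas(recursive=True)  # walks "show slave hosts" down the replication tree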
|
StarcoderdataPython
|
1892064
|
<filename>model.py
from typing import Dict
from PySide2.QtCore import Signal, Slot, QObject, QTimer
import cv2, h5py, math
import numpy as np
import matplotlib.pyplot as plt
# YOLOv4 & DeepSORT code is taken from :
# https://github.com/theAIGuysCode/yolov4-deepsort
# deep sort imports
from deep_sort import preprocessing, nn_matching
from deep_sort import tracker
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.python.saved_model import tag_constants
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import Session
from core.yolov4 import filter_boxes
from core.config import cfg
import core.utils as utils
from tools import generate_detections as gdet
MAX_DETECTION_NUM = 50
nn_budget = None
nms_max_overlap = 1.0
input_size = 416
model_filename = 'model_data/mars-small128.pb'
weights_path = './checkpoints/yolov4-416'
class_id_map = {
'none' : '0',
'truck' : '1',
'car' : '2',
'bus' : '3'
}
class_id_map.update({item[1]: item[0] for item in class_id_map.items()})
class Model(QObject):
frame_update_signal = Signal(np.ndarray, int)
max_frame_update_signal = Signal(int)
process_done_signal = Signal()
error_signal = Signal(str)
vehicle_count_signal = Signal(int,int,int,np.ndarray)
def __init__(self):
super().__init__()
# Definition of the parameters
self.sess = None
self.infer = None
self.encoder = None
self.saved_model_loaded = None
self.max_cosine_distance = 0.4
self.iou_thresh = 0.45
self.score_thresh = 0.7
self.input_video_path = ''
self.output_video_path = ''
self.output_data_path = ''
self.mask_path = ''
self.cache_data = None
self.vid = None
self.detected_vehicles = None
self.frame_counter = 0
self.finishLine = (0,0,0,0)
self.stop_inference = True
self.stop_counting = True
self.count_method = 0
self.imgMask = None
self.initialize_counting()
#initialize color map
cmap = plt.get_cmap('tab20b')
self.colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
#======================= Setters ===========================
def initialize_counting(self):
self.detected_vehicles = {class_id : {} for class_name, class_id in class_id_map.items()}
def setInputVideoPath(self, path):
self.input_video_path = path
self.vid = cv2.VideoCapture(self.input_video_path)
_, frame = self.vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.frame_update_signal.emit(frame, 0)
def setOutputVideoPath(self, path):
self.output_video_path = path
def setOutputDataPath(self, path):
self.output_data_path = path
def setCacheDataPath(self, path):
self.cache_data_path = path
# parse cache data and send signal with max frame num
cache = h5py.File(self.cache_data_path, 'r')
cache_data = cache.get('dataset_1')
self.cache_data = np.array(cache_data)
self.max_frame_update_signal.emit(self.cache_data.shape[0])
def setMaskFile(self, path):
self.mask_path = path
mask = h5py.File(self.mask_path, 'r')
mask = mask.get('mask')
self.imgMask = np.array(mask)
def saveMask(self, path, mask):
self.imgMask = mask
data = h5py.File(path, 'w')
data.create_dataset('mask', data=self.imgMask)
data.close()
def getMask(self):
return self.imgMask
def setParams(self, params:dict):
self.imgMask = params['mask']
self.iou_thresh = params['iou_thresh']
self.score_thresh = params['score_thresh']
self.max_cosine_distance = params['cos_dist']
self.filt_x_vec = params['x_vect']
self.filt_y_vec = params['y_vect']
self.filt_width = params['filt_width']
self.filt_dist = params['filt_dist']
self.filt_frame = params['filt_frames']
self.finishFrames = params['finish_frames']
self.finishLine = params['finish_line']
self.count_method = params['count_method']
#==================== Counting Functions ========================
def countVehicles(self, frame, frame_num, detection) -> bool:
class_id = detection[0]
uid = str(detection[1])
# xmin, ymin, xmax, ymax
x_min = detection[2]
y_min = detection[3]
x_max = detection[4]
y_max = detection[5]
width = x_max - x_min
height = y_max - y_min
cx = x_min + (width / 2)
cy = y_min + (height / 2)
centroid = [cx, cy]
tracker_dict = self.detected_vehicles[str(class_id)]
# detecting for the first time
if uid not in tracker_dict.keys():
tracker_dict[uid] = {
'initial_centroid' : [cx, cy],
'prev_centroid': [cx, cy],
'prev_frame_num': frame_num,
'dist': 0,
'counted': False
}
return False
# already counted this car, skip
elif tracker_dict[uid]['counted'] == True:
return True
# count with vector filter method
if self.count_method == 0:
# reset distance travelled if previous detected frame is too far off
if frame_num - tracker_dict[uid]['prev_frame_num'] > self.filt_frame:
tracker_dict[uid]['prev_centroid'] = centroid
# compute distance traveled
prev_centroid = tracker_dict[uid]['prev_centroid']
tracker_dict[uid]['dist'] = tracker_dict[uid]['dist'] + math.dist(prev_centroid, centroid)
tracker_dict[uid]['prev_centroid'] = centroid
tracker_dict[uid]['prev_frame_num'] = frame_num
# count the object if distance traveled exceeds a threshold
if tracker_dict[uid]['dist'] > self.filt_dist:
# compute direction vector
initial_centroid = tracker_dict[uid]['initial_centroid']
vect = [cx - initial_centroid[0], cy - initial_centroid[1]]
# only count vehicles travelling south
x_min = self.filt_x_vec - self.filt_width
x_max = self.filt_x_vec + self.filt_width
if (x_min < vect[0] < x_max) and (vect[1] > 0) == (self.filt_y_vec > 0):
tracker_dict[uid]['counted'] = True
cnt = sum([param['counted'] for id, param in tracker_dict.items()])
img = self.getVehicleImage(detection, frame)
self.vehicle_count_signal.emit(class_id, int(uid), cnt, img)
return True
# count with finishing line method
elif self.count_method == 1:
bx = self.finishLine[0]
by = self.finishLine[1]
bw = self.finishLine[2]
bh = self.finishLine[3]
# check if centroid within bounds of finish line
if (cx > bx) and (cx < bx + bw) and (cy > by) and (cy < by + bh):
tracker_dict[uid]['dist'] += 1
if tracker_dict[uid]['dist'] > self.finishFrames:
tracker_dict[uid]['counted'] = True
cnt = sum([param['counted'] for id, param in tracker_dict.items()])
img = self.getVehicleImage(detection, frame)
self.vehicle_count_signal.emit(class_id, int(uid), cnt, img)
return True
return False
@Slot()
def startCounting(self):
if not self.validateInputFiles():
return
total_frames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
# check that the frame counts in the cache data and the video match
if total_frames != self.cache_data.shape[0]:
self.error_signal.emit('Video and cache frame count does not match')
return
# reinitialize dict for counting
self.detected_vehicles = {class_id : {} for class_name, class_id in class_id_map.items()}
# go to first frame
self.vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
for frame_num, frame_data in enumerate(self.cache_data):
_, frame = self.vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.bitwise_and(frame, frame, mask=self.imgMask)
for detection in frame_data:
self.countVehicles(frame, frame_num, detection)
self.process_done_signal.emit()
@Slot()
def analyzeFrames(self):
if not self.counting_timer.isActive():
self.counting_timer.setInterval(30)
self.counting_timer.start()
return
success , frame = self.vid.read()
if success and not self.stop_counting:
frame_original = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.bitwise_and(frame_original, frame_original, mask=self.imgMask)
frame_data = self.cache_data[self.frame_counter]
for detection in frame_data:
class_name = self.getClassName(str(detection[0]))
uid = detection[1]
x_min = detection[2]
y_min = detection[3]
x_max = detection[4]
y_max = detection[5]
detected = self.countVehicles(frame, self.frame_counter, detection)
frame = self.drawBoundingBox(frame_original, class_name, uid, x_min, y_min, x_max, y_max, detected)
self.frame_counter += 1
self.frame_update_signal.emit(frame, self.frame_counter)
else:
self.stop_counting = True
self.counting_timer.stop()
self.frame_counter = 0
self.process_done_signal.emit()
@Slot()
def stopCountingAnalysis(self):
self.stop_counting = True
@Slot()
def startCountingAnalysis(self):
self.counting_timer = QTimer()
self.counting_timer.timeout.connect(self.analyzeFrames)
if not self.validateInputFiles():
return
total_frames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
# check that the frame counts in the cache data and the video match
if total_frames != self.cache_data.shape[0]:
self.error_signal.emit('Video and cache frame count does not match')
return
# reinitialize dict for counting
self.detected_vehicles = {class_id : {} for class_name, class_id in class_id_map.items()}
self.stop_counting = False
# go to first frame
self.vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
self.analyzeFrames()
def validateInputFiles(self) -> bool:
if self.cache_data is None:
self.error_signal.emit('Cache data not specified!')
return False
elif self.vid is None:
self.error_signal.emit('No input video specified')
return False
else:
return True
@Slot(int)
def previewFrame(self, frame_num):
if not self.validateInputFiles():
return
# go to specified frame
self.vid.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
_, frame = self.vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# draw bb box
for detection in self.cache_data[frame_num]:
class_name = self.getClassName(str(detection[0]))
uid = detection[1]
x_min = detection[2]
y_min = detection[3]
x_max = detection[4]
y_max = detection[5]
frame = self.drawBoundingBox(frame, class_name, uid, x_min, y_min, x_max, y_max)
# draw counting annotation
# update frame signal
self.frame_update_signal.emit(frame, frame_num)
#==================== Inference Functions ========================
def stopInference(self):
self.stop_inference = True
@Slot()
def startInference(self):
if self.vid is None:
self.error_signal.emit('No input video specified')
return
self.stop_inference = False
self.detected_vehicles = {class_id : {} for class_name, class_id in class_id_map.items()}
# calculate cosine distance metric
metric = nn_matching.NearestNeighborDistanceMetric("cosine", self.max_cosine_distance, nn_budget)
# initialize tracker
tracker = Tracker(metric)
# load standard tensorflow saved model for YOLO and Deepsort
if self.sess is None:
# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
self.sess = Session(config=config)
self.saved_model_loaded = tf.saved_model.load(weights_path)
self.infer = self.saved_model_loaded.signatures['serving_default']
self.encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# begin video capture
total_frames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
self.max_frame_update_signal.emit(total_frames)
# go to first frame
self.vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
# get video ready to save locally
# by default VideoCapture returns float instead of int
width = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(self.vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(self.output_video_path, codec, fps, (width, height))
# initialize buffer to store cache
cache = []
# buffer to track and count vehicles
cars = {}
trucks = {}
car_cnt = 0
truck_cnt = 0
frame_num = 0
# while video is running
while not self.stop_inference:
frame_data = np.zeros((MAX_DETECTION_NUM, 6), dtype=int)
return_value, frame = self.vid.read()
if return_value:
frame_original = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.bitwise_and(frame_original, frame_original, mask=self.imgMask)
else:
print('Video has ended or failed, try a different video format!')
break
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
batch_data = tf.constant(image_data)
pred_bbox = self.infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=MAX_DETECTION_NUM,
max_total_size=MAX_DETECTION_NUM,
iou_threshold= self.iou_thresh,
score_threshold= self.score_thresh
)
# convert data to numpy arrays and slice out unused elements
num_objects = valid_detections.numpy()[0]
bboxes = boxes.numpy()[0] # first item, because batch size = 1
bboxes = bboxes[0:int(num_objects)]
scores = scores.numpy()[0]
scores = scores[0:int(num_objects)]
classes = classes.numpy()[0]
classes = classes[0:int(num_objects)]
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
original_h, original_w, _ = frame.shape
bboxes = utils.format_boxes(bboxes, original_h, original_w)
# store all predictions in one parameter for simplicity when calling functions
pred_bbox = [bboxes, scores, classes, num_objects]
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
# custom allowed classes (uncomment line below to customize tracker for only people)
allowed_classes = ['truck', 'car', 'bus']
# loop through objects and use class index to get class name, allow only classes in allowed_classes list
names = []
deleted_indx = []
for i in range(num_objects):
class_indx = int(classes[i])
class_name = class_names[class_indx]
if class_name not in allowed_classes:
deleted_indx.append(i)
else:
names.append(class_name)
names = np.array(names)
count = len(names)
# delete detections that are not in allowed_classes
bboxes = np.delete(bboxes, deleted_indx, axis=0)
scores = np.delete(scores, deleted_indx, axis=0)
# encode yolo detections and feed to tracker
features = self.encoder(frame, bboxes)
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(bboxes, scores, names, features)]
# run non-maxima supression
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
tracker.predict()
tracker.update(detections)
obj_num = 0
# update tracks
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
class_name = track.get_class()
x_min = int(bbox[0])
y_min = int(bbox[1])
x_max = int(bbox[2])
y_max = int(bbox[3])
id = int(track.track_id)
# add to hdf buffer
class_id = self.getClassId(class_name)
frame_data[obj_num] = [class_id, id, x_min, y_min, x_max, y_max]
# Count vehicles
detected = self.countVehicles(frame, frame_num, frame_data[obj_num])
# draw bbox on screen
frame = self.drawBoundingBox(frame_original, class_name, id, x_min, y_min, x_max, y_max, highlight=detected)
obj_num = obj_num + 1
result = np.asarray(frame)
result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
out.write(result)
cache.append(frame_data)
# update frame on UI
self.frame_update_signal.emit(frame, frame_num)
print('Frame #: ', frame_num)
frame_num = frame_num + 1
# Save cache file as hdf file
cache_data = h5py.File(self.output_data_path, 'w')
cache = np.asarray(cache, dtype=int)
cache_data.create_dataset('dataset_1', data=cache)
cache_data.close()
self.process_done_signal.emit()
#==================== Helper Functions ========================
def getVehicleImage(self, detection, frame) -> np.ndarray:
# xmin, ymin, xmax, ymax
x_min = detection[2]
y_min = detection[3]
x_max = detection[4]
y_max = detection[5]
width = x_max - x_min
height = y_max - y_min
img = frame[y_min:y_max, x_min:x_max]
return np.ascontiguousarray(img)
def getClassId(self, class_name:str) -> int:
id = class_id_map.get(class_name)
if id is None:
id = 0
return id
def getClassName(self, class_id:int) -> str:
name = class_id_map.get(class_id)
return name
def drawBoundingBox(self, frame:np.ndarray, class_name:str, id:int, x_min, y_min, x_max, y_max, highlight=False):
color = self.colors[id % len(self.colors)]
color = [i * 255 for i in color]
cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, 2)
cv2.rectangle(frame, (x_min, y_min-30), (x_min+(len(class_name)+len(str(id)) )*17, y_min), color, -1)
cv2.putText(frame, class_name + "-" + str(id),(x_min, int(y_min-10)),0, 0.75, (255,255,255),2)
if highlight:
# highlight in green
frame[y_min:y_max, x_min:x_max, 0] = 0
frame[y_min:y_max, x_min:x_max, 2] = 0
return frame
|
StarcoderdataPython
|
4882133
|
<filename>tests/test_storage_interface.py
import pytest
from chaosplt_scheduling.storage.interface import BaseSchedulingService
def test_cannot_instanciate_scheduling_interface():
    try:
        BaseSchedulingService()
    except TypeError:
        return
    else:
        pytest.fail("BaseSchedulingService should remain abstract")
|
StarcoderdataPython
|
4912094
|
<reponame>Meemaw/Eulers-Project<gh_stars>0
coins = [1, 2, 5, 10, 20, 50, 100, 200]

# Testing inner function memoization
def memoizator(f):
    tabela = dict()
    def inner(a, b):
        h = (a, tuple(b))
        if h in tabela:
            return tabela[h]
        else:
            tabela[h] = f(a, b)
        return tabela[h]
    return inner

@memoizator
def vseMoznosti(vsota, coins):
    if vsota == 0:
        return 1
    if vsota < 0:
        return 0
    if len(coins) == 0:
        return 0
    return vseMoznosti(vsota, coins[1:]) + vseMoznosti(vsota - coins[0], coins)

# Solution:
tabela = dict()
def navadno(vsota, coins):
    h = (vsota, tuple(coins))
    if h in tabela:
        return tabela[h]
    if vsota == 0:
        return 1
    if vsota < 0:
        return 0
    if len(coins) == 0:
        return 0
    tabela[h] = navadno(vsota, coins[1:]) + navadno(vsota - coins[0], coins)
    return tabela[h]

print(vseMoznosti(200, coins))
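# Both versions count the ways to make 200 with the coin set above (this is the
# Project Euler "coin sums" setup, problem 31); the decorator caches on
# (amount, tuple(coins)) in its closure, while navadno uses the module-level dict.
# A quick hand-checked sanity case on a smaller target:
#     vseMoznosti(5, coins)   # -> 4  (1+1+1+1+1, 1+1+1+2, 1+2+2, 5)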
|
StarcoderdataPython
|
5107135
|
<reponame>Santhu15rsk/C110-TA
import plotly.figure_factory as ff
import plotly.graph_objects as go
import statistics
import random
import pandas as pd

df = pd.read_csv("data.csv")
data = df["temp"].tolist()

def random_set_of_mean(counter):
    dataset = []
    for i in range(0, counter):
        random_index = random.randint(0, len(data) - 1)
        value = data[random_index]
        dataset.append(value)
    mean = statistics.mean(dataset)
    return mean

def show_fig(mean_list):
    mean = statistics.mean(mean_list)
    # plot the distribution of the sample means with a line marking the mean
    fig = ff.create_distplot([mean_list], ["temp"], show_hist=False)
    fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 1], mode="lines", name="MEAN"))
    fig.show()

def setup():
    mean_list = []
    for i in range(0, 1000):
        set_of_means = random_set_of_mean(100)
        mean_list.append(set_of_means)
    show_fig(mean_list)
    mean = statistics.mean(mean_list)

setup()

# code to find the standard deviation of the sampling distribution
def standard_deviation():
    mean_list = []  # empty list
    for i in range(0, 1000):
        # call random_set_of_mean and store the result in set_of_means
        set_of_means = random_set_of_mean(100)  # 1000 sample means are collected
        mean_list.append(set_of_means)  # add each sample mean to the list
    std_deviation = statistics.stdev(mean_list)
    print("Standard deviation of sampling distribution:- ", std_deviation)

standard_deviation()  # calling function
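# Rough cross-check: by the central limit theorem, the value printed above should be
# close to the population standard deviation divided by the square root of the sample
# size, i.e. roughly:
#
#     import math
#     statistics.stdev(data) / math.sqrt(100)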
|
StarcoderdataPython
|
193970
|
<filename>EnvMS/tests.py
# -*- coding: utf-8 -*-
# 测试环境管理API
# Created: 2016-7-27
# Copyright: (c) 2016<<EMAIL>>
from django.test import TestCase
|
StarcoderdataPython
|
3314186
|
<reponame>GCerar/pysnesens
"""Python API for interaction with SNE-SENS-V1.1.0 sensor board.
Contains:
- (IC1) TMP75:
- (IC3) LPS331AP:
- (IC4) SHT21:
- (IC5) SI1143:
- (IC6) TCS3772:
- (IC7) ADMP521:
"""
from .sht21 import SHT21
from .lps331ap import LPS331AP
|
StarcoderdataPython
|
166597
|
<gh_stars>0
from scowclient import ScowClient
import json
def listProcs():
    sclient = ScowClient()
    jsonObj = sclient.get('processDefinitions')
    prettyPrintJson(jsonObj['processDefinitions'][0:4])

def prettyPrintJson(obj):
    print(json.dumps(obj, sort_keys=True, indent=4))

def main():
    listProcs()

if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
12820952
|
<filename>bgjobs/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-25 16:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [("projectroles", "0005_update_uuid")]
operations = [
migrations.CreateModel(
name="BackgroundJob",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"date_modified",
models.DateTimeField(auto_now=True, help_text="DateTime of last modification"),
),
(
"sodar_uuid",
models.UUIDField(
default=uuid.uuid4, help_text="BG Job SODAR UUID", unique=True
),
),
("job_type", models.CharField(help_text="Type of the job", max_length=512)),
("name", models.CharField(max_length=512)),
("description", models.TextField()),
(
"status",
models.CharField(
choices=[
("initial", "initial"),
("running", "running"),
("done", "done"),
("failed", "failed"),
],
default="initial",
max_length=50,
),
),
(
"project",
models.ForeignKey(
help_text="Project in which this objects belongs",
on_delete=django.db.models.deletion.CASCADE,
to="projectroles.Project",
),
),
],
)
]
|
StarcoderdataPython
|
6617866
|
<reponame>hadware/pyannote-audio
import numpy as np
import torch
from pyannote.audio.utils.permutation import permutate
def test_permutate_torch():
num_frames, num_speakers = 10, 3
actual_permutations = [
(0, 1, 2),
(0, 2, 1),
(1, 0, 2),
(1, 2, 0),
(2, 0, 1),
(2, 1, 0),
]
batch_size = len(actual_permutations)
y2 = torch.randn((num_frames, num_speakers))
y1 = torch.zeros((batch_size, num_frames, num_speakers))
for p, permutation in enumerate(actual_permutations):
y1[p] = y2[:, permutation]
permutated_y2, permutations = permutate(y1, y2)
assert actual_permutations == permutations
for p, permutation in enumerate(actual_permutations):
np.testing.assert_allclose(permutated_y2[p], y2[:, permutation])
def test_permutate_numpy():
num_frames, num_speakers = 10, 3
actual_permutations = [
(0, 1, 2),
(0, 2, 1),
(1, 0, 2),
(1, 2, 0),
(2, 0, 1),
(2, 1, 0),
]
batch_size = len(actual_permutations)
y2 = np.random.randn(num_frames, num_speakers)
y1 = np.zeros((batch_size, num_frames, num_speakers))
for p, permutation in enumerate(actual_permutations):
y1[p] = y2[:, permutation]
permutated_y2, permutations = permutate(y1, y2)
assert actual_permutations == permutations
for p, permutation in enumerate(actual_permutations):
np.testing.assert_allclose(permutated_y2[p], y2[:, permutation])
def test_permutate_less_speakers():
num_frames = 10
actual_permutations = [
(0, 1, None),
(0, None, 1),
(1, 0, None),
(1, None, 0),
(None, 0, 1),
(None, 1, 0),
]
batch_size = len(actual_permutations)
y2 = np.random.randn(num_frames, 2)
y1 = np.zeros((batch_size, num_frames, 3))
for p, permutation in enumerate(actual_permutations):
for i, j in enumerate(permutation):
if j is not None:
y1[p, :, i] = y2[:, j]
permutated_y2, permutations = permutate(y1, y2)
assert permutations == actual_permutations
def test_permutate_more_speakers():
num_frames = 10
actual_permutations = [
(0, 1),
(0, 2),
(1, 0),
(1, 2),
(2, 0),
(2, 1),
]
batch_size = len(actual_permutations)
y2 = np.random.randn(num_frames, 3)
y1 = np.zeros((batch_size, num_frames, 2))
for p, permutation in enumerate(actual_permutations):
for i, j in enumerate(permutation):
y1[p, :, i] = y2[:, j]
permutated_y2, permutations = permutate(y1, y2)
assert permutations == actual_permutations
np.testing.assert_allclose(permutated_y2, y1)
|
StarcoderdataPython
|
4849346
|
<reponame>Ventu012/P1_Facial_Keypoints
import torch
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
## TODO: Define all the layers of this CNN, the only requirements are:
## 1. This network takes in a square (same width and height), grayscale image as input
## 2. It ends with a linear layer that represents the keypoints
## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
## self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0)
self.conv1 = nn.Conv2d(1, 32, 5)
# output size = (W-F)/S +1 = (224-5)/1 +1 = 220
# the output Tensor for one image, will have the dimensions: (32, 220, 220)
# Initialize the weights by performing Xavier initialization
nn.init.xavier_normal_(self.conv1.weight)
# maxpool layer
# pool with kernel_size=2, stride=2
self.pool1 = nn.MaxPool2d(2, 2)
# 220/2 = 110
# the output Tensor for one image, will have the dimensions: (32, 110, 110)
## self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0)
self.conv2 = nn.Conv2d(32, 64, 3)
# output size = (W-F)/S +1 = (110-3)/1 + 1 = 108
# the output Tensor for one image, will have the dimensions: (64, 108, 108)
# Initialize the weights by performing Xavier initialization
nn.init.xavier_normal_(self.conv2.weight)
# maxpool layer
# pool with kernel_size=2, stride=2
self.pool2 = nn.MaxPool2d(2, 2)
# 108/2 = 54
# the output Tensor for one image, will have the dimensions: (64, 54, 54)
## self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0)
self.conv3 = nn.Conv2d(64, 128, 3)
# output size = (W-F)/S +1 = (54-3)/1 + 1 = 52
# the output Tensor for one image, will have the dimensions: (128, 52, 52)
# Initialize the weights by performing Xavier initialization
nn.init.xavier_normal_(self.conv3.weight)
# maxpool layer
# pool with kernel_size=2, stride=2
self.pool3 = nn.MaxPool2d(2, 2)
# 52/2 = 26
# the output Tensor for one image, will have the dimensions: (128, 26, 26)
## self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0)
self.conv4 = nn.Conv2d(128, 256, 3)
# output size = (W-F)/S +1 = (26-3)/1 + 1 = 24
# the output Tensor for one image, will have the dimensions: (256, 24, 24)
# Initialize the weights by performing Xavier initialization
nn.init.xavier_normal_(self.conv4.weight)
# maxpool layer
# pool with kernel_size=2, stride=2
self.pool4 = nn.MaxPool2d(2, 2)
# 24/2 = 12
# the output Tensor for one image, will have the dimensions: (256, 12, 12)
## self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0)
self.conv5 = nn.Conv2d(256, 512, 3)
# output size = (W-F)/S +1 = (12-3)/1 + 1 = 10
# the output Tensor for one image, will have the dimensions: (512, 10, 10)
# Initialize the weights by performing Xavier initialization
nn.init.xavier_normal_(self.conv5.weight)
# maxpool layer
# pool with kernel_size=2, stride=2
#self.pool5 = nn.MaxPool2d(2, 2)
# 10/2 = 5
# the output Tensor for one image, will have the dimensions: (512, 5, 5)
# Fully-connected (linear) layers
self.fc1 = nn.Linear(512*10*10, 1024)
# Initialize the weights by performing Xavier initialization
nn.init.xavier_normal_(self.fc1.weight)
self.fc2 = nn.Linear(1024, 512)
# Initialize the weights by performing Xavier initialization
nn.init.xavier_normal_(self.fc2.weight)
self.fc3 = nn.Linear(512, 68*2)
# Initialize the weights by performing Xavier initialization
nn.init.xavier_normal_(self.fc3.weight)
# Dropout
self.dropout = nn.Dropout(p=0.25)
def forward(self, x):
## TODO: Define the feedforward behavior of this model
## x is the input image and, as an example, here you may choose to include a pool/conv step:
# 5 conv/relu + pool layers
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = self.pool3(F.relu(self.conv3(x)))
x = self.pool4(F.relu(self.conv4(x)))
# x = self.pool5(F.relu(self.conv5(x)))
x = F.relu(self.conv5(x))
# Prep for linear layer / Flatten
x = x.view(x.size(0), -1)
# linear layers with dropout in between
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = self.fc3(x)
return x
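# Minimal smoke-test sketch (not part of the original file): run a forward pass on a
# random 224x224 grayscale image, the input size assumed by the layer-size comments above.
if __name__ == "__main__":
    net = Net()
    dummy = torch.randn(1, 1, 224, 224)  # (batch, channels, height, width)
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([1, 136]) -> 68 (x, y) keypoint pairs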
|
StarcoderdataPython
|
4822959
|
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import numpy as np
def track(df: pd.DataFrame,
curves: list = None,
lims: list = None,
clims: list = None,
dtick: bool = False,
scale: str ='linear',
curvetitle: str ='Track',
colormap: str='plasma',
ax=None,
fontsize=8,
correlation: pd.DataFrame = None,
grid_numbers : list = [11,51],
steps: list = None,
legend:bool = True,
grid:bool = True,
track_kw=[],
corr_kw={},
depth_ref:str='md',
):
"""track [summary]
    Parameters
    ----------
    df : pd.DataFrame
        Well-log data indexed by measured depth (or containing a depth column, see depth_ref)
    curves : list, optional
        Column names of df to plot on this track, by default None
    lims : list, optional
        Depth limits [top, bottom]; taken from the data when None, by default None
    clims : list, optional
        Curve (x-axis) limits; [0, 1] when None, by default None
    dtick : bool, optional
        Show depth tick labels on the y-axis, by default False
    scale : str, optional
        X-axis scale, 'linear' or 'log', by default 'linear'
    curvetitle : str, optional
        Title shown above the track, by default 'Track'
    colormap : str, optional
        Matplotlib colormap used to color the curves, by default 'plasma'
    ax : matplotlib.axes.Axes, optional
        Axes to plot into; the current axes are used when None, by default None
    fontsize : int, optional
        Tick-label font size, by default 8
    correlation : pd.DataFrame, optional
        Correlation tops with a 'depth' column and optional 'comment' column, by default None
    grid_numbers : list, optional
        Number of major and minor horizontal grid lines, by default [11,51]
    steps : list, optional
        Major and minor grid spacing in depth units (overrides grid_numbers), by default None
    legend : bool, optional
        Show the curve legend, by default True
    grid : bool, optional
        Draw the grid, by default True
    track_kw : list, optional
        Per-curve keyword arguments passed to plot/scatter, by default []
    corr_kw : dict, optional
        Keyword arguments for the correlation lines, by default {}
    depth_ref : str, optional
        Depth reference: 'md' uses the DataFrame index, otherwise the named column, by default 'md'
    """
#get number of curves to build the colormap
n_curves = len(curves)
cmap = mpl.cm.get_cmap(colormap,n_curves)
tax=ax or plt.gca()
defkwa = {
'color': 'black',
'linestyle':'-',
'linewidth': 1
}
def_corr_kw = {
'color': 'red',
'linestyle':'--',
'linewidth': 2
}
for (k,v) in def_corr_kw.items():
if k not in corr_kw:
corr_kw[k]=v
depth = df.index if depth_ref=='md' else df[depth_ref]
#Plot main Lines
if curves is not None:
for i,g in enumerate(curves):
if len(track_kw)<i+1:
track_kw.append(defkwa)
track_kw[i]['color']=cmap(i)
for (k,v) in defkwa.items():
if k not in track_kw[i]:
track_kw[i][k]=v
scatter = track_kw[i].pop('scatter',False)
if scatter:
tax.scatter(df[g],depth,label=g,**track_kw[i])
else:
tax.plot(df[g],depth,label=g,**track_kw[i])
if lims==None: #Depth Limits
lims=[depth.min(),depth.max()]
tax.set_ylim([lims[1],lims[0]])
#Set the vertical grid spacing
if steps is None:
mayor_grid = np.linspace(lims[0],lims[1],grid_numbers[0])
minor_grid = np.linspace(lims[0],lims[1],grid_numbers[1])
else:
mayor_grid = np.arange(lims[0],lims[1],steps[0])
minor_grid = np.arange(lims[0],lims[1],steps[1])
tax.set_xlabel(curvetitle)
tax.set_xscale(scale)
if clims==None:
clims=[0,1]
tax.set_xlim(clims)
if scale=='log':
ticks=np.round(np.power(10,np.linspace(np.log10(clims[0]),np.log10(clims[1]),int(np.log10(clims[1]/clims[0])+1))),decimals=1)
else:
ticks = np.round(np.linspace(clims[0],clims[1],4),decimals=1)
tax.set_xticks(ticks)
tax.set_xticklabels(ticks)
tax.xaxis.tick_top()
tax.xaxis.set_label_position("top")
tax.tick_params("both",labelsize=fontsize)
tax.set_yticks(mayor_grid)
tax.set_yticks(minor_grid,minor=True)
if grid == True:
tax.grid(True,linewidth=1.0)
tax.grid(True,which='minor', linewidth=0.5)
if dtick==True:
tax.set_yticklabels(np.linspace(lims[0],lims[1],11))
else:
tax.set_yticklabels([])
if legend:
tax.legend()
#Add Correlation Line
if correlation is not None:
cor_ann = corr_kw.pop('ann',False)
for i in correlation.iterrows():
tax.hlines(i[1]['depth'],clims[0],clims[1], **corr_kw)
if cor_ann:
try:
tax.annotate(f"{i[1]['depth']} - {i[1]['comment']} ",xy=(clims[1]-3,i[1]['depth']-1),
xycoords='data',horizontalalignment='right',bbox={'boxstyle':'roundtooth', 'fc':'0.8'})
except:
tax.annotate(f"{i[1]['depth']}",xy=(clims[1]-3,i[1]['depth']-1),
xycoords='data',horizontalalignment='right',
bbox={'boxstyle':'roundtooth', 'fc':'0.8'})
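# Illustrative usage sketch (not part of the original module): the DataFrame, curve
# names, and limits below are made-up example values.
if __name__ == "__main__":
    depth = np.arange(1000.0, 1100.0, 0.5)
    logs = pd.DataFrame(
        {"GR": np.random.rand(depth.size), "NPHI": np.random.rand(depth.size)},
        index=depth,
    )
    fig, ax = plt.subplots(figsize=(3, 8))
    track(logs, curves=["GR", "NPHI"], clims=[0, 1], curvetitle="Logs", ax=ax)
    plt.show()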
|
StarcoderdataPython
|
286027
|
<reponame>samir-nasibli/sdc
import os
import unittest
from sdc.tests.tests_perf.test_perf_utils import *
class TestBase(unittest.TestCase):
iter_number = 5
results_class = TestResults
@classmethod
def create_test_results(cls):
drivers = []
if is_true(os.environ.get('SDC_TEST_PERF_EXCEL', True)):
drivers.append(ExcelResultsDriver('perf_results.xlsx'))
if is_true(os.environ.get('SDC_TEST_PERF_CSV', False)):
drivers.append(CSVResultsDriver('perf_results.csv'))
results = cls.results_class(drivers)
if is_true(os.environ.get('LOAD_PREV_RESULTS')):
results.load()
return results
@classmethod
def setUpClass(cls):
cls.test_results = cls.create_test_results()
cls.total_data_length = []
cls.num_threads = int(os.environ.get('NUMBA_NUM_THREADS', config.NUMBA_NUM_THREADS))
cls.threading_layer = os.environ.get('NUMBA_THREADING_LAYER', config.THREADING_LAYER)
@classmethod
def tearDownClass(cls):
cls.test_results.print()
cls.test_results.dump()
|
StarcoderdataPython
|
12804188
|
import pymysql
pymysql.install_as_MySQLdb()
import MySQLdb
from config import *
def Querry(sql):
db = MySQLdb.connect(user=USERDATABASE, passwd=<PASSWORD>ABASE, host="localhost", db=DATABASE)
cursor, sql = db.cursor(), str(sql)
cursor.execute(sql)
db.commit()
data = cursor.fetchall()
db.close()
return data
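# Illustrative usage sketch (not part of the original module): the table and column
# names in the query are hypothetical.
if __name__ == "__main__":
    for row in Querry("SELECT id, name FROM users LIMIT 10"):
        print(row)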
|
StarcoderdataPython
|
4825894
|
<filename>raven/__init__.py
"""
raven
~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import os.path
__all__ = ('VERSION', 'Client', 'get_version')
VERSION = '6.1.0.dev0'
def _get_git_revision(path):
revision_file = os.path.join(path, 'refs', 'heads', 'master')
if not os.path.exists(revision_file):
return None
fh = open(revision_file, 'r')
try:
return fh.read().strip()[:7]
finally:
fh.close()
def get_revision():
"""
:returns: Revision number of this branch/checkout, if available. None if
no revision number can be determined.
"""
package_dir = os.path.dirname(__file__)
checkout_dir = os.path.normpath(os.path.join(package_dir, os.pardir, os.pardir))
path = os.path.join(checkout_dir, '.git')
if os.path.exists(path):
return _get_git_revision(path)
return None
def get_version():
base = VERSION
if __build__:
base = '%s (%s)' % (base, __build__)
return base
__build__ = get_revision()
__docformat__ = 'restructuredtext en'
# Declare child imports last to prevent recursion
from raven.base import * # NOQA
from raven.conf import * # NOQA
from raven.versioning import * # NOQA
|
StarcoderdataPython
|
3356734
|
<reponame>jacobic/redmapper
#!/usr/bin/env python
import os
import sys
import subprocess
import multiprocessing
import argparse
parser = argparse.ArgumentParser(description='Run multiple redmapper pixels on the same node')
parser.add_argument('-c', '--command', action='store', type=str, required=True, help='Command to run')
parser.add_argument('-P', '--pixels', action='store', type=str, required=True, help='Comma-separated list of pixels')
args = parser.parse_args()
pixels = args.pixels.split(',')
class RunCommand(object):
def __init__(self, command):
self.command = command
def __call__(self, pixel):
full_command = self.command + ' -p ' + pixel
print(full_command)
subprocess.call(full_command, shell=True)
runCommand = RunCommand(args.command)
pool = multiprocessing.Pool(processes=len(pixels))
results = []
for pixel in pixels:
results.append(pool.apply_async(runCommand, (pixel, )))
pool.close()
pool.join()
for res in results:
res.get()
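# Example invocation sketch (the script name, command, and pixel numbers below are
# hypothetical): each pixel is appended to the command as "-p <pixel>" and all pixels
# run in parallel in one multiprocessing pool on the same node.
#   python run_pixels.py -c "some_redmapper_command --config run.cfg" -P 100,101,102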
|
StarcoderdataPython
|
3262952
|
from prometheus_client.registry import CollectorRegistry
from app.prom.metrics.general.io_stall import IOStall, NAME, READ, WRITE, STALL, QUEUED_READ, QUEUED_WRITE
def test_should_collect():
test_data_1 = {NAME: 'test_1', READ: 300, WRITE: 100, STALL: 500, QUEUED_READ: 100, QUEUED_WRITE: 100}
test_data_2 = {NAME: 'test_2', READ: 3, WRITE: 1, STALL: 5, QUEUED_READ: 1, QUEUED_WRITE: 1}
io_stall = IOStall(CollectorRegistry())
io_stall.collect(rows=(_ for _ in [test_data_1, test_data_2]))
samples = next(iter(io_stall.metric.collect())).samples
iter_samples = iter(samples)
assert_sample_metric(iter_samples, test_data_1, READ)
assert_sample_metric(iter_samples, test_data_1, WRITE)
assert_sample_metric(iter_samples, test_data_1, QUEUED_READ)
assert_sample_metric(iter_samples, test_data_1, QUEUED_WRITE)
assert_sample_metric(iter_samples, test_data_2, READ)
assert_sample_metric(iter_samples, test_data_2, WRITE)
assert_sample_metric(iter_samples, test_data_2, QUEUED_READ)
assert_sample_metric(iter_samples, test_data_2, QUEUED_WRITE)
samples = next(iter(io_stall.metric_total.collect())).samples
iter_samples = iter(samples)
assert_sample_metric_total(iter_samples, test_data_1)
assert_sample_metric_total(iter_samples, test_data_2)
def assert_sample_metric(iter_samples, test_data, stall_type):
sample = next(iter_samples)
assert test_data[stall_type] == sample.value
assert test_data[NAME] == sample.labels['database']
def assert_sample_metric_total(iter_samples, test_data_1):
sample = next(iter_samples)
assert test_data_1[STALL] == sample.value
assert test_data_1[NAME] == sample.labels['database']
|
StarcoderdataPython
|
1600528
|
from reliapy._messages import *
from scipy.stats import norm
import numpy as np
from reliapy.math import spectral_decomposition, cholesky_decomposition
class Random:
"""
``Random`` simple random sampling.
**Input:**
* **distribution_obj** (`object`)
Object of ``JointDistribution``.
**Attributes:**
* **distribution_obj** (`object`)
Object of ``JointDistribution``.
* **marginal** (`list`)
A list of objects of marginal distribution.
* **correlation** (`ndarray`)
Correlation matrix.
* **nrv** (`int`)
Number of random variables.
* **random_state** (`float`, `int`)
Random seed.
* **decomposition** (`str`)
        Method used to decompose the correlation matrix: `spectral` or `cholesky`.
* **mean** (`ndarray`)
Array of means.
* **std** (`ndarray`)
Array of standard deviations.
"""
def __init__(self, distribution_obj=None):
if not isinstance(distribution_obj.marginal, list):
type_error('distributions', 'list')
self.distribution_obj = distribution_obj
self.marginal = distribution_obj.marginal
self.Cz = distribution_obj.Cz
self.nrv = len(distribution_obj.marginal)
self.random_state = distribution_obj.random_state
self.decomposition = distribution_obj.decomposition
mean = []
std = []
for i in range(self.nrv):
m = distribution_obj.marginal[i].stats[0]
s = np.sqrt(distribution_obj.marginal[i].stats[1])
mean.append(m)
std.append(s)
self.mean = np.array(mean)
self.std = np.array(std)
def rvs(self, n_sim=1):
"""
Get random samples from the joint PDF using the simple sampling.
**Input:**
* **n_sim** (`float`)
Number of samples.
**Output:**
* **x** (`ndarray`)
Random samples.
"""
if self.decomposition == 'spectral':
_, Jzy = spectral_decomposition(self.Cz)
elif self.decomposition == 'cholesky':
_, Jzy = cholesky_decomposition(self.Cz)
else:
not_implemented_error()
y = norm.rvs(loc=0, scale=1, size=(self.nrv, n_sim), random_state=self.random_state)
z = Jzy @ y
x = []
for i in range(n_sim):
u = norm.cdf(z[:, i], loc=0, scale=1)
xj = []
for j in range(self.nrv):
x_ = self.marginal[j].icdf(u[j])
xj.append(x_)
x.append(xj)
x = np.array(x)
return x
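# Minimal runnable sketch (not part of the original module): Random only needs an object
# exposing .marginal, .Cz, .random_state and .decomposition, so a small stand-in is used
# here instead of reliapy's JointDistribution. The normal marginals and the correlation
# value are arbitrary example choices.
if __name__ == "__main__":
    from types import SimpleNamespace
    class _NormalMarginal:
        def __init__(self, mean, std):
            self.stats = (mean, std ** 2)  # (mean, variance), as read by Random.__init__
            self._mean, self._std = mean, std
        def icdf(self, u):
            return norm.ppf(u, loc=self._mean, scale=self._std)
    joint = SimpleNamespace(
        marginal=[_NormalMarginal(0.0, 1.0), _NormalMarginal(10.0, 2.0)],
        Cz=np.array([[1.0, 0.3], [0.3, 1.0]]),
        random_state=42,
        decomposition="cholesky",
    )
    samples = Random(joint).rvs(n_sim=5)
    print(samples.shape)  # (5, 2): five samples of the two correlated variables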
|
StarcoderdataPython
|
5070477
|
<reponame>Christovis/wys-ars<filename>src/astrild/rays/voids/tunnels/gadget.py
import os.path
import numpy as np
from astrild.rays.voids.tunnels.miscellaneous import (
throwError,
throwWarning,
charToString,
)
GadgetFileType = {1: "Gadget1", 2: "Gadget2", 3: "HDF5", -1: "Unknown"}
class GadgetParticles:
""" Class used for storing and accessing the data stored in a Gadget file. It keeps track of the properties of Dark Matter particles: position, velocity, identity and mass. """
def __init__(self):
self.noParticles = np.uint64(0)
self.hasHeader = False
self.hasPos = False
self.hasVel = False
self.hasIds = False
self.hasMass = False
def AddHeader(self, Header):
self.header = Header
self.hasHeader = True
def AddPos(self, positions):
self.pos = positions
self.pos.shape = (-1, 3)
self.hasPos = True
self.UpdateNoParticles(self.pos.size / 3, "positions")
def AddVel(self, velocities):
self.vel = velocities
self.vel.shape = (-1, 3)
self.hasVel = True
self.UpdateNoParticles(self.vel.size / 3, "velocities")
def AddIds(self, ids):
self.ids = ids
self.ids.shape = -1
self.hasIds = True
self.UpdateNoParticles(self.ids.size, "identities")
def AddMass(self, masses):
self.mass = masses
if self.mass is not None:
self.mass.shape = -1
self.hasMass = True
self.UpdateNoParticles(self.mass.size, "masses")
def Header(self):
if self.hasHeader:
return self.header
else:
throwError(
"No header was added to class 'GadgetParticles' via the function 'AddHeader'. Hence you cannot use the function 'Header' to access the Gadget header."
)
def Pos(self):
if self.hasPos:
return self.pos
else:
throwError(
"No position array was added to class 'GadgetParticles' via the function 'AddPos'. Hence you cannot use the function 'Pos' to access the position array."
)
def Vel(self):
if self.hasVel:
return self.vel
else:
throwError(
"No velocity array was added to class 'GadgetParticles' via the function 'AddVel'. Hence you cannot use the function 'Vel' to access the velocities array."
)
def Ids(self):
if self.hasIds:
return self.ids
else:
            throwError(
                "No particle identities array was added to class 'GadgetParticles' via the function 'AddIds'. Hence you cannot use the function 'Ids' to access the identities array."
            )
def Mass(self):
if self.hasMass:
return self.mass
elif self.hasHeader and self.header.mass[1] != 0.0:
return np.array([self.header.mass[1]], "f4")
else:
            throwError(
                "No particle mass array was added to class 'GadgetParticles' via the function 'AddMass'. Hence you cannot use the function 'Mass' to access the mass array."
            )
def VariableMass(self):
if self.hasMass:
return True
else:
return False
def Update(self, selection):
"""Update the particle information using only the particles selected via 'selection'"""
if selection.size != self.noParticles:
throwError(
"In function 'GadgetParticles::Update'. You are trying to update the particle selection using a numpy array that has a different size than the number of particles in class 'GadgetParticles'. Number of particles = %i while the selection array has length = %i."
% (self.noParticles, selection.size)
)
self.noParticles = np.sum(selection)
if self.hasHeader:
self.header.npart[1] = self.noParticles
self.header.npartTotal[1] = self.noParticles
self.pos = self.pos[selection, :]
self.vel = self.vel[selection, :]
self.ids = self.ids[selection]
if self.VariableMass():
self.mass = self.mass[selection]
def UpdateNoParticles(self, numberPart, arrayName):
if numberPart != self.noParticles and self.noParticles != 0:
throwWarning(
"The Gadget particle array '%s' has a different number of particles than the previous array/arrays."
% arrayName
)
if self.noParticles == 0:
self.noParticles = numberPart
def CheckDataCompletness(
self, HEADER=True, POS=True, VEL=True, ID=True, MASS=True
):
completness = True
if HEADER and not self.hasHeader:
            throwWarning(
                "'GadgetParticles' completeness check failed: Gadget header is missing. The Gadget header must be added to the class using the function 'AddHeader(header)'."
            )
completness = False
if POS and not self.hasPos:
            throwWarning(
                "'GadgetParticles' completeness check failed: Gadget particle position array is missing. The position array must be added to the class using the function 'AddPos(position)'."
            )
completness = False
if VEL and not self.hasVel:
            throwWarning(
                "'GadgetParticles' completeness check failed: Gadget particle velocity array is missing. The velocity array must be added to the class using the function 'AddVel(velocity)'."
            )
completness = False
if ID and not self.hasIds:
            throwWarning(
                "'GadgetParticles' completeness check failed: Gadget particle id array is missing. The id array must be added to the class using the function 'AddIds(ids)'."
            )
completness = False
if MASS and not self.hasMass and self.Header().mass[1] == 0.0:
            throwWarning(
                "'GadgetParticles' completeness check failed: Gadget particle mass array is missing (and there is no particle mass in the Gadget header). The masses array must be added to the class using the function 'AddMass(masses)'."
            )
completness = False
return completness
def SameNumberOfParticles(self, POS=True, VEL=True, ID=True, MASS=True):
noParticles = 0
sameNumber = True
self.CheckDataCompletness(
HEADER=False, POS=POS, VEL=VEL, ID=ID, MASS=MASS
)
if POS and self.hasPos:
if noParticles == 0:
noParticles = self.Pos().size / 3
elif noParticles != self.Pos().size / 3:
sameNumber = False
if VEL and self.hasVel:
if noParticles == 0:
noParticles = self.Vel().size / 3
elif noParticles != self.Vel().size / 3:
sameNumber = False
if ID and self.hasIds:
if noParticles == 0:
noParticles = self.Ids().size
elif noParticles != self.Ids().size:
sameNumber = False
if MASS and self.hasMass:
if noParticles == 0:
noParticles = self.Mass().size
elif noParticles != self.Mass().size:
sameNumber = False
return (noParticles, sameNumber)
class GadgetHeader:
""" A class used for reading and storing the header of a Gadget snapshot file. It uses the numpy class to define the variables. """
headerSize = 256
fillSize = headerSize - 20 * 8 - 12 * 4
def __init__(self):
self.npart = np.zeros(6, dtype=np.uint32)
self.mass = np.zeros(6, dtype=np.float64)
self.time = np.float64(0.0)
self.redshift = np.float64(0.0)
self.flag_sfr = np.int32(0)
self.flag_feedback = np.int32(0)
self.npartTotal = np.zeros(6, dtype=np.uint32)
self.flag_cooling = np.int32(0)
self.num_files = np.int32(1)
self.BoxSize = np.float64(0.0)
self.Omega0 = np.float64(0.0)
self.OmegaLambda = np.float64(0.0)
self.HubbleParam = np.float64(0.0)
self.flag_stellarage = np.int32(0)
self.flag_metals = np.int32(0)
self.num_total_particles_hw = np.zeros(6, dtype=np.uint32)
self.flag_entropy_instead_u = np.int32(0)
self.flag_doubleprecision = np.int32(0)
self.flag_ic_info = np.int32(0)
self.lpt_scalingfactor = np.float32(0.0)
self.fill = np.zeros(GadgetHeader.fillSize, dtype="c")
def SetType(self):
self.time = np.float64(self.time)
self.redshift = np.float64(self.redshift)
self.flag_sfr = np.int32(self.flag_sfr)
self.flag_feedback = np.int32(self.flag_feedback)
self.flag_cooling = np.int32(self.flag_cooling)
self.num_files = np.int32(self.num_files)
self.BoxSize = np.float64(self.BoxSize)
self.Omega0 = np.float64(self.Omega0)
self.OmegaLambda = np.float64(self.OmegaLambda)
self.HubbleParam = np.float64(self.HubbleParam)
self.flag_stellarage = np.int32(self.flag_stellarage)
self.flag_metals = np.int32(self.flag_metals)
self.flag_entropy_instead_u = np.int32(self.flag_entropy_instead_u)
self.flag_doubleprecision = np.int32(self.flag_doubleprecision)
self.flag_ic_info = np.int32(self.flag_ic_info)
self.lpt_scalingfactor = np.float32(self.lpt_scalingfactor)
def nbytes(self):
__size = (
self.npart.nbytes
+ self.mass.nbytes
+ self.time.nbytes
+ self.redshift.nbytes
+ self.flag_sfr.nbytes
+ self.flag_feedback.nbytes
+ self.npartTotal.nbytes
+ self.flag_cooling.nbytes
+ self.num_files.nbytes
+ self.BoxSize.nbytes
+ self.Omega0.nbytes
+ self.OmegaLambda.nbytes
+ self.HubbleParam.nbytes
+ self.flag_stellarage.nbytes
+ self.flag_metals.nbytes
+ self.num_total_particles_hw.nbytes
+ self.flag_entropy_instead_u.nbytes
+ self.flag_doubleprecision.nbytes
+ self.flag_ic_info.nbytes
+ self.lpt_scalingfactor.nbytes
+ self.fill.nbytes
)
return __size
def dtype(self):
__dt = np.dtype(
[
("npart", np.uint32, 6),
("mass", np.float64, 6),
("time", np.float64),
("redshift", np.float64),
("flag_sfr", np.int32),
("flag_feedback", np.int32),
("npartTotal", np.uint32, 6),
("flag_cooling", np.int32),
("num_files", np.int32),
("BoxSize", np.float64),
("Omega0", np.float64),
("OmegaLambda", np.float64),
("HubbleParam", np.float64),
("flag_stellarage", np.int32),
("flag_metals", np.int32),
("num_total_particles_hw", np.uint32, 6),
("flag_entropy_instead_u", np.int32),
("flag_doubleprecision", np.int32),
("flag_ic_info", np.int32),
("lpt_scalingfactor", np.float32),
("fill", "c", GadgetHeader.fillSize),
]
)
return __dt
def TupleAsString(self):
return "( self.npart, self.mass, self.time, self.redshift, self.flag_sfr, self.flag_feedback, self.npartTotal, self.flag_cooling, self.num_files, self.BoxSize, self.Omega0, self.OmegaLambda, self.HubbleParam, self.flag_stellarage, self.flag_metals, self.num_total_particles_hw, self.flag_entropy_instead_u, self.flag_doubleprecision, self.flag_ic_info, self.lpt_scalingfactor, self.fill )"
def Tuple(self):
return eval(self.TupleAsString())
def fromfile(
self, f, BUFFER=True, bufferType=np.dtype("i4"), switchEndian=False
):
if BUFFER:
__buffer1 = np.fromfile(f, bufferType, 1)[0]
A = np.fromfile(f, self.dtype(), 1)[0]
if switchEndian:
for i in range(len(A)):
if A[i].ndim >= 1:
A[i][:] = A[i].byteswap()
else:
A[i] = A[i].byteswap()
if BUFFER:
__buffer2 = np.fromfile(f, bufferType, 1)[0]
if switchEndian:
__buffer1 = __buffer1.byteswap()
__buffer2 = __buffer2.byteswap()
if __buffer1 != __buffer2 or __buffer1 != GadgetHeader.headerSize:
throwError(
"Error reading the header of the Gadget file. 'buffer1'=%i while 'buffer2'=%i when both should be %i."
% (__buffer1, __buffer2, GadgetHeader.headerSize)
)
exec("%s = A" % self.TupleAsString())
def tofile(self, f, BUFFER=True, bufferType=np.dtype("i4")):
self.SetType()
__A = np.array([self.Tuple()], dtype=self.dtype())
__buffer = np.array([__A.nbytes], dtype=bufferType)
if __A.nbytes != GadgetHeader.headerSize:
throwError(
"When writing the Gadget snapshot header to file. The header size is %i which is different from the expected size of %i."
% (__A.nbytes, GadgetHeader.headerSize)
)
if BUFFER:
__buffer.tofile(f)
__A.tofile(f)
if BUFFER:
__buffer.tofile(f)
def PrintValues(self):
print("The values contained in the Gadget header:")
print(" npart = ", self.npart)
print(" mass = ", self.mass)
print(" time = ", self.time)
print(" redshift = ", self.redshift)
print(" npartTotal = ", self.npartTotal)
print(" num_files = ", self.num_files)
print(" BoxSize = ", self.BoxSize)
print(" Omega0 = ", self.Omega0)
print(" OmegaLambda = ", self.OmegaLambda)
print(" HubbleParam = ", self.HubbleParam)
print(" fill = ", charToString(self.fill))
print()
def Description(self):
__description = [
(
"npartTotal",
"=%s - the total number of particles in the given Gadget snapshot"
% " ".join(str(self.npartTotal).split()),
),
(
"mass",
"=%s - the mass of each particle in the Gadget snapshot (10^10 M_0/h)"
% " ".join(str(self.mass).split()),
),
("time", "=%f - the scaling factor of the snapshot" % self.time),
("redshift", "=%f - the redshift of the snapshot" % self.redshift),
(
"BoxSize",
"=%f - the size of the simulation box in comoving corodinates (kpc/h)"
% self.BoxSize,
),
("Omega0", "=%f - the matter density" % self.Omega0),
(
"OmegaLambda",
"=%f - the Lambda energy density" % self.OmegaLambda,
),
(
"HubbleParam",
"=%f - Hubble parameter 'h' where the Hubble constant H=100 km/s h"
% self.HubbleParam,
),
(
"gadgetFill",
"='%s' - additional information" % charToString(self.fill),
),
]
return __description
def AddProgramCommands(self, commands):
"""Adds the program options used to obtain the current results to the 'fill' array in the header."""
newCommands = self.fill.tostring().rstrip("\x00") + commands + " ; "
choice = int(len(newCommands) < GadgetHeader.fillSize)
newLen = [GadgetHeader.fillSize, len(newCommands)][choice]
newOff = [len(newCommands) - GadgetHeader.fillSize, 0][choice]
self.fill[:newLen] = newCommands[newOff:newLen]
def getGadgetFileType(fileName, bufferType=np.dtype("i4")):
    """Returns the type of gadget file and other properties related to the data format."""
gadgetFileType = GadgetFileType[1]
switchEndian = False
# read the 1st 4 bites and determine the type of the gadget file
entry = np.fromfile(fileName, bufferType, 1)
if entry[0] == 8:
gadgetFileType = GadgetFileType[2]
elif entry[0] != 256: # try to switch endian type
entry2 = entry.byteswap()[0]
switchEndian = True
if entry2 == 8:
gadgetFileType = GadgetFileType[2]
elif entry2 != 256: # test for HDF5 file
gadgetFileType = GadgetFileType[-1]
offsetSize = 0
if gadgetFileType == GadgetFileType[2]:
offsetSize = 16 # jump 16 bytes when reading Gadget2 snapshot files
return gadgetFileType, switchEndian, offsetSize
def gadgetMultipleFiles(rootName, fileIndex):
""" Returns the name of gadget file 'fileIndex' when a snapshot is saved in multiple binary files.
It takes 2 arguments: root name of the files and the file number whose name is requested (from 0 to GadgetHeader.num_files-1)."""
return rootName + "%i" % fileIndex
def readArrayEntries(array, file, startPosition, noEntries):
"""Reads from file 'file' 'noEntries' entries into the array 'array'. The entries are written starting at position 'startPosition' as in a flattened array."""
maxChunckOfData = 1024 ** 3
    noChuncks = noEntries // maxChunckOfData  # integer division: range() below needs an int
for i in range(noChuncks):
array[startPosition : startPosition + maxChunckOfData] = np.fromfile(
file, array.dtype, maxChunckOfData
)
startPosition += maxChunckOfData
noLeftOver = noEntries - maxChunckOfData * noChuncks
array[startPosition : startPosition + noLeftOver] = np.fromfile(
file, array.dtype, noLeftOver
)
return array
def gadgetTotalParticleCount(fileRoot, noFiles, VERBOSE=True):
"""Computes the total number of particles found in the file."""
output = np.zeros(6, np.int64)
for i in range(noFiles):
h = readGadgetHeader(fileRoot, INDEX=i, VERBOSE=False)
output[:] += h.npart[:]
if VERBOSE:
print("Total number of particles: ", output)
return output
def gadgetDataType(
file,
noParticles,
hasMass,
offsetSize,
switchEndian,
bufferType=np.int32,
VERBOSE=True,
):
"""Finds what is the type of the gadget data."""
floatTypes = {4: np.float32, 8: np.float64}
intTypes = {4: np.int32, 8: np.int64}
f = open(file, "rb")
# read header
f.seek(offsetSize, 1)
buffer1 = np.fromfile(f, bufferType, 1)[0]
f.seek(buffer1 + buffer1.nbytes, 1)
# read positions block
f.seek(offsetSize, 1)
buffer1 = np.fromfile(f, bufferType, 1)[0]
f.seek(buffer1 + buffer1.nbytes, 1)
    posSize = buffer1 // noParticles // 3
posType = floatTypes[posSize]
# read velocity block
f.seek(offsetSize, 1)
buffer1 = np.fromfile(f, bufferType, 1)[0]
f.seek(buffer1 + buffer1.nbytes, 1)
    velSize = buffer1 // noParticles // 3
velType = floatTypes[velSize]
# read id block
f.seek(offsetSize, 1)
buffer1 = np.fromfile(f, bufferType, 1)[0]
f.seek(buffer1 + buffer1.nbytes, 1)
    idSize = buffer1 // noParticles
idType = intTypes[idSize]
# read mass block
massSize = 4
if hasMass:
f.seek(offsetSize, 1)
buffer1 = np.fromfile(f, bufferType, 1)[0]
f.seek(buffer1 + buffer1.nbytes, 1)
        massSize = buffer1 // noParticles
massType = floatTypes[massSize]
f.close()
if VERBOSE:
print(
"Gadget data types: pos <-> ",
posType,
" vel <-> ",
velType,
" id <-> ",
idType,
# end=" ",
)
if hasMass:
print(" mass <-> ", massType)
else:
print("")
return (
posType,
posSize,
velType,
velSize,
idType,
idSize,
massType,
massSize,
)
def readGadgetHeader(file, INDEX=0, VERBOSE=True):
""" Reads only the Gadget header from the given Gadget file. It returns the results as the class 'GadgetHeader'.
Takes as argument the name (or root anme) of the file from where to read the header.
Can use VERBOSE = False to turn off the message."""
header = GadgetHeader()
tempName = file
if not os.path.isfile(tempName):
tempName = gadgetMultipleFiles(file, INDEX)
if not os.path.isfile(tempName):
throwError(
"Cannot find the gadget snapshot file. There are no '%s' or '%s' files."
% (file, tempName)
)
if VERBOSE:
print("Reading the header of the Gadget file '%s' ... " % tempName)
gadgetFileType, switchEndian, offsetSize = getGadgetFileType(tempName)
f = open(tempName, "rb")
f.seek(offsetSize, 1)
header.fromfile(f, switchEndian=switchEndian)
f.close()
return header
def readGadgetData(
file,
HEADER=True,
POS=True,
VEL=True,
ID=True,
MASS=True,
VERBOSE=True,
NO_FILES=-1,
):
""" Reads the header and data in a Gadget snapshot (single or multiple files).
It returns the class 'GadgetParticles' which keeps track of the gadget header (class 'GadgetHeader' - if HEADER=True) and the data in numpy array/arrays (depending on what data to read).
Can choose what options to choose via the boolean parameters (default ALL True): HEADER = True/False; POS; VEL; ID; MASS;
Can use VERBOSE = False to turn off the messages."""
if VERBOSE:
print(
"\nReading the data in the Gadget snapshot file/s '%s' ... "
% (file)
)
print("This functions reads only particle type 1 data!")
# Gadget snapshot file variables
NO_DIM = 3 # the number of spatial dimensions
bufferType = np.dtype("i4") # the buffer type before each block of data
bufferSize = np.array(0, bufferType).nbytes # the buffer size
# read the header to find in how many files the snapshot is
tempName = file
if not os.path.isfile(tempName):
tempName = gadgetMultipleFiles(file, 0)
if not os.path.isfile(tempName):
throwError(
"Cannot find the gadget snapshot file. There are no '%s' or '%s' files."
% (file, tempName)
)
gadgetFileType, switchEndian, offsetSize = getGadgetFileType(tempName)
firstFile = tempName
header = readGadgetHeader(file, VERBOSE=False)
if NO_FILES >= 1:
header.num_files = NO_FILES
if header.num_files > 1:
for i in range(header.num_files):
tempName = gadgetMultipleFiles(file, i)
if not os.path.isfile(tempName):
throwError(
"Cannot find the gadget snapshot file number %i with expected name '%s'."
% (i, tempName)
)
# variables where to store the data
noTotParticles = gadgetTotalParticleCount(
file, header.num_files, VERBOSE=VERBOSE
)[1]
hasMass = False
if header.mass[1] == np.float64(0.0):
hasMass = True
(
posType,
posSize,
velType,
velSize,
idType,
idSize,
massType,
massSize,
) = gadgetDataType(
firstFile,
header.npart.sum(),
hasMass,
offsetSize,
switchEndian,
bufferType=bufferType,
VERBOSE=VERBOSE,
)
positions, velocities, ids, masses = None, None, None, None
if POS:
positions = np.empty(NO_DIM * noTotParticles, posType)
if VEL:
velocities = np.empty(NO_DIM * noTotParticles, velType)
if ID:
ids = np.empty(noTotParticles, idType)
if MASS and hasMass:
masses = np.empty(noTotParticles, massType)
# read the data from each file
startPosition = 0
for i in range(header.num_files):
tempName = file
if not os.path.isfile(tempName):
tempName = gadgetMultipleFiles(file, i)
if not os.path.isfile(tempName):
throwError(
"Cannot find the gadget snapshot file number %i with expected name '%s'."
% (i, tempName)
)
if VERBOSE:
print(
"Reading the data in the Gadget file '%s' which is file %i of %i files ... "
% (tempName, i + 1, header.num_files)
)
# read the header
f = open(tempName, "rb")
tempHeader = GadgetHeader()
f.seek(offsetSize, 1)
tempHeader.fromfile(
f, BUFFER=True, bufferType=bufferType, switchEndian=switchEndian
)
dataSize = tempHeader.npart[1]
skipBefore, skipAfter = tempHeader.npart[0], tempHeader.npart[2:6].sum()
# read the positions
f.seek(offsetSize, 1)
if POS:
__buffer1 = np.fromfile(f, bufferType, 1)[0]
f.seek(skipBefore * posSize * 3, 1)
positions = readArrayEntries(
positions, f, NO_DIM * startPosition, NO_DIM * dataSize
)
f.seek(skipAfter * posSize * 3, 1)
__buffer2 = np.fromfile(f, bufferType, 1)[0]
if __buffer1 != __buffer2:
throwError(
"While reading the position block in file '%s'. The buffers before (value=%i) and after (value=%i) the data block do not agree."
% (tempName, __buffer1, __buffer2)
)
else:
f.seek(NO_DIM * posSize * dataSize + 2 * bufferSize, 1)
# read the velocities
f.seek(offsetSize, 1)
if VEL:
__buffer1 = np.fromfile(f, bufferType, 1)[0]
f.seek(skipBefore * velSize * 3, 1)
velocities = readArrayEntries(
velocities, f, NO_DIM * startPosition, NO_DIM * dataSize
)
f.seek(skipAfter * velSize * 3, 1)
__buffer2 = np.fromfile(f, bufferType, 1)[0]
if __buffer1 != __buffer2:
throwError(
"While reading the velocities block in file '%s'. The buffers before (value=%i) and after (value=%i) the data block do not agree."
% (tempName, __buffer1, __buffer2)
)
else:
f.seek(NO_DIM * posSize * dataSize + 2 * bufferSize, 1)
# read the identities
f.seek(offsetSize, 1)
if ID:
__buffer1 = np.fromfile(f, bufferType, 1)[0]
f.seek(skipBefore * idSize, 1)
ids = readArrayEntries(ids, f, startPosition, dataSize)
f.seek(skipAfter * idSize, 1)
__buffer2 = np.fromfile(f, bufferType, 1)[0]
if __buffer1 != __buffer2:
throwError(
"While reading the identities block in file '%s'. The buffers before (value=%i) and after (value=%i) the data block do not agree."
% (tempName, __buffer1, __buffer2)
)
else:
f.seek(idSize * dataSize + 2 * bufferSize, 1)
# read the masses if any
if MASS and tempHeader.mass[1] == np.float64(0.0):
s = tempHeader.mass == np.float64(0.0)
skipBefore = s[0] * skipBefore
skipAfter = (s[2:6] * tempHeader.npart[2:6]).sum()
f.seek(offsetSize, 1)
f.seek(skipBefore * massSize, 1)
__buffer1 = np.fromfile(f, bufferType, 1)[0]
masses = readArrayEntries(masses, f, startPosition, dataSize)
f.seek(skipAfter * massSize, 1)
__buffer2 = np.fromfile(f, bufferType, 1)[0]
if __buffer1 != __buffer2:
throwError(
"While reading the masses block in file '%s'. The buffers before (value=%i) and after (value=%i) the data block do not agree."
% (tempName, __buffer1, __buffer2)
)
f.close()
startPosition += dataSize
# output the results
gadgetParticles = GadgetParticles()
if HEADER:
gadgetParticles.AddHeader(header)
if POS:
gadgetParticles.AddPos(positions)
if VEL:
gadgetParticles.AddVel(velocities)
if ID:
gadgetParticles.AddIds(ids)
if MASS:
gadgetParticles.AddMass(masses)
return gadgetParticles
def writeGadgetData(file, gadgetParticles, VERBOSE=True):
""" Writes a single Gadget file.
It takes two arguments: the name of the output file and the data to be written as the class 'GadgetParticles'. """
bufferType = np.dtype("i4") # the buffer type before each block of data
# do some error checking
if VERBOSE:
print("Writing the gadget data to the file '%s' ... " % file)
if not gadgetParticles.CheckDataCompletness():
throwError(
"Cannot continue writing the Gadget snapshot since the data failled the completness test."
)
(noParticles, sameSize) = gadgetParticles.SameNumberOfParticles()
if not sameSize:
        throwError(
            "Cannot continue writing the Gadget snapshot since not all the Gadget particle data arrays have the same (or expected) size."
        )
if gadgetParticles.Header().npartTotal[1] != noParticles:
        throwError(
            "Cannot continue writing the Gadget snapshot since the length of the Gadget particle data is not the same as the expected number of particles from the Gadget header. Length of Gadget particle data is %i versus expected size %i (from the header)."
        )
)
gadgetParticles.Header().num_files = np.int32(1)
# write the header to the file
f = open(file, "wb")
gadgetParticles.Header().tofile(f)
# write the data to the file
pos = gadgetParticles.Pos().reshape(-1)
pos.shape = -1
__buffer = np.zeros(1, dtype=bufferType)
__buffer[0] = pos.size * pos[0].nbytes
__buffer.tofile(f)
pos.tofile(f)
__buffer.tofile(f)
pos = gadgetParticles.Vel().reshape(-1)
__buffer[0] = pos.size * pos[0].nbytes
__buffer.tofile(f)
pos.tofile(f)
__buffer.tofile(f)
pos = gadgetParticles.Ids().reshape(-1)
__buffer[0] = pos.size * pos[0].nbytes
__buffer.tofile(f)
pos.tofile(f)
__buffer.tofile(f)
if gadgetParticles.hasMass:
pos = gadgetParticles.Mass().reshape(-1)
__buffer[0] = pos.size * pos[0].nbytes
__buffer.tofile(f)
pos.tofile(f)
__buffer.tofile(f)
f.close()
if VERBOSE:
print("Done.")
def gadgetCombine(p1, p2):
"""Combines two GadgetParticle classes into a single one. """
nTotal = p1.noParticles + p2.noParticles
res = GadgetParticles()
res.AddHeader(p1.header)
res.AddPos(np.append(p1.pos, p2.pos))
res.AddVel(np.append(p1.vel, p2.vel))
res.AddIds(np.append(p1.ids, p2.ids))
if p1.VariableMass():
res.AddMass(np.append(p1.mass, p2.mass))
res.header.npart[1] = nTotal
res.header.npartTotal[1] = nTotal
return res
def boxFullyContained(mainBox, smallBox):
"""Checks if the 'smallBox' is fully contained inside 'mainBox'."""
for i in range(3):
if not (
smallBox[2 * i] >= mainBox[2 * i]
and smallBox[2 * i] <= mainBox[2 * i + 1]
):
return False
if not (
smallBox[2 * i + 1] >= mainBox[2 * i]
and smallBox[2 * i + 1] <= mainBox[2 * i + 1]
):
return False
return True
def boxOverlap(box1, box2):
"""Checks if the two boxes overlap."""
for i in range(3):
if box1[2 * i] >= box2[2 * i + 1] or box1[2 * i + 1] <= box2[2 * i]:
return False
return True
def selectParticlesInBox(
particles, box, periodicLength, periodic=True, VERBOSE=True
):
"""Selects only the particle positions in the box of interest. It uses periodic boundary conditions to translate the box to outside its regions if the region of interest is partially outside the periodic box."""
if VERBOSE:
print("\nFinding the particles in the subBox %s ..." % str(box))
mainBox = 0.0, periodicLength, 0.0, periodicLength, 0.0, periodicLength
pos = particles.pos
# code for when the box is fully contained in the periodic one
if boxFullyContained(mainBox, box) or not periodic:
particleInside = (
(pos[:, 0] >= box[0])
* (pos[:, 0] <= box[1])
* (pos[:, 1] >= box[2])
* (pos[:, 1] <= box[3])
* (pos[:, 2] >= box[4])
* (pos[:, 2] <= box[5])
)
particles.Update(particleInside)
if VERBOSE:
print(
"\tfound %i particles (%.2f%%) out of the total of %i"
% (
particles.pos.shape[0],
100.0 * particles.pos.shape[0] / pos.shape[0],
pos.shape[0],
)
)
return particles
# now dealing with the case when the box crosses outside the periodic box
if VERBOSE:
print(
"The subBox in question extends outside the periodic box. Taking this into account."
)
n = np.zeros(27, np.int64)
    intersect = np.zeros(27, bool)  # np.bool is removed in recent NumPy versions
select = []
for i1 in range(-1, 2):
for i2 in range(-1, 2):
for i3 in range(-1, 2):
tempBox = (
box[0] + i1 * periodicLength,
box[1] + i1 * periodicLength,
box[2] + i2 * periodicLength,
box[3] + i2 * periodicLength,
box[4] + i3 * periodicLength,
box[5] + i3 * periodicLength,
)
index = (i1 + 1) * 9 + (i2 + 1) * 3 + (i3 + 1)
intersect[index] = boxOverlap(mainBox, tempBox)
if intersect[index]:
tempSelect = (
(pos[:, 0] >= tempBox[0])
* (pos[:, 0] <= tempBox[1])
* (pos[:, 1] >= tempBox[2])
* (pos[:, 1] <= tempBox[3])
* (pos[:, 2] >= tempBox[4])
* (pos[:, 2] <= tempBox[5])
)
n[index] = np.sum(tempSelect)
select.append(tempSelect)
# reserve memory for the output
if VERBOSE:
print(
"\tfound the intersection of the region of interest with %i periodic translations"
% len(select)
)
nTotal = np.sum(n)
result = GadgetParticles()
result.AddHeader(particles.header)
result.AddPos(np.empty((nTotal, 3), np.float32))
result.AddVel(np.empty((nTotal, 3), np.float32))
result.AddIds(np.empty(nTotal, np.int32))
if particles.VariableMass():
result.AddMass(np.empty(nTotal, np.float32))
# loop again and copy the required particles
count = 0
No = 0
for i1 in range(-1, 2):
for i2 in range(-1, 2):
for i3 in range(-1, 2):
index = (i1 + 1) * 9 + (i2 + 1) * 3 + (i3 + 1)
if not intersect[index]:
continue
result.pos[No : No + n[index], 0] = (
particles.pos[select[count], 0] - i1 * periodicLength
)
result.pos[No : No + n[index], 1] = (
particles.pos[select[count], 1] - i2 * periodicLength
)
result.pos[No : No + n[index], 2] = (
particles.pos[select[count], 2] - i3 * periodicLength
)
result.vel[No : No + n[index], :] = particles.vel[
select[count], :
]
result.ids[No : No + n[index]] = particles.ids[select[count]]
if particles.VariableMass():
result.mass[No : No + n[index]] = particles.mass[
select[count]
]
count += 1
No += n[index]
if VERBOSE:
print(
"\tfound %i particles (%.2f%%) out of the total of %i"
% (nTotal, 100.0 * nTotal / pos.shape[0], pos.shape[0])
)
return result
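# Illustrative usage sketch (not part of the original module): the snapshot name and the
# sub-box limits are made-up example values.
if __name__ == "__main__":
    snapshot = "snapshot_010"  # hypothetical Gadget snapshot file (or file root)
    particles = readGadgetData(snapshot, VERBOSE=True)
    particles.Header().PrintValues()
    # keep only the particles inside an example sub-box (kpc/h), honouring periodicity
    subBox = (0.0, 5000.0, 0.0, 5000.0, 0.0, 5000.0)
    inside = selectParticlesInBox(particles, subBox, particles.Header().BoxSize)
    print(inside.noParticles, "particles inside the sub-box")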
|
StarcoderdataPython
|
9673899
|
#!/usr/bin/env python3
import json
def vc_value_str(config, ratio_dict):
if "worker_sku_cnt" not in config or "sku_mapping" not in config:
        print(
            "Warning: no default values will be added to the VC table; they need to be specified manually"
        )
return "", "", [], ""
worker_sku_cnt, sku_mapping = config["worker_sku_cnt"], config[
"sku_mapping"]
quota_dict = {}
old_meta = {}
resource_quota = {"cpu": {}, "memory": {}, "gpu": {}, "gpu_memory": {}}
for sku, cnt in worker_sku_cnt.items():
gpu_type = sku_mapping.get(sku, {}).get("gpu-type", "None")
num_gpu_per_node = sku_mapping.get(sku, {}).get("gpu", 0)
quota_dict[gpu_type] = quota_dict.get(gpu_type,
0) + cnt * num_gpu_per_node
old_meta[gpu_type] = {"num_gpu_per_node": num_gpu_per_node}
sku_name_in_map = sku if sku in sku_mapping else ""
meta_tmp = sku_mapping.get(sku_name_in_map, {})
for r_type in resource_quota.keys():
resource_quota[r_type][
sku_name_in_map] = resource_quota[r_type].get(
sku_name_in_map, 0) + meta_tmp.get(r_type, 0) * cnt
for r_type in ["cpu", "memory"]:
for sku, val in resource_quota[r_type].items():
resource_quota[r_type][sku] *= config.get("schedulable_ratio", 0.9)
    # The default values of quota and metadata assume there is only one default VC, which
    # is not a reasonable assumption; these two fields will eventually be removed.
quota = json.dumps(quota_dict, separators=(",", ":"))
metadata = json.dumps(old_meta, separators=(",", ":"))
# TODO use cluster_resource.py to simplify code:
# res_obj = ClusterResource(resource_quota), and directly multiply the ratio.
res_quota = {}
for vc, ratio in ratio_dict.items():
res_quota_dict = {}
for res, res_q in resource_quota.items():
tmp_res_quota = {}
for sku, cnt in res_q.items():
cnt_p = cnt * ratio
if "memory" in res:
                    cnt_p = '{}Gi'.format(cnt_p)  # format the ratio-scaled amount as Gi
tmp_res_quota[sku] = cnt_p
res_quota_dict[res] = tmp_res_quota
res_quota[vc] = json.dumps(res_quota_dict, separators=(",", ":"))
res_meta_dict = {}
for r_type in ["cpu", "memory", "gpu", "gpu_memory"]:
tmp_res_meta = {}
for sku in worker_sku_cnt:
sku_name_in_map = sku if sku in sku_mapping else ""
pernode_cnt = sku_mapping.get(sku_name_in_map, {}).get(r_type, 0)
if "memory" in r_type:
pernode_cnt = '{}Gi'.format(pernode_cnt)
tmp_res_meta[sku_name_in_map] = {"per_node": pernode_cnt}
if r_type in ["cpu", "memory"]:
tmp_res_meta[sku_name_in_map]["schedulable_ratio"] = 0.9
if r_type == "gpu":
tmp_res_meta[sku_name_in_map]["gpu_type"] = sku_mapping.get(
sku_name_in_map, {}).get("gpu-type", "None")
res_meta_dict[r_type] = tmp_res_meta
res_meta = json.dumps(res_meta_dict, separators=(",", ":"))
return quota, metadata, res_quota, res_meta
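# Illustrative usage sketch (not part of the original module): the SKU name, node count,
# resource numbers and VC names below are made-up example values.
if __name__ == "__main__":
    example_config = {
        "worker_sku_cnt": {"Standard_ND24rs": 4},
        "sku_mapping": {
            "Standard_ND24rs": {"gpu-type": "P40", "gpu": 4, "cpu": 24,
                                "memory": 448, "gpu_memory": 96},
        },
    }
    quota, metadata, res_quota, res_meta = vc_value_str(
        example_config, {"vc_a": 0.5, "vc_b": 0.5})
    print(quota)      # total GPUs per GPU type, e.g. {"P40":16}
    print(res_quota)  # per-VC resource quota JSON strings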
|
StarcoderdataPython
|
5005715
|
<filename>scripts/matchingbio.py
#!/usr/bin/env python3
import sys
def argparser():
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('file1')
ap.add_argument('file2')
return ap
def process_streams(f1, f2, options):
matched, total = 0, 0
sent1, sent2 = [], []
for ln, l1 in enumerate(f1, start=1):
try:
l2 = next(f2)
except StopIteration:
print('unexpected eof in {} at line {}'.format(f2.name, ln),
file=sys.stderr)
return False
l1, l2 = l1.rstrip('\n'), l2.rstrip('\n')
if not l1 or l1.isspace():
if l2 and not l2.isspace():
                print('desynchronized at line {}: "{}" vs "{}"'.format(ln, l1, l2),
file=sys.stderr)
return False
if sent1 == sent2:
matched += 1
for s in sent1:
print(s)
print()
total += 1
sent1, sent2 = [], []
else:
if not l2 or l2.isspace():
                print('desynchronized at line {}: "{}" vs "{}"'.format(ln, l1, l2),
file=sys.stderr)
return False
sent1.append(l1)
sent2.append(l2)
try:
l2 = next(f2)
print('unexpected eof in {} at line {}'.format(f1.name, ln),
file=sys.stderr)
return False
except StopIteration:
pass
print('done, found {}/{} matching sentences'.format(matched, total),
file=sys.stderr)
return True
def process(file1, file2, options):
with open(file1) as f1:
with open(file2) as f2:
return process_streams(f1, f2, options)
def main(argv):
args = argparser().parse_args(argv[1:])
process(args.file1, args.file2, args)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
StarcoderdataPython
|
6619742
|
<gh_stars>1-10
# =========================================================================
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==========================================================================
class MangaSite():
def __init__(self, name, xpaths, test_link = ''):
self.name = name
self.xpaths = xpaths
self.optimized = False
self.test_link = test_link
|
StarcoderdataPython
|
3296560
|
<gh_stars>0
# !/usr/bin/python3
# -*- coding:utf-8 -*-
# @File: __init__.py.py
# @Author: BradyHu
# @Email: <EMAIL>
# @Time: 2022/2/8 9:17 AM
from .base import *
|
StarcoderdataPython
|
3260539
|
<reponame>jimmeak/graveyard
from django import forms
from django.core.validators import MinLengthValidator
from ddcz.notifications import Audience
class News(forms.Form):
text = forms.CharField(
label="",
widget=forms.Textarea(
attrs={"class": "comment__textarea", "cols": 80, "rows": 30},
),
validators=[MinLengthValidator(80)],
)
audience = forms.ChoiceField(
choices=((i.name, i.value) for i in Audience), label="Komu poslat e-mail"
)
|
StarcoderdataPython
|
185519
|
class BeeSQLError(Exception):
pass
|
StarcoderdataPython
|
6532374
|
<reponame>CoderLeague/research_aiohttp
import io
import json
import mimetypes
import os
import warnings
from abc import ABC, abstractmethod
from multidict import CIMultiDict
from . import hdrs
from .helpers import (PY_36, content_disposition_header, guess_filename,
parse_mimetype, sentinel)
from .streams import DEFAULT_LIMIT
__all__ = ('PAYLOAD_REGISTRY', 'get_payload', 'payload_type', 'Payload',
'BytesPayload', 'StringPayload',
'IOBasePayload', 'BytesIOPayload', 'BufferedReaderPayload',
'TextIOPayload', 'StringIOPayload', 'JsonPayload')
TOO_LARGE_BYTES_BODY = 2 ** 20
class LookupError(Exception):
pass
def get_payload(data, *args, **kwargs):
return PAYLOAD_REGISTRY.get(data, *args, **kwargs)
def register_payload(factory, type):
PAYLOAD_REGISTRY.register(factory, type)
class payload_type:
def __init__(self, type):
self.type = type
def __call__(self, factory):
register_payload(factory, self.type)
return factory
class PayloadRegistry:
"""Payload registry.
note: we need zope.interface for more efficient adapter search
"""
def __init__(self):
self._registry = []
def get(self, data, *args, **kwargs):
if isinstance(data, Payload):
return data
for factory, type in self._registry:
if isinstance(data, type):
return factory(data, *args, **kwargs)
raise LookupError()
def register(self, factory, type):
self._registry.append((factory, type))
class Payload(ABC):
_size = None
_headers = None
_content_type = 'application/octet-stream'
def __init__(self, value, *, headers=None, content_type=sentinel,
filename=None, encoding=None, **kwargs):
self._value = value
self._encoding = encoding
self._filename = filename
if headers is not None:
self._headers = CIMultiDict(headers)
if content_type is sentinel and hdrs.CONTENT_TYPE in self._headers:
content_type = self._headers[hdrs.CONTENT_TYPE]
if content_type is sentinel:
content_type = None
self._content_type = content_type
@property
def size(self):
"""Size of the payload."""
return self._size
@property
def filename(self):
"""Filename of the payload."""
return self._filename
@property
def headers(self):
"""Custom item headers"""
return self._headers
@property
def encoding(self):
"""Payload encoding"""
return self._encoding
@property
def content_type(self):
"""Content type"""
if self._content_type is not None:
return self._content_type
elif self._filename is not None:
mime = mimetypes.guess_type(self._filename)[0]
return 'application/octet-stream' if mime is None else mime
else:
return Payload._content_type
def set_content_disposition(self, disptype, quote_fields=True, **params):
"""Sets ``Content-Disposition`` header."""
if self._headers is None:
self._headers = CIMultiDict()
self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header(
disptype, quote_fields=quote_fields, **params)
@abstractmethod
async def write(self, writer):
"""Write payload.
writer is an AbstractPayloadWriter instance:
"""
class BytesPayload(Payload):
def __init__(self, value, *args, **kwargs):
assert isinstance(value, (bytes, bytearray, memoryview)), \
"value argument must be byte-ish (%r)" % type(value)
if 'content_type' not in kwargs:
kwargs['content_type'] = 'application/octet-stream'
super().__init__(value, *args, **kwargs)
self._size = len(value)
if self._size > TOO_LARGE_BYTES_BODY:
if PY_36:
kwargs = {'source': self}
else:
kwargs = {}
warnings.warn("Sending a large body directly with raw bytes might"
" lock the event loop. You should probably pass an "
"io.BytesIO object instead", ResourceWarning,
**kwargs)
async def write(self, writer):
await writer.write(self._value)
class StringPayload(BytesPayload):
def __init__(self, value, *args,
encoding=None, content_type=None, **kwargs):
if encoding is None:
if content_type is None:
encoding = 'utf-8'
content_type = 'text/plain; charset=utf-8'
else:
mimetype = parse_mimetype(content_type)
encoding = mimetype.parameters.get('charset', 'utf-8')
else:
if content_type is None:
content_type = 'text/plain; charset=%s' % encoding
super().__init__(
value.encode(encoding),
encoding=encoding, content_type=content_type, *args, **kwargs)
class StringIOPayload(StringPayload):
def __init__(self, value, *args, **kwargs):
super().__init__(value.read(), *args, **kwargs)
class IOBasePayload(Payload):
def __init__(self, value, disposition='attachment', *args, **kwargs):
if 'filename' not in kwargs:
kwargs['filename'] = guess_filename(value)
super().__init__(value, *args, **kwargs)
if self._filename is not None and disposition is not None:
self.set_content_disposition(disposition, filename=self._filename)
async def write(self, writer):
try:
chunk = self._value.read(DEFAULT_LIMIT)
while chunk:
await writer.write(chunk)
chunk = self._value.read(DEFAULT_LIMIT)
finally:
self._value.close()
class TextIOPayload(IOBasePayload):
def __init__(self, value, *args,
encoding=None, content_type=None, **kwargs):
if encoding is None:
if content_type is None:
encoding = 'utf-8'
content_type = 'text/plain; charset=utf-8'
else:
mimetype = parse_mimetype(content_type)
encoding = mimetype.parameters.get('charset', 'utf-8')
else:
if content_type is None:
content_type = 'text/plain; charset=%s' % encoding
super().__init__(
value,
content_type=content_type, encoding=encoding, *args, **kwargs)
@property
def size(self):
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
return None
async def write(self, writer):
try:
chunk = self._value.read(DEFAULT_LIMIT)
while chunk:
await writer.write(chunk.encode(self._encoding))
chunk = self._value.read(DEFAULT_LIMIT)
finally:
self._value.close()
class BytesIOPayload(IOBasePayload):
@property
def size(self):
position = self._value.tell()
end = self._value.seek(0, os.SEEK_END)
self._value.seek(position)
return end - position
class BufferedReaderPayload(IOBasePayload):
@property
def size(self):
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
# data.fileno() is not supported, e.g.
# io.BufferedReader(io.BytesIO(b'data'))
return None
class JsonPayload(BytesPayload):
def __init__(self, value,
encoding='utf-8', content_type='application/json',
dumps=json.dumps, *args, **kwargs):
super().__init__(
dumps(value).encode(encoding),
content_type=content_type, encoding=encoding, *args, **kwargs)
PAYLOAD_REGISTRY = PayloadRegistry()
PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
PAYLOAD_REGISTRY.register(StringPayload, str)
PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
PAYLOAD_REGISTRY.register(
BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
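# Hedged usage sketch (not part of the original module): the registry above
# maps Python value types to payload classes, so a str is wrapped by
# StringPayload (UTF-8, text/plain by default), bytes by BytesPayload, and a
# dict can be sent through JsonPayload. Illustrative only; it relies on the
# Payload base class defined earlier in this module.
#
#     sp = StringPayload('hello')          # 5 bytes, charset utf-8
#     jp = JsonPayload({'answer': 42})     # serialized with json.dumps
#     bp = BytesPayload(b'\x00\x01')       # application/octet-stream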
|
StarcoderdataPython
|
11367837
|
<filename>Model1/image_loader.py
# Data processing
import os
import torch
from torch.utils import data
from PIL import Image
import numpy as np
from torchvision import transforms
# Whatever image comes in, crop it to 100x100 first and then apply a random horizontal flip
img_transform = transforms.Compose([
transforms.RandomCrop(100),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
label_transform = transforms.Compose([
transforms.ToTensor()
])
# Define a custom dataset
class FlameSet(data.Dataset):
def __init__(self,root):
        # Absolute paths of all images
imgs=os.listdir(root)
self.imgs=[os.path.join(root,k) for k in imgs]
self.transforms=img_transform
        # Broken/noisy images are replaced by a blank placeholder; if loading fails, return the placeholder
self.pad_data = torch.zeros((3,100,100))
self.pad_i = torch.as_tensor(int(0))
def __getitem__(self, index):
img_path = self.imgs[index]
        # Some images may have 4 channels (PNG is the typical case), so convert them to 3 channels
pil_img = Image.open(img_path).convert('RGB')
        # Extract the label from the file name
i = img_path.split('\\')[1].split('.')[0].split('_')[0]
i = torch.as_tensor(int(i))
if self.transforms:
try:
data = self.transforms(pil_img)
except:
return self.pad_data,self.pad_i
else:
pil_img = np.asarray(pil_img)
data = torch.from_numpy(pil_img)
return data, i
def __len__(self):
return len(self.imgs)
if __name__ == '__main__':
dataSet = FlameSet('./Group_Icon')
trainloader = torch.utils.data.DataLoader(dataSet, batch_size=2, shuffle=True)
for i_batch,batch_data in enumerate(trainloader):
        print(i_batch)  # print the batch index
        print(batch_data[0].size())  # print the size of the images in this batch
print(batch_data[1][0].size())
|
StarcoderdataPython
|
4906266
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.ni.daqmx
~~~~~~~~~~~~~~~~~~~~~~
Implements bindings to the DAQmx (windows) National Instruments libraries.
Sources::
- DAQmx Reference manual
- DAQmx Base Reference manual
- pylibnidaqmx
http://pylibnidaqmx.googlecode.com
:company: National Instruments
:description:
:website: http://www.ni.com/
----
:copyright: 2012 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from .base import System, Task, Channel, Device
from .channels import *
from .tasks import *
from .constants import Constants, Types
|
StarcoderdataPython
|
325689
|
# Copyright 2018 <NAME>
#
# Licensed under the 3-clause BSD license. See the LICENSE file.
class InvalidPackageNameError(Exception):
"""Invalid package name or non-existing package."""
def __init__(self, frontend, pkg_name):
self.frontend = frontend
self.pkg_name = pkg_name
def __str__(self):
return (f'The package {self.pkg_name} could not be found by frontend '
f'{self.frontend}')
class InvalidPackageVersionError(Exception):
"""Invalid or non-existing version of a valid package"""
def __init__(self, frontend, pkg_name, version):
self.frontend = frontend
self.pkg_name = pkg_name
self.version = version
def __str__(self):
return (f'Version {self.version} of package {self.pkg_name} '
f'could not be found by frontend {self.frontend}')
class UnhandledFrontendError(Exception):
"""Frontend not supported by backend."""
def __init__(self, backend, frontend):
self.backend = backend
self.frontend = frontend
def __str__(self):
return (f'The {self.backend} backend does not work well with the '
f'{self.frontend} frontend (yet!). Unable to generate a '
f'package definition.')
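if __name__ == '__main__':
    # Hedged sketch, not part of the original module: exercise the messages
    # composed by __str__ above. 'pypi' and 'nix' are just example names.
    print(InvalidPackageNameError('pypi', 'no-such-package'))
    print(InvalidPackageVersionError('pypi', 'requests', '0.0.0'))
    print(UnhandledFrontendError('nix', 'pypi'))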
|
StarcoderdataPython
|
8000698
|
<reponame>arithmetic1728/ssl_grpc_example<filename>service_pb2_grpc.py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import service_pb2 as service__pb2
class ServerStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Foo = channel.unary_unary(
'/Server/Foo',
request_serializer=service__pb2.Empty.SerializeToString,
response_deserializer=service__pb2.Empty.FromString,
)
class ServerServicer(object):
# missing associated documentation comment in .proto file
pass
def Foo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ServerServicer_to_server(servicer, server):
rpc_method_handlers = {
'Foo': grpc.unary_unary_rpc_method_handler(
servicer.Foo,
request_deserializer=service__pb2.Empty.FromString,
response_serializer=service__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Server', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
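# Hedged usage sketch (not emitted by the protoc plugin): a minimal client for
# the stub above. The address is a placeholder and a running server is assumed.
#
#     channel = grpc.insecure_channel('localhost:50051')
#     stub = ServerStub(channel)
#     reply = stub.Foo(service__pb2.Empty())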
|
StarcoderdataPython
|
4821020
|
# Data input
Celsius = float(input('\033[34mEnter the temperature in Celsius (ºC): '))
# Processing
Fahrenheit = ((9 * Celsius) / 5) + 32
Kelvin = Celsius + 273.15
# Data output
print('The temperature in Fahrenheit is {:.1f} ºF'.format(Fahrenheit))
print('The temperature in Kelvin is {:.1f} K'.format(Kelvin))
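# Worked check (added): for Celsius = 25.0 the formulas above give
# Fahrenheit = (9 * 25) / 5 + 32 = 77.0 ºF and Kelvin = 25 + 273.15 = 298.15 K.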
|
StarcoderdataPython
|
11296268
|
CONSTRAINT_DICT_IN = \
{
"contextName": "appName",
"operator": "IN",
"values": [
"test",
"test2"
]
}
CONSTRAINT_DICT_NOTIN = \
{
"contextName": "appName",
"operator": "NOT_IN",
"values": [
"test",
"test2"
]
}
CONSTRAINT_DICT_STR_INVERT = \
{
"contextName": "customField",
"operator": "STR_CONTAINS",
"values": ["dog", "cat", "hAmStEr"],
"caseInsensitive": True,
"inverted": True
}
CONSTRAINT_DICT_STR_CONTAINS_CI = \
{
"contextName": "customField",
"operator": "STR_CONTAINS",
"values": ["dog", "cat", "hAmStEr"],
"caseInsensitive": True,
"inverted": False
}
CONSTRAINT_DICT_STR_CONTAINS_NOT_CI = \
{
"contextName": "customField",
"operator": "STR_CONTAINS",
"values": ["dog", "cat", "hAmStEr"],
"caseInsensitive": False,
"inverted": False
}
CONSTRAINT_DICT_STR_ENDS_WITH_CI = \
{
"contextName": "customField",
"operator": "STR_ENDS_WITH",
"values": ["dog", "cat", "hAmStEr"],
"caseInsensitive": True,
"inverted": False
}
CONSTRAINT_DICT_STR_ENDS_WITH_NOT_CI = \
{
"contextName": "customField",
"operator": "STR_ENDS_WITH",
"values": ["dog", "cat", "hAmStEr"],
"caseInsensitive": False,
"inverted": False
}
CONSTRAINT_DICT_STR_STARTS_WITH_CI = \
{
"contextName": "customField",
"operator": "STR_STARTS_WITH",
"values": ["dog", "cat", "hAmStEr"],
"caseInsensitive": True,
"inverted": False
}
CONSTRAINT_DICT_STR_STARTS_WITH_NOT_CI = \
{
"contextName": "customField",
"operator": "STR_STARTS_WITH",
"values": ["dog", "cat", "hAmStEr"],
"caseInsensitive": False,
"inverted": False
}
CONSTRAINT_NUM_EQ = \
{
"contextName": "customField",
"operator": "NUM_EQ",
"value": "5",
"inverted": False
}
CONSTRAINT_NUM_GT = \
{
"contextName": "customField",
"operator": "NUM_GT",
"value": "5",
"inverted": False
}
CONSTRAINT_NUM_GTE = \
{
"contextName": "customField",
"operator": "NUM_GTE",
"value": 5,
"inverted": False
}
CONSTRAINT_NUM_LT = \
{
"contextName": "customField",
"operator": "NUM_LT",
"value": "5",
"inverted": False
}
CONSTRAINT_NUM_LTE = \
{
"contextName": "customField",
"operator": "NUM_LTE",
"value": "5",
"inverted": False
}
CONSTRAINT_NUM_FLOAT = \
{
"contextName": "customField",
"operator": "NUM_LTE",
"value": "5.1",
"inverted": False
}
CONSTRAINT_DATE_AFTER = \
{
"contextName": "currentTime",
"operator": "DATE_AFTER",
"value": "2022-01-22T00:00:00.000Z",
"inverted": False
}
CONSTRAINT_DATE_BEFORE = \
{
"contextName": "currentTime",
"operator": "DATE_BEFORE",
"value": "2022-01-22T00:00:00.000Z",
"inverted": False
}
CONSTRAINT_DATE_ERROR = \
{
"contextName": "currentTime",
"operator": "DATE_AFTER",
"value": "abcd",
"inverted": False
}
CONSTRAINT_SEMVER_EQ = \
{
"contextName": "customField",
"operator": "SEMVER_EQ",
"value": "1.2.2",
"inverted": False
}
CONSTRAINT_SEMVER_GT = \
{
"contextName": "customField",
"operator": "SEMVER_GT",
"value": "1.2.2",
"inverted": False
}
CONSTRAINT_SEMVER_LT = \
{
"contextName": "customField",
"operator": "SEMVER_LT",
"value": "1.2.2",
"inverted": False
}
|
StarcoderdataPython
|
4900030
|
<gh_stars>0
from django.apps import AppConfig
import stripe
from .conf import settings
class DjangoStripeConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'django_stripe'
def ready(self):
stripe.api_key = settings.STRIPE_SECRET_KEY
stripe_app_data = settings.STRIPE_APP_DATA
stripe.set_app_info(**stripe_app_data)
from . import signal_receivers
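# Hedged sketch of the settings ready() above expects; the values are
# placeholders, not real keys:
#
#     STRIPE_SECRET_KEY = 'sk_test_xxx'
#     STRIPE_APP_DATA = {'name': 'my-shop', 'version': '1.0.0'}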
|
StarcoderdataPython
|
3377776
|
from pathlib import Path
from typing import List, Optional, Union, NamedTuple, Iterable
import importlib
import importlib.util
import warnings
import pytz
from .common import PathIsh, get_tmpdir, appdirs, default_output_dir
from .common import Res, Source
class Config(NamedTuple):
# TODO remove default from sources once migrated
SOURCES: List = []
# if not specified, uses user data dir
OUTPUT_DIR: Optional[PathIsh] = None
CACHE_DIR: Optional[PathIsh] = None
FILTERS: List[str] = []
#
# NOTE: INDEXERS is deprecated, use SOURCES instead
INDEXERS: List = []
@property
def sources(self) -> Iterable[Res[Source]]:
idx = self.INDEXERS
if len(self.INDEXERS) > 0:
warnings.warn("'INDEXERS' is deprecated. Please use 'SOURCES'!", DeprecationWarning)
raw = self.SOURCES + self.INDEXERS
if len(raw) == 0:
raise RuntimeError("Please specify SOURCES in the config! See https://github.com/karlicoss/promnesia#setup for more information")
for r in raw:
if isinstance(r, str):
# must be a raw module name?
try:
r = importlib.import_module(r)
except ModuleNotFoundError as e:
yield e
continue
if isinstance(r, Source):
yield r
else:
# kind of last resort measure..
yield Source(r)
@property
def cache_dir(self) -> Path:
cd = self.CACHE_DIR
# TODO maybe do not use cache if it's none?
assert cd is not None
res = Path(cd)
res.mkdir(exist_ok=True) # TODO not sure about parents=True
return res
# TODO make this optional, default to .cache or something?
# TODO also tmp dir -- perhaps should be in cache or at least possible to specify in config? not sure if useful
@property
def output_dir(self) -> Path:
odir = self.OUTPUT_DIR
if odir is not None:
return Path(odir)
else:
return default_output_dir()
instance: Optional[Config] = None
def has() -> bool:
return instance is not None
def get() -> Config:
assert instance is not None
return instance
def load_from(config_file: Path) -> None:
global instance
instance = import_config(config_file)
def reset() -> None:
global instance
assert instance is not None
instance = None
def import_config(config_file: PathIsh) -> Config:
p = Path(config_file)
# TODO just exec??
name = p.stem
spec = importlib.util.spec_from_file_location(name, p) # type: ignore
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore
d = {}
for f in Config._fields:
if hasattr(mod, f):
d[f] = getattr(mod, f)
return Config(**d)
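# Hedged sketch of a config module that import_config() above would accept;
# the source name and paths are placeholders:
#
#     # config.py
#     SOURCES = ['promnesia.sources.demo']   # strings are imported lazily by sources()
#     OUTPUT_DIR = '/tmp/promnesia'          # optional, falls back to default_output_dir()
#     CACHE_DIR = '/tmp/promnesia-cache'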
|
StarcoderdataPython
|
11249048
|
import json
def output(videos, courseName):
"""
Write results of analysis to json file.
    :param videos: iterable of per-video statistics objects
    :param courseName: course name used to build the output file name
    :return: name of the written json file
"""
fileName = courseName + '_video_statistics.json'
with open(fileName, "w") as out:
courses = set()
for video in videos:
courses.add(video.course)
courses_j = []
for course_name in courses:
sections = set()
for video in videos:
if video.course == course_name:
sections.add(video.section)
course = dict()
sections_j = []
for section in sections:
subsections = set()
for video in videos:
if (video.course == course_name) and (video.section == section):
subsections.add(video.subsection)
section_j = dict()
subsections_j = []
for subsection in subsections:
subsection_j = dict()
videos_j = []
for video in videos:
# todo: can be empty
if (video.course == course_name) and (video.section == section) and (video.subsection == subsection):
video_j = dict()
video_j["user_percent"] = video.user_percent
video_j["watched_percent"] = video.watched_percent
video_j["intervals_number"] = video.intervals_number
video_j["review_intervals"] = video.review_intervals
videos_j.append(video_j)
                    # todo: the same with tests
subsection_j["subsection_name"] = subsection
subsection_j["videos"] = videos_j
subsections_j.append(subsection_j)
section_j["section_name"] = section
section_j["subsections"] = subsections_j
sections_j.append(section_j)
course["course_name"] = course_name
course["sections"] = sections_j
courses_j.append(course)
out.write(json.dumps(courses_j))
return fileName
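if __name__ == '__main__':
    # Hedged sketch, not part of the original module: output() only needs
    # objects exposing the attributes read above, so a namedtuple will do.
    from collections import namedtuple
    Video = namedtuple('Video', ['course', 'section', 'subsection', 'user_percent',
                                 'watched_percent', 'intervals_number', 'review_intervals'])
    demo = [Video('DemoCourse', 'Week 1', 'Intro', 42.0, 77.5, 3, [[0, 30]])]
    print(output(demo, 'DemoCourse'))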
|
StarcoderdataPython
|
12862439
|
<gh_stars>1-10
# Generated by Django 3.0.8 on 2020-07-28 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0004_product_product_image'),
]
operations = [
migrations.AddField(
model_name='product',
name='discounted_price',
field=models.FloatField(default=None),
preserve_default=False,
),
]
|
StarcoderdataPython
|
193198
|
<gh_stars>1-10
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# [MS-DCOM] Interface implementation
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/SecureAuthCorp/impacket/tree/master/tests/SMB_RPC
#
# Some calls have helper functions, which makes it even easier to use.
# They are located at the end of this file.
# Helper functions start with "h"<name of the call>.
# There are test cases for them too.
#
# Author:
# <NAME> (@agsolino)
#
# ToDo:
# [X] Use the same DCE connection for all the calls. Right now it connects to the remote machine
# for each call, making it slower.
# [X] Implement a ping mechanism, otherwise the garbage collector at the server shuts down the objects if
# not used, returning RPC_E_DISCONNECTED
#
from __future__ import division
from __future__ import print_function
import socket
from struct import pack
from threading import Timer, current_thread
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT, NDRPOINTER, NDRUniConformantArray, NDRTLSTRUCT, UNKNOWNDATA
from impacket.dcerpc.v5.dtypes import LPWSTR, ULONGLONG, HRESULT, GUID, USHORT, WSTR, DWORD, LPLONG, LONG, PGUID, ULONG, \
UUID, WIDESTR, NULL
from impacket import hresult_errors, LOG
from impacket.uuid import string_to_bin, uuidtup_to_bin, generate
from impacket.dcerpc.v5.rpcrt import TypeSerialization1, RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, RPC_C_AUTHN_LEVEL_NONE, \
RPC_C_AUTHN_LEVEL_PKT_PRIVACY, RPC_C_AUTHN_GSS_NEGOTIATE, RPC_C_AUTHN_WINNT, DCERPCException
from impacket.dcerpc.v5 import transport
CLSID_ActivationContextInfo = string_to_bin('000001a5-0000-0000-c000-000000000046')
CLSID_ActivationPropertiesIn = string_to_bin('00000338-0000-0000-c000-000000000046')
CLSID_ActivationPropertiesOut = string_to_bin('00000339-0000-0000-c000-000000000046')
CLSID_CONTEXT_EXTENSION = string_to_bin('00000334-0000-0000-c000-000000000046')
CLSID_ContextMarshaler = string_to_bin('0000033b-0000-0000-c000-000000000046')
CLSID_ERROR_EXTENSION = string_to_bin('0000031c-0000-0000-c000-000000000046')
CLSID_ErrorObject = string_to_bin('0000031b-0000-0000-c000-000000000046')
CLSID_InstanceInfo = string_to_bin('000001ad-0000-0000-c000-000000000046')
CLSID_InstantiationInfo = string_to_bin('000001ab-0000-0000-c000-000000000046')
CLSID_PropsOutInfo = string_to_bin('00000339-0000-0000-c000-000000000046')
CLSID_ScmReplyInfo = string_to_bin('000001b6-0000-0000-c000-000000000046')
CLSID_ScmRequestInfo = string_to_bin('000001aa-0000-0000-c000-000000000046')
CLSID_SecurityInfo = string_to_bin('000001a6-0000-0000-c000-000000000046')
CLSID_ServerLocationInfo = string_to_bin('000001a4-0000-0000-c000-000000000046')
CLSID_SpecialSystemProperties = string_to_bin('000001b9-0000-0000-c000-000000000046')
IID_IActivation = uuidtup_to_bin(('4d9f4ab8-7d1c-11cf-861e-0020af6e7c57','0.0'))
IID_IActivationPropertiesIn = uuidtup_to_bin(('000001A2-0000-0000-C000-000000000046','0.0'))
IID_IActivationPropertiesOut = uuidtup_to_bin(('000001A3-0000-0000-C000-000000000046','0.0'))
IID_IContext = uuidtup_to_bin(('000001c0-0000-0000-C000-000000000046','0.0'))
IID_IObjectExporter = uuidtup_to_bin(('99fcfec4-5260-101b-bbcb-00aa0021347a','0.0'))
IID_IRemoteSCMActivator = uuidtup_to_bin(('000001A0-0000-0000-C000-000000000046','0.0'))
IID_IRemUnknown = uuidtup_to_bin(('00000131-0000-0000-C000-000000000046','0.0'))
IID_IRemUnknown2 = uuidtup_to_bin(('00000143-0000-0000-C000-000000000046','0.0'))
IID_IUnknown = uuidtup_to_bin(('00000000-0000-0000-C000-000000000046','0.0'))
IID_IClassFactory = uuidtup_to_bin(('00000001-0000-0000-C000-000000000046','0.0'))
class DCERPCSessionError(DCERPCException):
def __init__(self, error_string=None, error_code=None, packet=None):
DCERPCException.__init__(self, error_string, error_code, packet)
def __str__( self ):
if self.error_code in hresult_errors.ERROR_MESSAGES:
error_msg_short = hresult_errors.ERROR_MESSAGES[self.error_code][0]
error_msg_verbose = hresult_errors.ERROR_MESSAGES[self.error_code][1]
return 'DCOM SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'DCOM SessionError: unknown error code: 0x%x' % self.error_code
################################################################################
# CONSTANTS
################################################################################
# 2.2.1 OID
OID = ULONGLONG
class OID_ARRAY(NDRUniConformantArray):
item = OID
class POID_ARRAY(NDRPOINTER):
referent = (
('Data', OID_ARRAY),
)
# 2.2.2 SETID
SETID = ULONGLONG
# 2.2.4 error_status_t
error_status_t = ULONG
# 2.2.6 CID
CID = GUID
# 2.2.7 CLSID
CLSID = GUID
# 2.2.8 IID
IID = GUID
PIID = PGUID
# 2.2.9 IPID
IPID = GUID
# 2.2.10 OXID
OXID = ULONGLONG
# 2.2.18 OBJREF
FLAGS_OBJREF_STANDARD = 0x00000001
FLAGS_OBJREF_HANDLER = 0x00000002
FLAGS_OBJREF_CUSTOM = 0x00000004
FLAGS_OBJREF_EXTENDED = 0x00000008
# 2.2.18.1 STDOBJREF
SORF_NOPING = 0x00001000
# 2.2.20 Context
CTXMSHLFLAGS_BYVAL = 0x00000002
# 2.2.20.1 PROPMARSHALHEADER
CPFLAG_PROPAGATE = 0x00000001
CPFLAG_EXPOSE = 0x00000002
CPFLAG_ENVOY = 0x00000004
# 2.2.22.2.1 InstantiationInfoData
ACTVFLAGS_DISABLE_AAA = 0x00000002
ACTVFLAGS_ACTIVATE_32_BIT_SERVER = 0x00000004
ACTVFLAGS_ACTIVATE_64_BIT_SERVER = 0x00000008
ACTVFLAGS_NO_FAILURE_LOG = 0x00000020
# 2.2.22.2.2 SpecialPropertiesData
SPD_FLAG_USE_CONSOLE_SESSION = 0x00000001
# IDL Range Constants
MAX_REQUESTED_INTERFACES = 0x8000
MAX_REQUESTED_PROTSEQS = 0x8000
MIN_ACTPROP_LIMIT = 1
MAX_ACTPROP_LIMIT = 10
################################################################################
# STRUCTURES
################################################################################
class handle_t(NDRSTRUCT):
structure = (
('context_handle_attributes',ULONG),
('context_handle_uuid',UUID),
)
def __init__(self, data=None, isNDR64=False):
NDRSTRUCT.__init__(self, data, isNDR64)
self['context_handle_uuid'] = b'\x00'*16
def isNull(self):
return self['context_handle_uuid'] == b'\x00'*16
# 2.2.11 COMVERSION
class COMVERSION(NDRSTRUCT):
default_major_version = 5
default_minor_version = 7
structure = (
('MajorVersion',USHORT),
('MinorVersion',USHORT),
)
@classmethod
def set_default_version(cls, major_version=None, minor_version=None):
# Set default dcom version for all new COMVERSION objects.
if major_version is not None:
cls.default_major_version = major_version
if minor_version is not None:
cls.default_minor_version = minor_version
def __init__(self, data = None,isNDR64 = False):
NDRSTRUCT.__init__(self, data, isNDR64)
if data is None:
self['MajorVersion'] = self.default_major_version
self['MinorVersion'] = self.default_minor_version
class PCOMVERSION(NDRPOINTER):
referent = (
('Data', COMVERSION),
)
# 2.2.13.1 ORPC_EXTENT
# This MUST contain an array of bytes that form the extent data.
# The array size MUST be a multiple of 8 for alignment reasons.
class BYTE_ARRAY(NDRUniConformantArray):
item = 'c'
class ORPC_EXTENT(NDRSTRUCT):
structure = (
('id',GUID),
('size',ULONG),
('data',BYTE_ARRAY),
)
# 2.2.13.2 ORPC_EXTENT_ARRAY
# This MUST be an array of ORPC_EXTENTs. The array size MUST be a multiple of 2 for alignment reasons.
class PORPC_EXTENT(NDRPOINTER):
referent = (
('Data', ORPC_EXTENT),
)
class EXTENT_ARRAY(NDRUniConformantArray):
item = PORPC_EXTENT
class PEXTENT_ARRAY(NDRPOINTER):
referent = (
('Data', EXTENT_ARRAY),
)
class ORPC_EXTENT_ARRAY(NDRSTRUCT):
structure = (
('size',ULONG),
('reserved',ULONG),
('extent',PEXTENT_ARRAY),
)
class PORPC_EXTENT_ARRAY(NDRPOINTER):
referent = (
('Data', ORPC_EXTENT_ARRAY),
)
# 2.2.13.3 ORPCTHIS
class ORPCTHIS(NDRSTRUCT):
structure = (
('version',COMVERSION),
('flags',ULONG),
('reserved1',ULONG),
('cid',CID),
('extensions',PORPC_EXTENT_ARRAY),
)
# 2.2.13.4 ORPCTHAT
class ORPCTHAT(NDRSTRUCT):
structure = (
('flags',ULONG),
('extensions',PORPC_EXTENT_ARRAY),
)
# 2.2.14 MInterfacePointer
class MInterfacePointer(NDRSTRUCT):
structure = (
('ulCntData',ULONG),
('abData',BYTE_ARRAY),
)
# 2.2.15 PMInterfacePointerInternal
class PMInterfacePointerInternal(NDRPOINTER):
referent = (
('Data', MInterfacePointer),
)
# 2.2.16 PMInterfacePointer
class PMInterfacePointer(NDRPOINTER):
referent = (
('Data', MInterfacePointer),
)
class PPMInterfacePointer(NDRPOINTER):
referent = (
('Data', PMInterfacePointer),
)
# 2.2.18 OBJREF
class OBJREF(NDRSTRUCT):
commonHdr = (
('signature',ULONG),
('flags',ULONG),
('iid',GUID),
)
def __init__(self, data = None,isNDR64 = False):
NDRSTRUCT.__init__(self, data, isNDR64)
if data is None:
self['signature'] = 0x574F454D
# 2.2.18.1 STDOBJREF
class STDOBJREF(NDRSTRUCT):
structure = (
('flags',ULONG),
('cPublicRefs',ULONG),
('oxid',OXID),
('oid',OID),
('ipid',IPID),
)
# 2.2.18.4 OBJREF_STANDARD
class OBJREF_STANDARD(OBJREF):
structure = (
('std',STDOBJREF),
('saResAddr',':'),
)
def __init__(self, data = None,isNDR64 = False):
OBJREF.__init__(self, data, isNDR64)
if data is None:
self['flags'] = FLAGS_OBJREF_STANDARD
# 2.2.18.5 OBJREF_HANDLER
class OBJREF_HANDLER(OBJREF):
structure = (
('std',STDOBJREF),
('clsid',CLSID),
('saResAddr',':'),
)
def __init__(self, data = None,isNDR64 = False):
OBJREF.__init__(self, data, isNDR64)
if data is None:
self['flags'] = FLAGS_OBJREF_HANDLER
# 2.2.18.6 OBJREF_CUSTOM
class OBJREF_CUSTOM(OBJREF):
structure = (
('clsid',CLSID),
('cbExtension',ULONG),
('ObjectReferenceSize',ULONG),
('pObjectData',':'),
)
def __init__(self, data = None,isNDR64 = False):
OBJREF.__init__(self, data, isNDR64)
if data is None:
self['flags'] = FLAGS_OBJREF_CUSTOM
# 2.2.18.8 DATAELEMENT
class DATAELEMENT(NDRSTRUCT):
structure = (
('dataID',GUID),
('cbSize',ULONG),
('cbRounded',ULONG),
('Data',':'),
)
class DUALSTRINGARRAYPACKED(NDRSTRUCT):
structure = (
('wNumEntries',USHORT),
('wSecurityOffset',USHORT),
('aStringArray',':'),
)
def getDataLen(self, data, offset=0):
return self['wNumEntries']*2
# 2.2.18.7 OBJREF_EXTENDED
class OBJREF_EXTENDED(OBJREF):
structure = (
('std',STDOBJREF),
('Signature1',ULONG),
('saResAddr',DUALSTRINGARRAYPACKED),
('nElms',ULONG),
('Signature2',ULONG),
('ElmArray',DATAELEMENT),
)
def __init__(self, data = None, isNDR64 = False):
OBJREF.__init__(self, data, isNDR64)
if data is None:
self['flags'] = FLAGS_OBJREF_EXTENDED
self['Signature1'] = 0x4E535956
            self['Signature2'] = 0x4E535956
self['nElms'] = 0x4E535956
# 2.2.19 DUALSTRINGARRAY
class USHORT_ARRAY(NDRUniConformantArray):
item = '<H'
class PUSHORT_ARRAY(NDRPOINTER):
referent = (
('Data', USHORT_ARRAY),
)
class DUALSTRINGARRAY(NDRSTRUCT):
structure = (
('wNumEntries',USHORT),
('wSecurityOffset',USHORT),
('aStringArray',USHORT_ARRAY),
)
class PDUALSTRINGARRAY(NDRPOINTER):
referent = (
('Data',DUALSTRINGARRAY),
)
# 2.2.19.3 STRINGBINDING
class STRINGBINDING(NDRSTRUCT):
structure = (
('wTowerId',USHORT),
('aNetworkAddr',WIDESTR),
)
# 2.2.19.4 SECURITYBINDING
class SECURITYBINDING(NDRSTRUCT):
structure = (
('wAuthnSvc',USHORT),
('Reserved',USHORT),
('aPrincName',WIDESTR),
)
# 2.2.20.1 PROPMARSHALHEADER
class PROPMARSHALHEADER(NDRSTRUCT):
structure = (
('clsid',CLSID),
('policyId',GUID),
('flags',ULONG),
('cb',ULONG),
('ctxProperty',':'),
)
class PROPMARSHALHEADER_ARRAY(NDRUniConformantArray):
item = PROPMARSHALHEADER
# 2.2.20 Context
class Context(NDRSTRUCT):
structure = (
('MajorVersion',USHORT),
('MinVersion',USHORT),
('ContextId',GUID),
('Flags',ULONG),
('Reserved',ULONG),
('dwNumExtents',ULONG),
('cbExtents',ULONG),
('MshlFlags',ULONG),
('Count',ULONG),
('Frozen',ULONG),
('PropMarshalHeader',PROPMARSHALHEADER_ARRAY),
)
# 2.2.21.3 ErrorInfoString
class ErrorInfoString(NDRSTRUCT):
structure = (
('dwMax',ULONG),
('dwOffSet',ULONG),
('dwActual',IID),
('Name',WSTR),
)
# 2.2.21.2 Custom-Marshaled Error Information Format
class ORPC_ERROR_INFORMATION(NDRSTRUCT):
structure = (
('dwVersion',ULONG),
('dwHelpContext',ULONG),
('iid',IID),
('dwSourceSignature',ULONG),
('Source',ErrorInfoString),
('dwDescriptionSignature',ULONG),
('Description',ErrorInfoString),
('dwHelpFileSignature',ULONG),
('HelpFile',ErrorInfoString),
)
# 2.2.21.5 EntryHeader
class EntryHeader(NDRSTRUCT):
structure = (
('Signature',ULONG),
('cbEHBuffer',ULONG),
('cbSize',ULONG),
('reserved',ULONG),
('policyID',GUID),
)
class EntryHeader_ARRAY(NDRUniConformantArray):
item = EntryHeader
# 2.2.21.4 Context ORPC Extension
class ORPC_CONTEXT(NDRSTRUCT):
structure = (
('SignatureVersion',ULONG),
('Version',ULONG),
('cPolicies',ULONG),
('cbBuffer',ULONG),
('cbSize',ULONG),
('hr',ULONG),
('hrServer',ULONG),
('reserved',ULONG),
('EntryHeader',EntryHeader_ARRAY),
('PolicyData',':'),
)
def __init__(self, data = None, isNDR64 = False):
NDRSTRUCT.__init__(self, data, isNDR64)
if data is None:
self['SignatureVersion'] = 0x414E554B
# 2.2.22.1 CustomHeader
class CLSID_ARRAY(NDRUniConformantArray):
item = CLSID
class PCLSID_ARRAY(NDRPOINTER):
referent = (
('Data', CLSID_ARRAY),
)
class DWORD_ARRAY(NDRUniConformantArray):
item = DWORD
class PDWORD_ARRAY(NDRPOINTER):
referent = (
('Data', DWORD_ARRAY),
)
class CustomHeader(TypeSerialization1):
structure = (
('totalSize',DWORD),
('headerSize',DWORD),
('dwReserved',DWORD),
('destCtx',DWORD),
('cIfs',DWORD),
('classInfoClsid',CLSID),
('pclsid',PCLSID_ARRAY),
('pSizes',PDWORD_ARRAY),
('pdwReserved',LPLONG),
#('pdwReserved',LONG),
)
def getData(self, soFar = 0):
self['headerSize'] = len(TypeSerialization1.getData(self, soFar)) + len(
TypeSerialization1.getDataReferents(self, soFar))
self['cIfs'] = len(self['pclsid'])
return TypeSerialization1.getData(self, soFar)
# 2.2.22 Activation Properties BLOB
class ACTIVATION_BLOB(NDRTLSTRUCT):
structure = (
('dwSize',ULONG),
('dwReserved',ULONG),
('CustomHeader',CustomHeader),
('Property',UNKNOWNDATA),
)
def getData(self, soFar = 0):
self['dwSize'] = len(self['CustomHeader'].getData(soFar)) + len(
self['CustomHeader'].getDataReferents(soFar)) + len(self['Property'])
self['CustomHeader']['totalSize'] = self['dwSize']
return NDRTLSTRUCT.getData(self)
# 2.2.22.2.1 InstantiationInfoData
class IID_ARRAY(NDRUniConformantArray):
item = IID
class PIID_ARRAY(NDRPOINTER):
referent = (
('Data', IID_ARRAY),
)
class InstantiationInfoData(TypeSerialization1):
structure = (
('classId',CLSID),
('classCtx',DWORD),
('actvflags',DWORD),
('fIsSurrogate',LONG),
('cIID',DWORD),
('instFlag',DWORD),
('pIID',PIID_ARRAY),
('thisSize',DWORD),
('clientCOMVersion',COMVERSION),
)
# 2.2.22.2.2 SpecialPropertiesData
class SpecialPropertiesData(TypeSerialization1):
structure = (
('dwSessionId',ULONG),
('fRemoteThisSessionId',LONG),
('fClientImpersonating',LONG),
('fPartitionIDPresent',LONG),
('dwDefaultAuthnLvl',DWORD),
('guidPartition',GUID),
('dwPRTFlags',DWORD),
('dwOrigClsctx',DWORD),
('dwFlags',DWORD),
('Reserved0',DWORD),
('Reserved0',DWORD),
('Reserved', '32s=""'),
#('Reserved1',DWORD),
#('Reserved2',ULONGLONG),
#('Reserved3_1',DWORD),
#('Reserved3_2',DWORD),
#('Reserved3_3',DWORD),
#('Reserved3_4',DWORD),
#('Reserved3_5',DWORD),
)
# 2.2.22.2.3 InstanceInfoData
class InstanceInfoData(TypeSerialization1):
structure = (
('fileName',LPWSTR),
('mode',DWORD),
('ifdROT',PMInterfacePointer),
('ifdStg',PMInterfacePointer),
)
# 2.2.22.2.4.1 customREMOTE_REQUEST_SCM_INFO
class customREMOTE_REQUEST_SCM_INFO(NDRSTRUCT):
structure = (
('ClientImpLevel',DWORD),
('cRequestedProtseqs',USHORT),
('pRequestedProtseqs',PUSHORT_ARRAY),
)
class PcustomREMOTE_REQUEST_SCM_INFO(NDRPOINTER):
referent = (
('Data', customREMOTE_REQUEST_SCM_INFO),
)
# 2.2.22.2.4 ScmRequestInfoData
class ScmRequestInfoData(TypeSerialization1):
structure = (
('pdwReserved',LPLONG),
('remoteRequest',PcustomREMOTE_REQUEST_SCM_INFO),
)
# 2.2.22.2.5 ActivationContextInfoData
class ActivationContextInfoData(TypeSerialization1):
structure = (
('clientOK',LONG),
('bReserved1',LONG),
('dwReserved1',DWORD),
('dwReserved2',DWORD),
('pIFDClientCtx',PMInterfacePointer),
('pIFDPrototypeCtx',PMInterfacePointer),
)
# 2.2.22.2.6 LocationInfoData
class LocationInfoData(TypeSerialization1):
structure = (
('machineName',LPWSTR),
('processId',DWORD),
('apartmentId',DWORD),
('contextId',DWORD),
)
# 2.2.22.2.7.1 COSERVERINFO
class COSERVERINFO(NDRSTRUCT):
structure = (
('dwReserved1',DWORD),
('pwszName',LPWSTR),
('pdwReserved',LPLONG),
('dwReserved2',DWORD),
)
class PCOSERVERINFO(NDRPOINTER):
referent = (
('Data', COSERVERINFO),
)
# 2.2.22.2.7 SecurityInfoData
class SecurityInfoData(TypeSerialization1):
structure = (
('dwAuthnFlags',DWORD),
('pServerInfo',PCOSERVERINFO),
('pdwReserved',LPLONG),
)
# 2.2.22.2.8.1 customREMOTE_REPLY_SCM_INFO
class customREMOTE_REPLY_SCM_INFO(NDRSTRUCT):
structure = (
('Oxid',OXID),
('pdsaOxidBindings',PDUALSTRINGARRAY),
('ipidRemUnknown',IPID),
('authnHint',DWORD),
('serverVersion',COMVERSION),
)
class PcustomREMOTE_REPLY_SCM_INFO(NDRPOINTER):
referent = (
('Data', customREMOTE_REPLY_SCM_INFO),
)
# 2.2.22.2.8 ScmReplyInfoData
class ScmReplyInfoData(TypeSerialization1):
structure = (
('pdwReserved',DWORD),
('remoteReply',PcustomREMOTE_REPLY_SCM_INFO),
)
# 2.2.22.2.9 PropsOutInfo
class HRESULT_ARRAY(NDRUniConformantArray):
item = HRESULT
class PHRESULT_ARRAY(NDRPOINTER):
referent = (
('Data', HRESULT_ARRAY),
)
class MInterfacePointer_ARRAY(NDRUniConformantArray):
item = MInterfacePointer
class PMInterfacePointer_ARRAY(NDRUniConformantArray):
item = PMInterfacePointer
class PPMInterfacePointer_ARRAY(NDRPOINTER):
referent = (
('Data', PMInterfacePointer_ARRAY),
)
class PropsOutInfo(TypeSerialization1):
structure = (
('cIfs',DWORD),
('piid',PIID_ARRAY),
('phresults',PHRESULT_ARRAY),
('ppIntfData',PPMInterfacePointer_ARRAY),
)
# 2.2.23 REMINTERFACEREF
class REMINTERFACEREF(NDRSTRUCT):
structure = (
('ipid',IPID),
('cPublicRefs',LONG),
('cPrivateRefs',LONG),
)
class REMINTERFACEREF_ARRAY(NDRUniConformantArray):
item = REMINTERFACEREF
# 2.2.24 REMQIRESULT
class REMQIRESULT(NDRSTRUCT):
structure = (
('hResult',HRESULT),
('std',STDOBJREF),
)
# 2.2.25 PREMQIRESULT
class PREMQIRESULT(NDRPOINTER):
referent = (
('Data', REMQIRESULT),
)
# 2.2.26 REFIPID
REFIPID = GUID
################################################################################
# RPC CALLS
################################################################################
class DCOMCALL(NDRCALL):
commonHdr = (
('ORPCthis', ORPCTHIS),
)
class DCOMANSWER(NDRCALL):
commonHdr = (
('ORPCthat', ORPCTHAT),
)
# 3.1.2.5.1.1 IObjectExporter::ResolveOxid (Opnum 0)
class ResolveOxid(NDRCALL):
opnum = 0
structure = (
('pOxid', OXID),
('cRequestedProtseqs', USHORT),
('arRequestedProtseqs', USHORT_ARRAY),
)
class ResolveOxidResponse(NDRCALL):
structure = (
('ppdsaOxidBindings', PDUALSTRINGARRAY),
('pipidRemUnknown', IPID),
('pAuthnHint', DWORD),
('ErrorCode', error_status_t),
)
# 3.1.2.5.1.2 IObjectExporter::SimplePing (Opnum 1)
class SimplePing(NDRCALL):
opnum = 1
structure = (
('pSetId', SETID),
)
class SimplePingResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.2.5.1.3 IObjectExporter::ComplexPing (Opnum 2)
class ComplexPing(NDRCALL):
opnum = 2
structure = (
('pSetId', SETID),
('SequenceNum', USHORT),
('cAddToSet', USHORT),
('cDelFromSet', USHORT),
('AddToSet', POID_ARRAY),
('DelFromSet', POID_ARRAY),
)
class ComplexPingResponse(NDRCALL):
structure = (
('pSetId', SETID),
('pPingBackoffFactor', USHORT),
('ErrorCode', error_status_t),
)
# 3.1.2.5.1.4 IObjectExporter::ServerAlive (Opnum 3)
class ServerAlive(NDRCALL):
opnum = 3
structure = (
)
class ServerAliveResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.2.5.1.5 IObjectExporter::ResolveOxid2 (Opnum 4)
class ResolveOxid2(NDRCALL):
opnum = 4
structure = (
('pOxid', OXID),
('cRequestedProtseqs', USHORT),
('arRequestedProtseqs', USHORT_ARRAY),
)
class ResolveOxid2Response(NDRCALL):
structure = (
('ppdsaOxidBindings', PDUALSTRINGARRAY),
('pipidRemUnknown', IPID),
('pAuthnHint', DWORD),
('pComVersion', COMVERSION),
('ErrorCode', error_status_t),
)
# 3.1.2.5.1.6 IObjectExporter::ServerAlive2 (Opnum 5)
class ServerAlive2(NDRCALL):
opnum = 5
structure = (
)
class ServerAlive2Response(NDRCALL):
structure = (
('pComVersion', COMVERSION),
('ppdsaOrBindings', PDUALSTRINGARRAY),
('pReserved', LPLONG),
('ErrorCode', error_status_t),
)
# 3.1.2.5.2.3.1 IActivation:: RemoteActivation (Opnum 0)
class RemoteActivation(NDRCALL):
opnum = 0
structure = (
('ORPCthis', ORPCTHIS),
('Clsid', GUID),
('pwszObjectName', LPWSTR),
('pObjectStorage', PMInterfacePointer),
('ClientImpLevel', DWORD),
('Mode', DWORD),
('Interfaces', DWORD),
('pIIDs', PIID_ARRAY),
('cRequestedProtseqs', USHORT),
('aRequestedProtseqs', USHORT_ARRAY),
)
class RemoteActivationResponse(NDRCALL):
structure = (
('ORPCthat', ORPCTHAT),
('pOxid', OXID),
('ppdsaOxidBindings', PDUALSTRINGARRAY),
('pipidRemUnknown', IPID),
('pAuthnHint', DWORD),
('pServerVersion', COMVERSION),
('phr', HRESULT),
('ppInterfaceData', PMInterfacePointer_ARRAY),
('pResults', HRESULT_ARRAY),
('ErrorCode', error_status_t),
)
# 3.1.2.5.2.3.2 IRemoteSCMActivator:: RemoteGetClassObject (Opnum 3)
class RemoteGetClassObject(NDRCALL):
opnum = 3
structure = (
('ORPCthis', ORPCTHIS),
('pActProperties', PMInterfacePointer),
)
class RemoteGetClassObjectResponse(NDRCALL):
structure = (
('ORPCthat', ORPCTHAT),
('ppActProperties', PMInterfacePointer),
('ErrorCode', error_status_t),
)
# 3.1.2.5.2.3.3 IRemoteSCMActivator::RemoteCreateInstance (Opnum 4)
class RemoteCreateInstance(NDRCALL):
opnum = 4
structure = (
('ORPCthis', ORPCTHIS),
('pUnkOuter', PMInterfacePointer),
('pActProperties', PMInterfacePointer),
)
class RemoteCreateInstanceResponse(NDRCALL):
structure = (
('ORPCthat', ORPCTHAT),
('ppActProperties', PMInterfacePointer),
('ErrorCode', error_status_t),
)
# 3.1.1.5.6.1.1 IRemUnknown::RemQueryInterface (Opnum 3)
class RemQueryInterface(DCOMCALL):
opnum = 3
structure = (
('ripid', REFIPID),
('cRefs', ULONG),
('cIids', USHORT),
('iids', IID_ARRAY),
)
class RemQueryInterfaceResponse(DCOMANSWER):
structure = (
('ppQIResults', PREMQIRESULT),
('ErrorCode', error_status_t),
)
# 3.1.1.5.6.1.2 IRemUnknown::RemAddRef (Opnum 4 )
class RemAddRef(DCOMCALL):
opnum = 4
structure = (
('cInterfaceRefs', USHORT),
('InterfaceRefs', REMINTERFACEREF_ARRAY),
)
class RemAddRefResponse(DCOMANSWER):
structure = (
('pResults', DWORD_ARRAY),
('ErrorCode', error_status_t),
)
# 3.1.1.5.6.1.3 IRemUnknown::RemRelease (Opnum 5)
class RemRelease(DCOMCALL):
opnum = 5
structure = (
('cInterfaceRefs', USHORT),
('InterfaceRefs', REMINTERFACEREF_ARRAY),
)
class RemReleaseResponse(DCOMANSWER):
structure = (
('ErrorCode', error_status_t),
)
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
}
################################################################################
# HELPER FUNCTIONS
################################################################################
class DCOMConnection:
"""
This class represents a DCOM Connection. It is in charge of establishing the
DCE connection against the portmap, and then launch a thread that will be
pinging the objects created against the target.
In theory, there should be a single instance of this class for every target
"""
PINGTIMER = None
OID_ADD = {}
OID_DEL = {}
OID_SET = {}
PORTMAPS = {}
def __init__(self, target, username='', password='', domain='', lmhash='', nthash='', aesKey='', TGT=None, TGS=None,
authLevel=RPC_C_AUTHN_LEVEL_PKT_PRIVACY, oxidResolver=False, doKerberos=False, kdcHost=None):
self.__target = target
self.__userName = username
self.__password = password
self.__domain = domain
self.__lmhash = lmhash
self.__nthash = nthash
self.__aesKey = aesKey
self.__TGT = TGT
self.__TGS = TGS
self.__authLevel = authLevel
self.__portmap = None
self.__oxidResolver = oxidResolver
self.__doKerberos = doKerberos
self.__kdcHost = kdcHost
self.initConnection()
@classmethod
def addOid(cls, target, oid):
if (target in DCOMConnection.OID_ADD) is False:
DCOMConnection.OID_ADD[target] = set()
DCOMConnection.OID_ADD[target].add(oid)
if (target in DCOMConnection.OID_SET) is False:
DCOMConnection.OID_SET[target] = {}
DCOMConnection.OID_SET[target]['oids'] = set()
DCOMConnection.OID_SET[target]['setid'] = 0
@classmethod
def delOid(cls, target, oid):
if (target in DCOMConnection.OID_DEL) is False:
DCOMConnection.OID_DEL[target] = set()
DCOMConnection.OID_DEL[target].add(oid)
if (target in DCOMConnection.OID_SET) is False:
DCOMConnection.OID_SET[target] = {}
DCOMConnection.OID_SET[target]['oids'] = set()
DCOMConnection.OID_SET[target]['setid'] = 0
@classmethod
def pingServer(cls):
# Here we need to go through all the objects opened and ping them.
# ToDo: locking for avoiding race conditions
#print DCOMConnection.PORTMAPS
#print DCOMConnection.OID_SET
try:
for target in DCOMConnection.OID_SET:
addedOids = set()
deletedOids = set()
if target in DCOMConnection.OID_ADD:
addedOids = DCOMConnection.OID_ADD[target]
del(DCOMConnection.OID_ADD[target])
if target in DCOMConnection.OID_DEL:
deletedOids = DCOMConnection.OID_DEL[target]
del(DCOMConnection.OID_DEL[target])
objExporter = IObjectExporter(DCOMConnection.PORTMAPS[target])
if len(addedOids) > 0 or len(deletedOids) > 0:
if 'setid' in DCOMConnection.OID_SET[target]:
setId = DCOMConnection.OID_SET[target]['setid']
else:
setId = 0
resp = objExporter.ComplexPing(setId, 0, addedOids, deletedOids)
DCOMConnection.OID_SET[target]['oids'] -= deletedOids
DCOMConnection.OID_SET[target]['oids'] |= addedOids
DCOMConnection.OID_SET[target]['setid'] = resp['pSetId']
else:
objExporter.SimplePing(DCOMConnection.OID_SET[target]['setid'])
except Exception as e:
# There might be exceptions when sending packets
# We should try to continue tho.
LOG.error(str(e))
pass
DCOMConnection.PINGTIMER = Timer(120,DCOMConnection.pingServer)
try:
DCOMConnection.PINGTIMER.start()
except Exception as e:
if str(e).find('threads can only be started once') < 0:
raise e
def initTimer(self):
if self.__oxidResolver is True:
if DCOMConnection.PINGTIMER is None:
DCOMConnection.PINGTIMER = Timer(120, DCOMConnection.pingServer)
try:
DCOMConnection.PINGTIMER.start()
except Exception as e:
if str(e).find('threads can only be started once') < 0:
raise e
def initConnection(self):
stringBinding = r'ncacn_ip_tcp:%s' % self.__target
rpctransport = transport.DCERPCTransportFactory(stringBinding)
if hasattr(rpctransport, 'set_credentials') and len(self.__userName) >=0:
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__userName, self.__password, self.__domain, self.__lmhash, self.__nthash,
self.__aesKey, self.__TGT, self.__TGS)
rpctransport.set_kerberos(self.__doKerberos, self.__kdcHost)
self.__portmap = rpctransport.get_dce_rpc()
self.__portmap.set_auth_level(self.__authLevel)
if self.__doKerberos is True:
self.__portmap.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE)
self.__portmap.connect()
DCOMConnection.PORTMAPS[self.__target] = self.__portmap
def CoCreateInstanceEx(self, clsid, iid):
scm = IRemoteSCMActivator(self.__portmap)
iInterface = scm.RemoteCreateInstance(clsid, iid)
self.initTimer()
return iInterface
def get_dce_rpc(self):
return DCOMConnection.PORTMAPS[self.__target]
def disconnect(self):
if DCOMConnection.PINGTIMER is not None:
del(DCOMConnection.PORTMAPS[self.__target])
del(DCOMConnection.OID_SET[self.__target])
if len(DCOMConnection.PORTMAPS) == 0:
# This means there are no more clients using this object, kill it
DCOMConnection.PINGTIMER.cancel()
DCOMConnection.PINGTIMER.join()
DCOMConnection.PINGTIMER = None
if self.__target in INTERFACE.CONNECTIONS:
del(INTERFACE.CONNECTIONS[self.__target][current_thread().name])
self.__portmap.disconnect()
#print INTERFACE.CONNECTIONS
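# Hedged usage sketch (not part of the original module): one DCOMConnection per
# target, as the class docstring suggests. Host, credentials and CLSID below
# are placeholders.
#
#     dcom = DCOMConnection('192.0.2.10', username='user', password='pass',
#                           domain='CONTOSO', oxidResolver=True)
#     iface = dcom.CoCreateInstanceEx(string_to_bin('00000000-0000-0000-0000-000000000000'),
#                                     IID_IUnknown)
#     dcom.disconnect()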
class CLASS_INSTANCE:
def __init__(self, ORPCthis, stringBinding):
self.__stringBindings = stringBinding
self.__ORPCthis = ORPCthis
self.__authType = RPC_C_AUTHN_WINNT
self.__authLevel = RPC_C_AUTHN_LEVEL_PKT_PRIVACY
def get_ORPCthis(self):
return self.__ORPCthis
def get_string_bindings(self):
return self.__stringBindings
def get_auth_level(self):
if RPC_C_AUTHN_LEVEL_NONE < self.__authLevel < RPC_C_AUTHN_LEVEL_PKT_PRIVACY:
if self.__authType == RPC_C_AUTHN_WINNT:
return RPC_C_AUTHN_LEVEL_PKT_INTEGRITY
else:
return RPC_C_AUTHN_LEVEL_PKT_PRIVACY
return self.__authLevel
def set_auth_level(self, level):
self.__authLevel = level
def get_auth_type(self):
return self.__authType
def set_auth_type(self, authType):
self.__authType = authType
class INTERFACE:
# class variable holding the transport connections, organized by target IP
CONNECTIONS = {}
def __init__(self, cinstance=None, objRef=None, ipidRemUnknown=None, iPid=None, oxid=None, oid=None, target=None,
interfaceInstance=None):
if interfaceInstance is not None:
self.__target = interfaceInstance.get_target()
self.__iPid = interfaceInstance.get_iPid()
self.__oid = interfaceInstance.get_oid()
self.__oxid = interfaceInstance.get_oxid()
self.__cinstance = interfaceInstance.get_cinstance()
self.__objRef = interfaceInstance.get_objRef()
self.__ipidRemUnknown = interfaceInstance.get_ipidRemUnknown()
else:
if target is None:
raise Exception('No target')
self.__target = target
self.__iPid = iPid
self.__oid = oid
self.__oxid = oxid
self.__cinstance = cinstance
self.__objRef = objRef
self.__ipidRemUnknown = ipidRemUnknown
# We gotta check if we have a container inside our connection list, if not, create
if (self.__target in INTERFACE.CONNECTIONS) is not True:
INTERFACE.CONNECTIONS[self.__target] = {}
INTERFACE.CONNECTIONS[self.__target][current_thread().name] = {}
if objRef is not None:
self.process_interface(objRef)
def process_interface(self, data):
objRefType = OBJREF(data)['flags']
objRef = None
if objRefType == FLAGS_OBJREF_CUSTOM:
objRef = OBJREF_CUSTOM(data)
elif objRefType == FLAGS_OBJREF_HANDLER:
objRef = OBJREF_HANDLER(data)
elif objRefType == FLAGS_OBJREF_STANDARD:
objRef = OBJREF_STANDARD(data)
elif objRefType == FLAGS_OBJREF_EXTENDED:
objRef = OBJREF_EXTENDED(data)
else:
LOG.error("Unknown OBJREF Type! 0x%x" % objRefType)
if objRefType != FLAGS_OBJREF_CUSTOM:
if objRef['std']['flags'] & SORF_NOPING == 0:
DCOMConnection.addOid(self.__target, objRef['std']['oid'])
self.__iPid = objRef['std']['ipid']
self.__oid = objRef['std']['oid']
self.__oxid = objRef['std']['oxid']
if self.__oxid is None:
objRef.dump()
raise Exception('OXID is None')
def get_oxid(self):
return self.__oxid
def set_oxid(self, oxid):
self.__oxid = oxid
def get_oid(self):
return self.__oid
def set_oid(self, oid):
self.__oid = oid
def get_target(self):
return self.__target
def get_iPid(self):
return self.__iPid
def set_iPid(self, iPid):
self.__iPid = iPid
def get_objRef(self):
return self.__objRef
def set_objRef(self, objRef):
self.__objRef = objRef
def get_ipidRemUnknown(self):
return self.__ipidRemUnknown
def get_dce_rpc(self):
return INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid]['dce']
def get_cinstance(self):
return self.__cinstance
def set_cinstance(self, cinstance):
self.__cinstance = cinstance
def is_fdqn(self):
# I will assume the following
# If I can't socket.inet_aton() then it's not an IPv4 address
# Same for ipv6, but since socket.inet_pton is not available in Windows, I'll look for ':'. There can't be
# an FQDN with ':'
        # If it is neither, then it is an FQDN
try:
socket.inet_aton(self.__target)
except:
# Not an IPv4
try:
self.__target.index(':')
except:
# Not an IPv6, it's a FDQN
return True
return False
def connect(self, iid = None):
if (self.__target in INTERFACE.CONNECTIONS) is True:
if current_thread().name in INTERFACE.CONNECTIONS[self.__target] and \
(self.__oxid in INTERFACE.CONNECTIONS[self.__target][current_thread().name]) is True:
dce = INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid]['dce']
currentBinding = INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid]['currentBinding']
if currentBinding == iid:
# We don't need to alter_ctx
pass
else:
newDce = dce.alter_ctx(iid)
INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid]['dce'] = newDce
INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid]['currentBinding'] = iid
else:
stringBindings = self.get_cinstance().get_string_bindings()
# No OXID present, we should create a new connection and store it
stringBinding = None
isTargetFDQN = self.is_fdqn()
LOG.debug('Target system is %s and isFDQN is %s' % (self.get_target(), isTargetFDQN))
for strBinding in stringBindings:
# Here, depending on the get_target() value several things can happen
# 1) it's an IPv4 address
# 2) it's an IPv6 address
# 3) it's a NetBios Name
# we should handle all this cases accordingly
# Does this match exactly what get_target() returns?
LOG.debug('StringBinding: %s' % strBinding['aNetworkAddr'])
if strBinding['wTowerId'] == 7:
# If there's port information, let's strip it for now.
if strBinding['aNetworkAddr'].find('[') >= 0:
binding, _, bindingPort = strBinding['aNetworkAddr'].partition('[')
bindingPort = '[' + bindingPort
else:
binding = strBinding['aNetworkAddr']
bindingPort = ''
if binding.upper().find(self.get_target().upper()) >= 0:
stringBinding = 'ncacn_ip_tcp:' + strBinding['aNetworkAddr'][:-1]
break
# If get_target() is a FQDN, does it match the hostname?
elif isTargetFDQN and binding.upper().find(self.get_target().upper().partition('.')[0]) >= 0:
# Here we replace the aNetworkAddr with self.get_target()
# This is to help resolving the target system name.
# self.get_target() has been resolved already otherwise we wouldn't be here whereas
# aNetworkAddr is usually the NetBIOS name and unless you have your DNS resolver
# with the right suffixes it will probably not resolve right.
stringBinding = 'ncacn_ip_tcp:%s%s' % (self.get_target(), bindingPort)
break
LOG.debug('StringBinding chosen: %s' % stringBinding)
if stringBinding is None:
                    # Something went wrong, let's just report it
raise Exception('Can\'t find a valid stringBinding to connect')
dcomInterface = transport.DCERPCTransportFactory(stringBinding)
if hasattr(dcomInterface, 'set_credentials'):
# This method exists only for selected protocol sequences.
dcomInterface.set_credentials(*DCOMConnection.PORTMAPS[self.__target].get_credentials())
dcomInterface.set_kerberos(DCOMConnection.PORTMAPS[self.__target].get_rpc_transport().get_kerberos(),
DCOMConnection.PORTMAPS[self.__target].get_rpc_transport().get_kdcHost())
dcomInterface.set_connect_timeout(300)
dce = dcomInterface.get_dce_rpc()
if iid is None:
raise Exception('IID is None')
else:
dce.set_auth_level(self.__cinstance.get_auth_level())
dce.set_auth_type(self.__cinstance.get_auth_type())
dce.connect()
if iid is None:
raise Exception('IID is None')
else:
dce.bind(iid)
if self.__oxid is None:
#import traceback
#traceback.print_stack()
raise Exception("OXID NONE, something wrong!!!")
INTERFACE.CONNECTIONS[self.__target][current_thread().name] = {}
INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid] = {}
INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid]['dce'] = dce
INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid]['currentBinding'] = iid
else:
# No connection created
raise Exception('No connection created')
def request(self, req, iid = None, uuid = None):
req['ORPCthis'] = self.get_cinstance().get_ORPCthis()
req['ORPCthis']['flags'] = 0
self.connect(iid)
dce = self.get_dce_rpc()
try:
resp = dce.request(req, uuid)
except Exception as e:
if str(e).find('RPC_E_DISCONNECTED') >= 0:
msg = str(e) + '\n'
msg += "DCOM keep-alive pinging it might not be working as expected. You can't be idle for more than 14 minutes!\n"
msg += "You should exit the app and start again\n"
raise DCERPCException(msg)
else:
raise
return resp
def disconnect(self):
return INTERFACE.CONNECTIONS[self.__target][current_thread().name][self.__oxid]['dce'].disconnect()
# 3.1.1.5.6.1 IRemUnknown Methods
class IRemUnknown(INTERFACE):
def __init__(self, interface):
self._iid = IID_IRemUnknown
#INTERFACE.__init__(self, interface.get_cinstance(), interface.get_objRef(), interface.get_ipidRemUnknown(),
# interface.get_iPid(), target=interface.get_target())
INTERFACE.__init__(self, interfaceInstance=interface)
self.set_oxid(interface.get_oxid())
def RemQueryInterface(self, cRefs, iids):
# For now, it only supports a single IID
request = RemQueryInterface()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['ripid'] = self.get_iPid()
request['cRefs'] = cRefs
request['cIids'] = len(iids)
for iid in iids:
_iid = IID()
_iid['Data'] = iid
request['iids'].append(_iid)
resp = self.request(request, IID_IRemUnknown, self.get_ipidRemUnknown())
#resp.dump()
return IRemUnknown2(
INTERFACE(self.get_cinstance(), None, self.get_ipidRemUnknown(), resp['ppQIResults']['std']['ipid'],
oxid=resp['ppQIResults']['std']['oxid'], oid=resp['ppQIResults']['std']['oxid'],
target=self.get_target()))
def RemAddRef(self):
request = RemAddRef()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['cInterfaceRefs'] = 1
element = REMINTERFACEREF()
element['ipid'] = self.get_iPid()
element['cPublicRefs'] = 1
request['InterfaceRefs'].append(element)
resp = self.request(request, IID_IRemUnknown, self.get_ipidRemUnknown())
return resp
def RemRelease(self):
request = RemRelease()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['cInterfaceRefs'] = 1
element = REMINTERFACEREF()
element['ipid'] = self.get_iPid()
element['cPublicRefs'] = 1
request['InterfaceRefs'].append(element)
resp = self.request(request, IID_IRemUnknown, self.get_ipidRemUnknown())
DCOMConnection.delOid(self.get_target(), self.get_oid())
return resp
# 3.1.1.5.7 IRemUnknown2 Interface
class IRemUnknown2(IRemUnknown):
def __init__(self, interface):
IRemUnknown.__init__(self, interface)
self._iid = IID_IRemUnknown2
# 3.1.2.5.1 IObjectExporter Methods
class IObjectExporter:
def __init__(self, dce):
self.__portmap = dce
# 3.1.2.5.1.1 IObjectExporter::ResolveOxid (Opnum 0)
def ResolveOxid(self, pOxid, arRequestedProtseqs):
self.__portmap.connect()
self.__portmap.bind(IID_IObjectExporter)
request = ResolveOxid()
request['pOxid'] = pOxid
request['cRequestedProtseqs'] = len(arRequestedProtseqs)
for protSeq in arRequestedProtseqs:
request['arRequestedProtseqs'].append(protSeq)
resp = self.__portmap.request(request)
Oxids = b''.join(pack('<H', x) for x in resp['ppdsaOxidBindings']['aStringArray'])
strBindings = Oxids[:resp['ppdsaOxidBindings']['wSecurityOffset']*2]
done = False
stringBindings = list()
while not done:
if strBindings[0:1] == b'\x00' and strBindings[1:2] == b'\x00':
done = True
else:
binding = STRINGBINDING(strBindings)
stringBindings.append(binding)
strBindings = strBindings[len(binding):]
return stringBindings
# 3.1.2.5.1.2 IObjectExporter::SimplePing (Opnum 1)
def SimplePing(self, setId):
self.__portmap.connect()
self.__portmap.bind(IID_IObjectExporter)
request = SimplePing()
request['pSetId'] = setId
resp = self.__portmap.request(request)
return resp
# 3.1.2.5.1.3 IObjectExporter::ComplexPing (Opnum 2)
def ComplexPing(self, setId = 0, sequenceNum = 0, addToSet = [], delFromSet = []):
self.__portmap.connect()
#self.__portmap.set_auth_level(RPC_C_AUTHN_LEVEL_PKT_INTEGRITY)
self.__portmap.bind(IID_IObjectExporter)
request = ComplexPing()
request['pSetId'] = setId
        request['SequenceNum'] = sequenceNum
request['cAddToSet'] = len(addToSet)
request['cDelFromSet'] = len(delFromSet)
if len(addToSet) > 0:
for oid in addToSet:
oidn = OID()
oidn['Data'] = oid
request['AddToSet'].append(oidn)
else:
request['AddToSet'] = NULL
if len(delFromSet) > 0:
for oid in delFromSet:
oidn = OID()
oidn['Data'] = oid
request['DelFromSet'].append(oidn)
else:
request['DelFromSet'] = NULL
resp = self.__portmap.request(request)
return resp
# 3.1.2.5.1.4 IObjectExporter::ServerAlive (Opnum 3)
def ServerAlive(self):
self.__portmap.connect()
self.__portmap.bind(IID_IObjectExporter)
request = ServerAlive()
resp = self.__portmap.request(request)
return resp
# 3.1.2.5.1.5 IObjectExporter::ResolveOxid2 (Opnum 4)
def ResolveOxid2(self,pOxid, arRequestedProtseqs):
self.__portmap.connect()
self.__portmap.bind(IID_IObjectExporter)
request = ResolveOxid2()
request['pOxid'] = pOxid
request['cRequestedProtseqs'] = len(arRequestedProtseqs)
for protSeq in arRequestedProtseqs:
request['arRequestedProtseqs'].append(protSeq)
resp = self.__portmap.request(request)
Oxids = b''.join(pack('<H', x) for x in resp['ppdsaOxidBindings']['aStringArray'])
strBindings = Oxids[:resp['ppdsaOxidBindings']['wSecurityOffset']*2]
done = False
stringBindings = list()
while not done:
if strBindings[0:1] == b'\x00' and strBindings[1:2] == b'\x00':
done = True
else:
binding = STRINGBINDING(strBindings)
stringBindings.append(binding)
strBindings = strBindings[len(binding):]
return stringBindings
# 3.1.2.5.1.6 IObjectExporter::ServerAlive2 (Opnum 5)
def ServerAlive2(self):
self.__portmap.connect()
self.__portmap.bind(IID_IObjectExporter)
request = ServerAlive2()
resp = self.__portmap.request(request)
Oxids = b''.join(pack('<H', x) for x in resp['ppdsaOrBindings']['aStringArray'])
strBindings = Oxids[:resp['ppdsaOrBindings']['wSecurityOffset']*2]
done = False
stringBindings = list()
while not done:
if strBindings[0:1] == b'\x00' and strBindings[1:2] == b'\x00':
done = True
else:
binding = STRINGBINDING(strBindings)
stringBindings.append(binding)
strBindings = strBindings[len(binding):]
return stringBindings
# 3.1.2.5.2.1 IActivation Methods
class IActivation:
def __init__(self, dce):
self.__portmap = dce
# 3.1.2.5.2.3.1 IActivation:: RemoteActivation (Opnum 0)
def RemoteActivation(self, clsId, iid):
# Only supports one interface at a time
self.__portmap.bind(IID_IActivation)
ORPCthis = ORPCTHIS()
ORPCthis['cid'] = generate()
ORPCthis['extensions'] = NULL
ORPCthis['flags'] = 1
request = RemoteActivation()
request['Clsid'] = clsId
request['pwszObjectName'] = NULL
request['pObjectStorage'] = NULL
request['ClientImpLevel'] = 2
request['Mode'] = 0
request['Interfaces'] = 1
_iid = IID()
_iid['Data'] = iid
request['pIIDs'].append(_iid)
request['cRequestedProtseqs'] = 1
request['aRequestedProtseqs'].append(7)
resp = self.__portmap.request(request)
# Now let's parse the answer and build an Interface instance
ipidRemUnknown = resp['pipidRemUnknown']
Oxids = b''.join(pack('<H', x) for x in resp['ppdsaOxidBindings']['aStringArray'])
strBindings = Oxids[:resp['ppdsaOxidBindings']['wSecurityOffset']*2]
securityBindings = Oxids[resp['ppdsaOxidBindings']['wSecurityOffset']*2:]
done = False
stringBindings = list()
while not done:
if strBindings[0:1] == b'\x00' and strBindings[1:2] == b'\x00':
done = True
else:
binding = STRINGBINDING(strBindings)
stringBindings.append(binding)
strBindings = strBindings[len(binding):]
done = False
while not done:
if len(securityBindings) < 2:
done = True
elif securityBindings[0:1] == b'\x00' and securityBindings[1:2 ]== b'\x00':
done = True
else:
secBinding = SECURITYBINDING(securityBindings)
securityBindings = securityBindings[len(secBinding):]
classInstance = CLASS_INSTANCE(ORPCthis, stringBindings)
return IRemUnknown2(INTERFACE(classInstance, b''.join(resp['ppInterfaceData'][0]['abData']), ipidRemUnknown,
target=self.__portmap.get_rpc_transport().getRemoteHost()))
# 3.1.2.5.2.2 IRemoteSCMActivator Methods
class IRemoteSCMActivator:
def __init__(self, dce):
self.__portmap = dce
def RemoteGetClassObject(self, clsId, iid):
# iid should be IID_IClassFactory
self.__portmap.bind(IID_IRemoteSCMActivator)
ORPCthis = ORPCTHIS()
ORPCthis['cid'] = generate()
ORPCthis['extensions'] = NULL
ORPCthis['flags'] = 1
request = RemoteGetClassObject()
request['ORPCthis'] = ORPCthis
activationBLOB = ACTIVATION_BLOB()
activationBLOB['CustomHeader']['destCtx'] = 2
activationBLOB['CustomHeader']['pdwReserved'] = NULL
clsid = CLSID()
clsid['Data'] = CLSID_InstantiationInfo
activationBLOB['CustomHeader']['pclsid'].append(clsid)
clsid = CLSID()
clsid['Data'] = CLSID_ActivationContextInfo
activationBLOB['CustomHeader']['pclsid'].append(clsid)
clsid = CLSID()
clsid['Data'] = CLSID_ServerLocationInfo
activationBLOB['CustomHeader']['pclsid'].append(clsid)
clsid = CLSID()
clsid['Data'] = CLSID_ScmRequestInfo
activationBLOB['CustomHeader']['pclsid'].append(clsid)
properties = b''
# InstantiationInfo
instantiationInfo = InstantiationInfoData()
instantiationInfo['classId'] = clsId
instantiationInfo['cIID'] = 1
_iid = IID()
_iid['Data'] = iid
instantiationInfo['pIID'].append(_iid)
dword = DWORD()
marshaled = instantiationInfo.getData()+instantiationInfo.getDataReferents()
pad = (8 - (len(marshaled) % 8)) % 8
dword['Data'] = len(marshaled) + pad
activationBLOB['CustomHeader']['pSizes'].append(dword)
instantiationInfo['thisSize'] = dword['Data']
properties += marshaled + b'\xFA'*pad
# ActivationContextInfoData
activationInfo = ActivationContextInfoData()
activationInfo['pIFDClientCtx'] = NULL
activationInfo['pIFDPrototypeCtx'] = NULL
dword = DWORD()
marshaled = activationInfo.getData()+activationInfo.getDataReferents()
pad = (8 - (len(marshaled) % 8)) % 8
dword['Data'] = len(marshaled) + pad
activationBLOB['CustomHeader']['pSizes'].append(dword)
properties += marshaled + b'\xFA'*pad
# ServerLocation
locationInfo = LocationInfoData()
locationInfo['machineName'] = NULL
dword = DWORD()
dword['Data'] = len(locationInfo.getData())
activationBLOB['CustomHeader']['pSizes'].append(dword)
properties += locationInfo.getData()+locationInfo.getDataReferents()
# ScmRequestInfo
scmInfo = ScmRequestInfoData()
scmInfo['pdwReserved'] = NULL
#scmInfo['remoteRequest']['ClientImpLevel'] = 2
scmInfo['remoteRequest']['cRequestedProtseqs'] = 1
scmInfo['remoteRequest']['pRequestedProtseqs'].append(7)
dword = DWORD()
marshaled = scmInfo.getData()+scmInfo.getDataReferents()
pad = (8 - (len(marshaled) % 8)) % 8
dword['Data'] = len(marshaled) + pad
activationBLOB['CustomHeader']['pSizes'].append(dword)
properties += marshaled + b'\xFA'*pad
activationBLOB['Property'] = properties
objrefcustom = OBJREF_CUSTOM()
objrefcustom['iid'] = IID_IActivationPropertiesIn[:-4]
objrefcustom['clsid'] = CLSID_ActivationPropertiesIn
objrefcustom['pObjectData'] = activationBLOB.getData()
objrefcustom['ObjectReferenceSize'] = len(objrefcustom['pObjectData'])+8
request['pActProperties']['ulCntData'] = len(objrefcustom.getData())
request['pActProperties']['abData'] = list(objrefcustom.getData())
resp = self.__portmap.request(request)
# Now let's parse the answer and build an Interface instance
objRefType = OBJREF(b''.join(resp['ppActProperties']['abData']))['flags']
objRef = None
if objRefType == FLAGS_OBJREF_CUSTOM:
objRef = OBJREF_CUSTOM(b''.join(resp['ppActProperties']['abData']))
elif objRefType == FLAGS_OBJREF_HANDLER:
objRef = OBJREF_HANDLER(b''.join(resp['ppActProperties']['abData']))
elif objRefType == FLAGS_OBJREF_STANDARD:
objRef = OBJREF_STANDARD(b''.join(resp['ppActProperties']['abData']))
elif objRefType == FLAGS_OBJREF_EXTENDED:
objRef = OBJREF_EXTENDED(b''.join(resp['ppActProperties']['abData']))
else:
LOG.error("Unknown OBJREF Type! 0x%x" % objRefType)
activationBlob = ACTIVATION_BLOB(objRef['pObjectData'])
propOutput = activationBlob['Property'][:activationBlob['CustomHeader']['pSizes'][0]['Data']]
scmReply = activationBlob['Property'][
activationBlob['CustomHeader']['pSizes'][0]['Data']:activationBlob['CustomHeader']['pSizes'][0]['Data'] +
activationBlob['CustomHeader']['pSizes'][1]['Data']]
scmr = ScmReplyInfoData()
size = scmr.fromString(scmReply)
# Processing the scmReply
scmr.fromStringReferents(scmReply[size:])
ipidRemUnknown = scmr['remoteReply']['ipidRemUnknown']
Oxids = b''.join(pack('<H', x) for x in scmr['remoteReply']['pdsaOxidBindings']['aStringArray'])
strBindings = Oxids[:scmr['remoteReply']['pdsaOxidBindings']['wSecurityOffset']*2]
securityBindings = Oxids[scmr['remoteReply']['pdsaOxidBindings']['wSecurityOffset']*2:]
done = False
stringBindings = list()
while not done:
if strBindings[0:1] == b'\x00' and strBindings[1:2] == b'\x00':
done = True
else:
binding = STRINGBINDING(strBindings)
stringBindings.append(binding)
strBindings = strBindings[len(binding):]
done = False
while not done:
if len(securityBindings) < 2:
done = True
elif securityBindings[0:1] == b'\x00' and securityBindings[1:2] == b'\x00':
done = True
else:
secBinding = SECURITYBINDING(securityBindings)
securityBindings = securityBindings[len(secBinding):]
# Processing the Properties Output
propsOut = PropsOutInfo()
size = propsOut.fromString(propOutput)
propsOut.fromStringReferents(propOutput[size:])
classInstance = CLASS_INSTANCE(ORPCthis, stringBindings)
classInstance.set_auth_level(scmr['remoteReply']['authnHint'])
classInstance.set_auth_type(self.__portmap.get_auth_type())
return IRemUnknown2(INTERFACE(classInstance, b''.join(propsOut['ppIntfData'][0]['abData']), ipidRemUnknown,
target=self.__portmap.get_rpc_transport().getRemoteHost()))
def RemoteCreateInstance(self, clsId, iid):
# Only supports one interface at a time
self.__portmap.bind(IID_IRemoteSCMActivator)
ORPCthis = ORPCTHIS()
ORPCthis['cid'] = generate()
ORPCthis['extensions'] = NULL
ORPCthis['flags'] = 1
request = RemoteCreateInstance()
request['ORPCthis'] = ORPCthis
request['pUnkOuter'] = NULL
activationBLOB = ACTIVATION_BLOB()
activationBLOB['CustomHeader']['destCtx'] = 2
activationBLOB['CustomHeader']['pdwReserved'] = NULL
clsid = CLSID()
clsid['Data'] = CLSID_InstantiationInfo
activationBLOB['CustomHeader']['pclsid'].append(clsid)
clsid = CLSID()
clsid['Data'] = CLSID_ActivationContextInfo
activationBLOB['CustomHeader']['pclsid'].append(clsid)
clsid = CLSID()
clsid['Data'] = CLSID_ServerLocationInfo
activationBLOB['CustomHeader']['pclsid'].append(clsid)
clsid = CLSID()
clsid['Data'] = CLSID_ScmRequestInfo
activationBLOB['CustomHeader']['pclsid'].append(clsid)
properties = b''
# InstantiationInfo
instantiationInfo = InstantiationInfoData()
instantiationInfo['classId'] = clsId
instantiationInfo['cIID'] = 1
_iid = IID()
_iid['Data'] = iid
instantiationInfo['pIID'].append(_iid)
dword = DWORD()
marshaled = instantiationInfo.getData()+instantiationInfo.getDataReferents()
pad = (8 - (len(marshaled) % 8)) % 8
dword['Data'] = len(marshaled) + pad
activationBLOB['CustomHeader']['pSizes'].append(dword)
instantiationInfo['thisSize'] = dword['Data']
properties += marshaled + b'\xFA'*pad
# ActivationContextInfoData
activationInfo = ActivationContextInfoData()
activationInfo['pIFDClientCtx'] = NULL
activationInfo['pIFDPrototypeCtx'] = NULL
dword = DWORD()
marshaled = activationInfo.getData()+activationInfo.getDataReferents()
pad = (8 - (len(marshaled) % 8)) % 8
dword['Data'] = len(marshaled) + pad
activationBLOB['CustomHeader']['pSizes'].append(dword)
properties += marshaled + b'\xFA'*pad
# ServerLocation
locationInfo = LocationInfoData()
locationInfo['machineName'] = NULL
dword = DWORD()
dword['Data'] = len(locationInfo.getData())
activationBLOB['CustomHeader']['pSizes'].append(dword)
properties += locationInfo.getData()+locationInfo.getDataReferents()
# ScmRequestInfo
scmInfo = ScmRequestInfoData()
scmInfo['pdwReserved'] = NULL
#scmInfo['remoteRequest']['ClientImpLevel'] = 2
scmInfo['remoteRequest']['cRequestedProtseqs'] = 1
scmInfo['remoteRequest']['pRequestedProtseqs'].append(7)
dword = DWORD()
marshaled = scmInfo.getData()+scmInfo.getDataReferents()
pad = (8 - (len(marshaled) % 8)) % 8
dword['Data'] = len(marshaled) + pad
activationBLOB['CustomHeader']['pSizes'].append(dword)
properties += marshaled + b'\xFA'*pad
activationBLOB['Property'] = properties
objrefcustom = OBJREF_CUSTOM()
objrefcustom['iid'] = IID_IActivationPropertiesIn[:-4]
objrefcustom['clsid'] = CLSID_ActivationPropertiesIn
objrefcustom['pObjectData'] = activationBLOB.getData()
objrefcustom['ObjectReferenceSize'] = len(objrefcustom['pObjectData'])+8
request['pActProperties']['ulCntData'] = len(objrefcustom.getData())
request['pActProperties']['abData'] = list(objrefcustom.getData())
resp = self.__portmap.request(request)
# Now let's parse the answer and build an Interface instance
objRefType = OBJREF(b''.join(resp['ppActProperties']['abData']))['flags']
objRef = None
if objRefType == FLAGS_OBJREF_CUSTOM:
objRef = OBJREF_CUSTOM(b''.join(resp['ppActProperties']['abData']))
elif objRefType == FLAGS_OBJREF_HANDLER:
objRef = OBJREF_HANDLER(b''.join(resp['ppActProperties']['abData']))
elif objRefType == FLAGS_OBJREF_STANDARD:
objRef = OBJREF_STANDARD(b''.join(resp['ppActProperties']['abData']))
elif objRefType == FLAGS_OBJREF_EXTENDED:
objRef = OBJREF_EXTENDED(b''.join(resp['ppActProperties']['abData']))
else:
LOG.error("Unknown OBJREF Type! 0x%x" % objRefType)
activationBlob = ACTIVATION_BLOB(objRef['pObjectData'])
propOutput = activationBlob['Property'][:activationBlob['CustomHeader']['pSizes'][0]['Data']]
scmReply = activationBlob['Property'][
activationBlob['CustomHeader']['pSizes'][0]['Data']:activationBlob['CustomHeader']['pSizes'][0]['Data'] +
activationBlob['CustomHeader']['pSizes'][1]['Data']]
scmr = ScmReplyInfoData()
size = scmr.fromString(scmReply)
# Processing the scmReply
scmr.fromStringReferents(scmReply[size:])
ipidRemUnknown = scmr['remoteReply']['ipidRemUnknown']
Oxids = b''.join(pack('<H', x) for x in scmr['remoteReply']['pdsaOxidBindings']['aStringArray'])
strBindings = Oxids[:scmr['remoteReply']['pdsaOxidBindings']['wSecurityOffset']*2]
securityBindings = Oxids[scmr['remoteReply']['pdsaOxidBindings']['wSecurityOffset']*2:]
done = False
stringBindings = list()
while not done:
if strBindings[0:1] == b'\x00' and strBindings[1:2] == b'\x00':
done = True
else:
binding = STRINGBINDING(strBindings)
stringBindings.append(binding)
strBindings = strBindings[len(binding):]
done = False
while not done:
if len(securityBindings) < 2:
done = True
elif securityBindings[0:1] == b'\x00' and securityBindings[1:2] == b'\x00':
done = True
else:
secBinding = SECURITYBINDING(securityBindings)
securityBindings = securityBindings[len(secBinding):]
# Processing the Properties Output
propsOut = PropsOutInfo()
size = propsOut.fromString(propOutput)
propsOut.fromStringReferents(propOutput[size:])
classInstance = CLASS_INSTANCE(ORPCthis, stringBindings)
classInstance.set_auth_level(scmr['remoteReply']['authnHint'])
classInstance.set_auth_type(self.__portmap.get_auth_type())
return IRemUnknown2(INTERFACE(classInstance, b''.join(propsOut['ppIntfData'][0]['abData']), ipidRemUnknown,
target=self.__portmap.get_rpc_transport().getRemoteHost()))
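# ----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original DCOM runtime
# code). It assumes `dce` is an impacket DCERPC endpoint already connected to
# the remote endpoint mapper (TCP/135), and that `clsId`/`iid` are packed
# CLSID/IID values like the ones used elsewhere in this module.
# ----------------------------------------------------------------------------
def _example_remote_create_instance(dce, clsId, iid):
    """Hedged sketch: activate a remote class through IRemoteSCMActivator."""
    scmActivator = IRemoteSCMActivator(dce)
    # RemoteCreateInstance returns an IRemUnknown2 wrapper around the activated
    # interface, built from the marshaled activation properties in the reply.
    return scmActivator.RemoteCreateInstance(clsId, iid)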
|
StarcoderdataPython
|
293446
|
# -*- coding: utf-8 -*-
from collections import Counter
from pathlib import Path
def total_answers( forms ):
total_yes = 0
group_answers = set( )
for form in forms:
if form:
group_answers = group_answers.union( form )
else:
total_yes += len( group_answers )
group_answers.clear( )
continue
return total_yes
def total_all_group_answer( forms ):
total_yes = 0
group_answers = ''
num_group_members = 0
for form in forms:
if form:
num_group_members += 1
group_answers += form
else:
counts = Counter( group_answers )
total_yes += sum( x == num_group_members for x in counts.values( ) )
group_answers = ''
num_group_members = 0
continue
    return total_yes
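# Illustration (added, not part of the original solution): expected behaviour on
# the small sample from the puzzle description, using the same blank-line-
# terminated group format as the file handling below.
def _example_counts():
    sample = ['abc', '', 'a', 'b', 'c', '', 'ab', 'ac', '', 'a', 'a', 'a', 'a', '', 'b', '']
    assert total_answers(sample) == 11           # questions anyone answered "yes" to
    assert total_all_group_answer(sample) == 6   # questions everyone answered "yes" to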
if __name__ == '__main__':
forms = [ x.strip( ) for x in ( Path.cwd( ) / '6_input.txt' ).open( ).readlines( ) ]
if forms[ -1 ]:
forms.append( '' )
# Part 1
print( total_answers( forms ) )
# Part 2
print( total_all_group_answer( forms ) )
|
StarcoderdataPython
|
183987
|
from django.apps import AppConfig
class IndigoConfig(AppConfig):
name = "indigo"
|
StarcoderdataPython
|
5158803
|
#!/usr/bin/python3
from control import PidController
class SonicController():
kP = 0.2
kI = 0
kD = 0.01
done = False
def __init__(self, target):
self.target = target
self.pid = PidController(self.kP, self.kI, self.kD, flipSign=True)
def calculate(self, distance):
output = self.pid.calculate(self.target, distance)
if (self.pid.done):
self.done = True
return output
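# Hedged usage sketch (added): drives the controller with a few fake distance
# readings. The 40 cm target and the readings are illustrative values only, and
# running this still requires the project's `control.PidController` import above.
if __name__ == "__main__":
    controller = SonicController(target=40.0)
    for reading in (120.0, 90.0, 60.0, 45.0, 41.0):
        output = controller.calculate(reading)
        print("distance=%.1f -> output=%.3f done=%s" % (reading, output, controller.done))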
|
StarcoderdataPython
|
1675794
|
"""Unit test package for install_webdrivers."""
|
StarcoderdataPython
|
11296737
|
# Write a program that reads a value in meters
# and displays it converted to centimeters and millimeters.
meters = int(input('Enter the measurement in meters: '))
print('In kilometers: {}km'.format(meters / 1000))
print('In meters: {}m'.format(meters))
print('In centimeters: {}cm'.format(meters * 100))
print('In millimeters: {}mm'.format(meters * 1000))
|
StarcoderdataPython
|
3372819
|
# four_sum.py
class Solution(object):
@staticmethod
def kSum(nums, k, start, target):
result = list()
# if nums[start] * k > target or nums[-1] * k < target:
# return result
if k == 2:
left, right = start, len(nums) - 1
while left < right:
if left > start and nums[left - 1] == nums[left]:
left += 1
continue
delta = nums[left] + nums[right] - target
if delta == 0:
result.append([nums[left], nums[right]])
left += 1
right -= 1
elif delta > 0:
right -= 1
else:
left += 1
return result
for i in range(start, len(nums) - k + 1):
if i > start and nums[i] == nums[i - 1]:
continue
sub_result = Solution.kSum(nums, k - 1, i + 1, target - nums[i])
for sub in sub_result:
                sub.insert(0, nums[i])
result.append(sub)
return result
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums.sort()
return Solution.kSum(nums, 4, 0, target)
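# Example usage (added for illustration): the classic LeetCode 4Sum sample input.
if __name__ == "__main__":
    solver = Solution()
    print(solver.fourSum([1, 0, -1, 0, -2, 2], 0))
    # -> [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]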
|
StarcoderdataPython
|
3207419
|
# services/web/server/main/views.py
try:
from pyspark import SparkContext, SparkConf,SQLContext
from pyspark.sql.functions import to_date,lit,desc,col
from pyspark.sql import Row
from operator import add
from server.main.utils import get_requireddataframe_fromcsv
import sys
except ImportError as exc:
    print('error importing pyspark dependencies: %s' % exc)
def create_task(words):
conf = SparkConf().setAppName('letter count')
sc = SparkContext(conf=conf)
seq = words.split()
data = sc.parallelize(seq)
counts = data.map(lambda word: (word, 1)).reduceByKey(add).collect()
sc.stop()
return dict(counts)
def get_recent(spark_dataframe,given_date=None):
result_data_frame = spark_dataframe.filter(to_date(spark_dataframe.dateAdded) == lit(given_date)).orderBy(
spark_dataframe.dateAdded.desc()).limit(1)
return result_data_frame
def get_brand_count(spark_dataframe,given_date=None):
result_data_frame = spark_dataframe.filter(to_date(spark_dataframe.dateAdded) == lit(given_date)).groupBy(spark_dataframe.brand).count().orderBy(
col('count').desc())
return result_data_frame
def get_by_color(spark_dataframe,given_color=None):
result_data_frame = spark_dataframe.filter(spark_dataframe.colors.contains(given_color)).orderBy(
spark_dataframe.dateAdded.desc()).limit(10)
return result_data_frame
def get_result(function,param=None):
pandas_dataframe = get_requireddataframe_fromcsv('Latest_women_shoes.csv', ['id', 'brand', 'colors', 'dateAdded'])
conf = SparkConf().setAppName('Women Catalog')
sc = SparkContext(conf=conf)
# df2 = sqlContext.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('sample.csv')
#used pandas dataframe as using the above the file could not be located.
sqlContext = SQLContext(sc)
spark_dataframe = sqlContext.createDataFrame(pandas_dataframe)
#data=spark_dataframe.select("*").toPandas()
result_spark_dataframe=getattr(sys.modules[__name__], function)(spark_dataframe,param)
result_python_dataframe = result_spark_dataframe.toPandas()
result_dict = result_python_dataframe.to_dict('records')
sc.stop()
return result_dict
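# Hedged usage sketch (added): the view layer is expected to call get_result()
# with the name of one of the helpers above plus its parameter; the date and
# colour values here are placeholders, not values from the original project.
#
#   get_result('get_recent', '2017-03-28')
#   get_result('get_brand_count', '2017-03-28')
#   get_result('get_by_color', 'Black')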
"""
def get_brandcount(given_date='2017-03-28'):
pandas_dataframe = get_requireddataframe_fromcsv('Latest_women_shoes.csv', ['id', 'brand', 'colors', 'dateAdded'])
conf = SparkConf().setAppName('Women Catalog')
sc = SparkContext(conf=conf)
# df2 = sqlContext.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('sample.csv')
# used pandas dataframe as using the above the file could not be located.
sqlContext = SQLContext(sc)
spark_dataframe = sqlContext.createDataFrame(pandas_dataframe)
# data=spark_dataframe.select("*").toPandas()
result_python_dataframe = result_spark_dataframe.toPandas()
result_dict = result_python_dataframe.to_dict()
return result_dict
"""
|
StarcoderdataPython
|
9665642
|
# Generated by Django 3.0.5 on 2020-04-14 13:33
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20200406_1646'),
]
operations = [
migrations.AlterField(
model_name='collection',
name='date_created',
field=models.DateTimeField(default=datetime.datetime(2020, 4, 14, 13, 33, 54, 666727, tzinfo=utc)),
),
migrations.AlterField(
model_name='collection',
name='figures',
field=models.ManyToManyField(related_name='collections', to='core.Figure'),
),
migrations.AlterField(
model_name='figure',
name='upload_date',
field=models.DateTimeField(default=datetime.datetime(2020, 4, 14, 13, 33, 54, 666325, tzinfo=utc), verbose_name='date uploaded'),
),
]
|
StarcoderdataPython
|
250005
|
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import sys
from time import time
from os import makedirs, path
from psychrnn.backend.regularizations import Regularizer
from psychrnn.backend.loss_functions import LossFunction
from psychrnn.backend.initializations import WeightInitializer, GaussianSpectralRadius
class RNN(object):
def __init__(self, params):
self.params = params
# --------------------------------------------
# Unique name used to determine variable scope
# --------------------------------------------
try:
self.name = params['name']
except KeyError:
print("You must pass a 'name' to RNN")
raise
# ----------------------------------
# Network sizes (tensor dimensions)
# ----------------------------------
try:
N_in = self.N_in = params['N_in']
except KeyError:
print("You must pass 'N_in' to RNN")
raise
try:
N_rec = self.N_rec = params['N_rec']
except KeyError:
print("You must pass 'N_rec' to RNN")
raise
try:
N_out = self.N_out = params['N_out']
except KeyError:
print("You must pass 'N_out' to RNN")
raise
try:
N_steps = self.N_steps = params['N_steps']
except KeyError:
print("You must pass 'N_steps' to RNN")
raise
# ----------------------------------
# Physical parameters
# ----------------------------------
try:
self.dt = params['dt']
except KeyError:
print("You must pass 'dt' to RNN")
raise
try:
self.tau = params['tau']
except KeyError:
print("You must pass 'dt' to RNN")
raise
try:
self.N_batch = params['N_batch']
except KeyError:
print("You must pass 'N_batch' to RNN")
raise
self.alpha = (1.0 * self.dt) / self.tau
self.dale_ratio = params.get('dale_ratio', None)
self.rec_noise = params.get('rec_noise', 0.0)
# ----------------------------------
# Dale's law matrix
# ----------------------------------
dale_vec = np.ones(N_rec)
if self.dale_ratio is not None:
dale_vec[int(self.dale_ratio * N_rec):] = -1
self.dale_rec = np.diag(dale_vec)
dale_vec[int(self.dale_ratio * N_rec):] = 0
self.dale_out = np.diag(dale_vec)
else:
self.dale_rec = np.diag(dale_vec)
self.dale_out = np.diag(dale_vec)
# ----------------------------------
# Trainable features
# ----------------------------------
self.W_in_train = params.get('W_in_train', True)
self.W_rec_train = params.get('W_rec_train', True)
self.W_out_train = params.get('W_out_train', True)
self.b_rec_train = params.get('b_rec_train', True)
self.b_out_train = params.get('b_out_train', True)
self.init_state_train = params.get('init_state_train', True)
# ----------------------------------
# Load weights path
# ----------------------------------
self.load_weights_path = params.get('load_weights_path', None)
# ------------------------------------------------
# Define initializer for TensorFlow variables
# ------------------------------------------------
if self.load_weights_path is not None:
self.initializer = WeightInitializer(load_weights_path=self.load_weights_path)
else:
self.initializer = params.get('initializer',
GaussianSpectralRadius(N_in=N_in,
N_rec=N_rec, N_out=N_out,
autapses=True, spec_rad=1.1))
# --------------------------------------------------
# Tensorflow input/output placeholder initializations
# ---------------------------------------------------
self.x = tf.placeholder("float", [None, N_steps, N_in])
self.y = tf.placeholder("float", [None, N_steps, N_out])
self.output_mask = tf.placeholder("float", [None, N_steps, N_out])
# --------------------------------------------------
# Initialize variables in proper scope
# ---------------------------------------------------
with tf.variable_scope(self.name) as scope:
# ------------------------------------------------
# Trainable variables:
# Initial State, weight matrices and biases
# ------------------------------------------------
self.init_state = tf.get_variable('init_state', [1, N_rec],
initializer=self.initializer.get('init_state'),
trainable=self.init_state_train)
self.init_state = tf.tile(self.init_state, [self.N_batch, 1])
# Input weight matrix:
self.W_in = \
tf.get_variable('W_in', [N_rec, N_in],
initializer=self.initializer.get('W_in'),
trainable=self.W_in_train)
# Recurrent weight matrix:
self.W_rec = \
tf.get_variable(
'W_rec',
[N_rec, N_rec],
initializer=self.initializer.get('W_rec'),
trainable=self.W_rec_train)
# Output weight matrix:
self.W_out = tf.get_variable('W_out', [N_out, N_rec],
initializer=self.initializer.get('W_out'),
trainable=self.W_out_train)
# Recurrent bias:
self.b_rec = tf.get_variable('b_rec', [N_rec], initializer=self.initializer.get('b_rec'),
trainable=self.b_rec_train)
# Output bias:
self.b_out = tf.get_variable('b_out', [N_out], initializer=self.initializer.get('b_out'),
trainable=self.b_out_train)
# ------------------------------------------------
# Non-trainable variables:
# Overall connectivity and Dale's law matrices
# ------------------------------------------------
# Recurrent Dale's law weight matrix:
self.Dale_rec = tf.get_variable('Dale_rec', [N_rec, N_rec],
initializer=tf.constant_initializer(self.dale_rec),
trainable=False)
# Output Dale's law weight matrix:
self.Dale_out = tf.get_variable('Dale_out', [N_rec, N_rec],
initializer=tf.constant_initializer(self.dale_out),
trainable=False)
# Connectivity weight matrices:
self.input_connectivity = tf.get_variable('input_connectivity', [N_rec, N_in],
initializer=self.initializer.get('input_connectivity'),
trainable=False)
self.rec_connectivity = tf.get_variable('rec_connectivity', [N_rec, N_rec],
initializer=self.initializer.get('rec_connectivity'),
trainable=False)
self.output_connectivity = tf.get_variable('output_connectivity', [N_out, N_rec],
initializer=self.initializer.get('output_connectivity'),
trainable=False)
# --------------------------------------------------
# Flag to check if variables initialized, model built
# ---------------------------------------------------
self.is_initialized = False
self.is_built = False
def build(self):
# --------------------------------------------------
# Define the predictions
# --------------------------------------------------
self.predictions, self.states = self.forward_pass()
# --------------------------------------------------
# Define the loss (based on the predictions)
# --------------------------------------------------
self.loss = LossFunction(self.params).set_model_loss(self)
# --------------------------------------------------
# Define the regularization
# --------------------------------------------------
self.reg = Regularizer(self.params).set_model_regularization(self)
# --------------------------------------------------
# Define the total regularized loss
# --------------------------------------------------
self.reg_loss = self.loss + self.reg
# --------------------------------------------------
# Open a session
# --------------------------------------------------
self.sess = tf.Session()
# --------------------------------------------------
# Record successful build
# --------------------------------------------------
self.is_built = True
return
def destruct(self):
# --------------------------------------------------
# Close the session. Delete the graph.
# --------------------------------------------------
if self.is_built:
self.sess.close()
tf.reset_default_graph()
return
def recurrent_timestep(self, rnn_in, state):
raise UserWarning("recurrent_timestep must be implemented in child class. See Basic for example.")
def output_timestep(self, state):
raise UserWarning("output_timestep must be implemented in child class. See Basic for example.")
def forward_pass(self):
raise UserWarning("forward_pass must be implemented in child class. See Basic for example.")
def get_weights(self):
if not self.is_initialized or not self.is_built:
raise UserWarning("No weights to return yet -- model has not yet been initialized.")
else:
weights_dict = dict()
for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
# avoid saving duplicates
if var.name.endswith(':0') and var.name.startswith(self.name):
name = var.name[len(self.name)+1:-2]
weights_dict.update({name: var.eval(session=self.sess)})
return weights_dict
def save(self, save_path):
weights_dict = self.get_weights()
np.savez(save_path, **weights_dict)
return
def train(self, trial_batch_generator, train_params={}):
if not self.is_built:
raise UserWarning("Must build network before training. Call build() before calling train().")
t0 = time()
# --------------------------------------------------
# Extract params
# --------------------------------------------------
learning_rate = train_params.get('learning_rate', .001)
training_iters = train_params.get('training_iters', 10000)
loss_epoch = train_params.get('loss_epoch', 10)
verbosity = train_params.get('verbosity', True)
save_weights_path = train_params.get('save_weights_path', None)
save_training_weights_epoch = train_params.get('save_training_weights_epoch', 100)
training_weights_path = train_params.get('training_weights_path', None)
generator_function = train_params.get('generator_function', None)
optimizer = train_params.get('optimizer',
tf.train.AdamOptimizer(learning_rate=learning_rate))
clip_grads = train_params.get('clip_grads', True)
# --------------------------------------------------
# Make weights folder if it doesn't already exist.
# --------------------------------------------------
if save_weights_path != None:
if path.dirname(save_weights_path) != "" and not path.exists(path.dirname(save_weights_path)):
makedirs(path.dirname(save_weights_path))
# --------------------------------------------------
# Make train weights folder if it doesn't already exist.
# --------------------------------------------------
if training_weights_path != None:
if path.dirname(training_weights_path) != "" and not path.exists(path.dirname(training_weights_path)):
                makedirs(path.dirname(training_weights_path))
# --------------------------------------------------
# Compute gradients
# --------------------------------------------------
grads = optimizer.compute_gradients(self.reg_loss)
# --------------------------------------------------
# Clip gradients
# --------------------------------------------------
if clip_grads:
grads = [(tf.clip_by_norm(grad, 1.0), var)
if grad is not None else (grad, var)
for grad, var in grads]
# --------------------------------------------------
# Call the optimizer and initialize variables
# --------------------------------------------------
optimize = optimizer.apply_gradients(grads)
self.sess.run(tf.global_variables_initializer())
self.is_initialized = True
# --------------------------------------------------
# Record training time for performance benchmarks
# --------------------------------------------------
t1 = time()
# --------------------------------------------------
# Training loop
# --------------------------------------------------
epoch = 1
batch_size = next(trial_batch_generator)[0].shape[0]
losses = []
while epoch * batch_size < training_iters:
batch_x, batch_y, output_mask = next(trial_batch_generator)
self.sess.run(optimize, feed_dict={self.x: batch_x, self.y: batch_y, self.output_mask: output_mask})
# --------------------------------------------------
# Output batch loss
# --------------------------------------------------
if epoch % loss_epoch == 0:
reg_loss = self.sess.run(self.reg_loss,
feed_dict={self.x: batch_x, self.y: batch_y, self.output_mask: output_mask})
losses.append(reg_loss)
if verbosity:
print("Iter " + str(epoch * batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(reg_loss))
# --------------------------------------------------
# Allow for curriculum learning
# --------------------------------------------------
if generator_function is not None:
trial_batch_generator = generator_function(reg_loss, epoch)
# --------------------------------------------------
# Save intermediary weights
# --------------------------------------------------
if epoch % save_training_weights_epoch == 0:
if training_weights_path is not None:
self.save(training_weights_path + str(epoch))
if verbosity:
print("Training weights saved in file: %s" % training_weights_path + str(epoch))
epoch += 1
t2 = time()
if verbosity:
print("Optimization finished!")
# --------------------------------------------------
# Save final weights
# --------------------------------------------------
if save_weights_path is not None:
self.save(save_weights_path)
if verbosity:
print("Model saved in file: %s" % save_weights_path)
# --------------------------------------------------
# Return losses, training time, initialization time
# --------------------------------------------------
return losses, (t2 - t1), (t1 - t0)
def test(self, trial_batch):
if not self.is_built:
raise UserWarning("Must build network before training. Call build() before calling train().")
if not self.is_initialized:
self.sess.run(tf.global_variables_initializer())
# --------------------------------------------------
# Run the forward pass on trial_batch
# --------------------------------------------------
outputs, states = self.sess.run([self.predictions, self.states],
feed_dict={self.x: trial_batch})
return outputs, states
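# ----------------------------------------------------------------------------
# Hedged usage sketch (added): RNN is abstract (recurrent_timestep, output_timestep
# and forward_pass must come from a subclass such as psychrnn's Basic model), so
# the snippet below only illustrates the required parameter dictionary and the
# build/train/test/destruct call order. The import path and the trial-batch
# generator are assumptions, not part of this file.
#
#   from psychrnn.backend.models.basic import Basic
#
#   params = {'name': 'demo', 'N_in': 2, 'N_rec': 50, 'N_out': 1,
#             'N_steps': 200, 'dt': 10, 'tau': 100, 'N_batch': 64}
#   model = Basic(params)
#   model.build()
#   losses, train_time, init_time = model.train(trial_batch_generator)
#   outputs, states = model.test(trial_batch)
#   model.destruct()
# ----------------------------------------------------------------------------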
|
StarcoderdataPython
|
8110823
|
# sebasrp/yaticker
import socket
import textwrap
from tempfile import NamedTemporaryFile
import requests
from matplotlib.image import imread
from PIL import Image, ImageDraw, ImageFont
def is_connected(url="http://www.google.com/", timeout=3):
try:
requests.head(url, timeout=timeout)
return True
except requests.ConnectionError as ex:
print(ex)
return False
def get_ip():
"""
Retrieve primary IP address on the local box
see https://stackoverflow.com/a/28950776/91468
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(("8.8.8.8", 1))
ip = s.getsockname()[0]
except Exception:
ip = "127.0.0.1"
finally:
s.close()
return ip
def get_hostname():
return socket.gethostname()
def number_to_string(number):
"""
    Transform a number to a string. Values greater than 100 are shown as thousands-separated integers; smaller values keep 5 significant figures.
:param number: number to convert to string
:return: string representation of the number
"""
number_string = ""
if number > 100:
number_string = format(int(number), ",")
else:
number_string = str(float("%.5g" % number))
return number_string
def get_percentage_diff(current, previous):
if current == previous:
return 0
try:
return (float(current - previous) / previous) * 100.0
except ZeroDivisionError:
return float("inf")
# see https://kavigupta.org/2019/05/18/Setting-the-size-of-figures-in-matplotlib/
def get_size(fig, dpi=100):
with NamedTemporaryFile(suffix=".png") as f:
fig.savefig(f.name, bbox_inches="tight", dpi=dpi)
height, width, _channels = imread(f.name).shape
return width / dpi, height / dpi
def set_size(fig, size, dpi=100, eps=1e-2, give_up=2, min_size_px=10):
target_width, target_height = size
set_width, set_height = target_width, target_height # reasonable starting point
    deltas = []  # how far each attempt is from the requested size
while True:
fig.set_size_inches([set_width, set_height])
actual_width, actual_height = get_size(fig, dpi=dpi)
set_width *= target_width / actual_width
set_height *= target_height / actual_height
deltas.append(
abs(actual_width - target_width) + abs(actual_height - target_height)
)
if deltas[-1] < eps:
return True
if len(deltas) > give_up and sorted(deltas[-give_up:]) == deltas[-give_up:]:
return False
if set_width * dpi < min_size_px or set_height * dpi < min_size_px:
return False
def empty_image(width, height):
"""
Returns an empty canvas/image to draw on
:return:
"""
return Image.new("1", (width, height), 255) # 255: clear the frame
def place_text(
img,
text,
x_offset=0,
y_offset=0,
font_size=40,
font_name="Forum-Regular",
fill=0,
):
"""
Put some text at a location on the image.
"""
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/usr/share/fonts/TTF/DejaVuSans.ttf", font_size)
draw.text((x_offset, y_offset), text, font=font, fill=fill)
def place_centered_text(
img,
text,
x_offset=0,
y_offset=0,
font_size=40,
font_name="Forum-Regular",
fill=0,
):
"""
Put some centered text at a location on the image.
"""
font = ImageFont.truetype("/usr/share/fonts/TTF/DejaVuSans.ttf", font_size)
img_width, img_height = img.size
text_width, _ = font.getsize(text)
text_height = font_size
draw_x = (img_width - text_width) // 2 + x_offset
draw_y = (img_height - text_height) // 2 + y_offset
place_text(img, text, draw_x, draw_y, font_size, font_name, fill)
def place_text_right(
img,
text,
x_offset=0,
y_offset=0,
font_size=40,
font_name="Forum-Regular",
fill=0,
):
font = ImageFont.truetype("/usr/share/fonts/TTF/DejaVuSans.ttf", font_size)
img_width, img_height = img.size
text_width, _ = font.getsize(text)
draw_x = (img_width - text_width) + x_offset
draw_y = y_offset
place_text(img, text, draw_x, draw_y, font_size, font_name, fill)
def write_wrapped_lines(
img,
text,
font_size=16,
y_text=20,
height=15,
width=25,
font_name="Roboto-Light",
):
lines = textwrap.wrap(text, width)
num_lines = 0
for line in lines:
place_centered_text(img, line, 0, y_text, font_size, font_name)
y_text += height
num_lines += 1
return img
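# Small usage example (added): the pure helpers can be exercised without a
# display or network; the image/text helpers above additionally require the
# DejaVuSans font path hard-coded in place_text().
if __name__ == "__main__":
    print(number_to_string(23456.789))        # '23,456'
    print(number_to_string(0.01234567))       # '0.012346' (5 significant figures)
    print(get_percentage_diff(125.0, 100.0))  # 25.0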
|
StarcoderdataPython
|
12807587
|
# mgrsantox/nmmis - nmmis/contrib/municipal/migrations/0004_auto_20200723_1714.py
# Generated by Django 3.0.8 on 2020-07-23 11:29
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('municipal', '0003_auto_20200723_1712'),
]
operations = [
migrations.AlterField(
model_name='road',
name='geom',
field=django.contrib.gis.db.models.fields.LineStringField(srid=4326),
),
]
|
StarcoderdataPython
|
11228392
|
from tests import *
from naxxatrapy.naxxatrapy import average_velocity
class TestAverageVelocity(unittest.TestCase):
def test_average_velocity(self):
self.assertEqual(average_velocity(), ...)
|
StarcoderdataPython
|
1770251
|
"""
This implements a model of mesolimbic dopamine cell activity during monkey
conditioning as found in `Montague, Dayan, and Sejnowski (1996) in PsyNeuLink
<http://www.jneurosci.org/content/jneuro/16/5/1936.full.pdf>`_
"""
import argparse
import numpy as np
import psyneulink as pnl
all_figures = ['5a', '5b', '5c']
parser = argparse.ArgumentParser()
parser.add_argument('--no-plot', action='store_false', help='Disable plotting', dest='enable_plot')
parser.add_argument('--figure', nargs='+', help='Figure(s) to plot (default=all)', choices=all_figures, default=all_figures)
args = parser.parse_args()
if args.enable_plot:
import matplotlib.pyplot as plt
def build_stimulus_dictionary(sample_mechanism, target_mechanism, no_reward_trials):
stimulus_onset = 41
reward_delivery = 54
samples = []
targets = []
for trial in range(120):
target = [0.] * 60
target[reward_delivery] = 1.
if trial in no_reward_trials:
target[reward_delivery] = 0.
targets.append(target)
sample = [0.] * 60
for i in range(stimulus_onset, 60):
sample[i] = 1.
samples.append(sample)
return {sample_mechanism: samples,
target_mechanism: targets}
def build_stimulus_dictionary_figure_5c(sample_mechanism, target_mechanism):
stimulus_onset = 42
reward_delivery = 54
# build input dictionary
samples = []
targets = []
for trial in range(150):
target = [0.] * 60
target[reward_delivery] = 1.
if trial > 70:
target[reward_delivery] = 0.
targets.append(target)
sample = [0.] * 60
for i in range(stimulus_onset, 60):
sample[i] = 1.
samples.append(sample)
return {sample_mechanism: samples,
target_mechanism: targets}
def figure_5a():
"""
This creates the plot for figure 5A in the Montague paper. Figure 5A is
a 'plot of ∂(t) over time for three trials during training (1, 30, and 50).'
"""
# Create Processing Components
sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
name=pnl.SAMPLE)
action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
function=pnl.Linear(slope=1.0,
intercept=0.01),
name='Action Selection')
sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
receiver=action_selection,
matrix=np.zeros((60, 60)))
# Create Composition
composition_name = 'TD_Learning_Figure_5A'
comp = pnl.Composition(name=composition_name)
# Add Processing Components to the Composition
pathway = [sample_mechanism, sample_to_action_selection, action_selection]
# Add Learning Components to the Composition
learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components
# Unpack Relevant Learning Components
prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]
# Create Log
prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)
# Create Stimulus Dictionary
no_reward_trials = {14, 29, 44, 59, 74, 89}
inputs = build_stimulus_dictionary(sample_mechanism, target_mechanism, no_reward_trials)
# Run Composition
comp.learn(inputs=inputs)
if args.enable_plot:
# Get Delta Values from Log
delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]
# Plot Delta Values form trials 1, 30, and 50
with plt.style.context('seaborn'):
plt.plot(delta_vals[0][0], "-o", label="Trial 1")
plt.plot(delta_vals[29][0], "-s", label="Trial 30")
plt.plot(delta_vals[49][0], "-o", label="Trial 50")
plt.title("Montague et. al. (1996) -- Figure 5A")
plt.xlabel("Timestep")
plt.ylabel("∂")
plt.legend()
plt.xlim(xmin=35)
plt.xticks()
plt.show(block=not pnl._called_from_pytest)
return comp
def figure_5b():
"""
This creates the plot for figure 5B in the Montague paper. Figure 5B shows
the 'entire time course of model responses (trials 1-150).' The setup is
the same as in Figure 5A, except that training begins at trial 10.
"""
# Create Processing Components
sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
name=pnl.SAMPLE)
action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
function=pnl.Linear(slope=1.0,
intercept=1.0),
name='Action Selection')
sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
receiver=action_selection,
matrix=np.zeros((60, 60)))
# Create Composition
composition_name = 'TD_Learning_Figure_5B'
comp = pnl.Composition(name=composition_name)
# Add Processing Components to the Composition
pathway = [sample_mechanism, sample_to_action_selection, action_selection]
# Add Learning Components to the Composition
learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components
# Unpack Relevant Learning Components
prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]
# Create Log
prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)
# Create Stimulus Dictionary
no_reward_trials = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 29, 44, 59, 74,
89, 104, 119}
inputs = build_stimulus_dictionary(sample_mechanism, target_mechanism, no_reward_trials)
# Run Composition
comp.learn(inputs=inputs)
if args.enable_plot:
# Get Delta Values from Log
delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]
with plt.style.context('seaborn'):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x_vals, y_vals = np.meshgrid(np.arange(120), np.arange(40, 60, step=1))
d_vals = np.array([d[0][40:60] for d in delta_vals]).transpose()
ax.plot_surface(x_vals, y_vals, d_vals)
ax.set_xlabel("Trial")
ax.set_ylabel("Timestep")
ax.set_zlabel("∂")
ax.set_ylim(y_vals.max(), y_vals.min())
ax.set_xlim(0, 120)
ax.set_zlim(-1, 1)
ax.set_title("Montague et. al. (1996) -- Figure 5B")
plt.show(block=not pnl._called_from_pytest)
return comp
def figure_5c():
"""
This creates the plot for Figure 5C in the Montague paper. Figure 5C shows
'extinction of response to the sensory cue.' The setup is the same as
Figure 5A, except that reward delivery stops at trial 70
"""
# Create Processing Components
sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
name=pnl.SAMPLE)
action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
function=pnl.Linear(slope=1.0,
intercept=1.0),
name='Action Selection')
sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
receiver=action_selection,
matrix=np.zeros((60, 60)))
# Create Composition
composition_name = 'TD_Learning_Figure_5C'
comp = pnl.Composition(name=composition_name)
# Add Processing Components to the Composition
pathway = [sample_mechanism, sample_to_action_selection, action_selection]
# Add Learning Components to the Composition
learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components
# Unpack Relevant Learning Components
prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]
# Create Log
prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)
# Create Stimulus Dictionary
inputs = build_stimulus_dictionary_figure_5c(sample_mechanism, target_mechanism)
# Run Composition
comp.learn(inputs=inputs)
if args.enable_plot:
# Get Delta Values from Log
delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]
with plt.style.context('seaborn'):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x_vals, y_vals = np.meshgrid(np.arange(150), np.arange(40, 60, step=1))
d_vals = np.array([d[0][40:60] for d in delta_vals]).transpose()
ax.plot_surface(x_vals, y_vals, d_vals)
ax.set_ylim(y_vals.max(), y_vals.min())
ax.set_xlim(0, 140)
ax.set_zlim(-1, 1)
ax.set_xlabel("Trial")
ax.set_ylabel("Timestep")
ax.set_zlabel("∂")
ax.set_title("Montague et. al. (1996) -- Figure 5C")
plt.show(block=not pnl._called_from_pytest)
return comp
if '5a' in args.figure:
comp_5a = figure_5a()
if '5b' in args.figure:
comp_5b = figure_5b()
if '5c' in args.figure:
comp_5c = figure_5c()
|
StarcoderdataPython
|
3501101
|
<filename>setup.py
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.develop import develop
import subprocess
from os.path import join
description = 'A toolkit to work with the Oriented Bounding Boxes annotation ' \
'schema for datasets.'
def build_with_swig(command_subclass):
"""A decorator that builds the extension using SWIG first.
Modifies run by first calling the checks for swig and its build command.
Also takes care of the install.run(self) call.
"""
orig_run = command_subclass.run
def modified_run(self):
print("Installing polyiou extension using SWIG...")
try:
r = subprocess.call("swig", stdout=subprocess.DEVNULL)
if r != 1:
raise EnvironmentError("Make sure that SWIG is properly "
"installed and is in PATH")
except FileNotFoundError:
raise FileNotFoundError("SWIG does not seem to be installed or "
"could not be found in PATH")
polyiou_dir = join("obb_anns","polyiou")
subprocess.call(
["swig", "-c++", "-python", "polyiou.i"],
cwd=polyiou_dir,
stdout=subprocess.DEVNULL
)
orig_run(self)
command_subclass.run = modified_run
return command_subclass
@build_with_swig
class InstallPolyIOU(install):
"""Installs the PolyIOU extension."""
def run(self):
install.run(self)
polyiou_dir = join('obb_anns', 'polyiou')
subprocess.call(
["python3", "setup.py", "build_ext"],
cwd=polyiou_dir,
stdout=subprocess.DEVNULL
)
print("polyiou extension installed!")
@build_with_swig
class DevelopPolyIOU(develop):
"""Installs the PolyIOU extension in place"""
def run(self):
develop.run(self)
polyiou_dir = join('obb_anns', 'polyiou')
subprocess.call(
["python3", "setup.py", "build_ext", "--inplace"],
cwd=polyiou_dir,
stdout=subprocess.DEVNULL
)
print("polyiou extension installed in place!")
def readme():
with open('README.md') as f:
return f.read()
setup(name='obb_anns',
version='0.1a1',
description=description,
long_description=readme(),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
# 'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='AI machine learning neural network deep learning object '
'detection oriented bounding box annotations segmentation',
author='<NAME>',
license='BSD 3-Clause License',
packages=['obb_anns'],
install_requires=[
'numpy',
'pillow',
'colorcet',
'pandas',
'tqdm',
'torch'
],
python_requires='>=3',
cmdclass={'install': InstallPolyIOU,
'develop': DevelopPolyIOU,
},
include_package_data=True,
zip_safe=False)
|
StarcoderdataPython
|
4919572
|
# Generated by Django 3.2.2 on 2021-05-11 20:28
import datetime
from django.db import migrations
from django.utils.timezone import utc
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
("jobs", "0006_auto_20210511_2322"),
]
operations = [
migrations.AlterModelOptions(
name="projectprogress",
options={"get_latest_by": "modified"},
),
migrations.AddField(
model_name="projectprogress",
name="created",
field=django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True,
default=datetime.datetime(2021, 5, 11, 20, 28, 44, 810242, tzinfo=utc),
verbose_name="created",
),
preserve_default=False,
),
migrations.AddField(
model_name="projectprogress",
name="modified",
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name="modified"),
),
]
|
StarcoderdataPython
|
1739968
|
# rdisq/request/receiver.py
from typing import *
from rdisq.consts import RECEIVER_SERVICE_NAME
from rdisq.configuration import get_rdisq_config
from rdisq.payload import SessionResult
from rdisq.request.message import RdisqMessage
from rdisq.request.dispatcher import RequestDispatcher
from rdisq.service import RdisqService, remote_method
from rdisq.request.handler import _Handler
class RegisterMessage(RdisqMessage):
def __init__(self, new_message_class: Type[RdisqMessage], new_handler_kwargs: Union[Dict, object] = None):
"""
:param new_message_class: The receiver will start handling messages of this class
:param new_handler_kwargs: A kwargs dictionary for the new handler, or a new handler
"""
self.new_message_class = new_message_class
self.new_handler_instance = new_handler_kwargs
super().__init__()
class UnregisterMessage(RdisqMessage):
def __init__(self, old_message_class: Type[RdisqMessage]):
self.old_message_class = old_message_class
super().__init__()
class GetRegisteredMessages(RdisqMessage):
def __init__(self):
super().__init__()
class RegisterAll(RdisqMessage):
def __init__(self, new_handler_kwargs: Union[Dict, object] = None, handler_class: type = None):
super(RegisterAll, self).__init__()
self.handler_class = handler_class
self.new_handler_kwargs = new_handler_kwargs
class AddQueue(RdisqMessage):
def __init__(self, new_queue_name: str):
self.new_queue_name = new_queue_name
super().__init__()
class RemoveQueue(RdisqMessage):
def __init__(self, old_queue_name: str):
self.old_queue_name = old_queue_name
super(RemoveQueue, self).__init__()
class GetStatus(RdisqMessage):
def __init__(self):
super(GetStatus, self).__init__()
class SetReceiverTags(RdisqMessage):
def __init__(self, new_dict: Dict):
super().__init__()
self.new_dict = new_dict
class ShutDownReceiver(RdisqMessage):
def __init__(self):
super().__init__()
CORE_RECEIVER_MESSAGES = {RegisterMessage, UnregisterMessage, GetRegisteredMessages, AddQueue, RemoveQueue,
RegisterAll, SetReceiverTags, ShutDownReceiver}
class ReceiverService(RdisqService):
service_name = RECEIVER_SERVICE_NAME
redis_dispatcher: RequestDispatcher
_handlers: Dict[Type[RdisqMessage], "_Handler"]
_tags: Dict = None
def __init__(self, uid=None, message_class: Type[RdisqMessage] = None, instance: object = None,
dispatcher: RequestDispatcher = None):
self._tags = {}
self.redis_dispatcher = dispatcher or get_rdisq_config().request_dispatcher
super().__init__(uid)
self._handlers = dict()
for m in CORE_RECEIVER_MESSAGES:
handling_message = RegisterMessage(m, self)
self.register_message(handling_message)
if message_class:
self.register_message(RegisterMessage(message_class, instance))
self._on_process_loop()
@property
def tags(self) -> Dict:
return self._tags.copy()
@tags.setter
def tags(self, value: Dict):
if not isinstance(value, dict):
raise RuntimeError("tags must be a dict")
else:
self._tags = value
@ShutDownReceiver.set_handler
def shut_down_receiver(self, message: ShutDownReceiver = None):
self.stop()
self._on_process_loop()
@AddQueue.set_handler
def add_queue(self, message: AddQueue):
self.register_method_to_queue(self.receive_message, message.new_queue_name)
self._on_process_loop()
return self.listening_queues
@RemoveQueue.set_handler
def remove_queue(self, message: RemoveQueue):
self.unregister_from_queue(message.old_queue_name)
self._on_process_loop()
return self.listening_queues
@GetRegisteredMessages.set_handler
def get_registered_messages(
self, message: GetRegisteredMessages = None) -> Set[Type[RdisqMessage]]:
f"""{message} is present so as not to break uniformity, but isn't used."""
return set(self._handlers.keys())
@RegisterMessage.set_handler
def register_message(self, message: RegisterMessage) -> Set[Type[RdisqMessage]]:
if message.new_message_class in self.get_registered_messages():
raise RuntimeError(
f"Tried registering {message.new_message_class} to {self}."
f"But it's already registered."
)
self.add_queue(AddQueue(message.new_message_class.get_message_class_id()))
self._handlers[message.new_message_class] = get_rdisq_config().handler_factory.create_handler(
message.new_message_class, message.new_handler_instance, self._handlers.values())
self._on_process_loop()
return self.get_registered_messages()
@RegisterAll.set_handler
def register_all(self, message: RegisterAll) -> Set[Type[RdisqMessage]]:
handlers: Dict[Type[RdisqMessage], "_Handler"] = get_rdisq_config().handler_factory.create_handlers_for_object(
message.new_handler_kwargs, message.handler_class)
for message_type, handler in handlers.items():
self.add_queue(AddQueue(message_type.get_message_class_id()))
self._handlers[message_type] = handler
self._on_process_loop()
return self.get_registered_messages()
@UnregisterMessage.set_handler
def unregister_message(self, message: UnregisterMessage):
self.unregister_from_queue(message.old_message_class.get_message_class_id())
self._handlers.pop(message.old_message_class)
self._on_process_loop()
return self.get_registered_messages()
@SetReceiverTags.set_handler
def set_tags(self, message: SetReceiverTags) -> Dict:
self.tags = message.new_dict
self._on_process_loop()
return self.tags
@remote_method
def receive_message(self, message: RdisqMessage):
if type(message) not in self.get_registered_messages():
raise RuntimeError(f"Received an unregistered message {type(message)}")
handler_result = self._handlers[type(message)].handle(message)
if message.session_data is not None:
result = SessionResult(result=handler_result, session_data=message.session_data)
else:
result = handler_result
return result
def _on_process_loop(self):
self.redis_dispatcher.update_receiver_service_status(self)
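# ----------------------------------------------------------------------------
# Hedged usage sketch (added, not part of the original module): a receiver is
# typically constructed with an initial message class plus a handler instance
# and then run as a worker. The example message/handler below and the name of
# the worker-loop method are assumptions for illustration only.
#
#   class SumMessage(RdisqMessage):
#       def __init__(self, first, second):
#           self.first, self.second = first, second
#           super().__init__()
#
#   class SumHandler:
#       @SumMessage.set_handler
#       def add(self, message: SumMessage):
#           return message.first + message.second
#
#   receiver = ReceiverService(message_class=SumMessage, instance=SumHandler())
#   receiver.process()  # assumed RdisqService worker loop
# ----------------------------------------------------------------------------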
|
StarcoderdataPython
|
5177671
|
from zope import component
from zope import interface
import importlib
from hashlib import md5
import mellon
from scrapy.linkextractors import LinkExtractor
import scrapy.spiders
try:
from urllib import parse
except ImportError:
import urlparse as parse # Py2
from sparc.configuration import container
from mellon.factories import web_crawler
from mellon.factories.web_crawler.web_crawler.items import WebCrawlerItem
from sparc.logging import logging
logger = logging.getLogger(__name__)
"""
How to connect IMellonFileProviderFactory (which produces IMellonFileProvider
objects) to:
- a crawler process which contains one or more spiders
How to create a CrawlerSpider implementation that can take runtime config
parameters to define unique types (probably using the Python type() call)
How to define and deliver spider settings. Should try to make this native.
Looks like we could easily allow setting definition in YAML and pass it
into type creation at runtime for spider-specific stuff. For project level,
not yet sure how API launch would look for it...might be env variable to
a settings.py file
Figure out how/when to run spiders concurrently vs sequentially (probably
has to do with mellon yaml config and factory definitions in file...
How to define a factory...1-to-1 of spider to factory or many-to-one (prefer the 2nd).
Need way to define spiders where mellon config can deliver all possible
configs (see CrawlerSpider to understand what options are there)
Might want to be able enable use of base spider types defined by Scrapy with
mellon yaml options. Also allow create interfaces and base classes to allow
custom spider definitions...hmm..think about this one.
We may want to have Spiders registered via ISpider marker, then have mellon
pick these up via subscriber lookup <--we'd probably run all spiders in a
single process...might not be the way to go (memory usage?)...we should at
least provide some mechanism to create multiple processes to run spiders.
"""
class MellonSpiderMixin(scrapy.spiders.CrawlSpider):
rules = (scrapy.spiders.Rule(LinkExtractor(
deny_extensions=[],
unique=True),
callback='parse_item'),)
def parse_start_url(self, response):
return self.parse_item(response)
def parse_item(self, response):
item = WebCrawlerItem()
item['response'] = response
return item
def ScrapySimpleMellonWebsiteCrawlerTypeFromConfigFactory(config):
"""Create new types whose base is scrapy.spiders.CrawlSpider
Allows dynamic CrawlSpider types to be created based on inputs. We need
this because scrapy's method of running is to look for runtime types that
inherit from scrapy.spiders.Spider. We have this factory to allow folks
to crawl based on a runtime config, not runtime code.
Args:
config: sparc.configuration.container.ISparcAppPyContainerConfiguration
values of ScrapySimpleTextWebsiteCrawler yaml def.
"""
# get base type information by grabbing first urls entry
type_url_info = parse.urlparse(config['urls'][0])
type_dict = \
{'name': type_url_info.netloc,
#'allowed_domains': [parse.urlparse(url).netloc for url in config['urls']],
'start_urls' : config['urls'],
'custom_settings': {}
}
if 'attributes' in config:
type_dict.update(config['attributes'])
if 'ScrapySettings' in config:
type_dict['custom_settings'] = config['ScrapySettings']
    if 'RulesListFactory' in config:
        # Expect a dotted path to a zero-argument callable that returns the spider rules list.
        module_path, _, factory_name = config['RulesListFactory'].rpartition('.')
        _callable = getattr(importlib.import_module(module_path), factory_name)
        type_dict['rules'] = _callable()
_type_name_md5 = md5()
_type_name_md5.update(config['urls'][0].encode('utf-8'))
return type('ScrapySimpleTextWebsiteCrawler_'\
+_type_name_md5.hexdigest(),
(MellonSpiderMixin,),
type_dict)
"""
We need to dynamically create Spider types based on yaml config information.
In addition, we need to make these type available in 2 areas:
- We need to add them to this module's namespace in order for the Scrapy
CLI app to find them
- We need to add them to the component registry, so the
IMellonFileProviderFactory implementation can find them
Note:
This only works within a Mellon runtime environment (e.g. there is a
component registry with a mellon.IMellonApplication registered utility
"""
Mellon = component.getUtility(mellon.IMellonApplication)
v_iter = component.getUtility(container.ISparcPyDictValueIterator)
for d in v_iter.values(Mellon.get_config(), 'ScrapySimpleTextWebsiteCrawler'):
new_crawler_type = ScrapySimpleMellonWebsiteCrawlerTypeFromConfigFactory(d)
interface.alsoProvides(new_crawler_type, web_crawler.IScrapySpider)
globals()[new_crawler_type.__name__] = new_crawler_type
sm = component.getSiteManager()
    sm.registerUtility(new_crawler_type, web_crawler.IScrapySpider, name=new_crawler_type.name)  # make the new spider discoverable by name in the component registry
logger.info("Registered new spider for ScrapySimpleTextWebsiteCrawler: {}".format(new_crawler_type.name))
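# ----------------------------------------------------------------------------
# Illustrative configuration sketch (added): the loop above consumes entries
# named 'ScrapySimpleTextWebsiteCrawler' from the Mellon YAML config. Based on
# the keys read in ScrapySimpleMellonWebsiteCrawlerTypeFromConfigFactory, one
# such entry might look like the following; every value is a placeholder.
#
#   ScrapySimpleTextWebsiteCrawler:
#     urls:
#       - https://example.com/
#     attributes:
#       allowed_domains: [example.com]
#     ScrapySettings:
#       DOWNLOAD_DELAY: 1.0
#     RulesListFactory: my_package.rules.build_rules
# ----------------------------------------------------------------------------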
|
StarcoderdataPython
|
3571093
|
# LudditeLabs/autodoc-tool
# Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from autodoc.contentdb import Arg, CompoundType
from autodoc.python.rst.transforms.collect_fields import CollectInfoFields
from autodoc.python.rst.transforms.sync_params import SyncParametersWithSpec
from autodoc.report import Codes
# These transforms will be loaded by the fixtures (assert_py_doc, parse_py_doc).
docstring_transforms = [CollectInfoFields, SyncParametersWithSpec]
class TestSyncParams:
def test_no_params(self, parse_py_doc):
env = parse_py_doc(
            # def test_func(param1, param2)
args=(Arg('param1', None), Arg('param2', ['int'])),
text="""
This is an ordinary paragraph.
:except RuntimeError:
Ut enim ad minim veniam, quis nostrud.
"""
)
doc = env['definition'].doc_block.document
assert hasattr(doc, 'field_sections')
section = doc.field_sections.get('params')
assert section is not None
assert len(section) == 2
param = section[0]
assert param.get('name') == 'param1'
assert param.get('type') is None
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 0
param = section[1]
assert param.get('name') == 'param2'
assert param.get('type') == ['int']
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 0
def test_with_params(self, parse_py_doc):
args = (Arg('with_separate_type', None),
Arg('with_type', ['str']),
Arg('no_type', None),
Arg('new_PARAM', None),
Arg('type_wrong_place', ['str', 'int']))
env = parse_py_doc(
args=args,
text="""
This is an ordinary paragraph.
:parameter:
:type type_wrong_place: xxx
:param no_type: No type.
:param type_wrong_place: Wrong place.
:param str with_type: With type.
:parameter with_separate_type: With separate type.
:type with_separate_type: integer or None
:type with_separate_type: string
:type non_exist: str
:parameter to_remove: This one will be removed.
This is a paragraph after the field list.
"""
)
doc = env['definition'].doc_block.document
assert hasattr(doc, 'field_sections')
section = doc.field_sections.get('params')
assert section is not None
assert len(section) == 5
param = section[0]
assert param.get('name') == 'with_separate_type'
assert param.get('type') == ['integer or None', 'string']
assert param.get('orig_field_tag') == 'parameter'
assert len(param) == 1
assert len(param[0]) == 1
assert param[0][0].astext() == 'With separate type.'
param = section[1]
assert param.get('name') == 'with_type'
assert param.get('type') == ['str']
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 1
assert param[0][0].astext() == 'With type.'
param = section[2]
assert param.get('name') == 'no_type'
assert param.get('type') is None
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 1
assert param[0][0].astext() == 'No type.'
param = section[3]
assert param.get('name') == 'new_PARAM'
assert param.get('type') is None
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 0
param = section[4]
assert param.get('name') == 'type_wrong_place'
assert param.get('type') == ['str', 'int']
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 1
assert param[0][0].astext() == 'Wrong place.'
def test_with_missing_params(self, parse_py_doc):
args = (Arg('with_separate_type', None),
Arg('with_type', ['str']),
Arg('no_type', None),
Arg('new_PARAM', None),
Arg('type_wrong_place', ['str', 'int']))
env = parse_py_doc(
args=args,
text="""
This is an ordinary paragraph.
This is a paragraph after the field list.
"""
)
doc = env['definition'].doc_block.document
assert hasattr(doc, 'field_sections')
section = doc.field_sections.get('params')
assert section is not None
assert len(section) == 5
param = section[0]
assert param.get('name') == 'with_separate_type'
assert param.get('type') is None
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 0
param = section[1]
assert param.get('name') == 'with_type'
assert param.get('type') == ['str']
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 0
param = section[2]
assert param.get('name') == 'no_type'
assert param.get('type') is None
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 0
param = section[3]
assert param.get('name') == 'new_PARAM'
assert param.get('type') is None
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 0
param = section[4]
assert param.get('name') == 'type_wrong_place'
assert param.get('type') == ['str', 'int']
assert param.get('orig_field_tag') == 'param'
assert len(param) == 1
assert len(param[0]) == 0
def test_report(self, parse_py_doc):
args = (Arg('with_separate_type', None),
Arg('with_type', ['str']),
Arg('no_type', None),
Arg('new_PARAM', None),
Arg('type_wrong_place', ['str', 'int']))
env = parse_py_doc(
args=args,
text="""
This is an ordinary paragraph.
:param no_type: No type.
:param type_wrong_place: Wrong place.
:type type_wrong_place: xxx
:param str with_type: With type.
:parameter with_separate_type: With separate type.
:type with_separate_type: integer or None
:type with_separate_type: string
:parameter to_remove: This one will be removed.
This is a paragraph after the field list.
"""
)
doc = env['definition'].doc_block.document
assert hasattr(doc, 'field_sections')
report = env.get('reporter').report
assert isinstance(report, list)
assert len(report) == 7
for i, item in enumerate(report):
assert len(item) == 8, 'Report at %d.' % i
# Note: analysis proceeds in the order of args,
# so errors will appear in the same order.
path, domain, line, col, ref_name, level, code, msg = report[0]
assert path == '<string>'
assert domain == 'python'
assert line == 0
assert col == 0
assert ref_name == 'test_func'
assert level == logging.INFO
assert code == Codes.INCORRECT
assert msg == 'Parameter order is incorrect [with_separate_type]'
path, domain, line, col, ref_name, level, code, msg = report[1]
assert path == '<string>'
assert domain == 'python'
assert line == 0
assert col == 0
assert ref_name == 'test_func'
assert level == logging.INFO
assert code == Codes.INCORRECT
assert msg == 'Parameter order is incorrect [with_type]'
path, domain, line, col, ref_name, level, code, msg = report[2]
assert path == '<string>'
assert domain == 'python'
assert line == 0
assert col == 0
assert ref_name == 'test_func'
assert level == logging.INFO
assert code == Codes.INCORRECT
assert msg == 'Parameter order is incorrect [no_type]'
path, domain, line, col, ref_name, level, code, msg = report[3]
assert path == '<string>'
assert domain == 'python'
assert line == 0
assert col == 0
assert ref_name == 'test_func'
assert level == logging.INFO
assert code == Codes.MISSING
assert msg == 'Missing parameter [new_PARAM]'
path, domain, line, col, ref_name, level, code, msg = report[4]
assert path == '<string>'
assert domain == 'python'
assert line == 0
assert col == 0
assert ref_name == 'test_func'
assert level == logging.INFO
assert code == Codes.MISMATCH
assert msg == 'Parameter type is different [type_wrong_place]'
path, domain, line, col, ref_name, level, code, msg = report[5]
assert path == '<string>'
assert domain == 'python'
assert line == 0
assert col == 0
assert ref_name == 'test_func'
assert level == logging.INFO
assert code == Codes.INCORRECT
assert msg == 'Parameter order is incorrect [type_wrong_place]'
path, domain, line, col, ref_name, level, code, msg = report[6]
assert path == '<string>'
assert domain == 'python'
assert line == 0
assert col == 0
assert ref_name == 'test_func'
assert level == logging.INFO
assert code == Codes.UNKNOWN
assert msg == 'Unknown parameter [to_remove]'
# Test: Skip first arg for method and class methods
def test_method(self, parse_py_doc):
args = (Arg('self', None), Arg('with_type', ['str']))
env = parse_py_doc(
args=args,
compound_type=CompoundType.CLASS, # Use method instead of function.
text="""
This is an ordinary paragraph.
:param str with_type: With type.
This is a paragraph after the field list.
"""
)
doc = env['definition'].doc_block.document
assert hasattr(doc, 'field_sections')
section = doc.field_sections.get('params')
assert section is not None
assert len(section) == 1
param = section[0]
assert param.get('name') == 'with_type'
assert param.get('type') == ['str']
report = env.get('reporter').report
assert isinstance(report, list)
assert not len(report)
|
StarcoderdataPython
|
254439
|
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
""" A patch assessment """
import time
from core.src.bootstrap.Constants import Constants
class PatchAssessor(object):
""" Wrapper class of a single patch assessment """
def __init__(self, env_layer, execution_config, composite_logger, telemetry_writer, status_handler, package_manager, lifecycle_manager):
self.env_layer = env_layer
self.execution_config = execution_config
self.composite_logger = composite_logger
self.telemetry_writer = telemetry_writer
self.status_handler = status_handler
self.lifecycle_manager = lifecycle_manager
self.package_manager = package_manager
def start_assessment(self):
""" Start a patch assessment """
self.status_handler.set_current_operation(Constants.ASSESSMENT)
self.raise_if_agent_incompatible()
self.composite_logger.log('\nStarting patch assessment...')
self.status_handler.set_assessment_substatus_json(status=Constants.STATUS_TRANSITIONING)
self.composite_logger.log("\nMachine Id: " + self.env_layer.platform.node())
self.composite_logger.log("Activity Id: " + self.execution_config.activity_id)
self.composite_logger.log("Operation request time: " + self.execution_config.start_time)
self.composite_logger.log("\n\nGetting available patches...")
self.package_manager.refresh_repo()
self.status_handler.reset_assessment_data()
for i in range(0, Constants.MAX_ASSESSMENT_RETRY_COUNT):
try:
if self.lifecycle_manager is not None:
self.lifecycle_manager.lifecycle_status_check() # may terminate the code abruptly, as designed
packages, package_versions = self.package_manager.get_all_updates()
self.telemetry_writer.write_event("Full assessment: " + str(packages), Constants.TelemetryEventLevel.Verbose)
self.status_handler.set_package_assessment_status(packages, package_versions)
if self.lifecycle_manager is not None:
self.lifecycle_manager.lifecycle_status_check() # may terminate the code abruptly, as designed
sec_packages, sec_package_versions = self.package_manager.get_security_updates()
self.telemetry_writer.write_event("Security assessment: " + str(sec_packages), Constants.TelemetryEventLevel.Verbose)
self.status_handler.set_package_assessment_status(sec_packages, sec_package_versions, "Security")
self.status_handler.set_assessment_substatus_json(status=Constants.STATUS_SUCCESS)
break
except Exception as error:
                if i < Constants.MAX_ASSESSMENT_RETRY_COUNT - 1:  # retries remain; otherwise fall through and raise
error_msg = 'Retryable error retrieving available patches: ' + repr(error)
self.composite_logger.log_warning(error_msg)
self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.DEFAULT_ERROR)
time.sleep(2*(i + 1))
else:
error_msg = 'Error retrieving available patches: ' + repr(error)
self.composite_logger.log_error(error_msg)
self.status_handler.add_error_to_status(error_msg, Constants.PatchOperationErrorCodes.DEFAULT_ERROR)
if Constants.ERROR_ADDED_TO_STATUS not in repr(error):
error.args = (error.args, "[{0}]".format(Constants.ERROR_ADDED_TO_STATUS))
self.status_handler.set_assessment_substatus_json(status=Constants.STATUS_ERROR)
raise
self.composite_logger.log("\nPatch assessment completed.\n")
return True
def raise_if_agent_incompatible(self):
if self.lifecycle_manager.get_vm_cloud_type() == Constants.VMCloudType.ARC and self.execution_config.operation not in [Constants.ASSESSMENT, Constants.INSTALLATION]:
self.composite_logger.log("Skipping agent compatibility check for Arc cloud type when operation is not manual")
return
if not self.telemetry_writer.is_agent_compatible():
error_msg = Constants.TELEMETRY_AT_AGENT_NOT_COMPATIBLE_ERROR_MSG
self.composite_logger.log_error(error_msg)
raise Exception(error_msg)
self.composite_logger.log(Constants.TELEMETRY_AT_AGENT_COMPATIBLE_MSG)
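# Illustrative sketch (an addition, not part of the extension): start_assessment above
# uses a bounded retry loop with a linearly growing sleep. The same pattern as a small,
# self-contained helper:
def _retry_with_backoff(operation, max_attempts, base_delay_in_seconds=2):
    """ Run operation(); on failure sleep base_delay * attempt and retry, re-raising on the final attempt. """
    for attempt in range(0, max_attempts):
        try:
            return operation()
        except Exception:
            if attempt < max_attempts - 1:
                time.sleep(base_delay_in_seconds * (attempt + 1))
            else:
                raise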
|
StarcoderdataPython
|
6625162
|
<reponame>rqssouza/opencv-gui-parameter-tuner<gh_stars>1-10
#!/bin/env python3
import cv2 as cv
import numpy as np
import argparse
import tuner.tuner as tuner
def mag(gradient_x, gradient_y):
gradient_mag = np.sqrt(np.square(gradient_x) + np.square(gradient_y))
return np.uint8(255 * (gradient_mag / np.max(gradient_mag)))
def ths(img, ths_min, ths_max):
ret = np.zeros_like(img)
ret[(img >= ths_min) & (img <= ths_max)] = 255
return ret
def process(image, args):
adj_k = lambda ksize : ksize + (ksize + 1) % 2
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
gradient_x = cv.Sobel(
src = gray,
ddepth = cv.CV_64F,
dx = 1,
dy = 0,
ksize = adj_k(args.kernel_size),
)
gradient_y = cv.Sobel(
src = gray,
ddepth = cv.CV_64F,
dx = 0,
dy = 1,
ksize = adj_k(args.kernel_size),
)
gradient_mag = ths(mag(gradient_x, gradient_y), args.ths_min, args.ths_max)
return ((1, 1), [gradient_mag])
CFG = [
['kernel_size', 3, 30],
['ths_min', 20, 255],
['ths_max', 100, 255],
]
if __name__ == '__main__':
tuner.Tuner_App(
process,
CFG,
'Gradient Magnitude',
'Tune gradient magnitude parameters',
)
|
StarcoderdataPython
|
6652621
|
<reponame>rogerio-ignacio-developer/formulas-github
#!/usr/bin/python3
import os
from formula import formula_horusec
from formula import formula_superlinter
from formula import formula_dependabot
def run(project_path, workflows, new_branch, new_branch_name):
try:
current_pwd = os.environ.get("CURRENT_PWD")
if project_path == "" or project_path == current_pwd:
os.chdir(current_pwd)
else:
os.chdir(project_path)
if new_branch == "yes":
os.system(f"git checkout -b {new_branch_name}")
createGithubFolder()
createGithubActionsWorkflows(workflows)
if new_branch == "yes":
os.system(f"git add .")
os.system("git commit -m \"Add Github Actions Workflows with Ritchie CLI\"")
os.system(f"git push origin {new_branch_name}")
print("✅ Github Actions workflows successfully added to the project.")
if new_branch == "yes":
print(f"✅ Code successfully added and committed to the {new_branch_name} branch.")
    except Exception:
print("❌ Oops, something went wrong. Check the informed inputs first.")
print("⚠️ If the error persists, please, open an ISSUE on the related repository.")
def createGithubFolder():
print("Creating folders...")
if not os.path.exists(".github"):
os.makedirs(".github")
os.chdir('.github')
else:
os.chdir('.github')
if not os.path.exists("workflows"):
os.makedirs("workflows")
os.chdir('workflows')
else:
os.chdir('workflows')
def createGithubActionsWorkflows(workflows):
workflows = workflows.split("|")
loop = 0
for w in workflows:
loop = loop +1
if w == "Horusec":
formula_horusec.run()
elif w == "Super-Linter":
formula_superlinter.run()
|
StarcoderdataPython
|
8057873
|
<reponame>msklarek/CodeWars-Python-Solutions
def digital_root(n):
    a = str(n)
    suma = n  # a single-digit number is already its own digital root
    while len(a) > 1:
        suma = 0
        for digit in a:
            suma = suma + int(digit)
        a = str(suma)
    return suma
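# Usage sketch (illustrative values, not part of the kata submission):
if __name__ == "__main__":
    assert digital_root(16) == 7    # 1 + 6
    assert digital_root(942) == 6   # 9 + 4 + 2 = 15, then 1 + 5
    assert digital_root(5) == 5     # single digit is returned unchanged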
|
StarcoderdataPython
|
103186
|
import numpy as np
import pandas as pd
from typing import List
from .phantom_class import Phantom
class Beam:
"""A class used to create an X-ray beam and detector.
Attributes
----------
r : np.array
        5*3 array, locates the xyz coordinates of the apex and vertices of a
pyramid shaped X-ray beam, where the apex represents the X-ray focus
(row 1) and the vertices where the beam intercepts the X-ray detector
(row 2-5)
ijk : np.array
A matrix containing vertex indices. This is required in order to
plot the beam using plotly Mesh3D. For more info, see "i", "j", and "k"
at https://plot.ly/python/reference/#mesh3d
det_r: np.array
8*3 array, where each row locates the xyz coordinate of one of the 8
corners of the cuboid shaped X-ray detector
det_ijk : np.array
same as ijk, but for plotting the X-ray detector
N : np.array
4*3 array, where each row contains a normal vector to one of the four
faces of the beam.
Methods
-------
check_hit(patient)
Calculates which of the patient phantom's entrance skin cells are hit
        by the X-ray beam. For 3D phantoms, skin cells on the beam's exit path
are neglected.
"""
def __init__(self, data_norm: pd.DataFrame, event: int = 0,
plot_setup: bool = False) -> None:
"""Initialize the beam and detector for a specific irradiation event.
Parameters
----------
data_norm : pd.DataFrame
Dicom RDSR information from each irradiation event. See
rdsr_normalizer.py for more information.
event : int, optional
Specifies the index of the irradiation event in the procedure
(the default is 0, which is the first event).
plot_setup : bool, optional
If True, the beam angulation info from data_norm is neglected,
            and a beam of zero angulation is created instead. This is a
            debugging feature used when positioning new phantoms or
            implementing currently unsupported vendor RDSR files (the default is
False).
"""
# Override beam angulation if plot_setup
if plot_setup:
ap1 = ap2 = ap3 = 0
else:
# Fetch rotation angles of the X-ray tube
# Positioner isocenter primary angle (Ap1)
ap1 = np.deg2rad(data_norm.Ap1[event])
# Positioner isocenter secondary angle (Ap2)
ap2 = np.deg2rad(data_norm.Ap2[event])
# Positioner isocenter detector rotation angle (Ap3)
ap3 = np.deg2rad(data_norm.Ap3[event])
R1 = np.array([[+np.cos(ap1), np.sin(ap1), +0],
[-np.sin(ap1), +np.cos(ap1), +0],
[+0, +0, +1]])
R2 = np.array([[+1, +0, +0],
[+0, +np.cos(ap2), +np.sin(ap2)],
[+0, -np.sin(ap2), +np.cos(ap2)]])
R3 = np.array([[+np.cos(ap3), +0, -np.sin(ap3)],
[+0, +1, +0],
[+np.sin(ap3), +0, +np.cos(ap3)]])
# Locate X-ray source
source = np.array([0, data_norm.DSI[event], 0])
        # Create beam-detector interception points for a beam of side length 1
r = np.array([[+0.5, -1.0, +0.5],
[+0.5, -1.0, -0.5],
[-0.5, -1.0, -0.5],
[-0.5, -1.0, +0.5]])
r[:, 0] *= data_norm.FS_long[event] # Longitudinal collimation
r[:, 1] *= data_norm.DID[event] # Set source-detector distance
r[:, 2] *= data_norm.FS_lat[event] # Lateral collimation
r = np.vstack([source, r])
# Rotate beam about ap1, ap2 and ap3
r = np.matmul(np.matmul(R2, R1).T, np.matmul(R3.T, r.T)).T
self.r = r
# Manually create vertex index vector for the X-ray beam
self.ijk = np.column_stack((
[0, 0, 0, 0, 1, 1],
[1, 1, 3, 3, 2, 3],
[2, 4, 2, 4, 3, 4]))
        # Create unit vectors from X-ray source to beam vertices
v = ((self.r[1:] - self.r[0, :]).T /
np.linalg.norm(self.r[1:] - self.r[0, :], axis=1)).T
# Create the four normal vectors to the faces of the beam.
self.N = np.vstack([np.cross(v[0, :], v[1, :]),
np.cross(v[1, :], v[2, :]),
np.cross(v[2, :], v[3, :]),
np.cross(v[3, :], v[0, :])])
        # Create detector corners with side length 1
# The first four rows represent the X-ray detector surface, the last
# four are there to give the detector some depth for 3D visualization.
det_r = np.array([[+0.5, -1.0, +0.5],
[+0.5, -1.0, -0.5],
[-0.5, -1.0, -0.5],
[-0.5, -1.0, +0.5],
[+0.5, -1.2, +0.5],
[+0.5, -1.2, -0.5],
[-0.5, -1.2, -0.5],
[-0.5, -1.2, +0.5]])
# Add detector dimensions
detector_width = data_norm.DSL[0]
det_r[:, 0] *= detector_width
det_r[:, 2] *= detector_width
# Place detector at actual distance
det_r[:, 1] *= data_norm.DID[event]
# Rotate detector about ap1, ap2 and ap3
det_r = np.matmul(np.matmul(R2, R1).T, det_r.T).T
self.det_r = det_r
# Manually construct vertex index vector for the X-ray detector
self.det_ijk = np.column_stack((
[0, 0, 4, 4, 0, 1, 0, 3, 3, 7, 1, 1],
[1, 2, 5, 6, 1, 5, 3, 7, 2, 2, 2, 6],
[2, 3, 6, 7, 4, 4, 4, 4, 7, 6, 6, 5]))
def check_hit(self, patient: Phantom) -> List[bool]:
"""Calculate which patient entrance skin cells are hit by the beam.
        A description of this algorithm is presented in the wiki, please visit
https://dev.azure.com/Sjukhusfysiker/PySkinDose/_wiki
Parameters
----------
patient : Phantom
Patient phantom, either of type plane, cylinder or human, i.e.
instance of class Phantom
Returns
-------
List[bool]
A boolean list of the same length as the number of patient skin
cells. True for all entrance skin cells that are hit by the beam.
"""
# Create vectors from X-ray source to each phantom skin cell
v = patient.r - self.r[0, :]
# Check which skin cells lies within the beam
hits = (np.dot(v, self.N.T) <= 0).all(axis=1)
# if patient phantom is 3D, remove exit path skin cells
if patient.phantom_model != "plane":
temp1 = v[hits]
temp2 = patient.n[hits]
bool_entrance = [np.dot(temp1[i], temp2[i]) <= 0
for i in range(len(temp1))]
hits[np.where(hits)] = bool_entrance
return hits.tolist()
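# Hedged usage sketch (illustrative, with assumed single-event values; the column
# names are the ones read above: Ap1, Ap2, Ap3, DSI, DID, FS_long, FS_lat, DSL):
def _example_beam():
    data_norm = pd.DataFrame(
        {"Ap1": [0.0], "Ap2": [0.0], "Ap3": [0.0],
         "DSI": [75.0], "DID": [100.0],
         "FS_long": [20.0], "FS_lat": [20.0], "DSL": [40.0]})
    beam = Beam(data_norm, event=0, plot_setup=False)
    # beam.r holds the source (row 0) and the four beam-detector vertices,
    # beam.N the four face normals used by check_hit.
    return beam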
|
StarcoderdataPython
|
269685
|
#Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/enums.py
__version__='3.3.0'
__doc__="""
Container for constants. Hardly used!
"""
TA_LEFT = 0
TA_CENTER = 1
TA_RIGHT = 2
TA_JUSTIFY = 4
|
StarcoderdataPython
|
5069586
|
import requests
from bs4 import BeautifulSoup
from menus.menu import Menu
class Edison(Menu):
def __init__(self):
super().__init__()
self.url = 'http://restaurangedison.se/lunch'
self.dow = {
0: 'monday',
1: 'tuesday',
2: 'wednesday',
3: 'thursday',
4: 'friday',
}
def __repr__(self):
return ":bulb: Edison"
def _get_week(self):
"""
Fetches the menu data from the given URL, returns a menu dictionary:
{
'dayofweek 1': ['dish 1', 'dish 2', ..., 'dish N'],
'dayofweek 2': [ ... ]
}
"""
content = requests.get(self.url)
soup = BeautifulSoup(content.text, 'html5lib')
# menu list
for weekday in self.dow.values():
day_menu = soup.find('div', {'id': weekday})
dishes = []
for dish in day_menu.find_all('tr'):
txt = dish.find('td', {'class': 'course_type'}).text.strip()
txt += (
': '
+ dish.find(
'td', {'class': 'course_description'}
).text.strip()
)
dishes.append(txt)
self.menu[weekday] = dishes
return self.menu
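# Hedged usage sketch (assumes the Menu base class initializes self.menu and that
# the restaurant page is reachable; output keys follow the dict described above):
if __name__ == "__main__":
    edison = Edison()
    week = edison._get_week()
    for day, dishes in week.items():
        print(day, dishes)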
|
StarcoderdataPython
|
305677
|
from .Test import add
from .QuitWarning import warning
from .Email import email
|
StarcoderdataPython
|
8035767
|
import numpy as np
from matplotlib import pyplot as plt, colors, cm
from sklearn import decomposition, manifold
def plot_with_pca(X, assigned_cluster_numbers, point_labels, features_count, coeff_labels):
pca = decomposition.PCA(n_components=2)
principal_components = pca.fit_transform(X)
scatter_points(assigned_cluster_numbers, point_labels, principal_components)
plot_coeffients(coeff_labels, features_count, pca)
plt.show()
print_highest_coefficents(coeff_labels, pca)
def print_highest_coefficents(coeff_labels, pca):
pc1 = pca.components_[0]
pc2 = pca.components_[1]
print('Attribute, PC1, PC2:')
coeffs = sorted([((pc1_val, pc2_val), label) for label, pc1_val, pc2_val in zip(coeff_labels, pc1, pc2)],
reverse=True)
for data in coeffs:
print(str(data[1]).ljust(70) + ':' + str(repr(data[0][0])).rjust(25) + str(repr(data[0][1])).rjust(25))
def scatter_points(assigned_cluster_numbers, point_labels, principal_components):
x = np.transpose(principal_components)[0]
y = np.transpose(principal_components)[1]
plt.scatter(x, y, c=assigned_cluster_numbers)
for i, text in enumerate(point_labels):
plt.annotate(text, (x[i], y[i]), ha="center", size=6)
if text=="Poland":
plt.scatter(x[i], y[i], s=100, facecolors='none', edgecolors='r')
def plot_coeffients(coeff_labels, features_count, pca, should_add_labels=False):
coeff = np.transpose(pca.components_[0:features_count, :])
single_feature_values_count = len(coeff_labels) // features_count
# arrows are scaled so that they are visible on a full graph
coeff_scaling_factor = 40
cmap = plt.cm.get_cmap(name='jet')
colors_norm = colors.Normalize(vmin=0, vmax=features_count)
scalar_map = cm.ScalarMappable(norm=colors_norm, cmap=cmap)
for i in range(coeff.shape[0]):
plt.arrow(0, 0, coeff[i, 0] * coeff_scaling_factor, coeff[i, 1] * coeff_scaling_factor,
color=scalar_map.to_rgba(i // single_feature_values_count), alpha=0.5)
if should_add_labels:
plt.text(coeff[i, 0] * coeff_scaling_factor, coeff[i, 1] * coeff_scaling_factor,
coeff_labels[i % len(coeff_labels)], color='b', ha='center', va='center', size=5)
def plot_with_tsne(data, assigned_cluster_numbers, point_labels,perplexity=7, learning_rate=100.0, iterations=20000, should_save=False, filename=''):
tsne = manifold.TSNE(perplexity=perplexity, learning_rate=learning_rate, n_iter=iterations)
    # scikit-learn recommends reducing dimensionality to about 50 beforehand (e.g. with PCA), so let's do it
    X = data.copy()
    if X[0].shape[0] > 50 and len(X) > 50:
        pca = decomposition.PCA(n_components=50)
        X = pca.fit_transform(X)  # keep the reduced data so t-SNE actually uses it
results = tsne.fit_transform(X)
scatter_points(assigned_cluster_numbers, point_labels, results)
plt.title("t-SNE; perplexity={0}; learning rate = {1}, number of iterations = {2}".format(str(perplexity), str(learning_rate), str(iterations)))
if should_save:
plt.savefig(filename+'.png')
else:
plt.show()
plt.clf()
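# Hedged usage sketch with synthetic data (illustrative shapes and names only):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_points, n_features, values_per_feature = 12, 3, 4
    X_demo = rng.normal(size=(n_points, n_features * values_per_feature))
    clusters = rng.integers(0, 3, size=n_points)
    labels = ["point_{}".format(i) for i in range(n_points)]
    coeff_labels = ["feat_{}".format(i) for i in range(n_features * values_per_feature)]
    plot_with_pca(X_demo, clusters, labels, n_features, coeff_labels)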
|
StarcoderdataPython
|
231769
|
<reponame>ctc316/algorithm-python<gh_stars>0
class Solution:
"""
@param s: A string
@return: the length of last word
"""
def lengthOfLastWord(self, s):
return len(s.strip().split(" ")[-1])
|
StarcoderdataPython
|
11201453
|
from tkinter import *
import sqlite3
from tkinter import messagebox
root = Tk()
root.title("Fantasy Cricket")
root.geometry("680x490")
root.resizable(width=FALSE, height=FALSE)
root.configure(background='#FFFFFF')
# -----------Show Root win in center--------------
def center(win):
win.update_idletasks()
width = win.winfo_width()
height = win.winfo_height()
x = (win.winfo_screenwidth() // 2) - (width // 2)
y = (win.winfo_screenheight() // 2) - (height // 2)
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
#5
def go(event):#==========================================================================================================================
j=lt1.curselection()
jj=j[0]
c.execute("SELECT * FROM DATA")
data = c.fetchall()
TotalP=[]
for i in data:
TotalP.append(i)
global ee1
global ee2
global ee3
global ee4
global ee5
global ee6
global ee7
global pp1
if(sn==1):
u = batP[jj]
ee1=ee1+1
for i in TotalP:
if i[0] == u:
ee6=i[2]
if(int(ee1)<int(5)):
pp1=pp1+ee6
e6.configure(text=pp1)
e1.configure(text=ee1)
lt2.insert(END,u)
batP.remove(u)
lt1.delete(j)
else:
messagebox.showwarning("Team Formation", "Cannot Select More Than 4 Batsmen !")
elif(sn==2):
u = bowP[jj]
ee2=ee2+1
for i in TotalP:
if i[0] == u:
ee6=i[2]
if (int(ee2) < int(4)):
pp1=pp1+ee6
e6.configure(text=pp1)
e2.configure(text=ee2)
lt2.insert(END,u)
bowP.remove(u)
lt1.delete(j)
else:
messagebox.showwarning("Team Formation", "Cannot Select More Than 3 Bowlers !")
elif(sn==3):
u = arP[jj]
ee3=ee3+1
for i in TotalP:
if i[0] == u:
ee6=i[2]
if (int(ee3) < int(4)):
pp1=pp1+ee6
e6.configure(text=pp1)
e3.configure(text=ee3)
arP.remove(u)
lt2.insert(END,u)
lt1.delete(j)
else:
messagebox.showwarning("Team Formation", "Cannot Select More Than 3 Allrounders !")
else:
u = wkP[jj]
ee4=ee4+1
for i in TotalP:
if i[0] == u:
ee6=i[2]
if (int(ee4) < int(2)):
pp1=pp1+ee6
e6.configure(text=pp1)
e4.configure(text=ee4)
wkP.remove(u)
lt2.insert(END,u)
lt1.delete(j)
else:
messagebox.showwarning("Team Formation", "Cannot Select More Than 1 Allrounders !")
v=1000-int(pp1)
if(int(v)<1001 and int(v)>-1):
e5.configure(text=v)
else:
messagebox.showerror("Error", "Insufficient Points Available !\nYou Can not Buy This Player !")
NEW()
#4
def selection():#==========================================================================================================================
global sn
sn = selected.get()
lt1.delete(0, END)
if(sn==1):
for i in batP:
lt1.insert(END, i)
elif(sn==2):
for i in bowP:
lt1.insert(END, i)
elif(sn==3):
for i in arP:
lt1.insert(END, i)
else:
for i in wkP:
lt1.insert(END, i)
#3
def GV():#==========================================================================================================================
global conn
global c
conn = sqlite3.connect('Cricket.db')
c = conn.cursor()
global TotalP
global batP
global bowP
global arP
global wkP
global ee1
global ee2
global ee3
global ee4
global ee5
global ee6
global ee7
global pp1
TotalP=[]
batP=[]
bowP=[]
arP=[]
wkP=[]
ee1=0
ee2=0
ee3=0
ee4=0
ee5=0
ee6=1000
ee7=0
pp1=0
c.execute("SELECT * FROM DATA")
data = c.fetchall()
for i in data:
TotalP.append(i)
lt1.delete(0, END)
for i in TotalP:
if i[1]=="BAT":
batP.append(i[0])
for i in TotalP:
if i[1]=="BOW":
bowP.append(i[0])
for i in TotalP:
if i[1]=="AR":
arP.append(i[0])
for i in TotalP:
if i[1]=="WK":
wkP.append(i[0])
#2
def NEW():#==========================================================================================================================
GV()
lt1.delete(0, END)
lt2.delete(0, END)
def tname():
global tn
tn=en.get().upper()
try:
global conn
global c
conn = sqlite3.connect('Cricket.db')
c = conn.cursor()
c.execute("CREATE TABLE " + tn + "(player TEXT, Tin INTEGER, ctg TEXT)")
conn.commit()
c.execute("DROP TABLE "+tn+"")
conn.commit()
root1.destroy()
e1.configure(text=0)
e2.configure(text=0)
e3.configure(text=0)
e4.configure(text=0)
e5.configure(text=1000)
e6.configure(text=0)
e7.configure(text="\"" + tn + "\"")
rad1.configure(state='active')
rad2.configure(state='active')
rad3.configure(state='active')
rad4.configure(state='active')
except Exception as e:
rad1.configure(state='disabled')
rad2.configure(state='disabled')
rad3.configure(state='disabled')
rad4.configure(state='disabled')
e1.configure(text="##")
e2.configure(text="##")
e3.configure(text="##")
e4.configure(text="##")
e5.configure(text="####")
e6.configure(text="####")
e7.configure(text="Displayed Here")
lt1.delete(0, END)
lt2.delete(0, END)
root1.destroy()
messagebox.showwarning("Value Error", "Enter Valid Team Name !")
def q():
rad1.configure(state='disabled')
rad2.configure(state='disabled')
rad3.configure(state='disabled')
rad4.configure(state='disabled')
e1.configure(text="##")
e2.configure(text="##")
e3.configure(text="##")
e4.configure(text="##")
e5.configure(text="####")
e6.configure(text="####")
e7.configure(text="Displayed Here")
lt1.delete(0, END)
lt2.delete(0, END)
root1.destroy()
root1 = Tk()
root1.title("New Team")
root1.geometry("380x200")
root1.resizable(width=FALSE, height=FALSE)
Label(root1, text="Enter Team Name: ", font=('Comic Sans MS', 12)).place(x=30, y=40)
en = Entry(root1, font=('arial', 12, "bold"), bg="#F0F0F0", bd=5, highlightthickness=2, highlightcolor="black")
en.focus_set()
en.place(x=180, y=40, width=150)
b1=Button(root1, text="OK", font=('Comic Sans MS', 10), relief=RAISED, bd=3, width=10, command=tname)
b1.place(x=80, y=110)
b2=Button(root1, text="Exit", font=('Comic Sans MS', 10), relief=RAISED, bd=3, width=10, command=q)
b2.place(x=220, y=110)
center(root1)
root1.mainloop()
#6
def OPEN():#==========================================================================================================================
def f2(z):
lt3.delete(0, END)
c.execute("SELECT * FROM TeamData WHERE TeamName = ?", [z])
data = c.fetchall()
data = data[0]
p = list(data)
p.remove(z)
for i in p:
lt3.insert(END, i)
def f1(event):
global x
x = variable.get()
xx = x[0]
f2(xx)
root2 = Tk()
root2.title("OPEN Team")
root2.geometry("400x390")
root2.resizable(width=FALSE, height=FALSE)
center(root2)
b2 = Text(root2, height=4, width=40, bg="#F0F0F0", relief="solid")
b2.place(x=40, y=20)
b2.configure(state='disabled')
Label(root2, text="Select Team Name :", font=('arial', 14, "italic"), bg="#F0F0F0", bd=0, fg='#399B9B').place(x=60,
y=43)
Pname = []
conn = sqlite3.connect('Cricket.db')
c = conn.cursor()
c.execute("SELECT TeamName FROM TeamData")
data = c.fetchall()
for i in data:
Pname.append(i)
variable = Variable(root2)
variable.set("\"" + "Click Me" + "\"")
w = OptionMenu(root2, variable, *Pname, command=f1)
w.place(x=250, y=40)
b3 = Text(root2, height=16.5, width=28, bg="white", relief="solid")
b3.place(x=90, y=100)
b3.configure(state='disabled')
lt3 = Listbox(root2, font=('Comic Sans MS', 12), highlightcolor="white", bd=0, width=22, height=11,
fg="#497CFF", selectbackground="#CCFFFF", selectforeground="#497CFF")
lt3.place(x=93, y=103)
#7
def SAVE():#==========================================================================================================================
try:
global conn
global c
conn = sqlite3.connect('Cricket.db')
c = conn.cursor()
x=lt2.get(0,END)
if(len(x)==11):
c.execute("INSERT INTO TeamData(TeamName, player1, player2, player3, player4, player5, player6, player7, player8, player9, player10, player11) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(tn, x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10]))
conn.commit()
messagebox.showinfo("Team Created", "Best Of Luck.")
else:
messagebox.showerror("Error", "11 Player Required For Cricket Match !")
except Exception as e:
messagebox.showwarning("Error", str(e))
NEW()
#8
def EVALUATE():#==========================================================================================================================
def f4(cc):
global POINT
POINT = 0
runsscored = cc[1]
ballsfaced = cc[2]
fours = cc[3]
sixes = cc[4]
bowled = cc[5]
runsgiven = cc[6]
wickets = cc[7]
catches = cc[8]
stumpings = cc[9]
runouts = cc[10]
## BATTING POINT-------------------------------------------------------
# 1 point for 2 runs scored
POINT += runsscored // 2
# Additional 5 points for half century
if runsscored >= 50:
POINT += 5
# Additional 10 points for century
if runsscored >= 100:
POINT += 10
if ballsfaced > 0:
# strike_rate = runs/balls_faced
strike_rate = (runsscored / ballsfaced) * 100
# 4 points for strike rate>100
if strike_rate > 100:
POINT += 4
# 2 points for strike rate of 80-100
elif strike_rate >= 80:
POINT += 2
# 1 point for hitting a boundary (four)
POINT += fours
# 2 points for over boundary(six)
POINT += 2 * sixes
        ## BOWLING POINT-------------------------------------------------------
# 10 points for each wicket
POINT += (10 * wickets)
# Additional 5 points for three wickets per innings
if wickets >= 3:
POINT += 5
# Additional 10 points for 5 wickets or more in innings
if wickets >= 5:
POINT += 10
if bowled > 0:
economy_rate = (runsgiven / bowled) * 6
# 4 points for economy rate between 3.5 and 4.5
if (economy_rate >= 3.5 and economy_rate <= 4.5):
POINT += 4
# 7 points for economy rate between 2 and 3.5
elif (economy_rate >= 2 and economy_rate < 3.5):
POINT += 7
# 10 points for economy rate less than 2
elif (economy_rate < 2):
POINT += 10
## FIELDING POINT-------------------------------------------------------
# 10 points each for catch/stumping/run out
POINT += 10 * (catches + stumpings + runouts)
xx = lt5.get(0, END)
xx = len(xx)
if (11 > xx):
lt5.insert(END, POINT)
else:
pass
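    # Worked example of the scoring rules above (illustrative numbers, an assumption,
    # not data from the app): a player with 55 runs off 40 balls (6 fours, 1 six),
    # 2 wickets in 24 balls for 20 runs, and 1 catch scores:
    #   batting : 55 // 2 = 27, +5 for the fifty, +4 for strike rate 137.5,
    #             +6 for the fours, +2 for the six                       -> 44
    #   bowling : 2 * 10 = 20; economy (20 / 24) * 6 = 5.0 adds nothing  -> 20
    #   fielding: 1 catch * 10                                           -> 10
    #   total   : 74 points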
def f3():
v1 = variable1.get()
v1 = v1[0]
v2 = variable2.get()
global conn
global c
conn = sqlite3.connect('Cricket.db')
c = conn.cursor()
c.execute("SELECT * from TeamData WHERE TeamName = ?", (v1,))
tp = c.fetchone()
tp = tp[1:]
for td in tp:
c.execute("SELECT * from " + v2 + " WHERE name = ?", (td,))
cc = c.fetchone()
f4(cc)
t = 0
g = lt5.get(0, END)
for i in g:
t += i
e8.configure(text=t)
def f2(event):
e8.configure(text="####")
lt5.delete(0, END)
k = variable1.get()
if (k == "\"Select Team\""):
b.configure(state=DISABLED)
else:
b.configure(state=NORMAL)
def f1(event):
e8.configure(text="####")
x = variable1.get()
global z
global k
z = x[0]
k = variable2.get()
lt4.delete(0, END)
lt5.delete(0, END)
c.execute("SELECT * FROM TeamData WHERE TeamName = ?", [z])
data = c.fetchall()
data = data[0]
p = list(data)
p.remove(z)
for i in p:
lt4.insert(END, i)
if (k == "\"Select Match\""):
b.configure(state=DISABLED)
else:
b.configure(state=NORMAL)
root3 = Tk()
root3.title("EVALUATE Team")
root3.geometry("500x450")
root3.resizable(width=FALSE, height=FALSE)
center(root3)
b2 = Text(root3, height=4, width=57, bg="#F0F0F0", relief="solid")
b2.place(x=20, y=10)
b2.configure(state='disabled')
Label(root3, text="Evaluate the Performance of your Fantasy Team", font=('arial', 12, "italic", "bold"),
bg="#F0F0F0", bd=0, fg='#399B9B').place(x=65, y=15)
Pname = []
conn = sqlite3.connect('Cricket.db')
c = conn.cursor()
c.execute("SELECT TeamName FROM TeamData")
data = c.fetchall()
for i in data:
Pname.append(i)
variable1 = Variable(root3)
variable1.set("\"" + "Select Team" + "\"")
w1 = OptionMenu(root3, variable1, *Pname, command=f1)
w1.place(x=80, y=40, width=120)
Game = ["Match1", "Match2", "Match3", "Match4", "Match5"]
variable2 = Variable(root3)
variable2.set("\"" + "Select Match" + "\"")
w2 = OptionMenu(root3, variable2, *Game, command=f2)
w2.place(x=300, y=40, width=120)
b4 = Text(root3, height=17, width=25, bg="white", relief="solid")
b4.place(x=30, y=100)
b4.configure(state='disabled')
b5 = Text(root3, height=17, width=25, bg="white", relief="solid")
b5.place(x=270, y=101)
b5.configure(state='disabled')
lt4 = Listbox(root3, font=('Comic Sans MS', 12), highlightcolor="white", bd=0, width=20, height=11,
fg="#497CFF", selectbackground="#CCFFFF", selectforeground="#497CFF")
lt4.place(x=31, y=101)
lt5 = Listbox(root3, font=('Comic Sans MS', 12), highlightcolor="white", bd=0, width=20, height=11,
fg="#497CFF", selectbackground="#CCFFFF", selectforeground="#497CFF")
lt5.place(x=271, y=102)
Label(root3, text="Total Points: ", font=('arial', 13, "bold"), bg="#F0F0F0", bd=0).place(x=55, y=400)
e8 = Label(root3, text="####", font=('arial', 13, "italic", "bold"), fg='#399B9B')
e8.place(x=155, y=399)
b = Button(root3, text="Calculate Score", bd=3, command=f3, state=DISABLED)
b.place(x=320, y=398)
Pname = []
conn = sqlite3.connect('Cricket.db')
c = conn.cursor()
c.execute("SELECT TeamName FROM TeamData")
data = c.fetchall()
for i in data:
Pname.append(i)
#1
mb= Menubutton (root, text=" Manage Teams ",font=('Comic Sans MS',9),activebackground="#CCCCCC", relief="solid")
mb.place(x=0,y=0)
mb.menu = Menu ( mb, tearoff = 0 )
mb["menu"] = mb.menu
mb.config(background="#CCCCCC")
mb.menu.config(background="#CCCCCC")
mb.menu.add_command(label="NEW Team", command=NEW,font=('Comic Sans MS',9))
mb.menu.add_command(label="OPEN Team", command=OPEN,font=('Comic Sans MS',9))
mb.menu.add_command(label="SAVE Team", command=SAVE,font=('Comic Sans MS',9))
mb.menu.add_command(label="EVALUATE Team", command=EVALUATE,font=('Comic Sans MS',9))
mb.place(x=0,y=0)
b1 = Text(root, height=4, width=72,bg="#F0F0F0", relief="solid")
b1.place(x=45,y=45)
b1.configure(state='disabled')
Label(b1, text="Your Selection",font=('Comic Sans MS',8)).place(x=1,y=0)
l1=Label(b1,text="Batsman (BAT)",font=('Comic Sans MS',9,"bold"))
l2=Label(b1,text="Bowers (BOW)",font=('Comic Sans MS',9,"bold"))
l3=Label(b1,text="Allroundders (AR)",font=('Comic Sans MS',9,"bold"))
l4=Label(b1,text="Wicket-Keeper (WK)",font=('Comic Sans MS',9,"bold"))
l1.place(x=3,y=23)
l2.place(x=138,y=23)
l3.place(x=273,y=23)
l4.place(x=420,y=23)
e1=Label(b1,text="##",font=('arial',10,"italic"),bg="#F0F0F0",bd=0,fg='#399B9B')
e1.place(x=97, y=26,width=15)
e2=Label(b1,text="##",font=('arial',10,"italic"),bg="#F0F0F0",bd=0,fg='#399B9B')
e2.place(x=230, y=26,width=15)
e3=Label(b1,text="##",font=('arial',10,"italic"),bg="#F0F0F0",bd=0,fg='#399B9B')
e3.place(x=382, y=26,width=15)
e4=Label(b1,text="##",font=('arial',10,"italic"),bg="#F0F0F0",bd=0,fg='#399B9B')
e4.place(x=546, y=26,width=15)
Label(root,text="Point Available",bg="white",font=('Comic Sans MS',9,"bold")).place(x=120,y=120)
Label(root,text="Point Used",bg="white",font=('Comic Sans MS',9,"bold")).place(x=430,y=120)
e5=Label(root,text="####",bg="white",font=('arial',10,"italic","bold"), fg='#399B9B')
e5.place(x=212, y=120)
e6=Label(root,text="####",bg="white",font=('arial',10,"italic","bold"),fg='#399B9B')
e6.place(x=500, y=120)
b1 = Text(root, height=20, width=28, bg="white", relief="solid")
b1.place(x=80, y=150)
b1.configure(state='disabled')
b2 = Text(root, height=20, width=28, bg="white", relief="solid")
b2.place(x=380, y=150)
b2.configure(state='disabled')
Label(root,text=">",bg="white", font=('Comic Sans MS',20)).place(x=335,y=245)
Label(root,text="Team Name: ", bg="white", font=('Comic Sans MS',10)).place(x=405,y=155)
e7=Label(root,text="Displayed Here", font=('arial',10,"bold"),bg="white", fg='#399B9B', anchor="w")
e7.place(x=485, y=156,width=100)
selected = IntVar()
rad1 = Radiobutton(root, text='BAT', value=1, bg="white",activebackground="white", font=('Comic Sans MS',9), variable=selected,command=selection)
rad2 = Radiobutton(root, text='BOW', value=2, bg="white",activebackground="white", font=('Comic Sans MS',9), variable=selected,command=selection)
rad3 = Radiobutton(root, text='AR', value=3, bg="white",activebackground="white", font=('Comic Sans MS',9), variable=selected,command=selection)
rad4 = Radiobutton(root, text='WK', value=4, bg="white",activebackground="white", font=('Comic Sans MS',9), variable=selected,command=selection)
rad1.place(x=85, y=155)
rad2.place(x=143, y=155)
rad3.place(x=207, y=155)
rad4.place(x=256, y=155)
rad1.configure(state='disabled')
rad2.configure(state='disabled')
rad3.configure(state='disabled')
rad4.configure(state='disabled')
lt1 = Listbox(root, font=('Comic Sans MS', 12), highlightcolor="white", bd=0, width=22, height=11,
fg="#497CFF", selectbackground="#CCFFFF", selectforeground="#497CFF")
lt1.bind("<Double-1>", go)
lt1.place(x=83, y=199)
lt2 = Listbox(root, font=('Comic Sans MS',12), highlightcolor="white", bd=0, width=22, height=11,
fg="#497CFF", selectbackground="#CCFFFF",selectforeground="#497CFF")
lt2.place(x=385, y=199)
GV()
#0
center(root)
root.mainloop()
|
StarcoderdataPython
|
5042706
|
# This initializes the problem class for SWE
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
from parameters import Nx, Ny, Lx, Ly
from parameters import rho, grav, dt, dx, dy, ft
from parameters import K
from parameters import plot_viz, num_steps_per_plot, num_samples, num_train
# Common functions for spatial discretizations
def state_reconstruction(q,Nx,Ny):
# Weno5
pad = 3
qtemp = periodic_bc(q,pad)
# Smoothness indicators in x
beta_0 = 13.0/12.0*(qtemp[pad-2:pad+Nx-2,:]-2.0*qtemp[pad-1:pad+Nx-1,:]+qtemp[pad:Nx+pad,:])**2 \
+ 1.0/4.0*(qtemp[pad-2:pad+Nx-2,:]-4.0*qtemp[pad-1:pad+Nx-1,:]+3.0*qtemp[pad:Nx+pad,:])**2
beta_1 = 13.0/12.0*(qtemp[pad-1:pad+Nx-1,:]-2.0*qtemp[pad:pad+Nx,:]+qtemp[pad+1:Nx+pad+1,:])**2 \
+ 1.0/4.0*(qtemp[pad-1:pad+Nx-1,:]-qtemp[pad+1:pad+Nx+1,:])**2
beta_2 = 13.0/12.0*(qtemp[pad:pad+Nx,:]-2.0*qtemp[pad+1:pad+Nx+1,:]+qtemp[pad+2:Nx+pad+2,:])**2 \
+ 1.0/4.0*(3.0*qtemp[pad:pad+Nx,:]-4.0*qtemp[pad+1:pad+Nx+1,:]+qtemp[pad+2:Nx+pad+2,:])**2
# nonlinear weights in x
alpha_0 = (1.0/10.0)/((beta_0+1.0e-6)**2)
alpha_1 = (6.0/10.0)/((beta_1+1.0e-6)**2)
alpha_2 = (3.0/10.0)/((beta_2+1.0e-6)**2)
# Find nonlinear weights
w_0 = (alpha_0/(alpha_0+alpha_1+alpha_2))/6.0
w_1 = (alpha_1/(alpha_0+alpha_1+alpha_2))/6.0
w_2 = (alpha_2/(alpha_0+alpha_1+alpha_2))/6.0
# Find state reconstructions in x - wave to right (at i+1/2)
qxright = w_0*(2.0*qtemp[pad-2:pad+Nx-2,:]-7.0*qtemp[pad-1:pad+Nx-1,:]+11.0*qtemp[pad:pad+Nx,:]) \
+ w_1*(-qtemp[pad-1:pad+Nx-1,:]+5.0*qtemp[pad:pad+Nx,:]+2.0*qtemp[pad+1:pad+Nx+1,:]) \
+ w_2*(2.0*qtemp[pad:pad+Nx,:]+5.0*qtemp[pad+1:pad+Nx+1,:]-qtemp[pad+2:pad+Nx+2,:])
# Find state reconstructions in x - wave to left (at i+1/2)
qxleft = w_0*(2.0*qtemp[pad+2:pad+Nx+2,:]-7.0*qtemp[pad+1:pad+Nx+1,:]+11.0*qtemp[pad:pad+Nx,:]) \
+ w_1*(-qtemp[pad+1:pad+Nx+1,:]+5.0*qtemp[pad:pad+Nx,:]+2.0*qtemp[pad-1:pad+Nx-1,:]) \
+ w_2*(2.0*qtemp[pad:pad+Nx,:]+5.0*qtemp[pad-1:pad+Nx-1,:]-qtemp[pad-2:pad+Nx-2,:])
qxleft = qxleft[:,pad:pad+Ny]
qxright = qxright[:,pad:pad+Ny]
# Smoothness indicators in y
beta_0 = 13.0/12.0*(qtemp[:,pad-2:pad+Ny-2]-2.0*qtemp[:,pad-1:pad+Ny-1]+qtemp[:,pad:Ny+pad])**2 \
+ 1.0/4.0*(qtemp[:,pad-2:pad+Ny-2]-4.0*qtemp[:,pad-1:pad+Ny-1]+3.0*qtemp[:,pad:Ny+pad])**2
beta_1 = 13.0/12.0*(qtemp[:,pad-1:pad+Ny-1]-2.0*qtemp[:,pad:pad+Ny]+qtemp[:,pad+1:Ny+pad+1])**2 \
+ 1.0/4.0*(qtemp[:,pad-1:pad+Ny-1]-qtemp[:,pad+1:pad+Ny+1])**2
beta_2 = 13.0/12.0*(qtemp[:,pad:pad+Ny]-2.0*qtemp[:,pad+1:pad+Ny+1]+qtemp[:,pad+2:Ny+pad+2])**2 \
+ 1.0/4.0*(3.0*qtemp[:,pad:pad+Ny]-4.0*qtemp[:,pad+1:pad+Ny+1]+qtemp[:,pad+2:Ny+pad+2])**2
    # nonlinear weights in y
alpha_0 = (1.0/10.0)/((beta_0+1.0e-6)**2)
alpha_1 = (6.0/10.0)/((beta_1+1.0e-6)**2)
alpha_2 = (3.0/10.0)/((beta_2+1.0e-6)**2)
# Find nonlinear weights
w_0 = (alpha_0/(alpha_0+alpha_1+alpha_2))/6.0
w_1 = (alpha_1/(alpha_0+alpha_1+alpha_2))/6.0
w_2 = (alpha_2/(alpha_0+alpha_1+alpha_2))/6.0
# Find state reconstructions in y - qright (at i+1/2)
qyright = w_0*(2.0*qtemp[:,pad-2:pad+Ny-2]-7.0*qtemp[:,pad-1:pad+Ny-1]+11.0*qtemp[:,pad:pad+Ny]) \
+ w_1*(-qtemp[:,pad-1:pad+Ny-1]+5.0*qtemp[:,pad:pad+Ny]+2.0*qtemp[:,pad+1:pad+Ny+1]) \
+ w_2*(2.0*qtemp[:,pad:pad+Ny]+5.0*qtemp[:,pad+1:pad+Ny+1]-qtemp[:,pad+2:pad+Ny+2])
# Find state reconstructions in y - wave to left (at i+1/2)
qyleft = w_0*(2.0*qtemp[:,pad+2:pad+Ny+2]-7.0*qtemp[:,pad+1:pad+Ny+1]+11.0*qtemp[:,pad:pad+Ny]) \
+ w_1*(-qtemp[:,pad+1:pad+Ny+1]+5.0*qtemp[:,pad:pad+Ny]+2.0*qtemp[:,pad-1:pad+Ny-1]) \
+ w_2*(2.0*qtemp[:,pad:pad+Ny]+5.0*qtemp[:,pad-1:pad+Ny-1]-qtemp[:,pad-2:pad+Ny-2])
qyleft = qyleft[pad:pad+Nx,:]
qyright = qyright[pad:pad+Nx,:]
return qxleft, qxright, qyleft, qyright
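# Note on the reconstruction above: beta_k are the Jiang-Shu smoothness indicators and
# (1/10, 6/10, 3/10) are the classical WENO5 linear weights; the extra division by 6
# folds the 1/6 factor of each candidate stencil polynomial into w_k. For smooth data
# the nonlinear weights relax to the linear ones, while a stencil containing a
# discontinuity receives a weight close to zero.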
def reimann_solve(spec_rad,fl,fr,ql,qr,dim):
    # Rusanov Riemann solver
pad = 3
srt = periodic_bc(spec_rad,pad)
if dim == 'x':
srt = np.maximum.reduce([srt[pad-3:Nx+pad-3,pad:Ny+pad],srt[pad-2:Nx+pad-2,pad:Ny+pad],srt[pad-1:Nx+pad-1,pad:Ny+pad],\
srt[pad:Nx+pad,pad:Ny+pad],srt[pad+1:Nx+pad+1,pad:Ny+pad],srt[pad+2:Nx+pad+2,pad:Ny+pad],srt[pad+3:Nx+pad+3,pad:Ny+pad]])
        flux = 0.5*(fr+fl) + 0.5*srt*(qr-ql)  # Rusanov dissipation is proportional to the interface jump
return flux
else:
srt = np.maximum.reduce([srt[pad:Nx+pad,pad-3:Ny+pad-3],srt[pad:Nx+pad,pad-2:Ny+pad-2],srt[pad:Nx+pad,pad-1:Ny+pad-1],\
srt[pad:Nx+pad,pad:Ny+pad],srt[pad:Nx+pad,pad+1:Ny+pad+1],srt[pad:Nx+pad,pad+2:Ny+pad+2],srt[pad:Nx+pad,pad+3:Ny+pad+3]])
        flux = 0.5*(fr+fl) + 0.5*srt*(qr-ql)  # Rusanov dissipation is proportional to the interface jump
return flux
def periodic_bc(q,pad):
qtemp = np.zeros(shape=(q.shape[0]+2*pad,q.shape[1]+2*pad),dtype='double')
# Periodicity updates
qtemp[pad:Nx+pad,pad:Ny+pad] = q[:,:]
# x direction periodicity
qtemp[0:pad,:] = qtemp[Nx-pad:Nx,:]
qtemp[Nx+pad:,:] = qtemp[pad:2*pad,:]
# y direction periodicity
qtemp[:,0:pad] = qtemp[:,Ny-pad:Ny]
qtemp[:,Ny+pad:] = qtemp[:,pad:2*pad]
return qtemp
def spectral_radius(q1,q2):
sound_speed = 2.0*np.sqrt(q1/rho*grav)
u = q2/q1
return np.maximum.reduce([np.abs(u+sound_speed),np.abs(u-sound_speed),\
np.abs(sound_speed)])
def flux_reconstruction(q1,q2,q3):
spec_rad_x = spectral_radius(q1,q2)
spec_rad_y = spectral_radius(q1,q3)
q1xleft, q1xright, q1yleft, q1yright = state_reconstruction(q1,Nx,Ny)
q2xleft, q2xright, q2yleft, q2yright = state_reconstruction(q2,Nx,Ny)
q3xleft, q3xright, q3yleft, q3yright = state_reconstruction(q3,Nx,Ny)
# Reconstructing fluxes for q1
f1xleft = np.copy(q2xleft)
f1xright = np.copy(q2xright)
f1x = reimann_solve(spec_rad_x,f1xleft,f1xright,q1xleft,q1xright,'x')
f1yleft = np.copy(q3yleft)
f1yright = np.copy(q3yright)
f1y = reimann_solve(spec_rad_y,f1yleft,f1yright,q1yleft,q1yright,'y')
# Reconstructing fluxes for q2
f2xleft = (q2xleft**2)/(q1xleft) + 0.5*(q1xleft**2)*(grav/rho)
f2xright = (q2xright**2)/(q1xright) + 0.5*(q1xright**2)*(grav/rho)
    f2x = reimann_solve(spec_rad_x,f2xleft,f2xright,q2xleft,q2xright,'x')
f2yleft = (q2yleft*q3yleft/q1yleft)
f2yright = (q2yright*q3yright/q1yright)
f2y = reimann_solve(spec_rad_y,f2yleft,f2yright,q2yleft,q2yright,'y')
# Reconstructing fluxes for q3
f3xleft = (q2xleft*q3xleft/q1xleft)
f3xright = (q2xright*q3xright/q1xright)
f3x = reimann_solve(spec_rad_x,f3xleft,f3xright,q3xleft,q3xright,'x')
f3yleft = (q3yleft**2)/(q1yleft) + 0.5*(q1yleft**2)*(grav/rho)
f3yright = (q3yright**2)/(q1yright) + 0.5*(q1yright**2)*(grav/rho)
f3y = reimann_solve(spec_rad_y,f3yleft,f3yright,q3yleft,q3yright,'y')
return f1x, f1y, f2x, f2y, f3x, f3y
# Plotting functions
def plot_coefficients(Ytilde):
fig,ax = plt.subplots(nrows=1,ncols=4)
ax[0].plot(Ytilde[0,:],label='Mode 1')
ax[1].plot(Ytilde[1,:],label='Mode 2')
ax[2].plot(Ytilde[2,:],label='Mode 3')
ax[3].plot(Ytilde[3,:],label='Mode 4')
plt.legend()
plt.show()
def plot_fields_debug(X,Y,q,label,iter):
fig = plt.figure(figsize = (11, 7))
ax = Axes3D(fig)
surf = ax.plot_surface(X, Y, q, rstride = 1, cstride = 1,
cmap = plt.cm.jet, linewidth = 0, antialiased = True)
ax.set_title('Visualization', fontname = "serif", fontsize = 17)
ax.set_xlabel("x [m]", fontname = "serif", fontsize = 16)
ax.set_ylabel("y [m]", fontname = "serif", fontsize = 16)
if label == 'q1':
ax.set_zlim((0,2))
elif label == 'q2':
ax.set_zlim((-1,1))
else:
ax.set_zlim((-1,1))
plt.savefig(label+'_'+str(iter)+'.png')
# Shallow water equations class
class shallow_water(object):
"""docstring for ClassName"""
def __init__(self,args=[0,0]):
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
x = np.linspace(-self.Lx/2, self.Lx/2, self.Nx) # Array with x-points
y = np.linspace(-self.Ly/2, self.Ly/2, self.Ny) # Array with y-points
# Meshgrid for plotting
self.X, self.Y = np.meshgrid(x, y)
# Initialize fields
self.initialize(args)
# Field storage for viz
self.q_list = []
# Plot interval
self.plot_interval = num_steps_per_plot
# Field storage for ROM
self.snapshots_pod = [] # at plot interval
def initialize(self,args=[0,0]):
loc_x = args[0]
loc_y = args[1]
# There are three conserved quantities - initialize
self.q1 = 1.0+(rho*np.exp(-((self.X-loc_x)**2/(2*(0.05)**2) + (self.Y-loc_y)**2/(2*(0.05)**2))))
self.q2 = np.zeros(shape=(self.Nx,self.Ny),dtype='double')
self.q3 = np.zeros(shape=(self.Nx,self.Ny),dtype='double')
def right_hand_side(self,q1,q2,q3):
f1x, f1y, f2x, f2y, f3x, f3y = flux_reconstruction(q1,q2,q3) # these are all i+1/2
# Periodicity
pad = 1
f1xtemp = periodic_bc(f1x,pad)
f1ytemp = periodic_bc(f1y,pad)
f2xtemp = periodic_bc(f2x,pad)
f2ytemp = periodic_bc(f2y,pad)
f3xtemp = periodic_bc(f3x,pad)
f3ytemp = periodic_bc(f3y,pad)
r1 = 1.0/dx*(f1xtemp[pad:Nx+pad,pad:Ny+pad]-f1xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f1ytemp[pad:Nx+pad,pad:Ny+pad]-f1ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
r2 = 1.0/dx*(f2xtemp[pad:Nx+pad,pad:Ny+pad]-f2xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f2ytemp[pad:Nx+pad,pad:Ny+pad]-f2ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
r3 = 1.0/dx*(f3xtemp[pad:Nx+pad,pad:Ny+pad]-f3xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f3ytemp[pad:Nx+pad,pad:Ny+pad]-f3ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
return -r1, -r2, -r3
def integrate_rk(self):
# Equally spaced time integration
q1temp = np.copy(self.q1)
q2temp = np.copy(self.q2)
q3temp = np.copy(self.q3)
r1_k1, r2_k1, r3_k1 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign
q1temp[:,:] = self.q1[:,:] + dt*(r1_k1[:,:])
q2temp[:,:] = self.q2[:,:] + dt*(r2_k1[:,:])
q3temp[:,:] = self.q3[:,:] + dt*(r3_k1[:,:])
r1_k2, r2_k2, r3_k2 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign
q1temp[:,:] = self.q1[:,:] + 0.125*dt*r1_k1[:,:] + 0.125*dt*r1_k2[:,:]
q2temp[:,:] = self.q2[:,:] + 0.125*dt*r2_k1[:,:] + 0.125*dt*r2_k2[:,:]
q3temp[:,:] = self.q3[:,:] + 0.125*dt*r3_k1[:,:] + 0.125*dt*r3_k2[:,:]
r1_k3, r2_k3, r3_k3 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign
self.q1[:,:] = self.q1[:,:] + (1.0/6.0)*dt*r1_k1[:,:] + (1.0/6.0)*dt*r1_k2[:,:] + (2.0/3.0)*dt*r1_k3[:,:]
self.q2[:,:] = self.q2[:,:] + (1.0/6.0)*dt*r2_k1[:,:] + (1.0/6.0)*dt*r2_k2[:,:] + (2.0/3.0)*dt*r2_k3[:,:]
self.q3[:,:] = self.q3[:,:] + (1.0/6.0)*dt*r3_k1[:,:] + (1.0/6.0)*dt*r3_k2[:,:] + (2.0/3.0)*dt*r3_k3[:,:]
def solve(self):
self.t = 0
plot_iter = 0
save_iter = 0
# Save initial conditions
flattened_data = np.concatenate((self.q1.flatten(),self.q2.flatten(),self.q3.flatten()),axis=0)
self.snapshots_pod.append(flattened_data)
while self.t < ft:
print('Time is:',self.t)
self.t = self.t + dt
self.integrate_rk()
if plot_iter == self.plot_interval:
# Save snapshots
flattened_data = np.concatenate((self.q1.flatten(),self.q2.flatten(),self.q3.flatten()),axis=0)
self.snapshots_pod.append(flattened_data)
if plot_viz:
plot_fields_debug(self.X,self.Y,self.q1,'q1',save_iter)
plot_iter = 0
save_iter = save_iter + 1
plot_iter = plot_iter + 1
print('Solution finished')
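# Hedged usage sketch (illustrative, assuming parameters.py supplies the imported
# globals): run one forward solve and stack the stored snapshots into the
# (3*Nx*Ny) x S matrix consumed by shallow_water_rom below.
def _example_generate_snapshots(loc_x=0.0, loc_y=0.0):
    solver = shallow_water(args=[loc_x, loc_y])
    solver.solve()
    return np.transpose(np.asarray(solver.snapshots_pod))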
class shallow_water_rom(object):
def __init__(self,snapshot_matrix_pod,snapshot_matrix_test):
"""
K - number of POD DOF for GP
snapshot_matrix_pod - At snapshot location
"""
self.K = K
self.q1_snapshot_matrix_pod = snapshot_matrix_pod[:Nx*Ny,:]
self.q2_snapshot_matrix_pod = snapshot_matrix_pod[Nx*Ny:2*Nx*Ny,:]
self.q3_snapshot_matrix_pod = snapshot_matrix_pod[2*Nx*Ny:,:]
self.q1_snapshot_matrix_test = snapshot_matrix_test[:Nx*Ny,:]
self.q2_snapshot_matrix_test = snapshot_matrix_test[Nx*Ny:2*Nx*Ny,:]
self.q3_snapshot_matrix_test = snapshot_matrix_test[2*Nx*Ny:,:]
# Plot interval
self.plot_interval = num_steps_per_plot
# Plot related
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
x = np.linspace(-self.Lx/2, self.Lx/2, self.Nx) # Array with x-points
y = np.linspace(-self.Ly/2, self.Ly/2, self.Ny) # Array with y-points
# Meshgrid for plotting
self.X, self.Y = np.meshgrid(x, y)
def method_of_snapshots(self,snapshot_matrix_pod,snapshot_matrix_test):
"""
Read snapshot_matrix (field or nonlinear term) and compute the POD bases and coefficients
snapshot_matrix_pod - N x S - where N is DOF, S snapshots
V - truncated POD basis matrix - shape: NxK - K is truncation number
Ytilde - shape: KxS - POD basis coefficients for train data
Ytilde_test - shape: KxS - POD basis coefficients for test data
"""
new_mat = np.matmul(np.transpose(snapshot_matrix_pod),snapshot_matrix_pod)
w,v = np.linalg.eig(new_mat)
# Bases
V = np.real(np.matmul(snapshot_matrix_pod,v))
trange = np.arange(np.shape(V)[1])
V[:,trange] = V[:,trange]/np.sqrt(w[:])
# Truncate phis
V = V[:,0:self.K] # Columns are modes
# Find POD coefficients
Ytilde = np.matmul(np.transpose(V),snapshot_matrix_pod)
Ytilde_test = np.matmul(np.transpose(V),snapshot_matrix_test)
return w, V, Ytilde, Ytilde_test
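    # Worked sketch of the method of snapshots above (shapes only): for a snapshot
    # matrix A of shape N x S with S << N, the S x S correlation matrix A^T A is
    # diagonalized, its eigenvectors are lifted back via A v_k and normalized by
    # sqrt(w_k), and only the first K columns are kept, so
    #   V      : N x K   (POD basis)
    #   Ytilde : K x S   (coefficients, V^T A)
    # e.g. with N = Nx*Ny rows for each conserved variable.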
    def svd_method(self,snapshot_matrix_pod,snapshot_matrix_test):
        """
        Read snapshot_matrix (field or nonlinear term) and compute the POD bases and coefficients
        snapshot_matrix_pod - N x S - where N is DOF, S snapshots
        V - truncated POD basis matrix - shape: NxK - K is truncation number
        Ytilde - shape: KxS - POD basis coefficients for train data
        Ytilde_test - shape: KxS - POD basis coefficients for test data
        """
        phi, S, Vt = np.linalg.svd(snapshot_matrix_pod)
        V = phi[:,0:self.K]
        Ytilde = np.matmul(np.transpose(V),snapshot_matrix_pod)
        Ytilde_test = np.matmul(np.transpose(V),snapshot_matrix_test)
        return S, V, Ytilde, Ytilde_test
def generate_pod(self):
# Do the POD of the conserved variables
self.q1_w, self.q1_V, self.q1_Ytilde, self.q1_Ytilde_test = self.method_of_snapshots(self.q1_snapshot_matrix_pod,self.q1_snapshot_matrix_test)
self.q2_w, self.q2_V, self.q2_Ytilde, self.q2_Ytilde_test = self.method_of_snapshots(self.q2_snapshot_matrix_pod,self.q2_snapshot_matrix_test)
self.q3_w, self.q3_V, self.q3_Ytilde, self.q3_Ytilde_test = self.method_of_snapshots(self.q3_snapshot_matrix_pod,self.q3_snapshot_matrix_test)
# Print captured energy - using definition in https://arxiv.org/pdf/1308.3276.pdf
print('Capturing ',np.sum(self.q1_w[0:self.K])/np.sum(self.q1_w),'% variance in conserved variable 1')
print('Capturing ',np.sum(self.q2_w[0:self.K])/np.sum(self.q2_w),'% variance in conserved variable 2')
print('Capturing ',np.sum(self.q3_w[0:self.K])/np.sum(self.q3_w),'% variance in conserved variable 3')
np.save('PCA_Vectors_q1.npy',self.q1_V) # The POD bases
np.save('PCA_Vectors_q2.npy',self.q2_V)
np.save('PCA_Vectors_q3.npy',self.q3_V)
np.save('PCA_Coefficients_q1_train.npy',self.q1_Ytilde) # The true projection
np.save('PCA_Coefficients_q2_train.npy',self.q2_Ytilde)
np.save('PCA_Coefficients_q3_train.npy',self.q3_Ytilde)
np.save('PCA_Coefficients_q1_test.npy',self.q1_Ytilde_test) # The true projection
np.save('PCA_Coefficients_q2_test.npy',self.q2_Ytilde_test)
np.save('PCA_Coefficients_q3_test.npy',self.q3_Ytilde_test)
def load_pregenerated_pod(self):
self.q1_V = np.load('PCA_Vectors_q1.npy') # The POD bases
self.q2_V = np.load('PCA_Vectors_q2.npy')
self.q3_V = np.load('PCA_Vectors_q3.npy')
self.q1_Ytilde = np.load('PCA_Coefficients_q1_train.npy') # The true projection
self.q2_Ytilde = np.load('PCA_Coefficients_q2_train.npy')
self.q3_Ytilde = np.load('PCA_Coefficients_q3_train.npy')
self.q1_Ytilde_test = np.load('PCA_Coefficients_q1_test.npy') # The true projection
self.q2_Ytilde_test = np.load('PCA_Coefficients_q2_test.npy')
self.q3_Ytilde_test = np.load('PCA_Coefficients_q3_test.npy')
def plot_reconstruction_error(self):
fig,ax = plt.subplots(ncols=3)
ax[0].plot(self.q1_w[:]/np.sum(self.q1_w))
ax[1].plot(self.q2_w[:]/np.sum(self.q2_w))
ax[2].plot(self.q3_w[:]/np.sum(self.q3_w))
plt.show()
def solve(self):
from time import time
num_test = int(num_samples-num_train)
self.q1_snapshots = np.copy(self.q1_Ytilde_test)
self.q2_snapshots = np.copy(self.q2_Ytilde_test)
self.q3_snapshots = np.copy(self.q3_Ytilde_test)
start_time = time()
for test in range(num_test):
plot_iter = 0
save_iter = 0
iter_num = 0
# initalize solutions
self.q1 = np.copy(self.q1_Ytilde_test[:,num_steps_per_plot*test])
self.q2 = np.copy(self.q2_Ytilde_test[:,num_steps_per_plot*test])
self.q3 = np.copy(self.q3_Ytilde_test[:,num_steps_per_plot*test])
self.t = 0.0
save_iter = num_steps_per_plot*test+1
while self.t < ft:
print('Time is:',self.t)
self.t = self.t + dt
self.integrate_rk()
iter_num = iter_num + 1
if plot_iter == self.plot_interval:
# q1_full = np.matmul(self.q1_V,self.q1)
# q2_full = np.matmul(self.q2_V,self.q2)
# q3_full = np.matmul(self.q3_V,self.q3)
# flattened_data = np.concatenate((q1_full,q2_full,q3_full),axis=0)
# self.rom_pred_snapshots.append(flattened_data)
self.q1_snapshots[:,save_iter] = self.q1[:]
self.q2_snapshots[:,save_iter] = self.q2[:]
self.q3_snapshots[:,save_iter] = self.q3[:]
                    if plot_viz:
                        q1_full = np.matmul(self.q1_V,self.q1) # reconstruct full field for visualization
                        q1_full = np.reshape(q1_full,newshape=(Nx,Ny))
                        plot_fields_debug(self.X,self.Y,q1_full,'q1',save_iter)
plot_iter = 0
save_iter = save_iter + 1
plot_iter = plot_iter + 1
print('Average elapsed time GP:',(time()-start_time)/(num_test))
np.save('PCA_Coefficients_q1_pred.npy',self.q1_snapshots)
np.save('PCA_Coefficients_q2_pred.npy',self.q2_snapshots)
np.save('PCA_Coefficients_q3_pred.npy',self.q3_snapshots)
def integrate_rk(self):
# Equally spaced time integration in reduced space
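        # Added note: three-stage explicit Runge-Kutta-type update of the reduced
        # coefficients, combining the stages with weights 1/6, 1/6 and 2/3 on the
        # (sign-flipped) Galerkin-projected right-hand side.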
q1temp = np.copy(self.q1)
q2temp = np.copy(self.q2)
q3temp = np.copy(self.q3)
r1_k1, r2_k1, r3_k1 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign
q1temp[:] = self.q1[:] + dt*(r1_k1[:])
q2temp[:] = self.q2[:] + dt*(r2_k1[:])
q3temp[:] = self.q3[:] + dt*(r3_k1[:])
r1_k2, r2_k2, r3_k2 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign
q1temp[:] = self.q1[:] + 0.125*dt*r1_k1[:] + 0.125*dt*r1_k2[:]
q2temp[:] = self.q2[:] + 0.125*dt*r2_k1[:] + 0.125*dt*r2_k2[:]
q3temp[:] = self.q3[:] + 0.125*dt*r3_k1[:] + 0.125*dt*r3_k2[:]
r1_k3, r2_k3, r3_k3 = self.right_hand_side(q1temp,q2temp,q3temp) # Note switch in sign
self.q1[:] = self.q1[:] + (1.0/6.0)*dt*r1_k1[:] + (1.0/6.0)*dt*r1_k2[:] + (2.0/3.0)*dt*r1_k3[:]
self.q2[:] = self.q2[:] + (1.0/6.0)*dt*r2_k1[:] + (1.0/6.0)*dt*r2_k2[:] + (2.0/3.0)*dt*r2_k3[:]
self.q3[:] = self.q3[:] + (1.0/6.0)*dt*r3_k1[:] + (1.0/6.0)*dt*r3_k2[:] + (2.0/3.0)*dt*r3_k3[:]
def right_hand_side(self,q1_red,q2_red,q3_red):
"""
Function calculates nonlinear term using state vector and DEIM
Need to embed conditional RHS calculation - WIP
"""
q1_full = np.matmul(self.q1_V,q1_red)
q1_full = np.reshape(q1_full,newshape=(Nx,Ny))
q2_full = np.matmul(self.q2_V,q2_red)
q2_full = np.reshape(q2_full,newshape=(Nx,Ny))
q3_full = np.matmul(self.q3_V,q3_red)
q3_full = np.reshape(q3_full,newshape=(Nx,Ny))
q1nl, q2nl, q3nl = self.nonlinear_term_full(q1_full,q2_full,q3_full)
        q1nl_red = np.matmul(np.transpose(self.q1_V),q1nl.reshape(Nx*Ny,))
        q2nl_red = np.matmul(np.transpose(self.q2_V),q2nl.reshape(Nx*Ny,))
        q3nl_red = np.matmul(np.transpose(self.q3_V),q3nl.reshape(Nx*Ny,))
return -q1nl_red, -q2nl_red, -q3nl_red
def nonlinear_term_full(self,q1,q2,q3):
f1x, f1y, f2x, f2y, f3x, f3y = flux_reconstruction(q1,q2,q3) # these are all i+1/2
# Periodicity
pad = 1
f1xtemp = periodic_bc(f1x,pad)
f1ytemp = periodic_bc(f1y,pad)
f2xtemp = periodic_bc(f2x,pad)
f2ytemp = periodic_bc(f2y,pad)
f3xtemp = periodic_bc(f3x,pad)
f3ytemp = periodic_bc(f3y,pad)
r1 = 1.0/dx*(f1xtemp[pad:Nx+pad,pad:Ny+pad]-f1xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f1ytemp[pad:Nx+pad,pad:Ny+pad]-f1ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
r2 = 1.0/dx*(f2xtemp[pad:Nx+pad,pad:Ny+pad]-f2xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f2ytemp[pad:Nx+pad,pad:Ny+pad]-f2ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
r3 = 1.0/dx*(f3xtemp[pad:Nx+pad,pad:Ny+pad]-f3xtemp[pad-1:Nx+pad-1,pad:Ny+pad]) + 1.0/dy*(f3ytemp[pad:Nx+pad,pad:Ny+pad]-f3ytemp[pad:Nx+pad,pad-1:Ny+pad-1])
return r1, r2, r3
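# Added illustration (hedged): a minimal, self-contained sketch of the method of
# snapshots used by the class above, applied to an arbitrary N x S snapshot matrix A.
# It mirrors method_of_snapshots: eigendecompose A^T A, lift the eigenvectors back
# through A, normalise each column by sqrt(eigenvalue), truncate to K modes and
# project. The name demo_pod is not part of the original script.
def demo_pod(A, K):
    w, v = np.linalg.eig(np.matmul(np.transpose(A), A))   # S x S correlation eigenpairs
    V = np.real(np.matmul(A, v))/np.sqrt(np.real(w))      # lift to full space, normalise columns
    V = V[:, 0:K]                                          # keep the K leading modes
    Ytilde = np.matmul(np.transpose(V), A)                 # K x S POD coefficients
    return V, Ytilde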
|
StarcoderdataPython
|
8089771
|
<reponame>fananchong/go-xclient<filename>pyclient/main.py
#! python3
import wx
import config
import user
import login_window
import log
if __name__ == "__main__":
args, cfg = config.load_config()
usr = user.User(args, cfg).init()
log.init(cfg["logfile"])
app = wx.App(False)
login_window.new(usr, args, cfg)
app.MainLoop()
|
StarcoderdataPython
|
5183265
|
import functools
import os
import sys
import threading
import pkg_resources
from .executor import Context, Tasks, TaskError
from .paths import in_dir, paths_for_shell
tasks = Tasks()
@tasks.register('dependencies', 'additional_assets', 'bundles', 'collect_static_files',
'take_screenshots', 'compile_messages', 'precompile_python_code', default=True)
def build(_: Context):
"""
Builds all necessary assets
"""
@tasks.register('build')
def start(context: Context, port=8000):
"""
Starts a development server
"""
# NB: if called in the same interpreter, cannot use auto-reloading else all tasks re-run
# context.management_command('runserver', addrport=f'0:{port}', use_reloader=False)
return context.shell(sys.executable, 'manage.py', 'runserver', f'0:{port}')
@tasks.register('build')
def serve(context: Context, port=8000, browsersync_port=3000, browsersync_ui_port=3030):
"""
Starts a development server with auto-building and live-reload
"""
try:
from watchdog.observers import Observer
except ImportError:
context.pip_command('install', 'watchdog>0.8,<0.9')
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class RebuildHandler(PatternMatchingEventHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._patterns = ['*.js', '*.scss', '*.html']
self._ignore_directories = True
self.builder = None
self.rebuild_javascript = threading.Event()
self.rebuild_stylesheets = threading.Event()
def on_any_event(self, event):
if self.builder:
self.builder.cancel()
extension = event.src_path.rsplit('.', 1)[-1].lower()
if extension == 'js':
self.rebuild_javascript.set()
elif extension == 'scss':
self.rebuild_stylesheets.set()
self.builder = threading.Timer(3, self.rebuild)
self.builder.start()
def rebuild(self):
if self.rebuild_javascript.is_set():
self.rebuild_javascript.clear()
context.debug('Triggering javascript build')
bundle_javascript(context)
if self.rebuild_stylesheets.is_set():
self.rebuild_stylesheets.clear()
context.debug('Triggering stylesheet build')
bundle_stylesheets(context)
context.debug('Reloading browsers')
context.node_tool('browser-sync', 'reload', f'--port={browsersync_port}')
context.info('Watching sources')
observer = Observer()
paths = [
context.app.common_asset_source_path,
context.app.asset_source_path,
context.app.common_templates_path,
context.app.templates_path,
]
handler = RebuildHandler()
for path in paths:
observer.schedule(handler, path, recursive=True)
observer.setDaemon(True)
observer.start()
context.info('Starting browser sync')
browsersync_args = ['start', '--host=localhost', '--no-open',
'--logLevel', {0: 'silent', 1: 'info', 2: 'debug'}[context.verbosity],
f'--port={browsersync_port}', f'--proxy=localhost:{port}',
f'--ui-port={browsersync_ui_port}']
browsersync = functools.partial(context.node_tool, 'browser-sync', *browsersync_args)
threading.Thread(target=browsersync, daemon=True).start()
context.info('Starting web server')
return start(context, port=port)
@tasks.register('build', 'lint')
def test(context: Context, test_labels=None, functional_tests=False, accessibility_tests=False, webdriver=None):
"""
Tests the app
"""
if accessibility_tests:
functional_tests = True
os.environ['RUN_ACCESSIBILITY_TESTS'] = '1'
if functional_tests:
os.environ['RUN_FUNCTIONAL_TESTS'] = '1'
if webdriver:
os.environ['WEBDRIVER'] = webdriver
test_labels = (test_labels or '').split()
return context.management_command('test', *test_labels, interactive=False)
@tasks.register(hidden=True)
def create_build_paths(context: Context):
"""
Creates directories needed for build outputs
"""
paths = [
context.app.asset_build_path,
context.app.scss_build_path,
context.app.screenshots_build_path,
context.app.collected_assets_path,
]
for path in filter(None, paths):
os.makedirs(path, exist_ok=True)
@tasks.register(hidden=True)
def python_dependencies(context: Context, common_path=None):
"""
Updates python dependencies
"""
context.pip_command('install', '-r', context.requirements_file)
if common_path:
context.pip_command('uninstall', '--yes', 'money-to-prisoners-common')
context.pip_command('install', '--force-reinstall', '-e', common_path)
context.shell('rm', '-rf', 'webpack.config.js') # because it refers to path to common
@tasks.register(hidden=True)
def package_json(context: Context):
"""
Generates a package.json file
"""
context.write_template('package.json')
@tasks.register('package_json', hidden=True)
def node_dependencies(context: Context):
"""
Updates node.js dependencies
"""
args = ['--loglevel', {0: 'silent', 1: 'warn', 2: 'info'}[context.verbosity]]
if not context.use_colour:
args.append('--color false')
args.append('install')
return context.shell('npm', *args)
@tasks.register('python_dependencies', 'node_dependencies', hidden=True)
def dependencies(_: Context):
"""
Updates all dependencies
"""
@tasks.register(hidden=True)
def docker_compose_config(context: Context, port=8000):
"""
Generates a docker-compose.yml file
"""
context.write_template('docker-compose.yml', context={
'port': port,
})
@tasks.register('docker_compose_config', hidden=True)
def local_docker(context: Context):
"""
Runs the app in a docker container; for local development only!
Once performed, `docker-compose up` can be used directly
"""
args = ()
if context.verbosity > 1:
args += ('--verbose',)
args += ('up', '--build', '--remove-orphans')
if not context.use_colour:
args += ('--no-color',)
context.shell('docker-compose', *args)
@tasks.register(hidden=True)
def webpack_config(context: Context):
"""
Generates a webpack.config.js file
"""
context.write_template('webpack.config.js')
@tasks.register('create_build_paths', 'node_dependencies', 'webpack_config', hidden=True)
def bundle_javascript(context: Context, production_bundle=False):
"""
Compiles javascript
"""
args = ['--bail']
if not context.use_colour:
args.append('--no-color')
if production_bundle:
args.append('--mode=production')
return context.node_tool('webpack', *args)
@tasks.register('create_build_paths', 'node_dependencies', hidden=True)
def bundle_stylesheets(context: Context, production_bundle=False):
"""
Compiles stylesheets
"""
def make_output_file(css_path):
css_name = os.path.basename(css_path)
base_name = os.path.splitext(css_name)[0]
return os.path.join(context.app.scss_build_path, f'{base_name}.css')
style = 'compressed' if production_bundle else 'nested'
args = [
'pysassc', # pysassc entrypoint always removes the first item
f'--output-style={style}',
]
for path in context.app.scss_include_paths:
args.append(f'--include-path={path}')
return_code = 0
pysassc = pkg_resources.load_entry_point('libsass', 'console_scripts', 'pysassc')
for source_file in context.app.scss_source_file_set.paths_for_shell(separator=None):
context.info(f'Building {source_file}')
pysassc_args = [*args + [source_file, make_output_file(source_file)]]
return_code = pysassc(pysassc_args) or return_code
return return_code
@tasks.register('bundle_javascript', 'bundle_stylesheets', hidden=True)
def bundles(_: Context):
"""
Compiles assets
"""
@tasks.register(hidden=True)
def lint_config(context: Context):
"""
Generates javasript and stylesheet linting configuration files
"""
context.write_template('eslintrc.json', path='.eslintrc.json')
context.write_template('sass-lint.yml', path='.sass-lint.yml')
@tasks.register('node_dependencies', 'lint_config', hidden=True)
def lint_javascript(context: Context):
"""
Tests javascript for code and style errors
"""
args = ['--format', 'stylish']
if context.verbosity == 0:
args.append('--quiet')
if not context.use_colour:
args.append('--no-color')
args.append(context.app.javascript_source_path)
return context.node_tool('eslint', *args)
@tasks.register('node_dependencies', 'lint_config', hidden=True)
def lint_stylesheets(context: Context):
"""
Tests stylesheets for code and style errors
"""
args = ['--format', 'stylish', '--syntax', 'scss']
if context.verbosity > 1:
args.append('--verbose')
args.append(os.path.join(context.app.scss_source_path, '**', '*.scss'))
return context.node_tool('sass-lint', *args)
@tasks.register('lint_javascript', 'lint_stylesheets', hidden=True)
def lint(_: Context):
"""
Tests javascript and stylesheets for code and style errors
"""
@tasks.register('create_build_paths', hidden=True)
def additional_assets(context: Context):
"""
Collects assets from GOV.UK frontend toolkit
"""
rsync_flags = '-avz' if context.verbosity == 2 else '-az'
for path in context.app.additional_asset_paths:
context.shell(f'rsync {rsync_flags} {path}/ {context.app.asset_build_path}/')
@tasks.register('create_build_paths', hidden=True)
def take_screenshots(context: Context):
"""
Takes screenshots if special test cases are defined
"""
context.management_command('takescreenshots', interactive=False)
collect_static_files(context)
@tasks.register('create_build_paths', hidden=True)
def collect_static_files(context: Context):
"""
Collects assets for serving from single root
"""
context.management_command('collectstatic', interactive=False)
@tasks.register(hidden=True)
def precompile_python_code(context: Context):
"""
Pre-compiles python modules
"""
from compileall import compile_dir
kwargs = {}
if context.verbosity < 2:
kwargs['quiet'] = True
compile_dir(context.app.django_app_name, **kwargs)
@tasks.register('python_dependencies')
def make_messages(context: Context, javascript=False, fuzzy=False):
"""
Collects text into translation source files
"""
kwargs = {
'all': True,
'keep_pot': True,
'no_wrap': True,
}
if fuzzy:
kwargs['allow_fuzzy'] = True
if javascript:
kwargs.update(domain='djangojs', ignore_patterns=['app.js'])
with in_dir(context.app.django_app_name):
return context.management_command('makemessages', **kwargs)
@tasks.register('python_dependencies', hidden=True)
def compile_messages(context: Context):
"""
Compiles translation messages
"""
with in_dir(context.app.django_app_name):
return context.management_command('compilemessages')
@tasks.register('python_dependencies')
def translations(context: Context, pull=False, push=False):
"""
Synchronises translations with transifex.com
"""
if not (pull or push):
raise TaskError('Specify whether to push or pull translations')
if pull:
context.shell('tx', 'pull')
make_messages(context, javascript=False)
make_messages(context, javascript=True)
if push:
context.shell('tx', 'push', '--source', '--no-interactive')
@tasks.register()
def clean(context: Context, delete_dependencies: bool = False):
"""
Deletes build outputs
"""
paths = [
context.app.asset_build_path, context.app.collected_assets_path,
'docker-compose.yml', 'package.json', 'package-lock.json', 'webpack.config.js',
]
context.shell(f'rm -rf {paths_for_shell(paths)}')
context.shell(f'find {context.app.django_app_name} -name "*.pyc" -or -name __pycache__ -delete')
if delete_dependencies:
context.info(f'Cleaning app {context.app.name} dependencies')
paths = ['node_modules', 'venv']
context.shell(f'rm -rf {paths_for_shell(paths)}')
|
StarcoderdataPython
|
3550696
|
<reponame>octoenergy/oliver-twist
from dataclasses import dataclass
from enum import Enum
from json import JSONEncoder
from typing import List
class MyEncoder(JSONEncoder):
def default(self, o):
return o.__dict__
@dataclass
class ReportStatus(str, Enum):
PASSED = "passed"
SKIPPED = "skipped"
ERRORED = "errored"
WARNED = "warned"
@dataclass
class ReportRule:
def __init__(
self,
id: str,
name: str,
status: ReportStatus,
):
self.id = id
self.name = name
self.status = status
@dataclass
class ReportMetrics:
def __init__(
self,
name: str,
score: int,
) -> None:
self.name = name
self.score = score
self.pretty_name = name.replace("_", " ").capitalize()
@dataclass
class ReportSummary:
def __init__(
self,
passed: int,
skipped: int,
errored: int,
warned: int,
) -> None:
self.passed = passed
self.skipped = skipped
self.errored = errored
self.warned = warned
@dataclass
class ReportModel:
def __init__(
self,
model_key: str,
file_path: str,
model_name: str,
metrics: List[ReportMetrics],
rules: List[ReportRule],
summary: ReportSummary,
) -> None:
self.model_key = model_key
self.file_path = file_path
self.model_name = model_name
self.metrics = metrics
self.rules = rules
self.summary = summary
@dataclass
class Report:
def __init__(self, summary: ReportSummary, models: List[ReportModel]) -> None:
self.summary = summary
self.models = models
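# Added usage sketch (hedged): shows how MyEncoder serialises the nested report
# dataclasses by falling back to __dict__. All field values below are illustrative
# placeholders rather than data from the original project.
if __name__ == "__main__":
    import json
    summary = ReportSummary(passed=3, skipped=1, errored=0, warned=0)
    model = ReportModel(
        model_key="model.example",
        file_path="models/example.sql",
        model_name="example",
        metrics=[ReportMetrics(name="test_coverage", score=80)],
        rules=[ReportRule(id="R001", name="example rule", status=ReportStatus.PASSED)],
        summary=summary,
    )
    print(json.dumps(Report(summary=summary, models=[model]), cls=MyEncoder, indent=2))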
|
StarcoderdataPython
|
1759103
|
<filename>AI/csv_data.py
# This script provides a way for the different models to access the training and testing data
# Created by: <NAME>(KCL)
import pandas as pd
import torch
import math
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import MinMaxScaler
class Data():
def __init__(self):
self.data = pd.read_csv("csv/lineMergeDataWithHeading.csv")
self.data.drop(['recommendation', 'recommendedAcceleration'],axis=1,inplace=True)
self.data = self.data[::-1]
self.data.heading = (self.data.heading + 180) % 360
self.train_data, self.test_data = train_test_split(self.data, test_size=0.2, shuffle=False)
self.scaler = MinMaxScaler(feature_range =(-1, 1))
def get_data(self):
return self.data
def get_RFC_dataset(self):
return pd.read_csv("csv/lineMergeDataWithHeading.csv")
def get_training_data_tensor(self):
featuresTrain = torch.zeros(round(self.train_data.shape[0]/70),70,20)
batch = torch.zeros(70,20)
counter = 0
index = 0
for idx in range(self.train_data.shape[0]):
if idx % 70 != 0 or idx == 0:
batch[index] = torch.Tensor(self.train_data.values[idx])
index = index + 1
else:
try:
featuresTrain[counter] = batch
counter = counter + 1
index = 0
batch = torch.zeros(70,20)
except:
pass
return featuresTrain
def get_testing_data_tensor(self):
featuresTest = torch.zeros(round(self.test_data.shape[0]/70),70,20)
batch = torch.zeros(70,20)
counter = 0
index = 0
for idx in range(self.test_data.shape[0]):
if idx % 70 != 0 or idx == 0:
batch[index] = torch.Tensor(self.test_data.values[idx])
index = index + 1
else:
try:
featuresTest[counter] = batch
counter = counter + 1
index = 0
batch = torch.zeros(70,20)
except:
pass
return featuresTest
def get_training_lstm_data(self):
self.train_data.drop(['recommendation', 'recommendedAcceleration','widthMerging','lenghtMerging','spacingMerging','lengthPreceding','widthPreceding','widthFollowing','spacingFollowing'], axis=1, inplace=True)
# scaled = self.scaler.fit_transform(self.train_data)
scaled = self.train_data
featuresTrain = torch.zeros(math.ceil(scaled.shape[0]/2),1,13)
targetsTrain = torch.zeros(math.ceil(scaled.shape[0]/2),1,13)
f_counter = 0
t_counter = 0
for idx in range(scaled.shape[0]):
if idx % 2 != 0:
featuresTrain[f_counter] = torch.Tensor(scaled.values[idx])
f_counter = f_counter + 1
else:
targetsTrain[t_counter] = torch.Tensor(scaled.values[idx])
t_counter = t_counter + 1
return featuresTrain,targetsTrain
def get_testing_lstm_data(self):
self.test_data.drop(['recommendation', 'recommendedAcceleration','widthMerging','lenghtMerging','spacingMerging','lengthPreceding','widthPreceding','widthFollowing','spacingFollowing'], axis=1, inplace=True)
featuresTest = torch.zeros(math.ceil(self.train_data.shape[0]/2),1,13)
targetsTest = torch.zeros(math.ceil(self.train_data.shape[0]/2),1,13)
f_counter = 0
t_counter = 0
for idx in range(self.train_data.shape[0]):
if idx % 2 != 0:
featuresTest[f_counter] = torch.Tensor(self.train_data.values[idx])
f_counter = f_counter + 1
else:
targetsTest[t_counter] = torch.Tensor(self.train_data.values[idx])
t_counter = t_counter + 1
return featuresTest,targetsTest
|
StarcoderdataPython
|
4933445
|
<reponame>hknerdgn/theanets<gh_stars>1-10
'''This package groups together a bunch of theano code for neural nets.'''
from .dataset import Dataset
from .main import Experiment
from .feedforward import Network, Autoencoder, Regressor, Classifier
from . import flags
from . import layers
from . import recurrent
from . import maskedrecurrent
from . import trainer
|
StarcoderdataPython
|
9717723
|
from decimal import Decimal
import requests
from cryptoportfolio.interfaces.base import Address
class F2PoolWallet(Address):
decimal_places = 18
symbol = None
    f2pool_currencies_mapping = {
'bitcoin': "BTC",
'litecoin': "LTC",
'etc': "ETC",
'eth': "ETH",
'zec': "ZEC",
'sc': "SC",
'monero': "XMR",
'dash': "DASH",
}
def __init__(self, currency, user, **kwargs):
        assert currency in self.f2pool_currencies_mapping.keys()
        self.symbol = self.f2pool_currencies_mapping[currency]
self.currency = currency
self.user = user
super(F2PoolWallet, self).__init__(**kwargs)
def _get_addr_coins_and_tokens_balance(self):
result = requests.get("http://api.f2pool.com/%s/%s" % (self.currency, self.user)).json()
return [
(self.symbol, Decimal(result['balance']))
]
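# Added usage sketch (hedged): the wallet queries the public f2pool API for a
# worker's balance. "example_account" is a placeholder, and any keyword arguments
# required by the Address base class are assumed to be optional here.
#
#     wallet = F2PoolWallet("eth", user="example_account")
#     print(wallet._get_addr_coins_and_tokens_balance())   # e.g. [("ETH", Decimal("0.123"))]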
|
StarcoderdataPython
|
8103819
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Power by <NAME> 2020-10-24 13:53:35
import os
import cv2
import torch
import numpy as np
from pathlib import Path
from utils import batch_PSNR, batch_SSIM
from skimage import img_as_float32, img_as_ubyte
from networks.derain_net import DerainNet
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(1)
# Build the network
print('Loading from {:s}'.format(str(Path('./model_states/derainer_rho05.pt'))))
model = DerainNet(n_features=32, n_resblocks=8).cuda()
state_dict = torch.load(str(Path('./model_states/derainer_rho05.pt')))
model.load_state_dict(state_dict)
model.eval()
# load data
base_data_path = Path('./testsets/synthetic_NTURain')
rain_types = sorted([x.stem.split('_')[0] for x in base_data_path.glob('*_Rain')])
truncate = 24
psnr_all_y = []
ssim_all_y = []
for current_type in rain_types:
rain_dir = base_data_path / (current_type + '_Rain')
im_rain_path_list = sorted([x for x in rain_dir.glob('*.jpg')])
for ii, im_rain_path in enumerate(im_rain_path_list):
im_gt_path = base_data_path / (current_type+'_GT') / im_rain_path.name
im_rain = img_as_float32(cv2.imread(str(im_rain_path), flags=cv2.IMREAD_COLOR)[:, :, ::-1])
im_gt = img_as_float32(cv2.imread(str(im_gt_path), flags=cv2.IMREAD_COLOR)[:, :, ::-1])
if ii == 0:
rain_data = torch.from_numpy(im_rain.transpose([2,0,1])).unsqueeze(0).unsqueeze(2) # 1 x c x 1 x h x w
gt_data = torch.from_numpy(im_gt.transpose([2,0,1])[np.newaxis,]) # 1 x c x h x w
else:
temp = torch.from_numpy(im_rain.transpose([2,0,1])).unsqueeze(0).unsqueeze(2) # 1 x c x 1 x h x w
rain_data = torch.cat((rain_data, temp), dim=2) # 1 x c x n x h x w
temp = torch.from_numpy(im_gt.transpose([2,0,1])[np.newaxis, ])
gt_data = torch.cat((gt_data, temp), dim=0) # n x c x h x w
num_frame = rain_data.shape[2]
inds_start = list(range(0, num_frame, truncate))
inds_end = list(range(truncate, num_frame, truncate)) + [num_frame,]
assert len(inds_start) == len(inds_end)
inds_ext_start = [0,] + [x-2 for x in inds_start[1:]]
inds_ext_end = [x+2 for x in inds_end[:-1]] + [num_frame,]
derain_data = torch.zeros_like(rain_data)
with torch.set_grad_enabled(False):
for ii in range(len(inds_start)):
start_ext, end_ext, start, end = [x[ii] for x in [inds_ext_start, inds_ext_end, inds_start, inds_end]]
inputs = rain_data[:, :, start_ext:end_ext, :, :].cuda()
out_temp = model(inputs)
if ii == 0:
derain_data[0, :, start:end, ] = out_temp[:, :, :-2,].cpu().clamp_(0.0, 1.0)
elif (ii+1) == len(inds_start):
derain_data[0, :, start:end, ] = out_temp[:, :, 2:,].cpu().clamp_(0.0, 1.0)
else:
derain_data[0, :, start:end, ] = out_temp[:, :, 2:-2,].cpu().clamp_(0.0, 1.0)
derain_data = derain_data[:, :, 2:-2,].squeeze(0).permute([1, 0, 2, 3])
gt_data = gt_data[2:-2,]
psnrm_y = batch_PSNR(derain_data, gt_data, ycbcr=True)
psnr_all_y.append(psnrm_y)
ssimm_y = batch_SSIM(derain_data, gt_data, ycbcr=True)
ssim_all_y.append(ssimm_y)
print('Type:{:s}, PSNR:{:5.2f}, SSIM:{:6.4f}'.format(current_type, psnrm_y, ssimm_y))
mean_psnr_y = sum(psnr_all_y) / len(rain_types)
mean_ssim_y = sum(ssim_all_y) / len(rain_types)
print('MPSNR:{:5.2f}, MSSIM:{:6.4f}'.format(mean_psnr_y, mean_ssim_y))
|
StarcoderdataPython
|
5156612
|
import FWCore.ParameterSet.Config as cms
from RecoVertex.Configuration.RecoVertex_cff import unsortedOfflinePrimaryVertices, trackWithVertexRefSelector, trackRefsForJets, sortedPrimaryVertices, offlinePrimaryVertices, offlinePrimaryVerticesWithBS,vertexrecoTask
from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA2D_vectParameters
unsortedOfflinePrimaryVertices4D = unsortedOfflinePrimaryVertices.clone(
TkClusParameters = DA2D_vectParameters,
TrackTimesLabel = cms.InputTag("trackTimeValueMapProducer","generalTracksConfigurableFlatResolutionModel"),
TrackTimeResosLabel = cms.InputTag("trackTimeValueMapProducer","generalTracksConfigurableFlatResolutionModelResolution"),
)
trackWithVertexRefSelectorBeforeSorting4D = trackWithVertexRefSelector.clone(
vertexTag = "unsortedOfflinePrimaryVertices4D",
ptMax = 9e99,
ptErrorCut = 9e99
)
trackRefsForJetsBeforeSorting4D = trackRefsForJets.clone(
src = "trackWithVertexRefSelectorBeforeSorting4D"
)
offlinePrimaryVertices4D = sortedPrimaryVertices.clone(
vertices = "unsortedOfflinePrimaryVertices4D",
particles = "trackRefsForJetsBeforeSorting4D",
trackTimeTag = "trackTimeValueMapProducer:generalTracksConfigurableFlatResolutionModel",
trackTimeResoTag = "trackTimeValueMapProducer:generalTracksConfigurableFlatResolutionModelResolution",
assignment = dict(useTiming = True)
)
offlinePrimaryVertices4DWithBS = offlinePrimaryVertices4D.clone(
vertices = "unsortedOfflinePrimaryVertices4D:WithBS"
)
unsortedOfflinePrimaryVertices4DnoPID = unsortedOfflinePrimaryVertices4D.clone(
TrackTimesLabel = "trackExtenderWithMTD:generalTrackt0",
TrackTimeResosLabel = "trackExtenderWithMTD:generalTracksigmat0"
)
trackWithVertexRefSelectorBeforeSorting4DnoPID = trackWithVertexRefSelector.clone(
vertexTag = "unsortedOfflinePrimaryVertices4DnoPID",
ptMax = 9e99,
ptErrorCut = 9e99
)
trackRefsForJetsBeforeSorting4DnoPID = trackRefsForJets.clone(
src = "trackWithVertexRefSelectorBeforeSorting4DnoPID"
)
offlinePrimaryVertices4DnoPID = offlinePrimaryVertices4D.clone(
vertices = "unsortedOfflinePrimaryVertices4DnoPID",
particles = "trackRefsForJetsBeforeSorting4DnoPID",
trackTimeTag = "trackExtenderWithMTD:generalTrackt0",
trackTimeResoTag = "trackExtenderWithMTD:generalTracksigmat0"
)
offlinePrimaryVertices4DnoPIDWithBS=offlinePrimaryVertices4DnoPID.clone(
vertices = "unsortedOfflinePrimaryVertices4DnoPID:WithBS"
)
unsortedOfflinePrimaryVertices4DwithPID = unsortedOfflinePrimaryVertices4D.clone(
TrackTimesLabel = "tofPID4DnoPID:t0safe",
TrackTimeResosLabel = "tofPID4DnoPID:sigmat0safe"
)
trackWithVertexRefSelectorBeforeSorting4DwithPID = trackWithVertexRefSelector.clone(
vertexTag = "unsortedOfflinePrimaryVertices4DwithPID",
ptMax = 9e99,
ptErrorCut = 9e99
)
trackRefsForJetsBeforeSorting4DwithPID = trackRefsForJets.clone(
src = "trackWithVertexRefSelectorBeforeSorting4DwithPID"
)
offlinePrimaryVertices4DwithPID=offlinePrimaryVertices4D.clone(
vertices = "unsortedOfflinePrimaryVertices4DwithPID",
particles = "trackRefsForJetsBeforeSorting4DwithPID",
trackTimeTag = "tofPID4DnoPID:t0safe",
trackTimeResoTag = "tofPID4DnoPID:sigmat0safe"
)
offlinePrimaryVertices4DwithPIDWithBS = offlinePrimaryVertices4DwithPID.clone(
vertices = "unsortedOfflinePrimaryVertices4DwithPID:WithBS"
)
from SimTracker.TrackerHitAssociation.tpClusterProducer_cfi import tpClusterProducer
from SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi import quickTrackAssociatorByHits
from SimTracker.TrackAssociation.trackTimeValueMapProducer_cfi import trackTimeValueMapProducer
from RecoMTD.TimingIDTools.tofPIDProducer_cfi import tofPIDProducer
tofPID4DnoPID=tofPIDProducer.clone(vtxsSrc='unsortedOfflinePrimaryVertices4DnoPID')
tofPID=tofPIDProducer.clone()
from Configuration.Eras.Modifier_phase2_timing_layer_cff import phase2_timing_layer
phase2_timing_layer.toModify(tofPID, vtxsSrc='unsortedOfflinePrimaryVertices4D')
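# Added note (hedged): each .clone(...) above copies the referenced producer or
# selector configuration and overrides only the listed parameters, so the 4D,
# 4DnoPID and 4DwithPID vertex collections differ mainly in which track-time
# value maps they consume.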
|
StarcoderdataPython
|
4815950
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# https://github.com/jvanvugt/pytorch-unet
import json
import warnings
warnings.filterwarnings("ignore")
import argparse
import traceback
from collections import OrderedDict
import torch
from torch import nn
from torch.utils.data import DataLoader
from albumentations import *
from albumentations.pytorch import ToTensor
from unet import UNet
import cv2
import numpy as np
import glob
import random, sys
import os
import umap
from QA_utils import get_torch_device
# +
class Dataset(object):
def __init__(self, fnames,patch_size,transform=None):
print('Initializing dataset:')
self.fnames=fnames
self.patch_size=patch_size
self.transform=transform
def __getitem__(self, index):
fname=self.fnames[index]
image = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB)
patch = image
if self.transform is not None:
patch = self.transform(image=image)['image']
#have fixed using set seed of random package
return patch, fname
def __len__(self):
return len(self.fnames)
try:
print("USER: Starting to embed patches")
parser = argparse.ArgumentParser(description='make embedding using umap')
parser.add_argument('project_name', type=str)
parser.add_argument('-p', '--patchsize', help="patchsize, default 256", default=256, type=int)
parser.add_argument('-b', '--batchsize', help="", default=32, type=int)
parser.add_argument('-m', '--numimgs', help="Number of images to in embedding, default is-1, implying all", default=-1, type=int)
parser.add_argument('-i', '--gpuid', help="GPU ID, set to -2 to use CPU", default=0, type=int)
parser.add_argument('-o', '--outdir', help="", default="./", type=str)
args = parser.parse_args()
print(f"args: {args}")
project_name = args.project_name
patch_size = args.patchsize
batch_size = args.batchsize
num_imgs = args.numimgs
modelid = args.outdir.split("/")[4]
model_name = f"{args.outdir}/best_model.pth"
if(not os.path.exists(model_name)):
print(f"Can't find model {model_name}, exiting")
sys.exit()
# -
# get the device to run deep learning
print('Getting device:', flush=True)
device = get_torch_device(args.gpuid)
print('Loading checkpoint:', flush=True)
checkpoint = torch.load(model_name, map_location=lambda storage, loc: storage) #load checkpoint to CPU and then put to device https://discuss.pytorch.org/t/saving-and-loading-torch-models-on-2-machines-with-different-number-of-gpu-devices/6666
print('Creating model:', flush=True)
model = UNet(n_classes=checkpoint["n_classes"], in_channels=checkpoint["in_channels"],
padding=checkpoint["padding"], depth=checkpoint["depth"], wf=checkpoint["wf"],
up_mode=checkpoint["up_mode"], batch_norm=checkpoint["batch_norm"]).to(device)
model.load_state_dict(checkpoint["model_dict"])
print(f"total params: \t{sum([np.prod(p.size()) for p in model.parameters()])}")
# -
# +
rois=glob.glob(f"./projects/{project_name}/roi/*.png")
patches=glob.glob(f"./projects/{project_name}/patches/*.png")
#if we don't want entire list, subset it randomly. note. we always take all rois
if (num_imgs !=-1):
maximgs= min(num_imgs,len(patches))
patches=random.sample(patches,maximgs)
patches=sorted(patches,key=os.path.getctime) # needed for colors in the front end to appear consistent
# +
img_transform = Compose([
RandomCrop(height=patch_size, width=patch_size, always_apply=True),
PadIfNeeded(min_height=patch_size,min_width=patch_size),
ToTensor()
])
data_train=Dataset(patches+rois, patch_size=patch_size,transform=img_transform) #img_transform)
data_train_loader = DataLoader(data_train, batch_size=batch_size, shuffle=False, num_workers=0, pin_memory=True)
# -
model.up_path=nn.ModuleList()
model.last=nn.Sequential()
# +
all_imgs=[]
#all_preds=[]
all_preds_full=[]
all_fnames=[]
niter=len(data_train_loader)
for ii, (X, fname) in enumerate(data_train_loader):
print(f"PROGRESS: {ii+1}/{niter} | | Feature Gen")
print('Sending data to device:')
X = X.to(device)
prediction = model(X)
pred=prediction.detach().cpu().numpy()
all_preds_full.append(pred)
# pred=pred.reshape(X.shape[0],-1)
# all_preds.append(pred)
all_fnames.extend(fname)
#all_preds=np.vstack(all_preds)
all_preds_full=np.vstack(all_preds_full)
# -
all_preds_full=all_preds_full.reshape(all_preds_full.shape[0],all_preds_full.shape[1],-1)
# +
features_hists=[]
for i in range(all_preds_full.shape[1]):
print(f"PROGRESS: {i+1}/{all_preds_full.shape[1]} | | Histogram Gen")
print(f'Processing histogram {i}:', flush=True)
filt=all_preds_full[:,i,:]
mean=filt.mean()
std=filt.std()
hists=np.apply_along_axis(lambda a: np.histogram(a, bins=10, range=(mean-std,mean+std))[0], 1, filt)
features_hists.append(hists)
features_hists=np.hstack(features_hists)
# -
print('Fitting umap to histogram:', flush=True)
embedding = umap.UMAP(n_neighbors=100,min_dist=0.0).fit_transform(features_hists)
# +
all_fnames_base=[]
for fname in all_fnames:
fname=os.path.basename(fname).replace("_roi.png",".png")
fname= fname[0:fname[0:fname.rfind("_",)].rfind("_")]
all_fnames_base.append(fname)
id_s = {c: i for i, c in enumerate(OrderedDict.fromkeys(all_fnames_base))}
li = [id_s[c] for c in all_fnames_base]
# -
print('Saving to embedding.csv:')
f = open(f"{args.outdir}/embedding.csv", "w")
f.write("#filename,group,x,y\n")
for fname,group,emb in zip(all_fnames,li,embedding):
f.write(f"{fname},{group},{emb[0]},{emb[1]}\n")
f.close()
print("USER: Done embedding patches", flush=True)
print(f"RETVAL: {json.dumps({'project_name': project_name,'modelid':modelid})}", flush=True)
except:
track = traceback.format_exc()
track = track.replace("\n","\t")
print(f"ERROR: {track}", flush=True)
sys.exit(1)
|
StarcoderdataPython
|
11221614
|
# https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
low, high = 0, len(nums)-1
first = self.findFirstIndex(nums, low, high, target)
second = self.findLastIndex(nums, low, high, target)
return [first, second]
def findFirstIndex(self, nums, low, high, target):
res = -1
while low <= high:
mid = (low+high)//2
if nums[mid] == target:
res = mid
high = mid-1
elif nums[mid] > target:
high = mid-1
else:
low = mid + 1
return res
def findLastIndex(self, nums, low, high, target):
res = -1
while low <= high:
mid = (low+high)//2
if nums[mid] == target:
res = mid
low = mid+1
elif nums[mid] > target:
high = mid-1
else:
low = mid + 1
return res
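# Added usage sketch (hedged): quick check of the two binary searches; expected
# outputs are given in the trailing comments.
if __name__ == "__main__":
    print(Solution().searchRange([5, 7, 7, 8, 8, 10], 8))   # [3, 4]
    print(Solution().searchRange([5, 7, 7, 8, 8, 10], 6))   # [-1, -1]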
|
StarcoderdataPython
|
1844567
|
<reponame>zj-zhang/inDelphi-model
"""
Run each component of inDephi to understand its inputs/outputs
"""
# zzjfrank, 2020-10-11
from inDelphi import init_model, predict
# the example sequence in inDelphi webserver
left = 'GCAGTCAGTGCAGTAGAGGATGTGTCGCTCTCCCGTACGGCGTGAAAATGACTAGCAAAG'
right = 'TTGGGGCCTTTTTGGAAGACCTAGAGCCTTAGGCCACGGTACACAATGGTGTCCTGCATA'
seq = left + right
cutsite = len(left)
pred_df, stats = predict(seq, cutsite)
|
StarcoderdataPython
|
11379481
|
<gh_stars>0
"""
Write a Python program to read first n lines of a file.
"""
from itertools import islice
def read_line_numbers(file_name, line_no):
with open(file_name) as file:
for line in islice(file, line_no):
print(line)
read_line_numbers("main.txt", 2)
|
StarcoderdataPython
|
20473
|
import tensorflow as tf
import numpy as np
def euclidean_dist(x, y):
return np.linalg.norm(x - y)
def limit_gpu():
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
tf.config.set_logical_device_configuration(
gpus[0],
[tf.config.LogicalDeviceConfiguration(memory_limit=4000)])
logical_gpus = tf.config.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
print(e)
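# Added usage sketch (hedged): cap GPU memory before building a model, then
# exercise the distance helper; the vectors below are illustrative only.
if __name__ == "__main__":
    limit_gpu()
    print(euclidean_dist(np.array([0.0, 3.0]), np.array([4.0, 0.0])))   # -> 5.0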
|
StarcoderdataPython
|
1866443
|
<filename>ledger/basket/admin.py<gh_stars>10-100
from oscar.apps.basket.admin import * # noqa
|
StarcoderdataPython
|
5117787
|
"""Test script to send control messages to a MQTT topic."""
import datetime
import json
import logging
import logging.config
import os
import time
from picamera_mqtt.mqtt_clients import AsyncioClient, message_string_encoding
from picamera_mqtt.protocol import (
connect_topic, control_topic, deployment_topic, imaging_topic, params_topic
)
from picamera_mqtt.util import files
# Set up logging
logger = logging.getLogger(__name__)
payload_log_max_len = 400
# Configure messaging
topics = {
control_topic: {
'qos': 2,
'local_namespace': True,
'subscribe': False,
'log': False
},
imaging_topic: {
'qos': 2,
'local_namespace': True,
'subscribe': True,
'log': False
},
params_topic: {
'qos': 2,
'local_namespace': True,
'subscribe': True,
'log': True
},
deployment_topic: {
'qos': 2,
'local_namespace': True,
'subscribe': True,
'log': True
},
connect_topic: {
'qos': 2,
'local_namespace': False,
'subscribe': True,
'log': True
}
}
class Host(AsyncioClient):
"""Sends imaging control messages to broker and saves received images."""
def __init__(self, *args, capture_dir='', camera_params={}, **kwargs):
super().__init__(*args, **kwargs)
self.camera_params = camera_params
self.image_ids = {target_name: 1 for target_name in self.target_names}
self.capture_dir = capture_dir
def add_topic_handlers(self):
"""Add any topic handler message callbacks as needed."""
for topic_path in self.get_topic_paths(params_topic):
self.client.message_callback_add(topic_path, self.on_params_topic)
for topic_path in self.get_topic_paths(imaging_topic):
self.client.message_callback_add(topic_path, self.on_imaging_topic)
def on_params_topic(self, client, userdata, msg):
payload = msg.payload.decode(message_string_encoding)
target_name = msg.topic.split('/')[0]
logger.info(
'Received camera params response from target {}: {}'
.format(target_name, payload)
)
def on_imaging_topic(self, client, userdata, msg):
payload = msg.payload.decode(message_string_encoding)
receive_time = time.time()
receive_datetime = str(datetime.datetime.now())
try:
capture = json.loads(payload)
except json.JSONDecodeError:
payload_truncated = (
payload[:payload_log_max_len]
+ (payload[payload_log_max_len:] and '...')
)
logger.error('Malformed image: {}'.format(payload_truncated))
return
capture['metadata']['receive_time'] = {
'time': receive_time,
'datetime': receive_datetime
}
self.save_captured_image(capture)
capture.pop('image', None)
self.save_captured_metadata(capture)
capture['camera_params'] = '...'
logger.debug('Received image on topic {}: {}'.format(
msg.topic, json.dumps(capture)
))
def build_capture_filename(self, capture):
return '{} {} {}'.format(
capture['metadata']['client_name'],
capture['metadata']['image_id'],
capture['metadata']['capture_time']['datetime']
)
def save_captured_image(self, capture):
files.ensure_path(self.capture_dir)
capture_filename = self.build_capture_filename(capture)
image_filename = '{}.{}'.format(capture_filename, capture['format'])
image_base64 = capture['image']
image_path = os.path.join(self.capture_dir, image_filename)
files.b64_string_bytes_save(image_base64, image_path)
logger.info('Saved image to: {}'.format(image_path))
def save_captured_metadata(self, capture):
files.ensure_path(self.capture_dir)
capture_filename = self.build_capture_filename(capture)
image_filename = '{}.{}'.format(capture_filename, capture['format'])
capture['image'] = image_filename
metadata_path = os.path.join(
self.capture_dir, '{}.json'.format(capture_filename)
)
files.json_dump(capture, metadata_path)
logger.info('Saved metadata to: {}'.format(metadata_path))
def request_image(
self, target_name, format='jpeg',
capture_format_params={'quality': 100},
transport_format_params={'quality': 80},
extra_metadata={}
):
if target_name not in self.target_names:
logger.error(
'Unknown camera client target: {}'.format(target_name)
)
return
acquisition_obj = {
'action': 'acquire_image',
'format': format,
'capture_format_params': capture_format_params,
'transport_format_params': transport_format_params,
'metadata': {
'client_name': target_name,
'image_id': self.image_ids[target_name],
'command_time': {
'time': time.time(),
'datetime': str(datetime.datetime.now())
},
}
}
for (key, value) in extra_metadata.items():
acquisition_obj['metadata'][key] = value
acquisition_message = json.dumps(acquisition_obj)
self.image_ids[target_name] += 1
logger.info(
'Sending acquisition message to topic {}: {}'.format(
self.get_topic_paths(
control_topic, local_namespace=target_name
)[0], acquisition_message
)
)
return self.publish_message(
control_topic, acquisition_message, local_namespace=target_name
)
def set_params(self, target_name, **params):
update_obj = {'action': 'set_params'}
for (key, value) in params.items():
if value is not None:
update_obj[key] = value
logger.info('Setting {} camera parameters to: {}'.format(
target_name, update_obj
))
update_message = json.dumps(update_obj)
return self.publish_message(
control_topic, update_message, local_namespace=target_name
)
def set_params_from_stored(self, target_name):
return self.set_params(target_name, **self.camera_params[target_name])
def set_roi(self, target_name, zoom=None):
self.set_params(target_name, roi_zoom=zoom)
def set_shutter_speed(self, target_name, shutter_speed=None):
self.set_params(target_name, shutter_speed=shutter_speed)
def set_iso(self, target_name, iso=None):
self.set_params(target_name, iso=iso)
def set_resolution(self, target_name, width=None, height=None):
self.set_params(
target_name, resolution_width=width, resolution_height=height
)
def set_awb_gains(self, target_name, red=None, blue=None):
self.set_params(target_name, awb_gain_red=red, awb_gain_blue=blue)
|
StarcoderdataPython
|
3413069
|
<gh_stars>0
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import re
from copy import deepcopy
from operator import itemgetter
from math import sqrt
import ROOT
from PyAnalysisTools.base import _logger, InvalidInputError
import PyAnalysisTools.PlottingUtils.Formatting as fm
import PyAnalysisTools.PlottingUtils.PlottingTools as pt
from PyAnalysisTools.PlottingUtils.PlotConfig import PlotConfig, get_default_color_scheme
def consistency_check_bins(obj1, obj2):
try:
return obj1.GetNbinsX() == obj2.GetNbinsX()
except AttributeError:
_logger.error('Try comparing no. of bins, but no histogram provided')
raise InvalidInputError
def calculate_significance(signal, background):
"""
Calculate significance as s/sqrt(b)
:param signal: signal yield
:param background: background yield
:return: significance (0 if background=0)
"""
try:
return float(signal) / sqrt(float(background))
except (ZeroDivisionError, ValueError):
return 0.
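# Added note (hedged): a quick worked example of the definition above,
# e.g. calculate_significance(25, 100) == 25 / sqrt(100) == 2.5, while a zero or
# invalid background yield falls back to 0. instead of raising.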
def get_significance(signal, background, plot_config, canvas=None, upper_cut=False):
"""
Calculate significance for cutting on some variable
:param signal: signal histogram
:param background: background histogram
:param plot_config:
:param canvas:
:param upper_cut:
:return:
"""
significance_hist = signal.Clone("significance")
if not consistency_check_bins(signal, background):
_logger.error("Signal and background have different binnings.")
raise InvalidInputError("Inconsistent binning")
for ibin in range(signal.GetNbinsX() + 1):
try:
if not upper_cut:
significance_hist.SetBinContent(ibin, calculate_significance(signal.Integral(-1, ibin),
background.Integral(-1, ibin)))
else:
significance_hist.SetBinContent(ibin, calculate_significance(signal.Integral(ibin, -1),
background.Integral(ibin, -1)))
except ValueError:
pass
fm.set_title_y(significance_hist, "S/#sqrt{B}")
if canvas is None:
canvas = pt.plot_obj(significance_hist, plot_config)
else:
pt.add_object_to_canvas(canvas, significance_hist, plot_config)
return canvas
def get_statistical_uncertainty_hist(hists):
"""
Sum all histograms in hists and rename as stat.unc for statistical uncertainty overlay
"""
if len(hists) == 0:
return None
statistical_uncertainty_hist = hists[0].Clone("stat.unc")
for hist in hists[1:]:
statistical_uncertainty_hist.Add(hist)
return statistical_uncertainty_hist
def get_statistical_uncertainty_from_stack(stack):
"""
Retrieve total statistical uncertainty histogram from THStack
:param stack: stack plots
:type stack: ROOT.THStack
:return: stat. uncertainty histogram
:rtype: TH1F
"""
return get_statistical_uncertainty_hist([h for h in stack.GetHists()])
def get_statistical_uncertainty_ratio(stat_unc_hist):
try:
stat_unc_hist_ratio = stat_unc_hist.Clone("stat.unc.ratio")
for b in range(0, stat_unc_hist.GetNbinsX() + 1):
stat_unc_hist_ratio.SetBinContent(b, 1.)
if stat_unc_hist.GetBinContent(b) > 0.:
stat_unc_hist_ratio.SetBinError(b, old_div(stat_unc_hist.GetBinError(b),
stat_unc_hist.GetBinContent(b)))
else:
stat_unc_hist_ratio.SetBinError(b, 0.)
stat_unc_hist_ratio.SetMarkerStyle(20)
stat_unc_hist_ratio.SetMarkerSize(0)
except AttributeError:
_logger.error('Stat. uncertainty input cannot be cloned. Likely invalid input {:s}'.format(str(stat_unc_hist)))
return None
return stat_unc_hist_ratio
def get_single_relative_systematics_ratio(nominal, stat_unc, systematic, color=None):
ratio_hist = nominal.Clone("ratio_{:s}".format(systematic.GetName()))
for b in range(nominal.GetNbinsX() + 1):
nominal_yield = nominal.GetBinContent(b)
if nominal_yield == 0.:
ratio_hist.SetBinContent(b, stat_unc.GetBinContent(b) - 1.)
continue
uncertainty = old_div((systematic.GetBinContent(b) - nominal_yield), nominal_yield)
uncertainty += stat_unc.GetBinError(b)
ratio_hist.SetBinContent(b, 1.)
ratio_hist.SetBinError(b, uncertainty)
ratio_hist.SetMarkerStyle(20)
ratio_hist.SetMarkerSize(0)
if color:
ratio_hist.SetMarkerColor(color)
ratio_hist.SetMarkerColorAlpha(color, 0)
return ratio_hist
def get_relative_systematics_ratio(nominal, stat_unc, systematic_category_hists):
total_per_category_hist = None
relative_syst_ratios = []
default_colors = [6, 3, 4]
for index, hist in enumerate(systematic_category_hists):
if total_per_category_hist is None:
total_per_category_hist = hist
else:
total_per_category_hist.Add(hist)
relative_syst_ratios.append(get_single_relative_systematics_ratio(nominal, stat_unc, total_per_category_hist,
color=default_colors[index]))
return relative_syst_ratios
def get_KS(reference, compare):
return reference.KolmogorovTest(compare)
def get_signal_acceptance(signal_yields, generated_events, plot_config=None):
"""
Calculate signal acceptance
:param signal_yields: process and signal yields after cut
:type signal_yields: dict
:param generated_events: generated MC statistics
:type generated_events: dict
:return: hist of signal acceptance
:rtype: TH1
"""
def make_acceptance_graph(data):
data.sort(key=itemgetter(0))
graph = ROOT.TGraph(len(data))
for i, signal in enumerate(data):
graph.SetPoint(i, signal[0], signal[1])
ROOT.SetOwnership(graph, False)
return graph
acceptance_hists = []
for process, yields in list(signal_yields.items()):
yields['yield'] /= generated_events[process]
for cut in list(signal_yields.values())[0]['cut']:
yields = [(float(re.findall(r"\d{3,4}", process)[0]), eff[eff['cut'] == cut]['yield'])
for process, eff in signal_yields.items()]
acceptance_hists.append((cut, make_acceptance_graph(yields)))
acceptance_hists[-1][-1].SetName(cut)
if plot_config is None:
plot_config = PlotConfig(name="acceptance_all_cuts", color=get_default_color_scheme(),
labels=[data[0] for data in acceptance_hists], xtitle="x-title",
ytitle="efficiency [%]", draw="Marker", lumi=-1, watermark="Internal", )
pc_log = deepcopy(plot_config)
pc_log.name += "_log"
pc_log.logy = True
pc_log.ymin = 0.1
canvas = pt.plot_objects([data[1] for data in acceptance_hists], plot_config)
fm.decorate_canvas(canvas, plot_config=plot_config)
canvas_log = pt.plot_objects([data[1] for data in acceptance_hists], pc_log)
fm.decorate_canvas(canvas_log, plot_config=pc_log)
acceptance_hists[-1][1].SetName("acceptance_final")
pc_final = deepcopy(plot_config)
pc_final.name = "acceptance_final_cuts"
canvas_final = pt.plot_graph(deepcopy(acceptance_hists[-1][1]), pc_final)
fm.decorate_canvas(canvas_final, plot_config=plot_config)
return canvas, canvas_log, canvas_final
|
StarcoderdataPython
|
303760
|
# coding: utf-8
from ._base import BaseForm
from collipa.libs.tforms import validators
from collipa.libs.tforms.fields import TextField, TextAreaField, PasswordField
from collipa.libs.tforms.validators import ValidationError
from collipa.models import User, Message
from collipa import config
from pony import orm
class MessageForm(BaseForm):
content = TextAreaField(
'内容', [
validators.Required(),
validators.Length(min=2, max=2000),
],
)
def save(self, **kargs):
data = self.data
data.update(kargs)
message = Message(**data).save()
return message
class SignupForm(BaseForm):
name = TextField(
'用户名', [
validators.Required(),
validators.Length(min=4, max=16),
validators.Regexp(
'^[a-zA-Z0-9]+$',
message='用户名只能包含英文字母和数字',
),
],
description='用户名只能包含英文字母和数字'
)
email = TextField(
'邮箱', [
validators.Required(),
validators.Length(min=4, max=30),
validators.Email(),
],
description='邮箱用于管理帐户'
)
password = PasswordField(
'密码', [
validators.Required(),
validators.Length(min=6, max=24),
],
description='密码最少 6 字节'
)
password2 = PasswordField(
'密码确认', [
validators.Required(),
validators.Length(min=6, max=24),
],
)
@orm.db_session
def validate_name(self, field):
data = field.data.lower()
if data in config.forbidden_name_list or User.get(name=data):
raise ValidationError('此用户名已注册')
@orm.db_session
def validate_email(self, field):
data = field.data.lower()
if User.get(email=data):
raise ValidationError('此邮箱已注册')
def validate_password(self, field):
if field.data != self.password2.data:
raise ValidationError('密码不匹配')
def save(self, role=None):
data = self.data
data.pop('password2')
user = User.init(**data)
if role:
user.role = role
user.save()
return user
class SigninForm(BaseForm):
account = TextField(
'邮箱', [
validators.Required(),
validators.Length(min=4, max=30),
],
)
password = PasswordField(
'密码', [
validators.Required(),
validators.Length(min=6, max=24),
]
)
# permanent = BooleanField('记住我')
@orm.db_session
def validate_password(self, field):
account = self.account.data
if '@' in account:
user = User.get(email=account)
else:
user = User.get(name=account)
if not user:
raise ValidationError('用户名或密码错误')
if user.check_password(field.data):
self.user = user
return user
raise ValidationError('用户名或密码错误')
class SettingForm(BaseForm):
@classmethod
def init(cls, user=None, **kwargs):
cls.nickname = TextField(
'昵称', [
validators.Required(),
validators.Length(min=4, max=16),
],
description='您还有 %s 次修改昵称的机会' % user.edit_nickname_count
)
cls.urlname = TextField(
'域名', [
validators.Required(),
validators.Length(min=4, max=30),
validators.Regexp(
'^[a-zA-Z0-9_]+$',
message='域名只能包含英文字母和数字',
),
],
description='您还有 %s 次修改域名的机会' % user.edit_urlname_count
)
cls.address = TextField(
'城市', [
validators.Length(min=0, max=200),
],
)
cls.website = TextField(
'网址', [
validators.Length(min=0, max=200),
],
)
cls.description = TextAreaField(
'简介', [
validators.Length(min=0, max=10000),
],
)
cls.style = TextAreaField(
'样式', [
validators.Length(min=0, max=1000),
],
)
cls.site_style = TextAreaField(
'全站样式', [
validators.Length(min=0, max=1000),
],
)
if not kwargs and user:
kwargs = {
'nickname': [user.nickname],
'urlname': [user.urlname],
'address': [user.address],
'website': [user.website],
'description': [user.description],
'style': [user.style],
'site_style': [user.site_style]
}
sf = cls(kwargs)
if user:
sf.edit_nickname_count = user.edit_nickname_count
sf.edit_urlname_count = user.edit_urlname_count
sf.user = user
return sf
def validate_nickname(self, field):
data = field.data
if data != self.user.nickname:
if self.user.edit_nickname_count < 1:
raise ValidationError('您已经没有修改昵称的机会')
def validate_urlname(self, field):
data = field.data
if data != self.user.urlname:
if self.user.edit_urlname_count < 1:
field.data = self.user.urlname
raise ValidationError('您已经没有修改域名的机会')
if data in config.forbidden_name_list or User.get(urlname=data):
raise ValidationError('此域名已经被占用')
def save(self, user=None):
data = self.data
user = user.update(data)
return user
|
StarcoderdataPython
|
8066768
|
from django.contrib import admin
from .models import News
admin.site.register(News)
|
StarcoderdataPython
|
4830863
|
<filename>lagury/client/algorithms/run_task.py
import sys
import json
import importlib
from lagury.client.models import Task
if __name__ == '__main__':
data = json.loads(sys.argv[1])
input_dirs = data['input_dirs']
output_dir = data['output_dir']
parameters = data['parameters']
class_path = parameters.pop('_class_path')
if not isinstance(class_path, str):
raise ValueError(f'Class path should be string with format: "package.module.class". Got: {class_path}')
module_path, class_name = class_path.rsplit('.', 1)
module = importlib.import_module(module_path)
task_class = getattr(module, class_name)
assert issubclass(task_class, Task)
task_instance = task_class()
task_instance(input_dirs, output_dir, parameters)
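# Added note (hedged): the script expects a single JSON argument on the command
# line, for example (the class path is a placeholder and must point at a
# lagury.client.models.Task subclass):
#   python run_task.py '{"input_dirs": ["in/"], "output_dir": "out/",
#                        "parameters": {"_class_path": "mypkg.tasks.MyTask", "alpha": 0.1}}'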
|
StarcoderdataPython
|
11341407
|
<reponame>HiroseTomoyuki/sge3<gh_stars>1-10
import random
import sge.grammar as grammar
def crossover(p1, p2):
xover_p_value = 0.5
gen_size = len(p1['genotype'])
mask = [random.random() for i in range(gen_size)]
genotype = []
for index, prob in enumerate(mask):
if prob < xover_p_value:
genotype.append(p1['genotype'][index][:])
else:
genotype.append(p2['genotype'][index][:])
mapping_values = [0] * gen_size
# compute nem individual
_, tree_depth = grammar.mapping(genotype, mapping_values)
return {'genotype': genotype, 'fitness': None, 'mapping_values': mapping_values, 'tree_depth': tree_depth}
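# Added note (hedged): this is a per-gene uniform crossover - each gene list is
# taken from p1 when the random mask value is below 0.5 and from p2 otherwise;
# the offspring is then re-mapped through the grammar to recover its tree depth,
# and its fitness is reset to None so it will be re-evaluated.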
|
StarcoderdataPython
|
4826683
|
<filename>tests/test_field.py
from hcipy import *
import numpy as np
import copy
def test_field_dot():
grid = make_pupil_grid(2)
a = np.random.randn(3, grid.size)
A = np.random.randn(3, 3, grid.size)
a = Field(a, grid)
A = Field(A, grid)
b = field_dot(A, a)
bb = np.array([A[...,i].dot(a[...,i]) for i in range(grid.size)]).T
assert np.allclose(b, bb)
b = field_dot(a, a)
bb = np.array([a[...,i].dot(a[...,i]) for i in range(grid.size)]).T
assert np.allclose(b, bb)
B = field_dot(A, A)
BB = np.empty_like(B)
for i in range(grid.size):
BB[...,i] = A[...,i].dot(A[...,i])
assert np.allclose(B, BB)
b = field_dot(a, a)
bb = np.array([a[...,i].dot(a[...,i]) for i in range(grid.size)])
assert np.allclose(b, bb)
n = np.random.randn(3)
b = field_dot(A, n)
bb = np.array([A[...,i].dot(n) for i in range(grid.size)]).T
assert np.allclose(b, bb)
b = field_dot(n, A)
bb = np.array([n.dot(A[...,i]) for i in range(grid.size)]).T
assert np.allclose(b, bb)
N = np.random.randn(3,3)
B = field_dot(A, N)
BB = np.empty_like(B)
for i in range(grid.size):
BB[...,i] = A[...,i].dot(N)
assert np.allclose(B, BB)
def test_field_trace():
grid = make_pupil_grid(2)
A = Field(np.random.randn(3,3,grid.size), grid)
B = field_trace(A)
BB = np.array([np.trace(A[...,i]) for i in range(grid.size)])
assert np.allclose(B, BB)
def test_field_inv():
grid = make_pupil_grid(2)
A = Field(np.random.randn(3,3,grid.size), grid)
B = field_inv(A)
BB = np.empty_like(B)
for i in range(grid.size):
BB[...,i] = np.linalg.inv(A[...,i])
assert np.allclose(B, BB)
def test_field_inverse_tikhonov():
grid = make_pupil_grid(2)
A = Field(np.random.randn(3,3,grid.size), grid)
for reg in [1e-1, 1e-3, 1e-6]:
B = field_inverse_tikhonov(A, reg)
BB = np.empty_like(B)
for i in range(grid.size):
BB[...,i] = inverse_tikhonov(A[...,i], reg)
assert np.allclose(B, BB)
def test_field_svd():
grid = make_pupil_grid(2)
A = Field(np.random.randn(5,10,grid.size), grid)
U, S, Vh = field_svd(A)
u, s, vh = field_svd(A, False)
for i in range(grid.size):
svd = np.linalg.svd(A[...,i])
assert np.allclose(U[...,i], svd[0])
assert np.allclose(S[...,i], svd[1])
assert np.allclose(Vh[...,i], svd[2])
svd2 = np.linalg.svd(A[...,i], full_matrices=False)
assert np.allclose(u[...,i], svd2[0])
assert np.allclose(s[...,i], svd2[1])
assert np.allclose(vh[...,i], svd2[2])
def test_grid_hashing():
grid1 = make_pupil_grid(128)
grid2 = CartesianGrid(SeparatedCoords(copy.deepcopy(grid1.separated_coords)))
assert hash(grid1) != hash(grid2)
grid3 = CartesianGrid(UnstructuredCoords(copy.deepcopy(grid1.coords)))
assert hash(grid1) != hash(grid3)
grid4 = make_pupil_grid(128)
assert hash(grid1) == hash(grid4)
grid5 = PolarGrid(grid1.coords)
assert hash(grid1) != hash(grid5)
grid6 = CartesianGrid(copy.deepcopy(grid1.coords))
assert hash(grid1) == hash(grid6)
grid7 = grid1.scaled(2)
assert hash(grid1) != hash(grid7)
grid8 = grid1.scaled(2)
assert hash(grid1) != hash(grid8)
assert hash(grid7) == hash(grid8)
grid9 = make_pupil_grid(256)
assert hash(grid1) != hash(grid9)
|
StarcoderdataPython
|
222098
|
# coding: utf-8
#
# Copyright (c) 2020-2021 Hopenly srl.
#
# This file is part of Ilyde.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from apis_server.test import BaseTestCase
class TestExperimentsController(BaseTestCase):
"""ExperimentsController integration test stubs"""
def test_fetch_experiment_logs(self):
"""Test case for fetch_experiment_logs
Fetch logs of a given experiment.
"""
query_string = [('id', 'id_example')]
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer special-key',
}
response = self.client.open(
'/api/v1/experiments/logs',
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_experiment_artifacts(self):
"""Test case for get_experiment_artifacts
        Get artifacts of a succeeded experiment.
"""
query_string = [('id', 'id_example')]
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer special-key',
}
response = self.client.open(
'/api/v1/experiments/artifacts',
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_experiment_results(self):
"""Test case for get_experiment_results
        Get results of a succeeded experiment.
"""
query_string = [('id', 'id_example')]
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer special-key',
}
response = self.client.open(
'/api/v1/experiments/results',
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_retrieve_experiment(self):
"""Test case for retrieve_experiment
Retrieve an experiment
"""
query_string = [('id', 'id_example')]
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer special-key',
}
response = self.client.open(
'/api/v1/experiments/get',
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_state_experiment(self):
"""Test case for state_experiment
        Get state of an experiment.
"""
query_string = [('id', 'id_example')]
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer special-key',
}
response = self.client.open(
'/api/v1/experiments/state',
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_stop_experiment(self):
"""Test case for stop_experiment
        Stop a running experiment.
"""
query_string = [('id', 'id_example')]
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer special-key',
}
response = self.client.open(
'/api/v1/experiments/stop',
method='POST',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_submit_experiment(self):
"""Test case for submit_experiment
Submit an experiment
"""
experiment_serializer = {}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer special-key',
}
response = self.client.open(
'/api/v1/experiments/submit',
method='POST',
headers=headers,
data=json.dumps(experiment_serializer),
content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4883272
|
from unittest import TestCase
from func_prog.pipe import (
func_pipe, pipe_class,
chain_pipe, map_pipe,
filter_pipe, reduce_pipe
)
def add_1(value):
return value + 1
def add_tuple_1(values):
return tuple([v + 1 for v in values])
def sum_all(x, y, z):
return x + y + z
class TestFuncPipe(TestCase):
def test_func_pipe_for_single_input(self):
test_pipe = func_pipe(add_1)
self.assertEqual(1 >> test_pipe, 2)
self.assertEqual(1 | test_pipe, 2)
def test_func_pipe_for_tuple_input(self):
test_pipe = func_pipe(add_tuple_1)
self.assertEqual((1, 2) | test_pipe, (2, 3))
def test_func_pipe_partial(self):
result = (1, 2, 3) | (func_pipe(map).partial(lambda x: x+1) | tuple)
self.assertEqual(result, (2, 3, 4))
def test_unpacked_pipe(self):
# unpack a dict
result = {'y': 2, 'x': 1, 'z': 3} >> func_pipe(sum_all)
self.assertEqual(result, 6)
# unpack a tuple
result = (1, 2, 3) >> func_pipe(sum_all)
self.assertEqual(result, 6)
def test_unpacked_and_partial_pipe(self):
result = {'y': 2, 'x': 1} >> func_pipe(sum_all).partial(z=3)
self.assertEqual(result, 6)
def test_map_pipe(self):
result = (1, 2) | (map_pipe(lambda x: x+1) | tuple)
self.assertEqual(result, (2, 3))
def test_filter_pipe(self):
result = (1, 2, 3, 4) | (filter_pipe(lambda x: x % 2 == 0) | tuple)
self.assertEqual(result, (2, 4))
def test_reduce_pipe(self):
result = (1, 2, 3, 4) | reduce_pipe(lambda x, y: x + y)
self.assertEqual(result, 10)
class ExampleClassWithMethodPipe(object):
@func_pipe
def add_1(self, x, y):
return x + 1, y + 1
@func_pipe
def add_2(self, x, y):
return x + 2, y + 2
@func_pipe
def add_3(self, x, y):
return x + y + 3
@func_pipe
def add_4(self, x):
return x + 4
def straight_pipe(self):
return (1, 2) >> self.add_1 >> self.add_2 >> self.add_3 >> self.add_4
def nested_pipe(self):
return (1, 2) >> (self.add_1 >> self.add_2) >> self.add_3 >> self.add_4
def pipe_call(self):
return (self.add_1 >> self.add_2)(1, 2)
def partial_pipe(self):
return 2 | self.add_1.partial(1)
class TestMethodPipe(TestCase):
def test_straight_pipe(self):
test_obj = ExampleClassWithMethodPipe()
self.assertEqual(test_obj.straight_pipe(), 16)
def test_nested_pipe(self):
test_obj = ExampleClassWithMethodPipe()
self.assertEqual(test_obj.nested_pipe(), 16)
def test_pipe_call(self):
test_obj = ExampleClassWithMethodPipe()
self.assertEqual(test_obj.add_1(1, 2), (2, 3))
self.assertEqual(test_obj.pipe_call(), (4, 5))
def test_partial_pipe(self):
test_obj = ExampleClassWithMethodPipe()
self.assertEqual(test_obj.partial_pipe(), (2, 3))
def test_new_pipes(self):
test_obj = ExampleClassWithMethodPipe()
pipe_1 = test_obj.add_1 >> test_obj.add_2
pipe_2 = test_obj.add_1 >> test_obj.add_2
self.assertEqual((1, 2) >> pipe_1, (4, 5))
self.assertEqual((1, 2) >> pipe_2, (4, 5))
self.assertNotEqual(id(test_obj.add_1), id(pipe_1))
self.assertNotEqual(id(test_obj.add_1), id(pipe_2))
self.assertNotEqual(id(pipe_1), id(pipe_2))
@pipe_class
class ExampleClassWithClassPipe(object):
def add_1(self, x, y):
return x + 1, y + 1
def add_2(self, x, y):
return x + 2, y + 2
def add_3(self, x, y):
return x + y + 3
def add_4(self, x):
return x + 4
def straight_pipe(self):
return (1, 2) >> self.add_1 >> self.add_2 >> self.add_3 >> self.add_4
def nested_pipe(self):
return (1, 2) >> (self.add_1 >> self.add_2) >> self.add_3 >> self.add_4
class TestClassPipe(TestCase):
def test_straight_pipe(self):
test_obj = ExampleClassWithClassPipe()
self.assertEqual(test_obj.straight_pipe(), 16)
def test_nested_pipe(self):
test_obj = ExampleClassWithClassPipe()
self.assertEqual(test_obj.nested_pipe(), 16)
class TestChainPipe(TestCase):
def test_chain_pipe_not_unpacked(self):
result = '1.2' | chain_pipe(float, int, str, unpacked=False)
self.assertEqual(result, '1')
def test_list_unpacked(self):
def count_args(*args):
return len(args)
result = 1.2 >> chain_pipe(str, list, count_args, unpacked=True)
self.assertEqual(result, 3)
|
StarcoderdataPython
|
1820403
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import urllib2
import threading
from datetime import datetime
from unittest import TestCase
cwd = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cwd)
if os.name == "nt":
cwd = cwd.decode("cp1251").encode("utf8")
from trassir_script_framework import BaseUtils
class TestBaseUtils(TestCase):
shot_path = os.path.join(cwd, r"files/Скриншот.jpg")
lpr_flags = {
1: ["LPR_UP"],
2: ["LPR_DOWN"],
33: ["LPR_EXT_DB_ERROR", "LPR_UP"],
34: ["LPR_DOWN", "LPR_EXT_DB_ERROR"],
536870913: ["LPR_UP"],
536870914: ["LPR_DOWN"],
536870921: ["LPR_UP", "LPR_WHITELIST"],
536870922: ["LPR_DOWN", "LPR_WHITELIST"],
536870929: ["LPR_INFO", "LPR_UP"],
536870930: ["LPR_DOWN", "LPR_INFO"],
536870945: ["LPR_EXT_DB_ERROR", "LPR_UP"],
536870946: ["LPR_DOWN", "LPR_EXT_DB_ERROR"],
536870953: ["LPR_EXT_DB_ERROR", "LPR_UP", "LPR_WHITELIST"],
536870954: ["LPR_DOWN", "LPR_EXT_DB_ERROR", "LPR_WHITELIST"],
536870961: ["LPR_EXT_DB_ERROR", "LPR_INFO", "LPR_UP"],
536870962: ["LPR_DOWN", "LPR_EXT_DB_ERROR", "LPR_INFO"],
536871009: ["LPR_EXT_DB_ERROR", "LPR_UP", "LPR_CORRECTED"],
}
def test_do_nothing(self):
self.assertIs(BaseUtils.do_nothing(), True)
self.assertIs(BaseUtils.do_nothing("123", 12345), True)
self.assertIs(BaseUtils.do_nothing("123", test="test"), True)
def test_run_as_thread(self):
@BaseUtils.run_as_thread
def thread_func():
time.sleep(0.1)
t = thread_func()
self.assertIsInstance(t, threading.Thread)
self.assertIs(t.daemon, True)
def test_catch_request_exceptions(self):
@BaseUtils.catch_request_exceptions
def catch_http_error(url):
raise urllib2.HTTPError(url, 400, "Bad Request", None, None)
self.assertEqual((400, "HTTPError: 400"), catch_http_error("https://dssl.ru"))
@BaseUtils.catch_request_exceptions
def catch_url_error(reason):
raise urllib2.URLError(reason)
self.assertEqual(
("getaddrinfo failed", "URLError: getaddrinfo failed"),
catch_url_error("getaddrinfo failed"),
)
def test_win_encode_path(self):
file_path = self.shot_path
print(file_path)
if os.name == "nt":
self.assertEqual(file_path.decode("utf8"), BaseUtils.win_encode_path(file_path))
else:
            self.assertEqual(file_path, BaseUtils.win_encode_path(file_path))
def test_is_file_exists(self):
self.assertEqual(True, os.path.isfile(BaseUtils.win_encode_path(self.shot_path)))
self.assertEqual(False, os.path.isfile("fake_file.jpeg"))
def test_is_folder_exists(self):
self.assertRaises(IOError, BaseUtils.is_folder_exists, "fake folder")
BaseUtils.is_folder_exists(BaseUtils.win_encode_path(cwd))
def test_is_template_exists(self):
self.assertEqual(True, BaseUtils.is_template_exists("name"))
self.assertEqual(False, BaseUtils.is_template_exists("fakeName"))
def test_cat(self):
self.assertRaises(TypeError, BaseUtils.cat, "test.avi")
def test_to_json(self):
dt_now = datetime.now()
self.assertEqual('"%s"' % dt_now.isoformat(), BaseUtils.to_json(dt_now))
def test_ts_to_dt_and_dt_to_ts(self):
dt_now = datetime.now()
ts = BaseUtils.dt_to_ts(dt_now)
dt = BaseUtils.ts_to_dt(ts)
self.assertEqual(dt_now, dt)
def test_lpr_flags_decode(self):
for flags_int, flags_decoded in self.lpr_flags.iteritems():
self.assertEqual(flags_decoded, BaseUtils.lpr_flags_decode(flags_int))
def test_event_type_encode(self):
self.assertEqual(1745631458, BaseUtils.event_type_encode("Border %1 A-B Crossing"))
self.assertEqual(1838034845, BaseUtils.event_type_encode("Object Left the Zone"))
self.assertEqual(-2095846277, BaseUtils.event_type_encode("Fire Detected"))
def test_event_type_decode(self):
self.assertEqual("Border %1 A-B Crossing", BaseUtils.event_type_decode(1745631458))
self.assertEqual("Object Left the Zone", BaseUtils.event_type_decode(1838034845))
self.assertEqual("Fire Detected", BaseUtils.event_type_decode(-2095846277))
def test_image_to_base64(self):
self.assertEqual(True, BaseUtils.image_to_base64(self.shot_path).startswith("/9j/4AAQSkZJRgABAQAA"))
def test_base64_to_html_img(self):
base64_image = BaseUtils.image_to_base64(self.shot_path)
self.assertEqual(True, BaseUtils.base64_to_html_img(base64_image).startswith("""<img src="data:image"""))
def test_save_pkl(self):
data = {"key": "value"}
BaseUtils.save_pkl("tests/data.pkl", data)
self.assertEqual(True, BaseUtils.is_file_exists("tests/data.pkl"))
def test_load_pkl(self):
data = BaseUtils.load_pkl("tests/data.pkl")
self.assertEqual({"key": "value"}, data)
self.assertEqual([], BaseUtils.load_pkl("fake_file.pkl", list))
def test_get_object(self):
self.assertEqual("DemoDevice", BaseUtils.get_object("iGt0xqub").name)
self.assertEqual(None, BaseUtils.get_object("fake"))
def test_get_object_name_by_guid(self):
self.assertEqual("DemoTemplate", BaseUtils.get_object_name_by_guid("JbNSVnx0"))
def test_get_full_guid(self):
self.assertEqual("YdxkePGP_FHqSOje4", BaseUtils.get_full_guid("YdxkePGP"))
def test_get_operator_gui(self):
self.assertEqual("Operator Works", BaseUtils.get_operator_gui().state("sleeping"))
def test_get_server_guid(self):
self.assertEqual("FHqSOje4", BaseUtils.get_server_guid())
def test_get_script_name(self):
self.assertEqual("DemoScript", BaseUtils.get_script_name())
def test_get_screenshot_folder(self):
self.assertEqual(".", BaseUtils.get_screenshot_folder())
# def test_get_logger(self):
# self.fail()
# def test_set_script_name(self):
# self.fail()
|
StarcoderdataPython
|
4868525
|
<reponame>usgs/geomag-algorithms<gh_stars>10-100
#! /usr/bin/env python
from os import path
import sys
script_dir = path.dirname(path.abspath(__file__))
# ensure geomag is on the path before importing
try:
    import geomagio  # noqa (ignores this line for lint purposes.)
except ImportError:
    sys.path.append(path.normpath(path.join(script_dir, "..")))
import geomagio.iaga2002 as iaga2002
from obspy.core.utcdatetime import UTCDateTime
def main():
"""Example loading IAGA2002 test data from a directory."""
iaga_dir = path.normpath(path.join(script_dir, "../etc/iaga2002"))
factory = iaga2002.IAGA2002Factory(
"file://"
+ iaga_dir
+ "/%(OBS)s/%(interval)s%(type)s/%(obs)s%(ymd)s%(t)s%(i)s.%(i)s",
observatory="BOU",
channels=("H", "D", "Z", "F"),
interval="minute",
type="variation",
)
timeseries = factory.get_timeseries(
UTCDateTime("2014-11-01"), UTCDateTime("2014-11-02")
)
print(timeseries)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|